"""Plots GridRad domains.
Specifically, plots number of convective days with GridRad data at each grid
point.
"""
import os.path
import argparse
import numpy
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot
from mpl_toolkits.basemap import Basemap
from gewittergefahr.gg_io import gridrad_io
from gewittergefahr.gg_utils import grids
from gewittergefahr.gg_utils import projections
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import time_periods
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.plotting import plotting_utils
TOLERANCE = 1e-6
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
TIME_INTERVAL_SEC = 300
OVERALL_MIN_LATITUDE_DEG = 20.
OVERALL_MAX_LATITUDE_DEG = 55.
OVERALL_MIN_LONGITUDE_DEG = 230.
OVERALL_MAX_LONGITUDE_DEG = 300.
LAMBERT_CONFORMAL_STRING = 'lcc'
NUM_PARALLELS = 8
NUM_MERIDIANS = 6
RESOLUTION_STRING = 'l'
BORDER_COLOUR = numpy.full(3, 0.)
FIGURE_WIDTH_INCHES = 15
FIGURE_HEIGHT_INCHES = 15
FIGURE_RESOLUTION_DPI = 300
INPUT_DIR_ARG_NAME = 'input_gridrad_dir_name'
FIRST_DATE_ARG_NAME = 'first_spc_date_string'
LAST_DATE_ARG_NAME = 'last_spc_date_string'
COLOUR_MAP_ARG_NAME = 'colour_map_name'
GRID_SPACING_ARG_NAME = 'grid_spacing_metres'
OUTPUT_FILE_ARG_NAME = 'output_file_name'
INPUT_DIR_HELP_STRING = (
'Name of top-level input directory. GridRad files therein will be found by'
' `gridrad_io.find_file` and read by '
'`gridrad_io.read_field_from_full_grid_file`.')
SPC_DATE_HELP_STRING = (
'SPC date or convective day (format "yyyymmdd"). This script will look for'
' GridRad files in the period `{0:s}`...`{1:s}`.'
).format(FIRST_DATE_ARG_NAME, LAST_DATE_ARG_NAME)
COLOUR_MAP_HELP_STRING = (
'Name of colour scheme for gridded plot (must be accepted by '
'`pyplot.get_cmap`).')
GRID_SPACING_HELP_STRING = 'Spacing (metres) of Lambert conformal grid.'
OUTPUT_FILE_HELP_STRING = 'Path to output file. Figure will be saved here.'
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + INPUT_DIR_ARG_NAME, type=str, required=True,
help=INPUT_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + FIRST_DATE_ARG_NAME, type=str, required=True,
help=SPC_DATE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + LAST_DATE_ARG_NAME, type=str, required=True,
help=SPC_DATE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + COLOUR_MAP_ARG_NAME, type=str, required=False, default='YlOrRd',
help=COLOUR_MAP_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + GRID_SPACING_ARG_NAME, type=float, required=False, default=1e5,
help=GRID_SPACING_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_FILE_ARG_NAME, type=str, required=True,
help=OUTPUT_FILE_HELP_STRING)
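# Example invocation (script and directory names are hypothetical; colour map and grid
# spacing fall back to their defaults):
# python plot_gridrad_domains.py --input_gridrad_dir_name=/data/gridrad \
#     --first_spc_date_string=20110401 --last_spc_date_string=20110430 \
#     --output_file_name=gridrad_domains.jpg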
def _get_domain_one_file(gridrad_file_name):
"""Returns spatial domain for one file.
:param gridrad_file_name: Path to input file.
:return: domain_limits_deg: length-4 numpy array with
[min latitude, max latitude, min longitude, max longitude].
Latitudes are in deg N, and longitudes are in deg E.
"""
print('Reading metadata from: "{0:s}"...'.format(gridrad_file_name))
metadata_dict = gridrad_io.read_metadata_from_full_grid_file(
gridrad_file_name)
max_latitude_deg = metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN]
min_longitude_deg = metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN]
latitude_spacing_deg = metadata_dict[radar_utils.LAT_SPACING_COLUMN]
longitude_spacing_deg = metadata_dict[radar_utils.LNG_SPACING_COLUMN]
num_rows = metadata_dict[radar_utils.NUM_LAT_COLUMN]
num_columns = metadata_dict[radar_utils.NUM_LNG_COLUMN]
min_latitude_deg = max_latitude_deg - (num_rows - 1) * latitude_spacing_deg
max_longitude_deg = min_longitude_deg + (
(num_columns - 1) * longitude_spacing_deg
)
return numpy.array([
min_latitude_deg, max_latitude_deg, min_longitude_deg, max_longitude_deg
])
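# Example (hypothetical values): a file whose grid spans 24-48 deg N and 233-288 deg E
# would yield numpy.array([24., 48., 233., 288.]).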
def _get_lcc_params(projection_object):
"""Finds parameters for LCC (Lambert conformal conic) projection.
:param projection_object: Instance of `pyproj.Proj`.
:return: standard_latitudes_deg: length-2 numpy array of standard latitudes
(deg N).
:return: central_longitude_deg: Central longitude (deg E).
:raises: ValueError: if projection is not LCC.
"""
projection_string = projection_object.srs
words = projection_string.split()
property_names = [w.split('=')[0][1:] for w in words]
property_values = [w.split('=')[1] for w in words]
projection_dict = dict(list(
zip(property_names, property_values)
))
if projection_dict['proj'] != LAMBERT_CONFORMAL_STRING:
error_string = 'Grid projection should be "{0:s}", not "{1:s}".'.format(
LAMBERT_CONFORMAL_STRING, projection_dict['proj']
)
raise ValueError(error_string)
central_longitude_deg = float(projection_dict['lon_0'])
standard_latitudes_deg = numpy.array([
float(projection_dict['lat_1']), float(projection_dict['lat_2'])
])
return standard_latitudes_deg, central_longitude_deg
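# Example: a pyproj srs string such as "+proj=lcc +lat_1=25 +lat_2=25 +lon_0=265 ..."
# is parsed above into {'proj': 'lcc', 'lat_1': '25', 'lat_2': '25', 'lon_0': '265', ...},
# giving standard latitudes of 25 deg N and a central longitude of 265 deg E.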
def _get_basemap(grid_metadata_dict):
"""Creates basemap.
M = number of rows in grid
    N = number of columns in grid
:param grid_metadata_dict: Dictionary created by
`grids.create_equidistant_grid`.
:return: basemap_object: Basemap handle (instance of
`mpl_toolkits.basemap.Basemap`).
:return: basemap_x_matrix_metres: M-by-N numpy array of x-coordinates under
Basemap projection (different than pyproj projection).
:return: basemap_y_matrix_metres: Same but for y-coordinates.
"""
x_matrix_metres, y_matrix_metres = grids.xy_vectors_to_matrices(
x_unique_metres=grid_metadata_dict[grids.X_COORDS_KEY],
y_unique_metres=grid_metadata_dict[grids.Y_COORDS_KEY]
)
projection_object = grid_metadata_dict[grids.PROJECTION_KEY]
latitude_matrix_deg, longitude_matrix_deg = (
projections.project_xy_to_latlng(
x_coords_metres=x_matrix_metres, y_coords_metres=y_matrix_metres,
projection_object=projection_object)
)
standard_latitudes_deg, central_longitude_deg = _get_lcc_params(
projection_object)
basemap_object = Basemap(
projection='lcc', lat_1=standard_latitudes_deg[0],
lat_2=standard_latitudes_deg[1], lon_0=central_longitude_deg,
rsphere=projections.DEFAULT_EARTH_RADIUS_METRES,
ellps=projections.SPHERE_NAME, resolution=RESOLUTION_STRING,
llcrnrx=x_matrix_metres[0, 0], llcrnry=y_matrix_metres[0, 0],
urcrnrx=x_matrix_metres[-1, -1], urcrnry=y_matrix_metres[-1, -1]
)
basemap_x_matrix_metres, basemap_y_matrix_metres = basemap_object(
longitude_matrix_deg, latitude_matrix_deg)
return basemap_object, basemap_x_matrix_metres, basemap_y_matrix_metres
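# Note (assumption about Basemap behaviour): Basemap expresses x-y coordinates relative
# to the lower-left corner of the map, so the values returned here are offset from the
# pyproj x-y coordinates even though both use the same LCC projection.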
def _plot_data(num_days_matrix, grid_metadata_dict, colour_map_object):
"""Plots data.
M = number of rows in grid
N = number of columns in grid
:param num_days_matrix: M-by-N numpy array with number of convective days
for which grid cell is in domain.
:param grid_metadata_dict: Dictionary created by
`grids.create_equidistant_grid`.
:param colour_map_object: See documentation at top of file.
:return: figure_object: Figure handle (instance of
`matplotlib.figure.Figure`).
:return: axes_object: Axes handle (instance of
`matplotlib.axes._subplots.AxesSubplot`).
"""
figure_object, axes_object = pyplot.subplots(
1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)
)
basemap_object, basemap_x_matrix_metres, basemap_y_matrix_metres = (
_get_basemap(grid_metadata_dict)
)
num_grid_rows = num_days_matrix.shape[0]
num_grid_columns = num_days_matrix.shape[1]
x_spacing_metres = (
(basemap_x_matrix_metres[0, -1] - basemap_x_matrix_metres[0, 0]) /
(num_grid_columns - 1)
)
y_spacing_metres = (
(basemap_y_matrix_metres[-1, 0] - basemap_y_matrix_metres[0, 0]) /
(num_grid_rows - 1)
)
matrix_to_plot, edge_x_coords_metres, edge_y_coords_metres = (
grids.xy_field_grid_points_to_edges(
field_matrix=num_days_matrix,
x_min_metres=basemap_x_matrix_metres[0, 0],
y_min_metres=basemap_y_matrix_metres[0, 0],
x_spacing_metres=x_spacing_metres,
y_spacing_metres=y_spacing_metres)
)
matrix_to_plot = numpy.ma.masked_where(matrix_to_plot == 0, matrix_to_plot)
plotting_utils.plot_coastlines(
basemap_object=basemap_object, axes_object=axes_object,
line_colour=BORDER_COLOUR)
plotting_utils.plot_countries(
basemap_object=basemap_object, axes_object=axes_object,
line_colour=BORDER_COLOUR)
plotting_utils.plot_states_and_provinces(
basemap_object=basemap_object, axes_object=axes_object,
line_colour=BORDER_COLOUR)
plotting_utils.plot_parallels(
basemap_object=basemap_object, axes_object=axes_object,
num_parallels=NUM_PARALLELS)
plotting_utils.plot_meridians(
basemap_object=basemap_object, axes_object=axes_object,
num_meridians=NUM_MERIDIANS)
basemap_object.pcolormesh(
edge_x_coords_metres, edge_y_coords_metres, matrix_to_plot,
cmap=colour_map_object, vmin=1, vmax=numpy.max(num_days_matrix),
shading='flat', edgecolors='None', axes=axes_object, zorder=-1e12)
colour_bar_object = plotting_utils.plot_linear_colour_bar(
axes_object_or_matrix=axes_object, data_matrix=num_days_matrix,
colour_map_object=colour_map_object, min_value=1,
max_value=numpy.max(num_days_matrix), orientation_string='horizontal',
extend_min=False, extend_max=False, padding=0.05)
tick_values = colour_bar_object.get_ticks()
tick_strings = ['{0:d}'.format(int(numpy.round(v))) for v in tick_values]
colour_bar_object.set_ticks(tick_values)
colour_bar_object.set_ticklabels(tick_strings)
axes_object.set_title('Number of convective days by grid cell')
return figure_object, axes_object
def _run(top_gridrad_dir_name, first_spc_date_string, last_spc_date_string,
colour_map_name, grid_spacing_metres, output_file_name):
"""Plots GridRad domains.
This is effectively the main method.
:param top_gridrad_dir_name: See documentation at top of file.
:param first_spc_date_string: Same.
:param last_spc_date_string: Same.
:param colour_map_name: Same.
:param grid_spacing_metres: Same.
:param output_file_name: Same.
"""
colour_map_object = pyplot.get_cmap(colour_map_name)
file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)
first_time_unix_sec = time_conversion.get_start_of_spc_date(
first_spc_date_string)
last_time_unix_sec = time_conversion.get_end_of_spc_date(
last_spc_date_string)
valid_times_unix_sec = time_periods.range_and_interval_to_list(
start_time_unix_sec=first_time_unix_sec,
end_time_unix_sec=last_time_unix_sec,
time_interval_sec=TIME_INTERVAL_SEC, include_endpoint=True)
valid_spc_date_strings = [
time_conversion.time_to_spc_date_string(t) for t in valid_times_unix_sec
]
domain_min_latitudes_deg = []
domain_max_latitudes_deg = []
domain_min_longitudes_deg = []
domain_max_longitudes_deg = []
prev_domain_limits_deg = numpy.full(4, numpy.nan)
prev_spc_date_string = 'foo'
num_times = len(valid_times_unix_sec)
for i in range(num_times):
this_gridrad_file_name = gridrad_io.find_file(
unix_time_sec=valid_times_unix_sec[i],
top_directory_name=top_gridrad_dir_name,
raise_error_if_missing=False)
if not os.path.isfile(this_gridrad_file_name):
continue
these_domain_limits_deg = _get_domain_one_file(this_gridrad_file_name)
same_domain = (
valid_spc_date_strings[i] == prev_spc_date_string and
numpy.allclose(
these_domain_limits_deg, prev_domain_limits_deg, TOLERANCE
)
)
if same_domain:
continue
prev_domain_limits_deg = these_domain_limits_deg + 0.
prev_spc_date_string = valid_spc_date_strings[i]
domain_min_latitudes_deg.append(these_domain_limits_deg[0])
domain_max_latitudes_deg.append(these_domain_limits_deg[1])
domain_min_longitudes_deg.append(these_domain_limits_deg[2])
domain_max_longitudes_deg.append(these_domain_limits_deg[3])
print(SEPARATOR_STRING)
domain_min_latitudes_deg = numpy.array(domain_min_latitudes_deg)
domain_max_latitudes_deg = numpy.array(domain_max_latitudes_deg)
domain_min_longitudes_deg = numpy.array(domain_min_longitudes_deg)
domain_max_longitudes_deg = numpy.array(domain_max_longitudes_deg)
num_domains = len(domain_min_latitudes_deg)
grid_metadata_dict = grids.create_equidistant_grid(
min_latitude_deg=OVERALL_MIN_LATITUDE_DEG,
max_latitude_deg=OVERALL_MAX_LATITUDE_DEG,
min_longitude_deg=OVERALL_MIN_LONGITUDE_DEG,
max_longitude_deg=OVERALL_MAX_LONGITUDE_DEG,
x_spacing_metres=grid_spacing_metres,
y_spacing_metres=grid_spacing_metres, azimuthal=False)
unique_x_coords_metres = grid_metadata_dict[grids.X_COORDS_KEY]
unique_y_coords_metres = grid_metadata_dict[grids.Y_COORDS_KEY]
projection_object = grid_metadata_dict[grids.PROJECTION_KEY]
x_coord_matrix_metres, y_coord_matrix_metres = grids.xy_vectors_to_matrices(
x_unique_metres=unique_x_coords_metres,
y_unique_metres=unique_y_coords_metres)
latitude_matrix_deg, longitude_matrix_deg = (
projections.project_xy_to_latlng(
x_coords_metres=x_coord_matrix_metres,
y_coords_metres=y_coord_matrix_metres,
projection_object=projection_object)
)
num_grid_rows = latitude_matrix_deg.shape[0]
num_grid_columns = latitude_matrix_deg.shape[1]
num_days_matrix = numpy.full((num_grid_rows, num_grid_columns), 0)
for i in range(num_domains):
if numpy.mod(i, 10) == 0:
print('Have found grid points in {0:d} of {1:d} domains...'.format(
i, num_domains
))
this_lat_flag_matrix = numpy.logical_and(
latitude_matrix_deg >= domain_min_latitudes_deg[i],
latitude_matrix_deg <= domain_max_latitudes_deg[i]
)
this_lng_flag_matrix = numpy.logical_and(
longitude_matrix_deg >= domain_min_longitudes_deg[i],
longitude_matrix_deg <= domain_max_longitudes_deg[i]
)
num_days_matrix += numpy.logical_and(
this_lat_flag_matrix, this_lng_flag_matrix
).astype(int)
print(SEPARATOR_STRING)
figure_object, axes_object = _plot_data(
num_days_matrix=num_days_matrix, grid_metadata_dict=grid_metadata_dict,
colour_map_object=colour_map_object)
plotting_utils.label_axes(axes_object=axes_object, label_string='(c)')
print('Saving figure to: "{0:s}"...'.format(output_file_name))
figure_object.savefig(
output_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
bbox_inches='tight')
pyplot.close(figure_object)
if __name__ == '__main__':
INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
_run(
top_gridrad_dir_name=getattr(INPUT_ARG_OBJECT, INPUT_DIR_ARG_NAME),
first_spc_date_string=getattr(INPUT_ARG_OBJECT, FIRST_DATE_ARG_NAME),
last_spc_date_string=getattr(INPUT_ARG_OBJECT, LAST_DATE_ARG_NAME),
colour_map_name=getattr(INPUT_ARG_OBJECT, COLOUR_MAP_ARG_NAME),
grid_spacing_metres=getattr(INPUT_ARG_OBJECT, GRID_SPACING_ARG_NAME),
output_file_name=getattr(INPUT_ARG_OBJECT, OUTPUT_FILE_ARG_NAME)
)
|
[
"gewittergefahr.gg_utils.time_periods.range_and_interval_to_list",
"numpy.array",
"gewittergefahr.gg_utils.grids.create_equidistant_grid",
"gewittergefahr.gg_utils.projections.project_xy_to_latlng",
"gewittergefahr.gg_utils.grids.xy_vectors_to_matrices",
"numpy.mod",
"gewittergefahr.gg_utils.time_conversion.time_to_spc_date_string",
"gewittergefahr.plotting.plotting_utils.plot_coastlines",
"argparse.ArgumentParser",
"gewittergefahr.gg_utils.grids.xy_field_grid_points_to_edges",
"gewittergefahr.gg_io.gridrad_io.find_file",
"numpy.ma.masked_where",
"matplotlib.pyplot.close",
"gewittergefahr.plotting.plotting_utils.plot_meridians",
"numpy.max",
"gewittergefahr.gg_io.gridrad_io.read_metadata_from_full_grid_file",
"gewittergefahr.plotting.plotting_utils.plot_parallels",
"numpy.round",
"numpy.allclose",
"matplotlib.use",
"gewittergefahr.plotting.plotting_utils.plot_states_and_provinces",
"gewittergefahr.gg_utils.time_conversion.get_start_of_spc_date",
"matplotlib.pyplot.get_cmap",
"numpy.logical_and",
"gewittergefahr.gg_utils.file_system_utils.mkdir_recursive_if_necessary",
"mpl_toolkits.basemap.Basemap",
"gewittergefahr.gg_utils.time_conversion.get_end_of_spc_date",
"numpy.full",
"gewittergefahr.plotting.plotting_utils.plot_countries",
"matplotlib.pyplot.subplots",
"gewittergefahr.plotting.plotting_utils.label_axes"
] |
import random
import matplotlib.pyplot as plt
import gym
# from agents.actor_critic_agents.A2C import A2C
# from agents.actor_critic_agents.A3C import A3C
# from agents.actor_critic_agents.SAC import SAC
from agents.actor_critic_agents.SAC_Discrete import SAC_Discrete
# from agents.DQN_agents.DQN_HER import DQN_HER
# from agents.DQN_agents.DDQN import DDQN
# from agents.DQN_agents.DDQN_With_Prioritised_Experience_Replay import DDQN_With_Prioritised_Experience_Replay
# from agents.DQN_agents.DQN_With_Fixed_Q_Targets import DQN_With_Fixed_Q_Targets
# from agents.actor_critic_agents.DDPG import DDPG
from agents.actor_critic_agents.DDPG_HER import DDPG_HER
# from environments.Bit_Flipping_Environment import Bit_Flipping_Environment
from environments.Cache_server import Cache_server
# from agents.policy_gradient_agents.PPO import PPO
# from environments.Four_Rooms_Environment import Four_Rooms_Environment
# from agents.hierarchical_agents.SNN_HRL import SNN_HRL
# from agents.actor_critic_agents.TD3 import TD3
from agents.Trainer import Trainer
from utilities.data_structures.Config import Config
# from agents.DQN_agents.DQN import DQN
import numpy as np
import torch
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
config = Config()
config.seed = 1
# config.environment = Bit_Flipping_Environment(4)
config.environment = Cache_server()
config.num_episodes_to_run = 100
config.file_to_save_data_results = None
config.file_to_save_results_graph = None
config.visualise_individual_results = False
config.visualise_overall_agent_results = False
config.randomise_random_seed = False
config.runs_per_agent = 1
config.use_GPU = False
config.hyperparameters = {
"Actor_Critic_Agents": {
"learning_rate": 0.0005,
"linear_hidden_units": [50, 30, 30, 30],
"final_layer_activation": ["SOFTMAX", None],
"gradient_clipping_norm": 25.0,
"discount_rate": 1,
"epsilon_decay_rate_denominator": 10.0,
"normalise_rewards": False,
"automatically_tune_entropy_hyperparameter": True,
"add_extra_noise": False,
"min_steps_before_learning": 1,
"do_evaluation_iterations": True,
"clip_rewards": False,
"Actor": {
"learning_rate": 0.001,
# "linear_hidden_units": [20, 20],
"linear_hidden_units": [50,100],
# "final_layer_activation": "TANH",
"final_layer_activation": "Softmax",
"batch_norm": False,
"tau": 0.005,
"gradient_clipping_norm": 25
},
"Critic": {
"learning_rate": 0.01,
# "linear_hidden_units": [20, 20],
"linear_hidden_units": [50,100],
"final_layer_activation": "None",
"batch_norm": False,
"buffer_size": 100000,
"tau": 0.005,
"gradient_clipping_norm": 25
},
"batch_size": 3,
"mu": 0.0, # for O-H noise
"theta": 0.15, # for O-H noise
"sigma": 0.25, # for O-H noise
"action_noise_std": 0.2, # for TD3
"action_noise_clipping_range": 0.5, # for TD3
"update_every_n_steps": 20,
"learning_updates_per_learning_session": 10,
"HER_sample_proportion": 0.8,
"exploration_worker_difference": 1.0
},
}
# def test_agent_solve_RL_cache():
AGENTS = [SAC_Discrete]
trainer = Trainer(config, AGENTS)
results = trainer.run_games_for_agents()
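# The structure of `results` depends on the Trainer implementation; below it is assumed
# that results[agent.agent_name][0][1] holds the per-episode rolling scores of the first
# (and only) run, with index [0][0] holding the raw episode scores.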
for agent in AGENTS:
agent_results = results[agent.agent_name]
agent_results = np.max(agent_results[0][1][50:])
assert agent_results >= 0.0, "Failed for {} -- score {}".format(agent.agent_name, agent_results)
plt.plot(results["SAC"][0][0])
plt.plot(results["SAC"][0][1])
plt.show()
# test_agent_solve_RL_cache()
# def test_agents_can_play_games_of_different_dimensions():
# config.num_episodes_to_run = 10
# config.hyperparameters["DQN_Agents"]["batch_size"] = 3
# AGENTS = [A2C, A3C, PPO, DDQN, DQN_With_Fixed_Q_Targets, DDQN_With_Prioritised_Experience_Replay, DQN]
# trainer = Trainer(config, AGENTS)
# config.environment = gym.make("CartPole-v0")
# results = trainer.run_games_for_agents()
# for agent in AGENTS:
# assert agent.agent_name in results.keys()
#
# AGENTS = [SAC, TD3, PPO, DDPG]
# config.environment = gym.make("MountainCarContinuous-v0")
# trainer = Trainer(config, AGENTS)
# results = trainer.run_games_for_agents()
# for agent in AGENTS:
# assert agent.agent_name in results.keys()
#
# AGENTS = [DDQN, SNN_HRL]
# config.environment = Four_Rooms_Environment(15, 15, stochastic_actions_probability=0.25,
# random_start_user_place=True, random_goal_place=False)
# trainer = Trainer(config, AGENTS)
# results = trainer.run_games_for_agents()
# for agent in AGENTS:
# assert agent.agent_name in results.keys()
|
[
"utilities.data_structures.Config.Config",
"torch.manual_seed",
"environments.Cache_server.Cache_server",
"agents.Trainer.Trainer",
"matplotlib.pyplot.plot",
"random.seed",
"numpy.max",
"numpy.random.seed",
"matplotlib.pyplot.show"
] |
import flask
from flask import Flask, url_for
from tensorflow.keras.applications.imagenet_utils import preprocess_input, decode_predictions
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np
# instantiate the Flask application object
app = Flask(__name__)
model_path = 'vgg19.h5'
# load the model
model = load_model(model_path)
# model._make_predict_function()
# preprocessing and prediction helper
def model_predict(img_path, model):
# load the image and set the size to 224,224
img = image.load_img(img_path, target_size=(224,224))
# change the image to array
x = image.img_to_array(img)
    # add a batch dimension so the array can be passed as input to the network
x = np.expand_dims(x, axis=0)
# scale the input
x = preprocess_input(x)
# make predictions
preds = model.predict(x)
return preds
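# Example usage (hypothetical image path):
#   preds = model_predict('uploads/cat.jpg', model)
#   print(decode_predictions(preds, top=3))  # top-3 ImageNet labels with probabilities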
from image import routes
|
[
"tensorflow.keras.preprocessing.image.load_img",
"flask.Flask",
"tensorflow.keras.models.load_model",
"numpy.expand_dims",
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.keras.applications.imagenet_utils.preprocess_input"
] |
#Copyright (c) 2017 <NAME>.
#Cura is released under the terms of the LGPLv3 or higher.
import gc
from UM.Job import Job
from UM.Application import Application
from UM.Mesh.MeshData import MeshData
from UM.Preferences import Preferences
from UM.View.GL.OpenGLContext import OpenGLContext
from UM.Message import Message
from UM.i18n import i18nCatalog
from UM.Logger import Logger
from UM.Math.Vector import Vector
from cura.Scene.BuildPlateDecorator import BuildPlateDecorator
from cura.Scene.CuraSceneNode import CuraSceneNode
from cura.Settings.ExtruderManager import ExtruderManager
from cura import LayerDataBuilder
from cura import LayerDataDecorator
from cura import LayerPolygon
import numpy
from time import time
from cura.Settings.ExtrudersModel import ExtrudersModel
catalog = i18nCatalog("cura")
## Return a 4-tuple with floats 0-1 representing the html color code
#
# \param color_code html color code, i.e. "#FF0000" -> red
def colorCodeToRGBA(color_code):
if color_code is None:
Logger.log("w", "Unable to convert color code, returning default")
return [0, 0, 0, 1]
return [
int(color_code[1:3], 16) / 255,
int(color_code[3:5], 16) / 255,
int(color_code[5:7], 16) / 255,
1.0]
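# Example: colorCodeToRGBA("#FF0000") returns [1.0, 0.0, 0.0, 1.0], i.e. opaque red.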
class ProcessSlicedLayersJob(Job):
def __init__(self, layers):
super().__init__()
self._layers = layers
self._scene = Application.getInstance().getController().getScene()
self._progress_message = Message(catalog.i18nc("@info:status", "Processing Layers"), 0, False, -1)
self._abort_requested = False
self._build_plate_number = None
## Aborts the processing of layers.
#
# This abort is made on a best-effort basis, meaning that the actual
# job thread will check once in a while to see whether an abort is
# requested and then stop processing by itself. There is no guarantee
# that the abort will stop the job any time soon or even at all.
def abort(self):
self._abort_requested = True
def setBuildPlate(self, new_value):
self._build_plate_number = new_value
def getBuildPlate(self):
return self._build_plate_number
def run(self):
Logger.log("d", "Processing new layer for build plate %s..." % self._build_plate_number)
start_time = time()
view = Application.getInstance().getController().getActiveView()
if view.getPluginId() == "SimulationView":
view.resetLayerData()
self._progress_message.show()
Job.yieldThread()
if self._abort_requested:
if self._progress_message:
self._progress_message.hide()
return
Application.getInstance().getController().activeViewChanged.connect(self._onActiveViewChanged)
# The no_setting_override is here because adding the SettingOverrideDecorator will trigger a reslice
new_node = CuraSceneNode(no_setting_override = True)
new_node.addDecorator(BuildPlateDecorator(self._build_plate_number))
# Force garbage collection.
# For some reason, Python has a tendency to keep the layer data
# in memory longer than needed. Forcing the GC to run here makes
# sure any old layer data is really cleaned up before adding new.
gc.collect()
mesh = MeshData()
layer_data = LayerDataBuilder.LayerDataBuilder()
layer_count = len(self._layers)
# Find the minimum layer number
# When using a raft, the raft layers are sent as layers < 0. Instead of allowing layers < 0, we
        # instead simply offset all other layers so the lowest layer is always 0. It could happen that
# the first raft layer has value -8 but there are just 4 raft (negative) layers.
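        # Worked example: four raft layers with ids -8..-5 give min_layer_number = -8 and
        # negative_layers = 4, so the raft layers map to 0..3 and model layer 0 maps to 4.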
min_layer_number = 0
negative_layers = 0
for layer in self._layers:
if layer.id < min_layer_number:
min_layer_number = layer.id
if layer.id < 0:
negative_layers += 1
current_layer = 0
for layer in self._layers:
# Negative layers are offset by the minimum layer number, but the positive layers are just
# offset by the number of negative layers so there is no layer gap between raft and model
abs_layer_number = layer.id + abs(min_layer_number) if layer.id < 0 else layer.id + negative_layers
layer_data.addLayer(abs_layer_number)
this_layer = layer_data.getLayer(abs_layer_number)
layer_data.setLayerHeight(abs_layer_number, layer.height)
layer_data.setLayerThickness(abs_layer_number, layer.thickness)
for p in range(layer.repeatedMessageCount("path_segment")):
polygon = layer.getRepeatedMessage("path_segment", p)
extruder = polygon.extruder
line_types = numpy.fromstring(polygon.line_type, dtype="u1") # Convert bytearray to numpy array
line_types = line_types.reshape((-1,1))
points = numpy.fromstring(polygon.points, dtype="f4") # Convert bytearray to numpy array
if polygon.point_type == 0: # Point2D
points = points.reshape((-1,2)) # We get a linear list of pairs that make up the points, so make numpy interpret them correctly.
else: # Point3D
points = points.reshape((-1,3))
line_widths = numpy.fromstring(polygon.line_width, dtype="f4") # Convert bytearray to numpy array
line_widths = line_widths.reshape((-1,1)) # We get a linear list of pairs that make up the points, so make numpy interpret them correctly.
line_thicknesses = numpy.fromstring(polygon.line_thickness, dtype="f4") # Convert bytearray to numpy array
line_thicknesses = line_thicknesses.reshape((-1,1)) # We get a linear list of pairs that make up the points, so make numpy interpret them correctly.
line_feedrates = numpy.fromstring(polygon.line_feedrate, dtype="f4") # Convert bytearray to numpy array
line_feedrates = line_feedrates.reshape((-1,1)) # We get a linear list of pairs that make up the points, so make numpy interpret them correctly.
# Create a new 3D-array, copy the 2D points over and insert the right height.
# This uses manual array creation + copy rather than numpy.insert since this is
# faster.
new_points = numpy.empty((len(points), 3), numpy.float32)
if polygon.point_type == 0: # Point2D
new_points[:, 0] = points[:, 0]
new_points[:, 1] = layer.height / 1000 # layer height value is in backend representation
new_points[:, 2] = -points[:, 1]
else: # Point3D
new_points[:, 0] = points[:, 0]
new_points[:, 1] = points[:, 2]
new_points[:, 2] = -points[:, 1]
this_poly = LayerPolygon.LayerPolygon(extruder, line_types, new_points, line_widths, line_thicknesses, line_feedrates)
this_poly.buildCache()
this_layer.polygons.append(this_poly)
Job.yieldThread()
Job.yieldThread()
current_layer += 1
progress = (current_layer / layer_count) * 99
# TODO: Rebuild the layer data mesh once the layer has been processed.
# This needs some work in LayerData so we can add the new layers instead of recreating the entire mesh.
if self._abort_requested:
if self._progress_message:
self._progress_message.hide()
return
if self._progress_message:
self._progress_message.setProgress(progress)
# We are done processing all the layers we got from the engine, now create a mesh out of the data
# Find out colors per extruder
global_container_stack = Application.getInstance().getGlobalContainerStack()
manager = ExtruderManager.getInstance()
extruders = list(manager.getMachineExtruders(global_container_stack.getId()))
if extruders:
material_color_map = numpy.zeros((len(extruders), 4), dtype=numpy.float32)
for extruder in extruders:
position = int(extruder.getMetaDataEntry("position", default="0")) # Get the position
try:
default_color = ExtrudersModel.defaultColors[position]
except IndexError:
default_color = "#e0e000"
color_code = extruder.material.getMetaDataEntry("color_code", default=default_color)
color = colorCodeToRGBA(color_code)
material_color_map[position, :] = color
else:
# Single extruder via global stack.
material_color_map = numpy.zeros((1, 4), dtype=numpy.float32)
color_code = global_container_stack.material.getMetaDataEntry("color_code", default="#e0e000")
color = colorCodeToRGBA(color_code)
material_color_map[0, :] = color
# We have to scale the colors for compatibility mode
if OpenGLContext.isLegacyOpenGL() or bool(Preferences.getInstance().getValue("view/force_layer_view_compatibility_mode")):
line_type_brightness = 0.5 # for compatibility mode
else:
line_type_brightness = 1.0
layer_mesh = layer_data.build(material_color_map, line_type_brightness)
if self._abort_requested:
if self._progress_message:
self._progress_message.hide()
return
# Add LayerDataDecorator to scene node to indicate that the node has layer data
decorator = LayerDataDecorator.LayerDataDecorator()
decorator.setLayerData(layer_mesh)
new_node.addDecorator(decorator)
new_node.setMeshData(mesh)
# Set build volume as parent, the build volume can move as a result of raft settings.
# It makes sense to set the build volume as parent: the print is actually printed on it.
new_node_parent = Application.getInstance().getBuildVolume()
new_node.setParent(new_node_parent) # Note: After this we can no longer abort!
settings = Application.getInstance().getGlobalContainerStack()
if not settings.getProperty("machine_center_is_zero", "value"):
new_node.setPosition(Vector(-settings.getProperty("machine_width", "value") / 2, 0.0, settings.getProperty("machine_depth", "value") / 2))
if self._progress_message:
self._progress_message.setProgress(100)
if self._progress_message:
self._progress_message.hide()
# Clear the unparsed layers. This saves us a bunch of memory if the Job does not get destroyed.
self._layers = None
Logger.log("d", "Processing layers took %s seconds", time() - start_time)
def _onActiveViewChanged(self):
if self.isRunning():
if Application.getInstance().getController().getActiveView().getPluginId() == "SimulationView":
if not self._progress_message:
self._progress_message = Message(catalog.i18nc("@info:status", "Processing Layers"), 0, False, 0, catalog.i18nc("@info:title", "Information"))
if self._progress_message.getProgress() != 100:
self._progress_message.show()
else:
if self._progress_message:
self._progress_message.hide()
|
[
"UM.Logger.Logger.log",
"UM.Preferences.Preferences.getInstance",
"cura.Scene.BuildPlateDecorator.BuildPlateDecorator",
"cura.LayerDataDecorator.LayerDataDecorator",
"cura.Scene.CuraSceneNode.CuraSceneNode",
"UM.Job.Job.yieldThread",
"cura.LayerDataBuilder.LayerDataBuilder",
"cura.LayerPolygon.LayerPolygon",
"cura.Settings.ExtruderManager.ExtruderManager.getInstance",
"UM.Mesh.MeshData.MeshData",
"UM.i18n.i18nCatalog",
"numpy.zeros",
"UM.Application.Application.getInstance",
"gc.collect",
"numpy.fromstring",
"time.time",
"UM.View.GL.OpenGLContext.OpenGLContext.isLegacyOpenGL"
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 25 16:02:58 2022
@author: erri
"""
import os
import numpy as np
import math
from morph_quantities_func_v2 import morph_quantities
import matplotlib.pyplot as plt
# SINGLE RUN NAME
run = 'q07_1'
DoD_name = 'DoD_s1-s0_filt_nozero_rst.txt'
# Step between surveys
DoD_delta = 1
# Base window length in number of columns. If window lengths are to be channel-width
# multiples, windows_length_base is 12 columns
windows_length_base = 12
window_mode = 1
'''
windows_mode:
0 = fixed windows (all the channel)
1 = expanding window
2 = floating fixed windows (WxW, Wx2W, Wx3W, ...) without overlapping
3 = floating fixed windows (WxW, Wx2W, Wx3W, ...) with overlapping
'''
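# With windows_length_base = 12 columns and px_x = 50 mm, the base window is
# 12 * 50 mm = 0.6 m long, i.e. one channel width (W = 0.6 m).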
plot_mode = 2
'''
plot_mode:
1 = only summary plot
2 = all single DoD plot
'''
# Parameters
# Survey pixel dimension
px_x = 50 # [mm]
px_y = 5 # [mm]
W = 0.6 # Width [m]
d50 = 0.001
NaN = -999
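# Each DoD cell therefore covers px_x * px_y = 50 mm x 5 mm = 250 mm^2 in plan view.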
# setup working directory and DEM's name
home_dir = os.getcwd()
# Source DoDs folder
DoDs_folder = os.path.join(home_dir, 'DoDs', 'DoD_'+run)
DoDs_name_array = [] # List the file's name of the DoDs with step of delta_step
for f in sorted(os.listdir(DoDs_folder)):
if f.endswith('_filt_nozero_rst.txt') and f.startswith('DoD_'):
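        # File names look like 'DoD_s1-s0_filt_nozero_rst.txt', so f[5] and f[8] are the
        # survey indices and their difference is the DoD step.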
delta = eval(f[5]) - eval(f[8])
if delta == DoD_delta:
DoDs_name_array = np.append(DoDs_name_array, f)
else:
pass
# Initialize overall arrays
dep_vol_w_array_all = []
sco_vol_w_array_all = []
# Loop over the DoDs with step of delta_step
for f in DoDs_name_array:
DoD_name = f
print(f)
DoD_path = os.path.join(DoDs_folder,DoD_name)
DoD_filt_nozero = np.loadtxt(DoD_path, delimiter='\t')
# DoD length
DoD_length = DoD_filt_nozero.shape[1]*px_x/1000 # DoD length [m]
dim_x = DoD_filt_nozero.shape[1]
# Initialize array
# Define total volume matrix, Deposition matrix and Scour matrix
DoD_vol = np.where(np.isnan(DoD_filt_nozero), 0, DoD_filt_nozero) # Total volume matrix
DoD_vol = np.where(DoD_vol==NaN, 0, DoD_vol)
dep_DoD = (DoD_vol>0)*DoD_vol # DoD of only deposition data
sco_DoD = (DoD_vol<0)*DoD_vol # DoD of only scour data
# Active pixel matrix:
act_px_matrix = np.where(DoD_vol!=0, 1, 0) # Active pixel matrix, both scour and deposition
act_px_matrix_dep = np.where(dep_DoD != 0, 1, 0) # Active deposition matrix
act_px_matrix_sco = np.where(sco_DoD != 0, 1, 0) # Active scour matrix
# Initialize array for each window dimension
###################################################################
# MOVING WINDOWS ANALYSIS
###################################################################
array = DoD_filt_nozero
    W = windows_length_base  # NOTE: from here on W is the window base length in columns, not the channel width in metres
mean_array_tot = []
std_array_tot= []
window_boundary = np.array([0,0])
x_data_tot=[]
tot_vol_array=[] # Tot volume
tot_vol_mean_array=[]
tot_vol_std_array=[]
sum_vol_array=[] # Sum of scour and deposition volume
dep_vol_array=[] # Deposition volume
sco_vol_array=[] # Scour volume
morph_act_area_array=[] # Total active area array
morph_act_area_dep_array=[] # Deposition active area array
morph_act_area_sco_array=[] # Active active area array
act_width_mean_array=[] # Total active width mean array
act_width_mean_dep_array=[] # Deposition active width mean array
act_width_mean_sco_array=[] # Scour active width mean array
if window_mode == 1:
# With overlapping
for w in range(1, int(math.floor(array.shape[1]/W))+1): # W*w is the dimension of every possible window
# Initialize arrays that stock data for each window position
x_data=[]
tot_vol_w_array = []
sum_vol_w_array = []
dep_vol_w_array = []
sco_vol_w_array =[]
morph_act_area_w_array = []
morph_act_area_dep_w_array = []
morph_act_area_sco_w_array = []
act_width_mean_w_array = []
act_width_mean_dep_w_array = []
act_width_mean_sco_w_array = []
act_thickness_w_array = []
act_thickness_dep_w_array = []
act_thickness_sco_w_array = []
for i in range(0, array.shape[1]+1):
if i+w*W <= array.shape[1]:
window = array[:, i:W*w+i]
boundary = np.array([i,W*w+i])
window_boundary = np.vstack((window_boundary, boundary))
x_data=np.append(x_data, w)
# Calculate morphological quantities
tot_vol, sum_vol, dep_vol, sco_vol, morph_act_area, morph_act_area_dep, morph_act_area_sco, act_width_mean, act_width_mean_dep, act_width_mean_sco, act_thickness, act_thickness_dep, act_thickness_sco = morph_quantities(window)
# Append single data to array
# For each window position the calculated parameters will be appended to _array
tot_vol_w_array=np.append(tot_vol_w_array, tot_vol)
sum_vol_w_array=np.append(sum_vol_w_array, sum_vol)
dep_vol_w_array=np.append(dep_vol_w_array, dep_vol)
sco_vol_w_array=np.append(sco_vol_w_array, sco_vol)
morph_act_area_w_array=np.append(morph_act_area_w_array, morph_act_area)
morph_act_area_dep_w_array=np.append(morph_act_area_dep_w_array, morph_act_area_dep)
morph_act_area_sco_w_array=np.append(morph_act_area_sco_w_array, morph_act_area_sco)
act_width_mean_w_array=np.append(act_width_mean_w_array, act_width_mean)
act_width_mean_dep_w_array=np.append(act_width_mean_dep_w_array, act_width_mean_dep)
act_width_mean_sco_w_array=np.append(act_width_mean_sco_w_array, act_width_mean_sco)
act_thickness_w_array=np.append(act_thickness_w_array, act_thickness)
act_thickness_dep_w_array=np.append(act_thickness_dep_w_array, act_thickness_dep)
act_thickness_sco_w_array=np.append(act_thickness_sco_w_array, act_thickness_sco)
# For each window dimension w*W,
x_data_tot=np.append(x_data_tot, np.nanmean(x_data)) # Append one value of x_data
tot_vol_mean_array=np.append(tot_vol_mean_array, np.nanmean(tot_vol_w_array)) # Append the tot_vol_array mean
tot_vol_std_array=np.append(tot_vol_std_array, np.nanstd(tot_vol_w_array)) # Append the tot_vol_array mean
# sum_vol_array=
# dep_vol_array=
# sco_vol_array=
# morph_act_area_array=
# morph_act_area_dep_array=
# morph_act_area_sco_array=
# act_width_mean_array=
# act_width_mean_dep_array=
# act_width_mean_sco_array=
# Slice window boundaries array to delete [0,0] when initialized
window_boundary = window_boundary[1,:]
    if window_mode == 2:
        # Without overlapping
        for w in range(1, int(math.floor(array.shape[1]/W))+1): # W*w is the dimension of every possible window
            mean_array = []
            std_array = []
            x_data = []
            for i in range(0, array.shape[1]+1):
                if W*w*(i+1) <= array.shape[1]:
                    window = array[:, W*w*i:W*w*(i+1)]
                    boundary = np.array([W*w*i, W*w*(i+1)])
                    window_boundary = np.vstack((window_boundary, boundary))
                    mean = np.nanmean(window)
                    std = np.nanstd(window)
                    mean_array = np.append(mean_array, mean)
                    std_array = np.append(std_array, std)
                    x_data = np.append(x_data, w)
            mean_array_tot = np.append(mean_array_tot, np.nanmean(mean_array))
            std_array_tot = np.append(std_array_tot, np.nanstd(std_array)) # TODO check this
            x_data_tot = np.append(x_data_tot, np.nanmean(x_data))
        # Slice window boundaries array to delete the [0,0] row used at initialization
        window_boundary = window_boundary[1:,:]
    if window_mode == 3:
        # Increasing window dimension, keeping the upstream cross-section fixed
        mean_array = []
        std_array = []
        x_data = []
        for i in range(0, array.shape[1]+1):
            if W*(i+1) <= array.shape[1]:
                window = array[:, 0:W*(i+1)]
                boundary = np.array([0, W*(i+1)])
                window_boundary = np.vstack((window_boundary, boundary))
                mean = np.nanmean(window)
                std = np.nanstd(window)
                mean_array = np.append(mean_array, mean)
                std_array = np.append(std_array, std)
                x_data = np.append(x_data, i)
        mean_array_tot = np.append(mean_array_tot, np.nanmean(mean_array))
        std_array_tot = np.append(std_array_tot, np.nanstd(std_array)) # TODO check this
        x_data_tot = np.append(x_data_tot, np.nanmean(x_data))
        # Slice window boundaries array to delete the [0,0] row used at initialization
        window_boundary = window_boundary[1:,:]
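
# A minimal, self-contained sketch of the overlapping moving-window scan used
# above (window_mode == 1): a window of width W*w slides one column at a time
# and a statistic is collected at every admissible position. The demo array
# and base width below are made-up values, used for illustration only.
def _demo_moving_window():
    import numpy as np
    demo = np.random.default_rng(0).normal(size=(12, 60))  # stand-in for a DoD matrix
    W_demo = 10  # base window width [columns]
    for w in range(1, demo.shape[1] // W_demo + 1):
        means = []
        for i in range(demo.shape[1]):
            if i + w * W_demo <= demo.shape[1]:
                means.append(np.nanmean(demo[:, i:i + w * W_demo]))
        print('window width = {} columns, mean over positions = {:.3f}'.format(
            w * W_demo, np.mean(means)))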
# # TODO Go on with this section
# if windows_mode == 1:
# # Define x_data for plots
# x_data = np.linspace(W,dim_x,math.floor(DoD_length/W))*px_x/1e03
# for n in range(1,math.floor(DoD_length/W)+1):
# w_cols = n*round(W/(px_x/1000)) # Window analysis length in number of columns
#         w_len = round(n*W,1) # Window analysis length in meters [m]
# # Define total volume matrix, Deposition matrix and Scour matrix
# DoD_vol_w = DoD_vol[:,0:w_cols] # Total volume matrix
# dep_DoD_w = dep_DoD[:,0:w_cols] # DoD of only deposition data
# sco_DoD_w = sco_DoD[:,0:w_cols] # DoD of only scour data
# # Define active pixel matrix
# act_px_matrix_w = act_px_matrix[:,0:w_cols] # Active pixel matrix, both scour and deposition
# act_px_matrix_dep_w = act_px_matrix_dep[:,0:w_cols] # Active deposition matrix
# act_px_matrix_sco_w = act_px_matrix_sco[:,0:w_cols] # Active scour matrix
# # Calculate principal quantities:
# # Volumes
# tot_vol_w = np.sum(DoD_vol_w)*px_x*px_y/(W*w_len*d50*1e09)# Total volume as V/(L*W*d50) [-] considering negative sign for scour
# sum_vol_w = np.sum(np.abs(DoD_vol_w))*px_x*px_y/(W*w_len*d50*1e09) # Sum of scour and deposition volume as V/(L*W*d50) [-]
# dep_vol_w = np.sum(dep_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Deposition volume as V/(L*W*d50) [-]
# sco_vol_w = np.sum(sco_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Scour volume as V/(L*W*d50) [-]
# # Areas:
# morph_act_area_w = np.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Active area both in terms of scour and deposition as A/(W*L) [-]
# morph_act_area_dep_w = np.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Active deposition area as A/(W*L) [-]
# morph_act_area_sco_w = np.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Active scour area as A/(W*L) [-]
# # Widths:
# act_width_mean_w = np.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Total mean active width [%] - Wact/W
# act_width_mean_dep_w = np.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Deposition mean active width [%] - Wact/W
# act_width_mean_sco_w = np.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Scour mean active width [%] - Wact/W
# # Thicknesses:
# act_thickness_w = sum_vol_w/morph_act_area_w*(d50*1e03) # Total active thickness (abs(V_sco) + V_dep)/act_area [mm]
# act_thickness_dep_w = dep_vol_w/morph_act_area_dep_w*(d50*1e03) # Deposition active thickness V_dep/act_area [mm]
# act_thickness_sco_w = sco_vol_w/act_width_mean_sco_w*(d50*1e03) # Scour active thickness V_sco/act_area [mm]
# # Append all values in arrays
# tot_vol_w_array = np.append(tot_vol_w_array, tot_vol_w)
# sum_vol_w_array = np.append(sum_vol_w_array, sum_vol_w)
# dep_vol_w_array = np.append(dep_vol_w_array, dep_vol_w)
# sco_vol_w_array = np.append(sco_vol_w_array, sco_vol_w)
# morph_act_area_w_array = np.append(morph_act_area_w_array, morph_act_area_w)
# morph_act_area_dep_w_array = np.append(morph_act_area_dep_w_array, morph_act_area_dep_w)
# morph_act_area_sco_w_array = np.append(morph_act_area_sco_w_array, morph_act_area_sco_w)
# act_width_mean_w_array = np.append(act_width_mean_w_array, act_width_mean_w)
# act_width_mean_dep_w_array = np.append(act_width_mean_dep_w_array, act_width_mean_dep_w)
# act_width_mean_sco_w_array = np.append(act_width_mean_sco_w_array, act_width_mean_sco_w)
# act_thickness_w_array = np.append(act_thickness_w_array, act_thickness_w)
# act_thickness_dep_w_array = np.append(act_thickness_dep_w_array, act_thickness_dep_w)
# act_thickness_sco_w_array = np.append(act_thickness_sco_w_array, act_thickness_sco_w)
# if plot_mode ==2:
# # Plots
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, dep_vol_w_array, '-', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Deposition volumes V/(W*L*d50) [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, sco_vol_w_array, '-', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Scour volumes V/(W*L*d50) [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, act_width_mean_w_array, '-', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Active width actW/W [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, act_thickness_w_array, '-', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Longitudinal coordinate [m]')
# axs.set_ylabel('Active thickness [mm]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# # Fixed window without overlapping
# if windows_mode == 2:
# # Calculate the number of suitable windows in the channel length
# c_array = []
# W_cols = int(W/px_x*1e03)
# for i in range(1, round(dim_x/W_cols)):
# c = math.floor(dim_x/(W_cols*i))
# if c*W_cols*i<=dim_x:
# c_array = np.append(c_array, c)
# else:
# pass
# # Define the components of the slicing operation (exclude the first one)
# f_cols_array = [0,0]
# x_data = [] # X data for the plot
# n = 0 # Initialize variable count
# for m in range(0,len(c_array)):
# # m is the window dimension in columns
# n+=1
# for i in range(1,(math.floor(dim_x/(W_cols*(m+1)))+1)):
# f_cols = [round(W_cols*(m+1)*(i-1), 1), round(W_cols*(m+1)*(i),1)]
# f_cols_array = np.vstack((f_cols_array, f_cols))
# x_data = np.append(x_data, n)
# x_data = (x_data)*W
# # Resize f_cols_array
# f_cols_array = f_cols_array[1:]
# for p in range(0, f_cols_array.shape[0]): # Loop over all the available window
#         w_len = (f_cols_array[p,1] - f_cols_array[p,0])*px_x/1e03 # Define the window length
# # Define total volume matrix, Deposition matrix and Scour matrix
# DoD_vol_w = DoD_vol[:, f_cols_array[p,0]:f_cols_array[p,1]] # Total volume matrix
# dep_DoD_w = dep_DoD[:, f_cols_array[p,0]:f_cols_array[p,1]] # DoD of only deposition data
# sco_DoD_w = sco_DoD[:, f_cols_array[p,0]:f_cols_array[p,1]] # DoD of only scour data
# # Define active pixel matrix
# act_px_matrix_w = act_px_matrix[:, f_cols_array[p,0]:f_cols_array[p,1]] # Active pixel matrix, both scour and deposition
# act_px_matrix_dep_w = act_px_matrix_dep[:, f_cols_array[p,0]:f_cols_array[p,1]] # Active deposition matrix
# act_px_matrix_sco_w = act_px_matrix_sco[:, f_cols_array[p,0]:f_cols_array[p,1]] # Active scour matrix
# # Calculate principal quantities:
# # Volumes
# tot_vol_w = np.sum(DoD_vol_w)*px_x*px_y/(W*w_len*d50*1e09)# Total volume as V/(L*W*d50) [-] considering negative sign for scour
# sum_vol_w = np.sum(np.abs(DoD_vol_w))*px_x*px_y/(W*w_len*d50*1e09) # Sum of scour and deposition volume as V/(L*W*d50) [-]
# dep_vol_w = np.sum(dep_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Deposition volume as V/(L*W*d50) [-]
# sco_vol_w = np.sum(sco_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Scour volume as V/(L*W*d50) [-]
# # Areas:
# morph_act_area_w = np.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Active area both in terms of scour and deposition as A/(W*L) [-]
# morph_act_area_dep_w = np.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Active deposition area as A/(W*L) [-]
# morph_act_area_sco_w = np.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Active scour area as A/(W*L) [-]
# # Widths:
# act_width_mean_w = np.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Total mean active width [%] - Wact/W
# act_width_mean_dep_w = np.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Deposition mean active width [%] - Wact/W
# act_width_mean_sco_w = np.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Scour mean active width [%] - Wact/W
# # Thicknesses:
# act_thickness_w = sum_vol_w/morph_act_area_w*(d50*1e03) # Total active thickness (abs(V_sco) + V_dep)/act_area [mm]
# act_thickness_dep_w = dep_vol_w/morph_act_area_dep_w*(d50*1e03) # Deposition active thickness V_dep/act_area [mm]
# act_thickness_sco_w = sco_vol_w/act_width_mean_sco_w*(d50*1e03) # Scour active thickness V_sco/act_area [mm]
# # Append all values in arrays
# tot_vol_w_array = np.append(tot_vol_w_array, tot_vol_w)
# sum_vol_w_array = np.append(sum_vol_w_array, sum_vol_w)
# dep_vol_w_array = np.append(dep_vol_w_array, dep_vol_w)
# sco_vol_w_array = np.append(sco_vol_w_array, sco_vol_w)
# morph_act_area_w_array = np.append(morph_act_area_w_array, morph_act_area_w)
# morph_act_area_dep_w_array = np.append(morph_act_area_dep_w_array, morph_act_area_dep_w)
# morph_act_area_sco_w_array = np.append(morph_act_area_sco_w_array, morph_act_area_sco_w)
# act_width_mean_w_array = np.append(act_width_mean_w_array, act_width_mean_w)
# act_width_mean_dep_w_array = np.append(act_width_mean_dep_w_array, act_width_mean_dep_w)
# act_width_mean_sco_w_array = np.append(act_width_mean_sco_w_array, act_width_mean_sco_w)
# act_thickness_w_array = np.append(act_thickness_w_array, act_thickness_w)
# act_thickness_dep_w_array = np.append(act_thickness_dep_w_array, act_thickness_dep_w)
# act_thickness_sco_w_array = np.append(act_thickness_sco_w_array, act_thickness_sco_w)
# if plot_mode ==2:
# # Plots
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, dep_vol_w_array, 'o', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Deposition volumes V/(W*L*d50) [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, sco_vol_w_array, 'o', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Scour volumes V/(W*L*d50) [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, act_width_mean_w_array, 'o', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Active width actW/W [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, act_thickness_w_array, 'o', c='brown')
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Active thickness [mm]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# # Fixed window with overlapping
# if windows_mode == 3:
# # Calculate the number of suitable windows in the channel length
# c_array = []
# W_cols = int(W/px_x*1e03) # Minimum windows length WxW dimension in columns
# for i in range(1, math.floor(dim_x/W_cols)+1): # per each windows analysis WxWi
# c = dim_x - W_cols*i
# c_array = np.append(c_array, c) # Contains the number of windows for each dimension WxW*i
# else:
# pass
# f_cols_array = [0,0]
# x_data = []
# n = 0
# for m in range(1,int(dim_x/W_cols)+1):
# w_length = m*W_cols # Analysis windows length
# # print(w_length)
# n+=1
# for i in range(0,dim_x): # i is the lower limit of the analysis window
#             low_lim = i # Analysis window lower limit
#             upp_lim = i + w_length # Analysis window upper limit
# if upp_lim<=dim_x:
# # print(low_lim, upp_lim)
# # print(i+w_length)
# f_cols = [low_lim, upp_lim] # Lower and upper boundary of the analysis window
# f_cols_array = np.vstack((f_cols_array, f_cols))
# x_data = np.append(x_data, n)
# else:
# pass
# x_data = x_data*W
# # Resize f_cols_array
# f_cols_array = f_cols_array[1:]
# for p in range(0, f_cols_array.shape[0]):
# w_len = (f_cols_array[p,1] - f_cols_array[p,0])*px_x/1e03 # Define the window length
# # print()
# # print(f_cols_array[p,:])
# # print(w_len)
# # Define total volume matrix, Deposition matrix and Scour matrix
# DoD_vol_w = DoD_vol[:, f_cols_array[p,0]:f_cols_array[p,1]] # Total volume matrix
# dep_DoD_w = dep_DoD[:, f_cols_array[p,0]:f_cols_array[p,1]] # DoD of only deposition data
# sco_DoD_w = sco_DoD[:, f_cols_array[p,0]:f_cols_array[p,1]] # DoD of only scour data
# # Define active pixel matrix
# act_px_matrix_w = act_px_matrix[:, f_cols_array[p,0]:f_cols_array[p,1]] # Active pixel matrix, both scour and deposition
# act_px_matrix_dep_w = act_px_matrix_dep[:, f_cols_array[p,0]:f_cols_array[p,1]] # Active deposition matrix
# act_px_matrix_sco_w = act_px_matrix_sco[:, f_cols_array[p,0]:f_cols_array[p,1]] # Active scour matrix
# # Calculate principal quantities:
# # Volumes
# tot_vol_w = np.sum(DoD_vol_w)*px_x*px_y/(W*w_len*d50*1e09)# Total volume as V/(L*W*d50) [-] considering negative sign for scour
# sum_vol_w = np.sum(np.abs(DoD_vol_w))*px_x*px_y/(W*w_len*d50*1e09) # Sum of scour and deposition volume as V/(L*W*d50) [-]
# dep_vol_w = np.sum(dep_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Deposition volume as V/(L*W*d50) [-]
# sco_vol_w = np.sum(sco_DoD_w)*px_x*px_y/(W*w_len*d50*1e09) # Scour volume as V/(L*W*d50) [-]
# # Areas:
# morph_act_area_w = np.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Active area both in terms of scour and deposition as A/(W*L) [-]
# morph_act_area_dep_w = np.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Active deposition area as A/(W*L) [-]
# morph_act_area_sco_w = np.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Active scour area as A/(W*L) [-]
# # Widths:
# act_width_mean_w = np.count_nonzero(act_px_matrix_w)*px_x*px_y/(W*w_len*1e06) # Total mean active width [%] - Wact/W
# act_width_mean_dep_w = np.count_nonzero(act_px_matrix_dep_w)*px_x*px_y/(W*w_len*1e06) # Deposition mean active width [%] - Wact/W
# act_width_mean_sco_w = np.count_nonzero(act_px_matrix_sco_w)*px_x*px_y/(W*w_len*1e06) # Scour mean active width [%] - Wact/W
# # Thicknesses:
# act_thickness_w = sum_vol_w/morph_act_area_w*(d50*1e03) # Total active thickness (abs(V_sco) + V_dep)/act_area [mm]
# act_thickness_dep_w = dep_vol_w/morph_act_area_dep_w*(d50*1e03) # Deposition active thickness V_dep/act_area [mm]
# act_thickness_sco_w = sco_vol_w/act_width_mean_sco_w*(d50*1e03) # Scour active thickness V_sco/act_area [mm]
# # Append all values in arrays
# tot_vol_w_array = np.append(tot_vol_w_array, tot_vol_w)
# sum_vol_w_array = np.append(sum_vol_w_array, sum_vol_w)
# dep_vol_w_array = np.append(dep_vol_w_array, dep_vol_w)
# sco_vol_w_array = np.append(sco_vol_w_array, sco_vol_w)
# morph_act_area_w_array = np.append(morph_act_area_w_array, morph_act_area_w)
# morph_act_area_dep_w_array = np.append(morph_act_area_dep_w_array, morph_act_area_dep_w)
# morph_act_area_sco_w_array = np.append(morph_act_area_sco_w_array, morph_act_area_sco_w)
# act_width_mean_w_array = np.append(act_width_mean_w_array, act_width_mean_w)
# act_width_mean_dep_w_array = np.append(act_width_mean_dep_w_array, act_width_mean_dep_w)
# act_width_mean_sco_w_array = np.append(act_width_mean_sco_w_array, act_width_mean_sco_w)
# act_thickness_w_array = np.append(act_thickness_w_array, act_thickness_w)
# act_thickness_dep_w_array = np.append(act_thickness_dep_w_array, act_thickness_dep_w)
# act_thickness_sco_w_array = np.append(act_thickness_sco_w_array, act_thickness_sco_w)
# if plot_mode ==2:
# # Plots
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, dep_vol_w_array, 'o', c='brown', markersize=0.1)
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Deposition volumes V/(W*L*d50) [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, sco_vol_w_array, 'o', c='brown', markersize=0.1)
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Scour volumes V/(W*L*d50) [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, act_width_mean_w_array, 'o', c='brown', markersize=0.1)
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Active width actW/W [-]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# axs.plot(x_data, act_thickness_w_array, 'o', c='brown', markersize=0.1)
# axs.set_title(run)
# axs.set_xlabel('Window analysis length [m]')
# axs.set_ylabel('Active thickness [mm]')
# # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# plt.show()
# if f == DoDs_name_array[0]:
# dep_vol_w_array_all = np.transpose(np.array(dep_vol_w_array))
# sco_vol_w_array_all = np.transpose(np.array(sco_vol_w_array))
# else:
# pass
# dep_vol_w_array_all = np.vstack((dep_vol_w_array_all,dep_vol_w_array))
# dep_vol_mean = np.mean(dep_vol_w_array_all, axis=0)
# dep_vol_std = np.std(dep_vol_w_array_all, axis=0)
# sco_vol_w_array_all = np.vstack((sco_vol_w_array_all,sco_vol_w_array))
# sco_vol_mean = np.mean(sco_vol_w_array_all, axis=0)
# sco_vol_std = np.std(sco_vol_w_array_all, axis=0)
# if windows_mode==2:
# # Loop to define the windows to clusterize data
# array = [0]
# num=0
# for n in range(0,len(c_array)):
# num += c_array[n]
# array = np.append(array, num) # Clusterize window dimension
# dep_vol_mean = []
# sco_vol_mean = []
# dep_vol_std = []
# sco_vol_std = []
# x_data_full = x_data
# x_data = []
# for n in range(0, len(array)-1):
# x_data = np.append(x_data, x_data_full[int(array[n])])
# for n in f_cols_array:
# dep_vol_mean = np.append(dep_vol_mean, np.mean(dep_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# sco_vol_mean = np.append(sco_vol_mean, np.mean(sco_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# dep_vol_std = np.append(dep_vol_std, np.std(dep_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# sco_vol_std = np.append(sco_vol_std, np.std(sco_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# # To finish
# if windows_mode == 3:
# # Loop to define the windows to clusterize data
# array = [0]
# num=0
# for n in range(0,len(c_array)):
# num += c_array[n]
# array = np.append(array, num) # Clusterize window dimension
# dep_vol_mean = []
# sco_vol_mean = []
# dep_vol_std = []
# sco_vol_std = []
# x_data_full = x_data
# x_data = []
# for n in range(0, len(array)-1):
# # low_lim = int(f_cols_array[n,0])
# # upp_lim = int(f_cols_array[n,1])
# x_data = np.append(x_data, round(x_data_full[int(array[n])+n],1))
# # dep_vol_mean = np.append(dep_vol_mean, np.mean(dep_vol_w_array_all[:,low_lim:upp_lim]))
# # sco_vol_mean = np.append(sco_vol_mean, np.mean(sco_vol_w_array_all[:,low_lim:upp_lim]))
# # dep_vol_std = np.append(dep_vol_std, np.std(dep_vol_w_array_all[:,low_lim:upp_lim]))
# # sco_vol_std = np.append(sco_vol_std, np.std(sco_vol_w_array_all[:,low_lim:upp_lim]))
# dep_vol_mean = np.append(dep_vol_mean, np.mean(dep_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# sco_vol_mean = np.append(sco_vol_mean, np.mean(sco_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# dep_vol_std = np.append(dep_vol_std, np.std(dep_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# sco_vol_std = np.append(sco_vol_std, np.std(sco_vol_w_array_all[:,int(array[n]):int(array[n+1])]))
# # print(int(array[n]),int(array[n+1]))
# # TODO To finish
# fig3, axs = plt.subplots(2,1,dpi=80, figsize=(10,6), sharex=True, tight_layout=True)
# fig3.suptitle(run + ' - Volume')
# axs[0].errorbar(x_data, sco_vol_mean, sco_vol_std, linestyle='--', marker='^', color='red')
# # axs[0].set_ylim(bottom=0)
# axs[0].set_title('Scour')
# # axs[0].set_xlabel()
# axs[0].set_ylabel('Scour volume V/(L*W*d50) [-]')
# axs[1].errorbar(x_data, dep_vol_mean, dep_vol_std, linestyle='--', marker='^', color='blue')
# axs[1].set_ylim(bottom=0)
# axs[1].set_title('Deposition')
# axs[1].set_xlabel('Analysis window length [m]')
# axs[1].set_ylabel('Deposition volume V/(L*W*d50) [-]')
# # plt.savefig(os.path.join(plot_dir, run +'dep_scour.png'), dpi=200)
# plt.show()
# # # Plots
# # fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# # axs.plot(x_data, dep_vol_w_array, 'o', c='brown')
# # axs.set_title(run)
# # axs.set_xlabel('Longitudinal coordinate [m]')
# # axs.set_ylabel('Deposition volumes V/(W*L*d50) [-]')
# # # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# # plt.show()
# # fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# # axs.plot(x_data, sco_vol_w_array, 'o', c='brown')
# # axs.set_title(run)
# # axs.set_xlabel('Longitudinal coordinate [m]')
# # axs.set_ylabel('Scour volumes V/(W*L*d50) [-]')
# # # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# # plt.show()
# # fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# # axs.plot(x_data, act_width_mean_w_array, 'o', c='brown')
# # axs.set_title(run)
# # axs.set_xlabel('Longitudinal coordinate [m]')
# # axs.set_ylabel('Active width actW/W [-]')
# # # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# # plt.show()
# # fig1, axs = plt.subplots(1,1,dpi=200, sharex=True, tight_layout=True)
# # axs.plot(x_data, act_thickness_w_array, 'o', c='brown')
# # axs.set_title(run)
# # axs.set_xlabel('Longitudinal coordinate [m]')
# # axs.set_ylabel('Active thickness [mm]')
# # # plt.savefig(os.path.join(plot_dir, run +'_morphW_interp.png'), dpi=200)
# # plt.show()
|
[
"os.listdir",
"numpy.nanstd",
"math.floor",
"numpy.where",
"morph_quantities_func_v2.morph_quantities",
"os.path.join",
"os.getcwd",
"numpy.append",
"numpy.array",
"numpy.nanmean",
"numpy.isnan",
"numpy.vstack",
"numpy.loadtxt"
] |
[((1005, 1016), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1014, 1016), False, 'import os\n'), ((1052, 1096), 'os.path.join', 'os.path.join', (['home_dir', '"""DoDs"""', "('DoD_' + run)"], {}), "(home_dir, 'DoDs', 'DoD_' + run)\n", (1064, 1096), False, 'import os\n'), ((1192, 1215), 'os.listdir', 'os.listdir', (['DoDs_folder'], {}), '(DoDs_folder)\n', (1202, 1215), False, 'import os\n'), ((1657, 1692), 'os.path.join', 'os.path.join', (['DoDs_folder', 'DoD_name'], {}), '(DoDs_folder, DoD_name)\n', (1669, 1692), False, 'import os\n'), ((1714, 1750), 'numpy.loadtxt', 'np.loadtxt', (['DoD_path'], {'delimiter': '"""\t"""'}), "(DoD_path, delimiter='\\t')\n", (1724, 1750), True, 'import numpy as np\n'), ((2092, 2128), 'numpy.where', 'np.where', (['(DoD_vol == NaN)', '(0)', 'DoD_vol'], {}), '(DoD_vol == NaN, 0, DoD_vol)\n', (2100, 2128), True, 'import numpy as np\n'), ((2302, 2330), 'numpy.where', 'np.where', (['(DoD_vol != 0)', '(1)', '(0)'], {}), '(DoD_vol != 0, 1, 0)\n', (2310, 2330), True, 'import numpy as np\n'), ((2402, 2430), 'numpy.where', 'np.where', (['(dep_DoD != 0)', '(1)', '(0)'], {}), '(dep_DoD != 0, 1, 0)\n', (2410, 2430), True, 'import numpy as np\n'), ((2483, 2511), 'numpy.where', 'np.where', (['(sco_DoD != 0)', '(1)', '(0)'], {}), '(sco_DoD != 0, 1, 0)\n', (2491, 2511), True, 'import numpy as np\n'), ((2905, 2921), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2913, 2921), True, 'import numpy as np\n'), ((2009, 2034), 'numpy.isnan', 'np.isnan', (['DoD_filt_nozero'], {}), '(DoD_filt_nozero)\n', (2017, 2034), True, 'import numpy as np\n'), ((1387, 1416), 'numpy.append', 'np.append', (['DoDs_name_array', 'f'], {}), '(DoDs_name_array, f)\n', (1396, 1416), True, 'import numpy as np\n'), ((9051, 9073), 'numpy.nanmean', 'np.nanmean', (['mean_array'], {}), '(mean_array)\n', (9061, 9073), True, 'import numpy as np\n'), ((9123, 9143), 'numpy.nanstd', 'np.nanstd', (['std_array'], {}), '(std_array)\n', (9132, 9143), True, 'import numpy as np\n'), ((9203, 9221), 'numpy.nanmean', 'np.nanmean', (['x_data'], {}), '(x_data)\n', (9213, 9221), True, 'import numpy as np\n'), ((6404, 6422), 'numpy.nanmean', 'np.nanmean', (['x_data'], {}), '(x_data)\n', (6414, 6422), True, 'import numpy as np\n'), ((6514, 6541), 'numpy.nanmean', 'np.nanmean', (['tot_vol_w_array'], {}), '(tot_vol_w_array)\n', (6524, 6541), True, 'import numpy as np\n'), ((6634, 6660), 'numpy.nanstd', 'np.nanstd', (['tot_vol_w_array'], {}), '(tot_vol_w_array)\n', (6643, 6660), True, 'import numpy as np\n'), ((8015, 8037), 'numpy.nanmean', 'np.nanmean', (['mean_array'], {}), '(mean_array)\n', (8025, 8037), True, 'import numpy as np\n'), ((8091, 8111), 'numpy.nanstd', 'np.nanstd', (['std_array'], {}), '(std_array)\n', (8100, 8111), True, 'import numpy as np\n'), ((8175, 8193), 'numpy.nanmean', 'np.nanmean', (['x_data'], {}), '(x_data)\n', (8185, 8193), True, 'import numpy as np\n'), ((8668, 8694), 'numpy.array', 'np.array', (['[0, W * (i + 1)]'], {}), '([0, W * (i + 1)])\n', (8676, 8694), True, 'import numpy as np\n'), ((8724, 8762), 'numpy.vstack', 'np.vstack', (['(window_boundary, boundary)'], {}), '((window_boundary, boundary))\n', (8733, 8762), True, 'import numpy as np\n'), ((8786, 8804), 'numpy.nanmean', 'np.nanmean', (['window'], {}), '(window)\n', (8796, 8804), True, 'import numpy as np\n'), ((8827, 8844), 'numpy.nanstd', 'np.nanstd', (['window'], {}), '(window)\n', (8836, 8844), True, 'import numpy as np\n'), ((8874, 8901), 'numpy.append', 'np.append', (['mean_array', 'mean'], {}), '(mean_array, mean)\n', (8883, 
8901), True, 'import numpy as np\n'), ((8930, 8955), 'numpy.append', 'np.append', (['std_array', 'std'], {}), '(std_array, std)\n', (8939, 8955), True, 'import numpy as np\n'), ((8979, 8999), 'numpy.append', 'np.append', (['x_data', 'i'], {}), '(x_data, i)\n', (8988, 8999), True, 'import numpy as np\n'), ((3620, 3650), 'math.floor', 'math.floor', (['(array.shape[1] / W)'], {}), '(array.shape[1] / W)\n', (3630, 3650), False, 'import math\n'), ((4476, 4500), 'numpy.array', 'np.array', (['[i, W * w + i]'], {}), '([i, W * w + i])\n', (4484, 4500), True, 'import numpy as np\n'), ((4534, 4572), 'numpy.vstack', 'np.vstack', (['(window_boundary, boundary)'], {}), '((window_boundary, boundary))\n', (4543, 4572), True, 'import numpy as np\n'), ((4600, 4620), 'numpy.append', 'np.append', (['x_data', 'w'], {}), '(x_data, w)\n', (4609, 4620), True, 'import numpy as np\n'), ((4921, 4945), 'morph_quantities_func_v2.morph_quantities', 'morph_quantities', (['window'], {}), '(window)\n', (4937, 4945), False, 'from morph_quantities_func_v2 import morph_quantities\n'), ((5153, 5188), 'numpy.append', 'np.append', (['tot_vol_w_array', 'tot_vol'], {}), '(tot_vol_w_array, tot_vol)\n', (5162, 5188), True, 'import numpy as np\n'), ((5225, 5260), 'numpy.append', 'np.append', (['sum_vol_w_array', 'sum_vol'], {}), '(sum_vol_w_array, sum_vol)\n', (5234, 5260), True, 'import numpy as np\n'), ((5297, 5332), 'numpy.append', 'np.append', (['dep_vol_w_array', 'dep_vol'], {}), '(dep_vol_w_array, dep_vol)\n', (5306, 5332), True, 'import numpy as np\n'), ((5369, 5404), 'numpy.append', 'np.append', (['sco_vol_w_array', 'sco_vol'], {}), '(sco_vol_w_array, sco_vol)\n', (5378, 5404), True, 'import numpy as np\n'), ((5448, 5497), 'numpy.append', 'np.append', (['morph_act_area_w_array', 'morph_act_area'], {}), '(morph_act_area_w_array, morph_act_area)\n', (5457, 5497), True, 'import numpy as np\n'), ((5545, 5602), 'numpy.append', 'np.append', (['morph_act_area_dep_w_array', 'morph_act_area_dep'], {}), '(morph_act_area_dep_w_array, morph_act_area_dep)\n', (5554, 5602), True, 'import numpy as np\n'), ((5650, 5707), 'numpy.append', 'np.append', (['morph_act_area_sco_w_array', 'morph_act_area_sco'], {}), '(morph_act_area_sco_w_array, morph_act_area_sco)\n', (5659, 5707), True, 'import numpy as np\n'), ((5751, 5800), 'numpy.append', 'np.append', (['act_width_mean_w_array', 'act_width_mean'], {}), '(act_width_mean_w_array, act_width_mean)\n', (5760, 5800), True, 'import numpy as np\n'), ((5848, 5905), 'numpy.append', 'np.append', (['act_width_mean_dep_w_array', 'act_width_mean_dep'], {}), '(act_width_mean_dep_w_array, act_width_mean_dep)\n', (5857, 5905), True, 'import numpy as np\n'), ((5953, 6010), 'numpy.append', 'np.append', (['act_width_mean_sco_w_array', 'act_width_mean_sco'], {}), '(act_width_mean_sco_w_array, act_width_mean_sco)\n', (5962, 6010), True, 'import numpy as np\n'), ((6053, 6100), 'numpy.append', 'np.append', (['act_thickness_w_array', 'act_thickness'], {}), '(act_thickness_w_array, act_thickness)\n', (6062, 6100), True, 'import numpy as np\n'), ((6147, 6202), 'numpy.append', 'np.append', (['act_thickness_dep_w_array', 'act_thickness_dep'], {}), '(act_thickness_dep_w_array, act_thickness_dep)\n', (6156, 6202), True, 'import numpy as np\n'), ((6249, 6304), 'numpy.append', 'np.append', (['act_thickness_sco_w_array', 'act_thickness_sco'], {}), '(act_thickness_sco_w_array, act_thickness_sco)\n', (6258, 6304), True, 'import numpy as np\n'), ((7257, 7287), 'math.floor', 'math.floor', (['(array.shape[1] / W)'], {}), 
'(array.shape[1] / W)\n', (7267, 7287), False, 'import math\n'), ((7598, 7636), 'numpy.array', 'np.array', (['[W * w * i, W * w * (i + 1)]'], {}), '([W * w * i, W * w * (i + 1)])\n', (7606, 7636), True, 'import numpy as np\n'), ((7664, 7702), 'numpy.vstack', 'np.vstack', (['(window_boundary, boundary)'], {}), '((window_boundary, boundary))\n', (7673, 7702), True, 'import numpy as np\n'), ((7730, 7748), 'numpy.nanmean', 'np.nanmean', (['window'], {}), '(window)\n', (7740, 7748), True, 'import numpy as np\n'), ((7775, 7792), 'numpy.nanstd', 'np.nanstd', (['window'], {}), '(window)\n', (7784, 7792), True, 'import numpy as np\n'), ((7826, 7853), 'numpy.append', 'np.append', (['mean_array', 'mean'], {}), '(mean_array, mean)\n', (7835, 7853), True, 'import numpy as np\n'), ((7886, 7911), 'numpy.append', 'np.append', (['std_array', 'std'], {}), '(std_array, std)\n', (7895, 7911), True, 'import numpy as np\n'), ((7939, 7959), 'numpy.append', 'np.append', (['x_data', 'w'], {}), '(x_data, w)\n', (7948, 7959), True, 'import numpy as np\n')]
|
#--------------------------------------------------------Import libraries
import pickle
import socket
import struct
import cv2
from stable_baselines import PPO2
import numpy as np
import imageio
#--------------------------------------------------------Establish connection
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("RASPBERRY PI address",1235))
#--------------------------------------------------------Read model
model = PPO2.load("model_output/model_final.zip")
#--------------------------------------------------------Establish initial variables to hold information
data = bytearray()
info = s.recv(4)
length = struct.unpack(">L", info[:4])[0]
#--------------------------------------------------------Initialize
# initializes arrays to hold images for GIF
images_O = []
cv2.namedWindow('frame')
cv2.resizeWindow('frame', 256,256)
try:
while True:
# Capture the bytes being sent
while len(data) < length:
data.extend(s.recv(4096))
        # Convert from BGR to RGB
frame = cv2.cvtColor(cv2.imdecode(np.frombuffer(data[:length],dtype=np.uint8),1),cv2.COLOR_BGR2RGB)
# add raw and transformed images
images_O.append(frame)
# Given state, predict action
action, _ = model.predict(frame, deterministic=True)
# send action
s.sendall(pickle.dumps(action))
# Set up to get new image
data = data[length:]
data.extend(s.recv(4))
length = struct.unpack(">L", data[:4])[0]
data = data[4:]
# Show image on display
# Convert transformed image to BGR so CV2 can show image correctly
cv2.imshow('frame',cv2.cvtColor(frame,cv2.COLOR_RGB2BGR))
if cv2.waitKey(1) & 0xFF == ord('q'):
s.close()
break
finally:
s.close()
# convert untransformed images to gif
imageio.mimsave('Lego_camera_view.gif', [np.array(img) for i, img in enumerate(images_O) if i % 2 == 0], fps=20)
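
#--------------------------------------------------------Illustrative server-side sketch
# A hypothetical counterpart for the Raspberry Pi end of this protocol: each
# JPEG-encoded frame is sent prefixed with a 4-byte big-endian length, and the
# pickled action is read back. Camera capture and motor control are stubbed
# out; the host/port and the frame source below are assumptions, not part of
# this client script.
def serve_frames(host='0.0.0.0', port=1235):
    import pickle
    import socket
    import struct
    import cv2
    import numpy as np
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((host, port))
    srv.listen(1)
    conn, _ = srv.accept()
    try:
        while True:
            frame = np.zeros((256, 256, 3), dtype=np.uint8)  # stand-in for a camera frame
            ok, encoded = cv2.imencode('.jpg', frame)
            if not ok:
                continue
            payload = encoded.tobytes()
            conn.sendall(struct.pack('>L', len(payload)) + payload)
            action = pickle.loads(conn.recv(4096))  # apply this action to the motors
    finally:
        conn.close()
        srv.close()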
|
[
"cv2.resizeWindow",
"socket.socket",
"pickle.dumps",
"stable_baselines.PPO2.load",
"numpy.array",
"struct.unpack",
"cv2.cvtColor",
"numpy.frombuffer",
"cv2.waitKey",
"cv2.namedWindow"
] |
[((279, 328), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (292, 328), False, 'import socket\n'), ((446, 487), 'stable_baselines.PPO2.load', 'PPO2.load', (['"""model_output/model_final.zip"""'], {}), "('model_output/model_final.zip')\n", (455, 487), False, 'from stable_baselines import PPO2\n'), ((803, 827), 'cv2.namedWindow', 'cv2.namedWindow', (['"""frame"""'], {}), "('frame')\n", (818, 827), False, 'import cv2\n'), ((828, 863), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""frame"""', '(256)', '(256)'], {}), "('frame', 256, 256)\n", (844, 863), False, 'import cv2\n'), ((642, 671), 'struct.unpack', 'struct.unpack', (['""">L"""', 'info[:4]'], {}), "('>L', info[:4])\n", (655, 671), False, 'import struct\n'), ((1352, 1372), 'pickle.dumps', 'pickle.dumps', (['action'], {}), '(action)\n', (1364, 1372), False, 'import pickle\n'), ((1487, 1516), 'struct.unpack', 'struct.unpack', (['""">L"""', 'data[:4]'], {}), "('>L', data[:4])\n", (1500, 1516), False, 'import struct\n'), ((1681, 1719), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2BGR'], {}), '(frame, cv2.COLOR_RGB2BGR)\n', (1693, 1719), False, 'import cv2\n'), ((1916, 1929), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1924, 1929), True, 'import numpy as np\n'), ((1072, 1116), 'numpy.frombuffer', 'np.frombuffer', (['data[:length]'], {'dtype': 'np.uint8'}), '(data[:length], dtype=np.uint8)\n', (1085, 1116), True, 'import numpy as np\n'), ((1731, 1745), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1742, 1745), False, 'import cv2\n')]
|
import os
from fnmatch import fnmatch
from typing import Iterator, List, Optional, Sequence, Tuple, Union
import numpy as np
from typing_extensions import Literal
from . import lib
from .otf import TemporaryOTF
from .util import PathOrArray, _kwargs_for, imread
def rl_cleanup():
"""Release GPU buffer and cleanup after deconvolution
Call this before program quits to release global GPUBuffer d_interpOTF.
- Resets any bleach corrections
- Removes OTF from GPU buffer
- Destroys cuFFT plan
- Releases GPU buffers
"""
return lib.RL_cleanup()
def rl_init(
rawdata_shape: Tuple[int, int, int],
otfpath: str,
dzdata: float = 0.5,
dxdata: float = 0.1,
dzpsf: float = 0.1,
dxpsf: float = 0.1,
deskew: float = 0,
rotate: float = 0,
width: int = 0,
):
"""Initialize GPU for deconvolution.
Prepares cuFFT plan for deconvolution with a given data shape and OTF.
Must be used prior to :func:`pycudadecon.rl_decon`
Parameters
----------
rawdata_shape : Tuple[int, int, int]
3-tuple of data shape
otfpath : str
Path to OTF TIF
dzdata : float, optional
Z-step size of data, by default 0.5
dxdata : float, optional
XY pixel size of data, by default 0.1
dzpsf : float, optional
Z-step size of the OTF, by default 0.1
dxpsf : float, optional
XY pixel size of the OTF, by default 0.1
deskew : float, optional
Deskew angle. If not 0.0 then deskewing will be performed before
deconvolution, by default 0
rotate : float, optional
Rotation angle; if not 0.0 then rotation will be performed around Y
axis after deconvolution, by default 0
width : int, optional
If deskewed, the output image's width, by default 0 (do not crop)
Examples
--------
>>> rl_init(im.shape, otfpath)
>>> decon_result = rl_decon(im)
>>> rl_cleanup()
"""
nz, ny, nx = rawdata_shape
lib.RL_interface_init(
nx,
ny,
nz,
dxdata,
dzdata,
dxpsf,
dzpsf,
deskew,
rotate,
width,
otfpath.encode(),
)
def rl_decon(
im: np.ndarray,
background: Union[int, Literal["auto"]] = 80,
n_iters: int = 10,
shift: int = 0,
save_deskewed: bool = False,
output_shape: Optional[Tuple[int, int, int]] = None,
napodize: int = 15,
nz_blend: int = 0,
pad_val: float = 0.0,
dup_rev_z: bool = False,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""Perform Richardson Lucy Deconvolution.
Performs actual deconvolution. GPU must first be initialized with
:func:`pycudadecon.rl_init`
Parameters
----------
im : np.ndarray
3D image volume to deconvolve
background : int or 'auto'
User-supplied background to subtract. If 'auto', the median value of the
last Z plane will be used as background. by default 80
n_iters : int, optional
Number of iterations, by default 10
shift : int, optional
If deskewed, the output image's extra shift in X (positive->left),
by default 0
save_deskewed : bool, optional
Save deskewed raw data as well as deconvolution result, by default False
output_shape : tuple of int, optional
Specify the output shape after deskewing. Usually this is unnecessary and
can be autodetected. Mostly intended for use within a
:class:`pycudadecon.RLContext` context, by default None
napodize : int, optional
Number of pixels to soften edge with, by default 15
nz_blend : int, optional
Number of top and bottom sections to blend in to reduce axial ringing,
by default 0
pad_val : float, optional
Value with which to pad image when deskewing, by default 0.0
dup_rev_z : bool, optional
Duplicate reversed stack prior to decon to reduce axial ringing,
by default False
Returns
-------
np.ndarray or 2-tuple of np.ndarray
The deconvolved result. If `save_deskewed` is `True`, returns
`(decon_result, deskew_result)`
Raises
------
ValueError
If im.ndim is not 3, or `output_shape` is provided but not length 3
"""
if im.ndim != 3:
raise ValueError("Only 3D arrays supported")
nz, ny, nx = im.shape
if output_shape is None:
output_shape = (lib.get_output_nz(), lib.get_output_ny(), lib.get_output_nx())
elif len(output_shape) != 3:
raise ValueError("Decon output shape must have length==3")
decon_result = np.empty(tuple(output_shape), dtype=np.float32)
if save_deskewed:
deskew_result = np.empty_like(decon_result)
else:
deskew_result = np.empty(1, dtype=np.float32)
# must be 16 bit going in
if not np.issubdtype(im.dtype, np.uint16):
im = im.astype(np.uint16)
if isinstance(background, str) and background == "auto":
background = np.median(im[-1])
rescale = False # not sure if this works yet...
if not im.flags["C_CONTIGUOUS"]:
im = np.ascontiguousarray(im)
lib.RL_interface(
im,
nx,
ny,
nz,
decon_result,
deskew_result,
background,
rescale,
save_deskewed,
n_iters,
shift,
napodize,
nz_blend,
pad_val,
dup_rev_z,
)
if save_deskewed:
return decon_result, deskew_result
else:
return decon_result
def quickDecon(image: np.ndarray, otfpath: str, **kwargs):
"""Perform deconvolution of `image` with otf at `otfpath`.
Not currently used...
"""
rl_init(image.shape, otfpath, **_kwargs_for(rl_init, kwargs))
result = rl_decon(image, **_kwargs_for(rl_decon, kwargs))
lib.RL_cleanup()
return result
class RLContext:
"""Context manager to setup the GPU for RL decon
Takes care of handing the OTF to the GPU, preparing a cuFFT plane,
and cleaning up after decon. Internally, this calls :func:`rl_init`,
stores the shape of the expected output volume after any deskew/decon,
then calls :func:`rl_cleanup` when exiting the context.
For parameters, see :func:`rl_init`.
Examples
--------
>>> with RLContext(data.shape, otfpath, dz) as ctx:
    ...     result = rl_decon(data, output_shape=ctx.out_shape)
"""
def __init__(
self,
rawdata_shape: Tuple[int, int, int],
otfpath: str,
dzdata: float = 0.5,
dxdata: float = 0.1,
dzpsf: float = 0.1,
dxpsf: float = 0.1,
deskew: float = 0,
rotate: float = 0,
width: int = 0,
):
self.kwargs = locals()
self.kwargs.pop("self")
self.out_shape: Optional[Tuple[int, int, int]] = None
def __enter__(self):
"""Setup the context and return the ZYX shape of the output image"""
rl_init(**self.kwargs)
self.out_shape = (lib.get_output_nz(), lib.get_output_ny(), lib.get_output_nx())
return self
def __exit__(self, typ, val, traceback):
# exit receives a tuple with any exceptions raised during processing
        # if __exit__ returns True, exceptions will be suppressed
lib.RL_cleanup()
# alias
rl_context = RLContext
def _yield_arrays(
images: Union[PathOrArray, Sequence[PathOrArray]], fpattern="*.tif"
) -> Iterator[np.ndarray]:
"""Yield arrays from an array, path, or sequence of either.
Parameters
----------
images : Union[PathOrArray, Sequence[PathOrArray]]
an array, path, or sequence of either
fpattern : str, optional
used to filter files in a directory, by default "*.tif"
Yields
-------
Iterator[np.ndarray]
Arrays (read from paths if necessary)
Raises
------
OSError
If a directory is provided and no files match fpattern.
"""
if isinstance(images, np.ndarray):
yield images
elif isinstance(images, str):
if os.path.isfile(images):
yield imread(images)
elif os.path.isdir(images):
imfiles = [f for f in os.listdir(images) if fnmatch(f, fpattern)]
if not len(imfiles):
raise OSError(
'No files matching pattern "{}" found in directory: {}'.format(
fpattern, images
)
)
for fpath in imfiles:
yield imread(os.path.join(images, fpath))
else:
for item in images:
yield from _yield_arrays(item)
def decon(
images: Union[PathOrArray, Sequence[PathOrArray]],
psf: PathOrArray,
fpattern: str = "*.tif",
**kwargs
) -> Union[np.ndarray, List[np.ndarray]]:
"""Deconvolve an image or images with a PSF or OTF file.
If `images` is a directory, use the `fpattern` argument to select files
by filename pattern.
Parameters
----------
images : str, np.ndarray, or sequence of either
The array, filepath, directory, or list/tuple thereof to deconvolve
psf : str or np.ndarray
a filepath of a PSF or OTF file, or a 3D numpy PSF array. Function will
auto-detect whether the file is a 3D PSF or a filepath representing a 2D
complex OTF.
fpattern : str, optional
Filepattern to use when a directory is provided in the `images` argument,
by default `*.tif`
** kwargs
All other kwargs must be valid for either :func:`rl_init` or :func:`rl_decon`.
Returns
-------
np.ndarray or list of array
The deconvolved image(s)
Raises
------
ValueError
If save_deskewed is True and deskew is unset or 0
IOError
If a directory is provided as input and ``fpattern`` yields no files
NotImplementedError
If ``psf`` is provided as a complex, 2D numpy array (OTFs can only be
provided as filenames created with :func:`pycudadecon.make_otf`)
Examples
--------
deconvolve a 3D TIF volume with a 3D PSF volume (e.g. a single bead stack)
>>> result = decon('/path/to/image.tif', '/path/to/psf.tif')
deconvolve all TIF files in a specific directory that match a certain
`filename pattern <https://docs.python.org/3.6/library/fnmatch.html>`_,
(in this example, all TIFs with the string '560nm' in their name)
>>> result = decon(
... '/directory/with/images', '/path/to/psf.tif', fpattern='*560nm*.tif'
... )
deconvolve a list of images, provided either as np.ndarrays, filepaths,
or directories
>>> imarray = tifffile.imread('some_other_image.tif')
>>> inputs = ['/directory/with/images', '/path/to/image.tif', imarray]
>>> result = decon(inputs, '/path/to/psf.tif', fpattern='*560nm*.tif')
"""
if kwargs.get("save_deskewed"):
if kwargs.get("deskew", 1) == 0:
raise ValueError("Cannot use save_deskewed=True with deskew=0")
if not kwargs.get("deskew"):
raise ValueError("Must set deskew != 0 when using save_deskewed=True")
init_kwargs = _kwargs_for(rl_init, kwargs)
decon_kwargs = _kwargs_for(rl_decon, kwargs)
out = []
with TemporaryOTF(psf, **kwargs) as otf:
arraygen = _yield_arrays(images, fpattern)
# first, assume that all of the images are the same shape...
# in which case we can prevent a lot of GPU IO
# grab and store the shape of the first item in the generator
next_im = next(arraygen)
shp = next_im.shape
with RLContext(shp, otf.path, **init_kwargs) as ctx:
while True:
out.append(
rl_decon(next_im, output_shape=ctx.out_shape, **decon_kwargs)
)
try:
next_im = next(arraygen)
# here we check to make sure that the images are still the same
# shape... if not, we'll continue below
if next_im.shape != shp:
break
except StopIteration:
next_im = None
break
# if we had a shape mismatch, there will still be images left to process
# process them the slow way here...
if next_im is not None:
for imarray in [next_im, *arraygen]:
with RLContext(imarray.shape, otf.path, **init_kwargs) as ctx:
out.append(
rl_decon(imarray, output_shape=ctx.out_shape, **decon_kwargs)
)
if isinstance(images, (list, tuple)) and len(images) > 1:
return out
else:
return out[0]
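
def _demo_manual_pipeline():
    """Illustrative sketch of the manual pipeline that :func:`decon` wraps.
    Uses :class:`TemporaryOTF` to turn a PSF file into an OTF and
    :class:`RLContext` to set up the GPU once for the volume shape. The file
    paths below are hypothetical placeholders, and a CUDA-capable GPU is
    required to actually run this.
    """
    volume = imread("/path/to/image.tif")  # hypothetical input volume
    with TemporaryOTF("/path/to/psf.tif") as otf:  # hypothetical PSF file
        with RLContext(volume.shape, otf.path, dzdata=0.3) as ctx:
            result = rl_decon(volume, output_shape=ctx.out_shape)
    return result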
|
[
"numpy.median",
"os.listdir",
"os.path.join",
"numpy.ascontiguousarray",
"os.path.isfile",
"numpy.issubdtype",
"os.path.isdir",
"numpy.empty",
"numpy.empty_like",
"fnmatch.fnmatch"
] |
[((4717, 4744), 'numpy.empty_like', 'np.empty_like', (['decon_result'], {}), '(decon_result)\n', (4730, 4744), True, 'import numpy as np\n'), ((4779, 4808), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': 'np.float32'}), '(1, dtype=np.float32)\n', (4787, 4808), True, 'import numpy as np\n'), ((4851, 4885), 'numpy.issubdtype', 'np.issubdtype', (['im.dtype', 'np.uint16'], {}), '(im.dtype, np.uint16)\n', (4864, 4885), True, 'import numpy as np\n'), ((5004, 5021), 'numpy.median', 'np.median', (['im[-1]'], {}), '(im[-1])\n', (5013, 5021), True, 'import numpy as np\n'), ((5127, 5151), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['im'], {}), '(im)\n', (5147, 5151), True, 'import numpy as np\n'), ((8034, 8056), 'os.path.isfile', 'os.path.isfile', (['images'], {}), '(images)\n', (8048, 8056), False, 'import os\n'), ((8105, 8126), 'os.path.isdir', 'os.path.isdir', (['images'], {}), '(images)\n', (8118, 8126), False, 'import os\n'), ((8162, 8180), 'os.listdir', 'os.listdir', (['images'], {}), '(images)\n', (8172, 8180), False, 'import os\n'), ((8184, 8204), 'fnmatch.fnmatch', 'fnmatch', (['f', 'fpattern'], {}), '(f, fpattern)\n', (8191, 8204), False, 'from fnmatch import fnmatch\n'), ((8498, 8525), 'os.path.join', 'os.path.join', (['images', 'fpath'], {}), '(images, fpath)\n', (8510, 8525), False, 'import os\n')]
|
import numpy as np
from astroquery.hitran import Hitran
from astropy import units as un
from astropy.constants import c, k_B, h, u
def calc_solid_angle(radius,distance):
'''
Convenience function to calculate solid angle from radius and distance, assuming a disk shape.
Parameters
----------
radius : float
radius value in AU
distance : float
distance value in parsec
Returns
----------
solid angle : float
solid angle in steradians
'''
return np.pi*radius**2./(distance*206265.)**2.
def calc_radius(solid_angle,distance):
'''
Convenience function to calculate disk radius from solid angle and distance, assuming a disk shape.
Parameters
----------
solid_angle : float
        solid angle value in steradians
distance : float
distance value in parsec
Returns
----------
radius : float
disk radius in AU
'''
return (distance*206265)*np.sqrt(solid_angle/np.pi)
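def _demo_disk_geometry():
    '''
    Illustrative round-trip check (example values only): a disk of radius
    50 AU at 140 pc subtends roughly 9.4e-12 sr, and calc_radius recovers
    the radius from that solid angle.
    '''
    omega = calc_solid_angle(50., 140.)
    print(omega)                      # ~9.4e-12 [sr]
    print(calc_radius(omega, 140.))   # ~50 [AU]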
def get_molmass(molecule_name,isotopologue_number=1):
'''
For a given input molecular formula, return the corresponding molecular mass, in amu
Parameters
----------
    molecule_name : str
        The string describing the molecule.
isotopologue_number : int, optional
The isotopologue number, from most to least common.
Returns
-------
mu : float
Molecular mass in amu
'''
mol_isot_code=molecule_name+'_'+str(isotopologue_number)
#https://hitran.org/docs/iso-meta/
mass = { 'H2O_1':18.010565, 'H2O_2':20.014811, 'H2O_3':19.01478, 'H2O_4':19.01674,
'H2O_5':21.020985, 'H2O_6':20.020956, 'H2O_7':20.022915,
'CO2_1':43.98983,'CO2_2':44.993185,'CO2_3':45.994076,'CO2_4':44.994045,
'CO2_5':46.997431,'CO2_6':45.9974,'CO2_7':47.998322,'CO2_8':46.998291,
'CO2_9':45.998262,'CO2_10':49.001675,'CO2_11':48.001646,'CO2_12':47.0016182378,
'O3_1':47.984745,'O3_2':49.988991,'O3_3':49.988991,'O3_4':48.98896,'O3_5':48.98896,
'N2O_1':44.001062,'N2O_2':44.998096,'N2O_3':44.998096,'N2O_4':46.005308,'N2O_5':45.005278,
'CO_1':27.994915,'CO_2':28.99827,'CO_3':29.999161,'CO_4':28.99913,'CO_5':31.002516,'CO_6':30.002485,
'CH4_1':16.0313,'CH4_2':17.034655,'CH4_3':17.037475,'CH4_4':18.04083,
'O2_1':31.98983,'O2_2':33.994076,'O2_3':32.994045,
'NO_1':29.997989,'NO_2':30.995023,'NO_3':32.002234,
'SO2_1':63.961901,'SO2_2':65.957695,
'NO2_1':45.992904,'NO2_2':46.989938,
'NH3_1':17.026549,'NH3_2':18.023583,
'HNO3_1':62.995644,'HNO3_2':63.99268,
'OH_1':17.00274,'OH_2':19.006986,'OH_3':18.008915,
'HF_1':20.006229,'HF_2':21.012404,
'HCl_1':35.976678,'HCl_2':37.973729,'HCl_3':36.982853,'HCl_4':38.979904,
'HBr_1':79.92616,'HBr_2':81.924115,'HBr_3':80.932336,'HBr_4':82.930289,
'HI_1':127.912297,'HI_2':128.918472,
'ClO_1':50.963768,'ClO_2':52.960819,
'OCS_1':59.966986,'OCS_2':61.96278,'OCS_3':60.970341,'OCS_4':60.966371,'OCS_5':61.971231, 'OCS_6':62.966136,
'H2CO_1':30.010565,'H2CO_2':31.01392,'H2CO_3':32.014811,
'HOCl_1':51.971593,'HOCl_2':53.968644,
'N2_1':28.006148,'N2_2':29.003182,
'HCN_1':27.010899,'HCN_2':28.014254,'HCN_3':28.007933,
             'CH3Cl_1':49.992328,'CH3Cl_2':51.989379,
'H2O2_1':34.00548,
'C2H2_1':26.01565,'C2H2_2':27.019005,'C2H2_3':27.021825,
'C2H6_1':30.04695,'C2H6_2':31.050305,
'PH3_1':33.997238,
'COF2_1':65.991722,'COF2_2':66.995083,
'SF6_1':145.962492,
'H2S_1':33.987721,'H2S_2':35.983515,'H2S_3':34.987105,
'HCOOH_1':46.00548,
'HO2_1':32.997655,
'O_1':15.994915,
'ClONO2_1':96.956672,'ClONO2_2':98.953723,
'NO+_1':29.997989,
'HOBr_1':95.921076,'HOBr_2':97.919027,
'C2H4_1':28.0313,'C2H4_2':29.034655,
'CH3OH_1':32.026215,
'CH3Br_1':93.941811,'CH3Br_2':95.939764,
'CH3CN_1':41.026549,
'CF4_1':87.993616,
'C4H2_1':50.01565,
'HC3N_1':51.010899,
'H2_1':2.01565,'H2_2':3.021825,
'CS_1':43.971036,'CS_2':45.966787,'CS_3':44.974368,'CS_4':44.970399,
'SO3_1':79.95682,
'C2N2_1':52.006148,
'COCl2_1':97.9326199796,'COCl2_2':99.9296698896,
'CS2_1':75.94414,'CS2_2':77.93994,'CS2_3':76.943256,'CS2_4':76.947495}
return mass[mol_isot_code]
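if __name__ == '__main__':
    # Illustrative usage; the expected values are the ones stored in the mass
    # table above.
    print(get_molmass('H2O'))    # 18.010565 (most abundant isotopologue)
    print(get_molmass('CO', 2))  # 28.99827 (second isotopologue)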
|
[
"numpy.sqrt"
] |
[((945, 973), 'numpy.sqrt', 'np.sqrt', (['(solid_angle / np.pi)'], {}), '(solid_angle / np.pi)\n', (952, 973), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from termcolor import colored
import logging
import torch.nn as nn
import torch.utils.data
log = logging.getLogger(__name__)
import torch
import numpy as np
import math
class Dataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.dataset = [
(torch.FloatTensor(x[i]), torch.FloatTensor(y[i])) for i in range(len(x))
]
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx]
class Dynamics(nn.Module):
def __init__(self,env):
super(Dynamics, self).__init__()
self.env=env
self.dt = env.dt
self.model_cfg = {}
self.model_cfg['device'] = 'cpu'
self.model_cfg['hidden_size'] = [100, 30]
self.model_cfg['batch_size'] = 128
self.model_cfg['epochs'] = 500
self.model_cfg['display_epoch'] = 50
self.model_cfg['learning_rate'] = 0.001
self.model_cfg['ensemble_size'] = 3
self.model_cfg['state_dim'] = env.state_dim
self.model_cfg['action_dim'] = env.action_dim
self.model_cfg['output_dim'] = env.pos_dim
self.ensemble = EnsembleProbabilisticModel(self.model_cfg)
self.data_X = []
self.data_Y = []
self.norm_in = torch.Tensor(np.expand_dims(np.array([1.0,1.0,8.0,8.0,1.0,1.0]),axis=0))
def train(self,states,actions):
inputs = (torch.cat((states[:-1],actions),dim=1)/self.norm_in).detach().numpy()
outputs = (states[1:,self.env.pos_dim:] - states[:-1,self.env.pos_dim:]).detach().numpy()
self.data_X+=list(inputs)
self.data_Y+=list(outputs)
training_dataset = {}
training_dataset['X'] = np.array(self.data_X)
training_dataset['Y'] = np.array(self.data_Y)
#self.ensemble = EnsembleProbabilisticModel(self.model_cfg)
self.ensemble.train_model(training_dataset, training_dataset, 0.0)
def step_model(self,state,action):
input_x = torch.cat((state,action),dim=0)/self.norm_in
pred_acc = self.ensemble.forward(input_x)[0].squeeze()
#numerically integrate predicted acceleration to velocity and position
pred_vel = state[self.env.pos_dim:]+pred_acc
pred_pos = state[:self.env.pos_dim] + pred_vel*self.dt
pred_pos = torch.clamp(pred_pos, min=-3.0, max=3.0)
pred_vel = torch.clamp(pred_vel, min=-4.0, max=4.0)
next_state = torch.cat((pred_pos.squeeze(),pred_vel.squeeze()),dim=0)
return next_state.squeeze()
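
# Illustrative sketch of driving Dynamics with a dummy environment. The
# attribute values below are assumptions chosen to match norm_in's six entries
# (state_dim + action_dim == 6) and pos_dim == state_dim // 2; they are not
# taken from any particular env.
def _demo_dynamics():
    from types import SimpleNamespace
    dummy_env = SimpleNamespace(dt=0.05, state_dim=4, action_dim=2, pos_dim=2)
    dyn = Dynamics(dummy_env)
    state = torch.rand(4)
    action = torch.rand(2)
    # dyn.train(states, actions) would fit the ensemble on observed transitions;
    # step_model alone pushes the (here untrained) ensemble one step forward.
    next_state = dyn.step_model(state, action)
    print(next_state.shape)  # torch.Size([4])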
# I did not make this inherit from nn.Module, because our GP implementation is not torch based
class AbstractModel(object):
# def forward(self, x):
# raise NotImplementedError("Subclass must implement")
def train_model(self, training_dataset, testing_dataset, training_params):
raise NotImplementedError("Subclass must implement")
# function that (if necessary) converts between numpy input x and torch, and returns a prediction in numpy
def predict_np(self, x):
raise NotImplementedError("Subclass must implement")
def get_input_size(self):
raise NotImplementedError("Subclass must implement")
def get_output_size(self):
raise NotImplementedError("Subclass must implement")
def get_hyperparameters(self):
return None
class Dataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.dataset = [
(torch.FloatTensor(x[i]), torch.FloatTensor(y[i])) for i in range(len(x))
]
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx]
# creates K datasets out of X and Y
# if N is the total number of data points, then this function splits it in to K subsets. and each dataset contains K-1
# subsets.
# so let's say K=5. We create 5 subsets.
# Each datasets contains 4 out of the 5 datasets, by leaving out one of the K subsets.
def split_to_subsets(X, Y, K):
if K == 1:
# for 1 split, do not resshuffle dataset
return [Dataset(X, Y)]
n_data = len(X)
chunk_sz = int(math.ceil(n_data / K))
all_idx = np.random.permutation(n_data)
datasets = []
# each dataset contains
for i in range(K):
start_idx = i * (chunk_sz)
end_idx = min(start_idx + chunk_sz, n_data)
dataset_idx = np.delete(all_idx, range(start_idx, end_idx), axis=0)
X_subset = [X[idx] for idx in dataset_idx]
Y_subset = [Y[idx] for idx in dataset_idx]
datasets.append(Dataset(X_subset, Y_subset))
return datasets
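
def _demo_split():
    # Illustrative only: splitting 10 toy samples into K=3 leave-one-chunk-out
    # subsets (chunk size ceil(10/3)=4). Each subset holds the data minus one
    # chunk, so the ensemble members see slightly different training data.
    X_toy = np.random.rand(10, 6).tolist()
    Y_toy = np.random.rand(10, 2).tolist()
    subsets = split_to_subsets(X_toy, Y_toy, 3)
    print([len(ds) for ds in subsets])  # [6, 6, 8]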
class NLLLoss(torch.nn.modules.loss._Loss):
"""
Specialized NLL loss used to predict both mean (the actual function) and the variance of the input data.
"""
def __init__(self, size_average=None, reduce=None, reduction="mean"):
super(NLLLoss, self).__init__(size_average, reduce, reduction)
def forward(self, net_output, target):
assert net_output.dim() == 3
assert net_output.size(0) == 2
mean = net_output[0]
var = net_output[1]
reduction = "mean"
ret = 0.5 * torch.log(var) + 0.5 * ((mean - target) ** 2) / var
# ret = 0.5 * ((mean - target) ** 2)
if reduction != "none":
ret = torch.mean(ret) if reduction == "mean" else torch.sum(ret)
return ret
class EnsembleProbabilisticModel(AbstractModel):
def __init__(self, model_cfg):
super(EnsembleProbabilisticModel, self).__init__()
self.input_dimension = model_cfg['state_dim'] + model_cfg['action_dim']
# predicting velocity only (second half of state space)
assert model_cfg['state_dim'] % 2 == 0
self.output_dimension = model_cfg['state_dim'] // 2
if model_cfg['device'] == "gpu":
self.device = model_cfg['gpu_name']
else:
self.device = "cpu"
self.ensemble_size = model_cfg['ensemble_size']
self.model_cfg = model_cfg
self.reset()
def reset(self):
self.models = [PModel(self.model_cfg) for _ in range(self.ensemble_size)]
def forward(self, x):
x = torch.Tensor(x)
means = []
variances = []
for eid in range(self.ensemble_size):
mean_and_var = self.models[eid](x)
means.append(mean_and_var[0])
variances.append(mean_and_var[1])
mean = sum(means) / len(means)
dum = torch.zeros_like(variances[0])
for i in range(len(means)):
dum_var2 = variances[i]
dum_mean2 = means[i] * means[i]
dum += dum_var2 + dum_mean2
var = (dum / len(means)) - (mean * mean)
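        # This is the standard uniform-mixture identity
        # Var[x] = E[var_i + mean_i^2] - (E[mean_i])^2, i.e. the variance of an equally
        # weighted mixture of the ensemble members' Gaussians.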
        # Clipping the variance to a minimum of 1e-3; we can interpret this as saying we
        # expect a minimum level of noise. The clipping here is probably redundant now,
        # because variance is already clipped at the individual-model level.
var = var.clamp_min(1e-3)
return torch.stack((mean, var))
def predict_np(self, x_np):
x = torch.Tensor(x_np)
pred = self.forward(x).detach().cpu().numpy()
return pred[0].squeeze(), pred[1].squeeze()
def train_model(self, training_dataset, testing_dataset, training_params):
X = training_dataset["X"]
Y = training_dataset["Y"]
datasets = split_to_subsets(X, Y, self.ensemble_size)
for m in range(self.ensemble_size):
print(colored("training model={}".format(m), "green"))
self.models[m].train_model(datasets[m])
def get_gradient(self, x_np):
x = torch.Tensor(x_np).requires_grad_()
output_mean, _ = self.forward(x)
gradients = []
# get gradients of ENN with respect to x and u
for output_dim in range(self.output_dimension):
grads = torch.autograd.grad(
output_mean[0, output_dim], x, create_graph=True
)[0].data
gradients.append(grads.detach().cpu().numpy()[0, :])
return np.array(gradients).reshape(
[self.output_dimension, self.input_dimension]
)
def get_input_size(self):
return self.input_dimension
def get_output_size(self):
return self.output_dimension
    def get_hyperparameters(self):
return None
class PModel(nn.Module):
"""
    Probabilistic network.
    Outputs a 3D tensor:
    d0 : always 2; the first element is the mean and the second is the variance
d1 : batch size
d2 : output size (number of dimensions in the output of the modeled function)
"""
def __init__(self, config):
super(PModel, self).__init__()
if config["device"] == "gpu":
self.device = config["gpu_name"]
else:
self.device = "cpu"
self.input_sz = config['state_dim'] + config['action_dim']
self.output_sz = config['output_dim']
self.learning_rate = config["learning_rate"]
self.display_epoch = config["display_epoch"]
self.epochs = config["epochs"]
w = config["hidden_size"]
self.layers = nn.Sequential(
nn.Linear(self.input_sz, w[0]),
nn.Tanh(),
nn.Linear(w[0], w[1]),
nn.Tanh(),
)
self.mean = nn.Linear(w[1], self.output_sz)
self.var = nn.Sequential(nn.Linear(w[1], self.output_sz), nn.Softplus())
self.to(self.device)
def forward(self, x):
x = x.to(device=self.device)
assert x.dim() == 2, "Expected 2 dimensional input, got {}".format(x.dim())
assert x.size(1) == self.input_sz
y = self.layers(x)
mean_p = self.mean(y)
var_p = self.var(y)
        # Clipping the variance to a minimum of 1e-3; we can interpret this as saying we
        # expect a minimum level of noise.
var_p = var_p.clamp_min(1e-3)
return torch.stack((mean_p, var_p))
def predict_np(self, x_np):
x = torch.Tensor(x_np)
pred = self.forward(x).detach().cpu().numpy()
return pred[0].squeeze(), pred[1].squeeze()
def train_model(self, training_data):
train_loader = torch.utils.data.DataLoader(
training_data, batch_size=64, num_workers=0
)
optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)
loss_fn = NLLLoss()
for epoch in range(self.epochs):
losses = []
for batch, (data, target) in enumerate(
train_loader, 1
): # This is the training loader
x = data.type(torch.FloatTensor).to(device=self.device)
y = target.type(torch.FloatTensor).to(device=self.device)
if x.dim() == 1:
x = x.unsqueeze(0).t()
if y.dim() == 1:
y = y.unsqueeze(0).t()
py = self.forward(x)
loss = loss_fn(py, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses.append(loss.item())
if epoch % self.display_epoch == 0:
print(
colored(
"epoch={}, loss={}".format(epoch, np.mean(losses)), "yellow"
)
)
|
[
"logging.getLogger",
"torch.nn.Tanh",
"numpy.array",
"torch.sum",
"numpy.mean",
"torch.mean",
"torch.zeros_like",
"numpy.random.permutation",
"torch.Tensor",
"torch.autograd.grad",
"torch.cat",
"torch.clamp",
"torch.nn.Softplus",
"math.ceil",
"torch.log",
"torch.stack",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.FloatTensor"
] |
[((169, 196), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (186, 196), False, 'import logging\n'), ((4210, 4239), 'numpy.random.permutation', 'np.random.permutation', (['n_data'], {}), '(n_data)\n', (4231, 4239), True, 'import numpy as np\n'), ((1774, 1795), 'numpy.array', 'np.array', (['self.data_X'], {}), '(self.data_X)\n', (1782, 1795), True, 'import numpy as np\n'), ((1828, 1849), 'numpy.array', 'np.array', (['self.data_Y'], {}), '(self.data_Y)\n', (1836, 1849), True, 'import numpy as np\n'), ((2376, 2416), 'torch.clamp', 'torch.clamp', (['pred_pos'], {'min': '(-3.0)', 'max': '(3.0)'}), '(pred_pos, min=-3.0, max=3.0)\n', (2387, 2416), False, 'import torch\n'), ((2436, 2476), 'torch.clamp', 'torch.clamp', (['pred_vel'], {'min': '(-4.0)', 'max': '(4.0)'}), '(pred_vel, min=-4.0, max=4.0)\n', (2447, 2476), False, 'import torch\n'), ((4173, 4194), 'math.ceil', 'math.ceil', (['(n_data / K)'], {}), '(n_data / K)\n', (4182, 4194), False, 'import math\n'), ((6203, 6218), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (6215, 6218), False, 'import torch\n'), ((6496, 6526), 'torch.zeros_like', 'torch.zeros_like', (['variances[0]'], {}), '(variances[0])\n', (6512, 6526), False, 'import torch\n'), ((7031, 7055), 'torch.stack', 'torch.stack', (['(mean, var)'], {}), '((mean, var))\n', (7042, 7055), False, 'import torch\n'), ((7101, 7119), 'torch.Tensor', 'torch.Tensor', (['x_np'], {}), '(x_np)\n', (7113, 7119), False, 'import torch\n'), ((9311, 9342), 'torch.nn.Linear', 'nn.Linear', (['w[1]', 'self.output_sz'], {}), '(w[1], self.output_sz)\n', (9320, 9342), True, 'import torch.nn as nn\n'), ((9911, 9939), 'torch.stack', 'torch.stack', (['(mean_p, var_p)'], {}), '((mean_p, var_p))\n', (9922, 9939), False, 'import torch\n'), ((9985, 10003), 'torch.Tensor', 'torch.Tensor', (['x_np'], {}), '(x_np)\n', (9997, 10003), False, 'import torch\n'), ((10176, 10248), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['training_data'], {'batch_size': '(64)', 'num_workers': '(0)'}), '(training_data, batch_size=64, num_workers=0)\n', (10203, 10248), False, 'import torch\n'), ((2052, 2085), 'torch.cat', 'torch.cat', (['(state, action)'], {'dim': '(0)'}), '((state, action), dim=0)\n', (2061, 2085), False, 'import torch\n'), ((9167, 9197), 'torch.nn.Linear', 'nn.Linear', (['self.input_sz', 'w[0]'], {}), '(self.input_sz, w[0])\n', (9176, 9197), True, 'import torch.nn as nn\n'), ((9211, 9220), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (9218, 9220), True, 'import torch.nn as nn\n'), ((9234, 9255), 'torch.nn.Linear', 'nn.Linear', (['w[0]', 'w[1]'], {}), '(w[0], w[1])\n', (9243, 9255), True, 'import torch.nn as nn\n'), ((9269, 9278), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (9276, 9278), True, 'import torch.nn as nn\n'), ((9376, 9407), 'torch.nn.Linear', 'nn.Linear', (['w[1]', 'self.output_sz'], {}), '(w[1], self.output_sz)\n', (9385, 9407), True, 'import torch.nn as nn\n'), ((9409, 9422), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (9420, 9422), True, 'import torch.nn as nn\n'), ((353, 376), 'torch.FloatTensor', 'torch.FloatTensor', (['x[i]'], {}), '(x[i])\n', (370, 376), False, 'import torch\n'), ((378, 401), 'torch.FloatTensor', 'torch.FloatTensor', (['y[i]'], {}), '(y[i])\n', (395, 401), False, 'import torch\n'), ((1370, 1410), 'numpy.array', 'np.array', (['[1.0, 1.0, 8.0, 8.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 8.0, 8.0, 1.0, 1.0])\n', (1378, 1410), True, 'import numpy as np\n'), ((3505, 3528), 'torch.FloatTensor', 'torch.FloatTensor', (['x[i]'], {}), 
'(x[i])\n', (3522, 3528), False, 'import torch\n'), ((3530, 3553), 'torch.FloatTensor', 'torch.FloatTensor', (['y[i]'], {}), '(y[i])\n', (3547, 3553), False, 'import torch\n'), ((5190, 5204), 'torch.log', 'torch.log', (['var'], {}), '(var)\n', (5199, 5204), False, 'import torch\n'), ((5338, 5353), 'torch.mean', 'torch.mean', (['ret'], {}), '(ret)\n', (5348, 5353), False, 'import torch\n'), ((5382, 5396), 'torch.sum', 'torch.sum', (['ret'], {}), '(ret)\n', (5391, 5396), False, 'import torch\n'), ((7649, 7667), 'torch.Tensor', 'torch.Tensor', (['x_np'], {}), '(x_np)\n', (7661, 7667), False, 'import torch\n'), ((8069, 8088), 'numpy.array', 'np.array', (['gradients'], {}), '(gradients)\n', (8077, 8088), True, 'import numpy as np\n'), ((7880, 7949), 'torch.autograd.grad', 'torch.autograd.grad', (['output_mean[0, output_dim]', 'x'], {'create_graph': '(True)'}), '(output_mean[0, output_dim], x, create_graph=True)\n', (7899, 7949), False, 'import torch\n'), ((1473, 1513), 'torch.cat', 'torch.cat', (['(states[:-1], actions)'], {'dim': '(1)'}), '((states[:-1], actions), dim=1)\n', (1482, 1513), False, 'import torch\n'), ((11253, 11268), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (11260, 11268), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
# @Time : 24/10/18 2:40 PM
# @Author : <NAME>
# @FileName: plot_result.py
# @Software: PyCharm
# @Github : https://github.com/hzm2016
"""
import collections
import matplotlib.pyplot as plt
import numpy as np
import pickle
import copy as cp
from baselines.deepq.assembly.src.value_functions import *
"""=================================Plot result====================================="""
# YLABEL = ['$F_x(N)$', '$F_y(N)$', '$F_z(N)$', '$M_x(Nm)$', '$M_y(Nm)$', '$M_z(Nm)$']
YLABEL = ['$F_x$(N)', '$F_y$(N)', '$F_z$(N)', '$M_x$(Nm)', '$M_y$(Nm)', '$M_z$(Nm)']
Title = ["X axis force", "Y axis force", "Z axis force",
"X axis moment", "Y axis moment", "Z axis moment"]
High = np.array([40, 40, 0, 5, 5, 5, 542, -36, 188, 5, 5, 5])
Low = np.array([-40, -40, -40, -5, -5, -5, 538, -42, 192, -5, -5, -5])
scale = np.array([40, 40, 40, 5, 5, 5])
"""================================================================================="""
plt.rcParams['font.sans-serif'] = ['SimHei']  # use a font that can render the Chinese axis labels
plt.rcParams['axes.unicode_minus'] = False
def plot(result_path):
plt.figure(figsize=(15, 15), dpi=100)
plt.title('Search Result')
prediction_result = np.load(result_path)
    # plot each of the six force/moment channels over the full trajectory
    for j in range(6):
        line = prediction_result[:, j]
        # plt.subplot(2, 3, j+1)
        plt.plot(line)
        plt.ylabel(YLABEL[j])
        plt.xlabel('steps')
    plt.legend(YLABEL)
    plt.show()
def plot_force_and_moment(path_2, path_3):
V_force = np.load(path_2)
V_state = np.load(path_3)
plt.figure(figsize=(15, 10), dpi=100)
plt.title("Search Result of Force", fontsize=20)
plt.plot(V_force[:100])
plt.xlabel("Steps", fontsize=20)
plt.ylabel("F(N)", fontsize=20)
plt.legend(labels=['Fx', 'Fy', 'Fz', 'Mx', 'My', 'Mz'], loc='best', fontsize=20)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.figure(figsize=(15, 10), dpi=100)
plt.title("Search Result of State", fontsize=20)
plt.plot(V_state[:100] - [539.88427, -38.68679, 190.03184, 179.88444, 1.30539, 0.21414])
plt.xlabel("Steps", fontsize=20)
plt.ylabel("Coordinate", fontsize=20)
plt.legend(labels=['x', 'y', 'z', 'rx', 'ry', 'rz'], loc='best', fontsize=20)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.show()
def plot_reward(reward_path):
reward = np.load(reward_path)
print(reward[0])
plt.figure(figsize=(15, 15), dpi=100)
plt.title('Episode Reward')
plt.plot(np.arange(len(reward) - 1), np.array(reward[1:]))
plt.ylabel('Episode Reward')
plt.xlabel('Episodes')
plt.show()
def plot_raw_data(path_1):
data = np.load(path_1)
force_m = np.zeros((len(data), 12))
plt.figure(figsize=(20, 20), dpi=100)
plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0)
plt.subplots_adjust(left=0.065, bottom=0.1, right=0.995, top=0.9, wspace=0.2, hspace=0.2)
plt.title("True Data")
for j in range(len(data)):
force_m[j] = data[j, 0]
k = -1
for i in range(len(data)):
if data[i, 1] == 0:
print("===========================================")
line = force_m[k+1:i+1]
print(line)
k = i
for j in range(6):
plt.subplot(2, 3, j + 1)
plt.plot(line[:, j])
# plt.plot(line[:, 0])
if j == 1:
plt.ylabel(YLABEL[j], fontsize=17.5)
plt.xlabel('steps', fontsize=20)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
else:
plt.ylabel(YLABEL[j], fontsize=20)
plt.xlabel('steps', fontsize=20)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
i += 1
def plot_continuous_data(path):
raw_data = np.load(path)
plt.figure(figsize=(20, 15))
plt.title('Episode Reward')
plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0)
plt.subplots_adjust(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23, hspace=0.22)
# plt.subplots_adjust(left=0.065, bottom=0.1, right=0.995, top=0.9, wspace=0.2, hspace=0.2)
data = np.zeros((len(raw_data), 12))
for j in range(len(raw_data)):
data[j] = raw_data[j, 0]
for j in range(6):
plt.subplot(2, 3, j + 1)
plt.plot(data[:, j]*scale[j], linewidth=2.5)
# plt.ylabel(YLABEL[j], fontsize=18)
if j>2:
plt.xlabel('steps', fontsize=30)
plt.title(YLABEL[j], fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
# plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.4, hspace=0.2)
plt.savefig('raw_data.pdf')
plt.show()
def compute_true_return(path):
raw_data = np.load(path)
# print(raw_data)
clock = 0
G = 0.
past_gammas = []
past_cumulants = []
all_G = []
for i in range(len(raw_data)):
observation, action, done, action_probability = raw_data[i]
if done == False:
gamma = 0.99
else:
gamma = 0.
past_gammas.append(gamma)
past_cumulants.append(1)
if done == False:
clock += 1
G = 0
all_G.append(cp.deepcopy(G))
else:
print('clock', clock)
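            # the loop below accumulates the return backwards over the finished
            # episode: G <- gamma_j * G + c_j for j = clock, ..., 0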
for j in reversed(range(0, clock + 1)):
G *= past_gammas[j]
G += past_cumulants[j]
all_G.append(cp.deepcopy(G))
clock = 0
past_cumulants = []
past_gammas = []
print(len(raw_data))
plt.figure(figsize=(20, 15))
plt.plot(all_G[300:400])
plt.show()
return all_G
# Plot the true prediction and true value
def plot_different_gamma_data(path):
f = open(path, 'rb')
    titles = [r'$\gamma = 0.4$', r'$\gamma = 0.8$', r'$\gamma = 0.96$', r'$\gamma = 1.0$']
# true_data = compute_true_return('prediction_result_different_gamma.npy')
# f = open('../data/learning_result_policy', 'rb')
# plot_value_functions = ['Move down Fy', 'Move down Fx', 'Move down Fz', 'Move down Mx', 'Move down My', 'Move down Mz']
plot_value_functions = ['Move down step', 'Move down step 2', 'Move down step 3', 'Move down step 4']
# plot_value_functions = ['Move down Fx', 'Move down Fx 1', 'Move down Fx 2', 'Move down Fx 3']
raw_data = pickle.load(f)
plt.figure(figsize=(20, 15))
plt.tight_layout(pad=3, w_pad=1., h_pad=0.5)
plt.subplots_adjust(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23, hspace=0.23)
# legend = sorted([key for key in plot_value_functions.keys()])
# print(legend)
# print(value_functions.keys())
for j, key in enumerate(plot_value_functions):
plt.subplot(2, 2, j + 1)
# print(list(raw_data[('GTD(1)', 'Hindsight Error')][key]))
# plt.plot(np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[:], linewidth=2.5)
# plt.plot(true_data[300:])
plt.plot(np.array(raw_data[('GTD(0)', 'UDE')][key])[600:], linewidth=2.75)
# print('true value', np.array(raw_data[('GTD(0)', 'UDE')][key])[300:400])
# plt.plot(np.array(raw_data[('GTD(0)', 'TD Error')][key])[600:], linewidth=2.5)
# print('old prediction', np.array(raw_data[('GTD(0)', 'TD Error')][key])[300:400])
plt.plot(np.array(raw_data[('GTD(0)', 'Prediction')][key])[600:], linewidth=2.75)
# plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[300:] - np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[300:], linewidth=2.5)
# plt.legend('True value', 'Prediction value')
plt.title(titles[j], fontsize=30)
if j > 1:
plt.xlabel('steps', fontsize=30)
plt.ylabel('Number of steps', fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
# plt.savefig('different_gamma.pdf')
plt.show()
# Plot the true prediction and true value
def chinese_plot_different_gamma_data(path):
f = open(path, 'rb')
    titles = [r'$\gamma = 0.4$', r'$\gamma = 0.8$', r'$\gamma = 0.96$', r'$\gamma = 1.0$']
# true_data = compute_true_return('prediction_result_different_gamma.npy')
# f = open('../data/learning_result_policy', 'rb')
# plot_value_functions = ['Move down Fy', 'Move down Fx', 'Move down Fz', 'Move down Mx', 'Move down My', 'Move down Mz']
plot_value_functions = ['Move down step', 'Move down step 2', 'Move down step 3', 'Move down step 4']
# plot_value_functions = ['Move down Fx', 'Move down Fx 1', 'Move down Fx 2', 'Move down Fx 3']
raw_data = pickle.load(f)
plt.figure(figsize=(20, 12), dpi=1000)
plt.tight_layout(pad=3, w_pad=1., h_pad=0.5)
plt.subplots_adjust(left=0.08, bottom=0.12, right=0.98, top=0.95, wspace=0.23, hspace=0.33)
# legend = sorted([key for key in plot_value_functions.keys()])
# print(legend)
# print(value_functions.keys())
for j, key in enumerate(plot_value_functions):
plt.subplot(2, 2, j + 1)
# print(list(raw_data[('GTD(1)', 'Hindsight Error')][key]))
# plt.plot(np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[:], linewidth=2.5)
# plt.plot(true_data[300:])
plt.plot(np.array(raw_data[('GTD(0)', 'UDE')][key])[600:], linewidth=2.75)
# print('true value', np.array(raw_data[('GTD(0)', 'UDE')][key])[300:400])
# plt.plot(np.array(raw_data[('GTD(0)', 'TD Error')][key])[600:], linewidth=2.5)
# print('old prediction', np.array(raw_data[('GTD(0)', 'TD Error')][key])[300:400])
plt.plot(np.array(raw_data[('GTD(0)', 'Prediction')][key])[600:], linewidth=2.75)
# plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[300:] - np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[300:], linewidth=2.5)
# plt.legend('True value', 'Prediction value')
plt.title(titles[j], fontsize=36)
if j > 1:
            plt.xlabel('搜索步数', fontsize=36)  # '搜索步数' = "search steps"
        plt.ylabel('预测周期', fontsize=36)  # '预测周期' = "prediction period"
plt.xticks([0, 50, 100, 150, 200], fontsize=36)
plt.yticks(fontsize=36)
plt.savefig('./figure/pdf/chinese_different_gamma.pdf')
# plt.show()
def chinese_plot_compare_raw_data(path1, path2):
raw_data = np.load(path1)
raw_data_1 = np.load(path2)
plt.figure(figsize=(20, 12), dpi=1000)
plt.title('Episode Reward')
plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0)
plt.subplots_adjust(left=0.08, bottom=0.08, right=0.98, top=0.95, wspace=0.33, hspace=0.15)
data = np.zeros((len(raw_data), 12))
for j in range(len(raw_data)):
data[j] = raw_data[j, 0]
data_1 = np.zeros((len(raw_data_1), 12))
for j in range(len(raw_data_1)):
data_1[j] = raw_data_1[j, 0]
for j in range(6):
plt.subplot(2, 3, j + 1)
plt.plot(data[:100, j], linewidth=2.5, color='r', linestyle='--')
plt.plot(data_1[:100, j], linewidth=2.5, color='b')
# plt.ylabel(YLABEL[j], fontsize=18)
if j>2:
plt.xlabel('搜索步数', fontsize=38)
plt.title(YLABEL[j], fontsize=38)
plt.xticks(fontsize=38)
plt.yticks(fontsize=38)
# plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.4, hspace=0.2)
plt.savefig('./figure/pdf/chinese_raw_data.pdf')
# plt.show()
# Plot the true prediction and true value
def chinese_plot_different_policy_data(path, name):
f = open(path, 'rb')
# true_data = compute_true_return('prediction_result_different_gamma.npy')
# f = open('../data/learning_result_policy', 'rb')
plot_value_functions = ['Move down Fx', 'Move down Fy', 'Move down Fz', 'Move down Mx', 'Move down My', 'Move down Mz']
# plot_value_functions = ['Move down step', 'Move down step 2', 'Move down step 3', 'Move down step 4']
# plot_value_functions = ['Move down Fx', 'Move down Fx 1', 'Move down Fx 2', 'Move down Fx 3']
raw_data = pickle.load(f)
plt.figure(figsize=(20, 12), dpi=1000)
plt.title('Episode Reward')
plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0)
plt.subplots_adjust(left=0.1, bottom=0.1, right=0.98, top=0.95, wspace=0.33, hspace=0.25)
# plt.subplots_adjust(left=0.1, bottom=0.12, right=0.98, top=0.94, wspace=0.23, hspace=0.33)
# legend = sorted([key for key in plot_value_functions.keys()])
# print(legend)
# print(value_functions.keys())
for j, key in enumerate(plot_value_functions):
plt.subplot(2, 3, j + 1)
# print(list(raw_data[('GTD(1)', 'Hindsight Error')][key]))
# plt.plot(np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[400:]*scale[j], linewidth=2.5)
# plt.plot(true_data[300:])
plt.plot(np.array(raw_data[('GTD(1)', 'UDE')][key])[1000:]*scale[j], linewidth=2.5)
# print('true value', np.array(raw_data[('GTD(0)', 'UDE')][key])[300:400])
# plt.plot(np.array(raw_data[('GTD(0)', 'TD Error')][key])[600:], linewidth=2.5, color='r')
# print('old prediction', np.array(raw_data[('GTD(0)', 'TD Error')][key])[300:400])
plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[1000:]*scale[j], linewidth=2.5)
# plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[300:] - np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[300:], linewidth=2.5)
# plt.legend('True value', 'Prediction value')
plt.title(YLABEL[j], fontsize=38)
if j > 2:
plt.xlabel('搜索步数', fontsize=38)
plt.xticks([0, 50, 100, 150, 200], fontsize=38)
plt.yticks(fontsize=38)
plt.savefig('./figure/pdf/chinese_' + name +'.pdf')
# plt.show()
# Plot the true prediction and true value
def plot_different_policy_data(path):
f = open(path, 'rb')
# true_data = compute_true_return('prediction_result_different_gamma.npy')
# f = open('../data/learning_result_policy', 'rb')
plot_value_functions = ['Move down Fx', 'Move down Fy', 'Move down Fz', 'Move down Mx', 'Move down My', 'Move down Mz']
# plot_value_functions = ['Move down step', 'Move down step 2', 'Move down step 3', 'Move down step 4']
# plot_value_functions = ['Move down Fx', 'Move down Fx 1', 'Move down Fx 2', 'Move down Fx 3']
raw_data = pickle.load(f)
plt.figure(figsize=(20, 12), dpi=1000)
plt.title('Episode Reward')
plt.tight_layout(pad=3, w_pad=1.0, h_pad=1.0)
plt.subplots_adjust(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23, hspace=0.23)
# legend = sorted([key for key in plot_value_functions.keys()])
# print(legend)
# print(value_functions.keys())
for j, key in enumerate(plot_value_functions):
plt.subplot(2, 3, j + 1)
# print(list(raw_data[('GTD(1)', 'Hindsight Error')][key]))
# plt.plot(np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[400:]*scale[j], linewidth=2.5)
# plt.plot(true_data[300:])
plt.plot(np.array(raw_data[('GTD(1)', 'UDE')][key])[1000:]*scale[j], linewidth=2.5)
# print('true value', np.array(raw_data[('GTD(0)', 'UDE')][key])[300:400])
# plt.plot(np.array(raw_data[('GTD(0)', 'TD Error')][key])[600:], linewidth=2.5, color='r')
# print('old prediction', np.array(raw_data[('GTD(0)', 'TD Error')][key])[300:400])
plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[1000:]*scale[j], linewidth=2.5)
# plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[300:] - np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[300:], linewidth=2.5)
# plt.legend('True value', 'Prediction value')
plt.title(YLABEL[j], fontsize=30)
if j > 2:
plt.xlabel('steps', fontsize=30)
plt.xticks([0, 50, 100, 150, 200], fontsize=25)
plt.yticks(fontsize=25)
plt.savefig('./figure/pdf/chinese_different_policies_b.pdf')
# plt.show()
if __name__ == "__main__":
# force = np.load('./search_force.npy')
# state = np.load('./search_state.npy')
# print(np.max(force, axis=0))
# print(np.min(force, axis=0))
# print(np.max(state, axis=0))
# print(np.min(state, axis=0))
# plot('./search_state.npy')
# plot('./search_force.npy')
# plot_reward('./episode_rewards.npy')
# data = np.load('prediction_result.npy')
# print(data[:, 2])
# plot_continuous_data('prediction_result_different_gamma_six_force.npy')
# f = open('../data/learning_result', 'rb')
# y = pickle.load(f)
# data = y[('GTD(1)', 'Hindsight Error')]['Move down Fz']
# print(data)
# plt.figure(figsize=(15, 15), dpi=100)
# plt.title('Search Result')
#
# plt.plot(data)
# plt.ylabel(YLABEL[0])
# plt.xlabel('steps')
# plt.legend(YLABEL)
# plt.show()
# compute_true_return('prediction_result_different_gamma.npy')
# plot_true_data('learning_result_six_force_gamma_0.9')
# plot_true_data('learning_result_different_gamma')
# plot_different_gamma_data('learning_result_different_policy')
"""=============================== plot different policy ===================================== """
# plot_different_policy_data('learning_result_six_force_gamma_0.9')
# chinese_plot_different_policy_data('learning_result_six_force_gamma_0.9')
# plot_different_policy_data('learning_result_different_policy_new_3')
chinese_plot_different_policy_data('learning_result_different_policy_new_3', 'off_policy_3')
# chinese_plot_different_policy_data('learning_result_different_policy')
# chinese_plot_different_policy_data('learning_result_different_policy')
"""=============================== plot different gamma ======================================== """
# plot_different_gamma_data('learning_result_different_gamma_new')
# chinese_plot_different_gamma_data('learning_result_different_gamma_new')
|
[
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"pickle.load",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"copy.deepcopy",
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((719, 773), 'numpy.array', 'np.array', (['[40, 40, 0, 5, 5, 5, 542, -36, 188, 5, 5, 5]'], {}), '([40, 40, 0, 5, 5, 5, 542, -36, 188, 5, 5, 5])\n', (727, 773), True, 'import numpy as np\n'), ((780, 844), 'numpy.array', 'np.array', (['[-40, -40, -40, -5, -5, -5, 538, -42, 192, -5, -5, -5]'], {}), '([-40, -40, -40, -5, -5, -5, 538, -42, 192, -5, -5, -5])\n', (788, 844), True, 'import numpy as np\n'), ((853, 884), 'numpy.array', 'np.array', (['[40, 40, 40, 5, 5, 5]'], {}), '([40, 40, 40, 5, 5, 5])\n', (861, 884), True, 'import numpy as np\n'), ((1086, 1123), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)', 'dpi': '(100)'}), '(figsize=(15, 15), dpi=100)\n', (1096, 1123), True, 'import matplotlib.pyplot as plt\n'), ((1128, 1154), 'matplotlib.pyplot.title', 'plt.title', (['"""Search Result"""'], {}), "('Search Result')\n", (1137, 1154), True, 'import matplotlib.pyplot as plt\n'), ((1179, 1199), 'numpy.load', 'np.load', (['result_path'], {}), '(result_path)\n', (1186, 1199), True, 'import numpy as np\n'), ((1479, 1489), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1487, 1489), True, 'import matplotlib.pyplot as plt\n'), ((1550, 1565), 'numpy.load', 'np.load', (['path_2'], {}), '(path_2)\n', (1557, 1565), True, 'import numpy as np\n'), ((1580, 1595), 'numpy.load', 'np.load', (['path_3'], {}), '(path_3)\n', (1587, 1595), True, 'import numpy as np\n'), ((1601, 1638), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)', 'dpi': '(100)'}), '(figsize=(15, 10), dpi=100)\n', (1611, 1638), True, 'import matplotlib.pyplot as plt\n'), ((1643, 1691), 'matplotlib.pyplot.title', 'plt.title', (['"""Search Result of Force"""'], {'fontsize': '(20)'}), "('Search Result of Force', fontsize=20)\n", (1652, 1691), True, 'import matplotlib.pyplot as plt\n'), ((1696, 1719), 'matplotlib.pyplot.plot', 'plt.plot', (['V_force[:100]'], {}), '(V_force[:100])\n', (1704, 1719), True, 'import matplotlib.pyplot as plt\n'), ((1724, 1756), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Steps"""'], {'fontsize': '(20)'}), "('Steps', fontsize=20)\n", (1734, 1756), True, 'import matplotlib.pyplot as plt\n'), ((1761, 1792), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F(N)"""'], {'fontsize': '(20)'}), "('F(N)', fontsize=20)\n", (1771, 1792), True, 'import matplotlib.pyplot as plt\n'), ((1797, 1882), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labels': "['Fx', 'Fy', 'Fz', 'Mx', 'My', 'Mz']", 'loc': '"""best"""', 'fontsize': '(20)'}), "(labels=['Fx', 'Fy', 'Fz', 'Mx', 'My', 'Mz'], loc='best', fontsize=20\n )\n", (1807, 1882), True, 'import matplotlib.pyplot as plt\n'), ((1882, 1905), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (1892, 1905), True, 'import matplotlib.pyplot as plt\n'), ((1910, 1933), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (1920, 1933), True, 'import matplotlib.pyplot as plt\n'), ((1939, 1976), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)', 'dpi': '(100)'}), '(figsize=(15, 10), dpi=100)\n', (1949, 1976), True, 'import matplotlib.pyplot as plt\n'), ((1981, 2029), 'matplotlib.pyplot.title', 'plt.title', (['"""Search Result of State"""'], {'fontsize': '(20)'}), "('Search Result of State', fontsize=20)\n", (1990, 2029), True, 'import matplotlib.pyplot as plt\n'), ((2034, 2127), 'matplotlib.pyplot.plot', 'plt.plot', (['(V_state[:100] - [539.88427, -38.68679, 190.03184, 179.88444, 1.30539, 0.21414]\n )'], {}), '(V_state[:100] - [539.88427, -38.68679, 
190.03184, 179.88444, \n 1.30539, 0.21414])\n', (2042, 2127), True, 'import matplotlib.pyplot as plt\n'), ((2127, 2159), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Steps"""'], {'fontsize': '(20)'}), "('Steps', fontsize=20)\n", (2137, 2159), True, 'import matplotlib.pyplot as plt\n'), ((2164, 2201), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Coordinate"""'], {'fontsize': '(20)'}), "('Coordinate', fontsize=20)\n", (2174, 2201), True, 'import matplotlib.pyplot as plt\n'), ((2206, 2283), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labels': "['x', 'y', 'z', 'rx', 'ry', 'rz']", 'loc': '"""best"""', 'fontsize': '(20)'}), "(labels=['x', 'y', 'z', 'rx', 'ry', 'rz'], loc='best', fontsize=20)\n", (2216, 2283), True, 'import matplotlib.pyplot as plt\n'), ((2288, 2311), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (2298, 2311), True, 'import matplotlib.pyplot as plt\n'), ((2316, 2339), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (2326, 2339), True, 'import matplotlib.pyplot as plt\n'), ((2345, 2355), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2353, 2355), True, 'import matplotlib.pyplot as plt\n'), ((2401, 2421), 'numpy.load', 'np.load', (['reward_path'], {}), '(reward_path)\n', (2408, 2421), True, 'import numpy as np\n'), ((2447, 2484), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)', 'dpi': '(100)'}), '(figsize=(15, 15), dpi=100)\n', (2457, 2484), True, 'import matplotlib.pyplot as plt\n'), ((2489, 2516), 'matplotlib.pyplot.title', 'plt.title', (['"""Episode Reward"""'], {}), "('Episode Reward')\n", (2498, 2516), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2612), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Episode Reward"""'], {}), "('Episode Reward')\n", (2594, 2612), True, 'import matplotlib.pyplot as plt\n'), ((2617, 2639), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episodes"""'], {}), "('Episodes')\n", (2627, 2639), True, 'import matplotlib.pyplot as plt\n'), ((2644, 2654), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2652, 2654), True, 'import matplotlib.pyplot as plt\n'), ((2695, 2710), 'numpy.load', 'np.load', (['path_1'], {}), '(path_1)\n', (2702, 2710), True, 'import numpy as np\n'), ((2756, 2793), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)', 'dpi': '(100)'}), '(figsize=(20, 20), dpi=100)\n', (2766, 2793), True, 'import matplotlib.pyplot as plt\n'), ((2798, 2843), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(3)', 'w_pad': '(0.5)', 'h_pad': '(1.0)'}), '(pad=3, w_pad=0.5, h_pad=1.0)\n', (2814, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2848, 2942), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.065)', 'bottom': '(0.1)', 'right': '(0.995)', 'top': '(0.9)', 'wspace': '(0.2)', 'hspace': '(0.2)'}), '(left=0.065, bottom=0.1, right=0.995, top=0.9, wspace=\n 0.2, hspace=0.2)\n', (2867, 2942), True, 'import matplotlib.pyplot as plt\n'), ((2942, 2964), 'matplotlib.pyplot.title', 'plt.title', (['"""True Data"""'], {}), "('True Data')\n", (2951, 2964), True, 'import matplotlib.pyplot as plt\n'), ((3897, 3910), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (3904, 3910), True, 'import numpy as np\n'), ((3915, 3943), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (3925, 3943), True, 'import matplotlib.pyplot as plt\n'), ((3948, 3975), 'matplotlib.pyplot.title', 'plt.title', (['"""Episode 
Reward"""'], {}), "('Episode Reward')\n", (3957, 3975), True, 'import matplotlib.pyplot as plt\n'), ((3980, 4025), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(3)', 'w_pad': '(0.5)', 'h_pad': '(1.0)'}), '(pad=3, w_pad=0.5, h_pad=1.0)\n', (3996, 4025), True, 'import matplotlib.pyplot as plt\n'), ((4030, 4123), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'bottom': '(0.15)', 'right': '(0.98)', 'top': '(0.9)', 'wspace': '(0.23)', 'hspace': '(0.22)'}), '(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23,\n hspace=0.22)\n', (4049, 4123), True, 'import matplotlib.pyplot as plt\n'), ((4742, 4769), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""raw_data.pdf"""'], {}), "('raw_data.pdf')\n", (4753, 4769), True, 'import matplotlib.pyplot as plt\n'), ((4774, 4784), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4782, 4784), True, 'import matplotlib.pyplot as plt\n'), ((4833, 4846), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (4840, 4846), True, 'import numpy as np\n'), ((5652, 5680), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (5662, 5680), True, 'import matplotlib.pyplot as plt\n'), ((5685, 5709), 'matplotlib.pyplot.plot', 'plt.plot', (['all_G[300:400]'], {}), '(all_G[300:400])\n', (5693, 5709), True, 'import matplotlib.pyplot as plt\n'), ((5714, 5724), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5722, 5724), True, 'import matplotlib.pyplot as plt\n'), ((6415, 6429), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6426, 6429), False, 'import pickle\n'), ((6434, 6462), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (6444, 6462), True, 'import matplotlib.pyplot as plt\n'), ((6467, 6512), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(3)', 'w_pad': '(1.0)', 'h_pad': '(0.5)'}), '(pad=3, w_pad=1.0, h_pad=0.5)\n', (6483, 6512), True, 'import matplotlib.pyplot as plt\n'), ((6516, 6609), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'bottom': '(0.15)', 'right': '(0.98)', 'top': '(0.9)', 'wspace': '(0.23)', 'hspace': '(0.23)'}), '(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23,\n hspace=0.23)\n', (6535, 6609), True, 'import matplotlib.pyplot as plt\n'), ((7922, 7932), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7930, 7932), True, 'import matplotlib.pyplot as plt\n'), ((8614, 8628), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8625, 8628), False, 'import pickle\n'), ((8633, 8671), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 12)', 'dpi': '(1000)'}), '(figsize=(20, 12), dpi=1000)\n', (8643, 8671), True, 'import matplotlib.pyplot as plt\n'), ((8676, 8721), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(3)', 'w_pad': '(1.0)', 'h_pad': '(0.5)'}), '(pad=3, w_pad=1.0, h_pad=0.5)\n', (8692, 8721), True, 'import matplotlib.pyplot as plt\n'), ((8725, 8821), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.08)', 'bottom': '(0.12)', 'right': '(0.98)', 'top': '(0.95)', 'wspace': '(0.23)', 'hspace': '(0.33)'}), '(left=0.08, bottom=0.12, right=0.98, top=0.95, wspace=\n 0.23, hspace=0.33)\n', (8744, 8821), True, 'import matplotlib.pyplot as plt\n'), ((10108, 10163), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./figure/pdf/chinese_different_gamma.pdf"""'], {}), "('./figure/pdf/chinese_different_gamma.pdf')\n", (10119, 10163), True, 'import 
matplotlib.pyplot as plt\n'), ((10247, 10261), 'numpy.load', 'np.load', (['path1'], {}), '(path1)\n', (10254, 10261), True, 'import numpy as np\n'), ((10279, 10293), 'numpy.load', 'np.load', (['path2'], {}), '(path2)\n', (10286, 10293), True, 'import numpy as np\n'), ((10298, 10336), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 12)', 'dpi': '(1000)'}), '(figsize=(20, 12), dpi=1000)\n', (10308, 10336), True, 'import matplotlib.pyplot as plt\n'), ((10341, 10368), 'matplotlib.pyplot.title', 'plt.title', (['"""Episode Reward"""'], {}), "('Episode Reward')\n", (10350, 10368), True, 'import matplotlib.pyplot as plt\n'), ((10373, 10418), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(3)', 'w_pad': '(0.5)', 'h_pad': '(1.0)'}), '(pad=3, w_pad=0.5, h_pad=1.0)\n', (10389, 10418), True, 'import matplotlib.pyplot as plt\n'), ((10423, 10519), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.08)', 'bottom': '(0.08)', 'right': '(0.98)', 'top': '(0.95)', 'wspace': '(0.33)', 'hspace': '(0.15)'}), '(left=0.08, bottom=0.08, right=0.98, top=0.95, wspace=\n 0.33, hspace=0.15)\n', (10442, 10519), True, 'import matplotlib.pyplot as plt\n'), ((11243, 11291), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./figure/pdf/chinese_raw_data.pdf"""'], {}), "('./figure/pdf/chinese_raw_data.pdf')\n", (11254, 11291), True, 'import matplotlib.pyplot as plt\n'), ((11912, 11926), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (11923, 11926), False, 'import pickle\n'), ((11931, 11969), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 12)', 'dpi': '(1000)'}), '(figsize=(20, 12), dpi=1000)\n', (11941, 11969), True, 'import matplotlib.pyplot as plt\n'), ((11974, 12001), 'matplotlib.pyplot.title', 'plt.title', (['"""Episode Reward"""'], {}), "('Episode Reward')\n", (11983, 12001), True, 'import matplotlib.pyplot as plt\n'), ((12006, 12051), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(3)', 'w_pad': '(0.5)', 'h_pad': '(1.0)'}), '(pad=3, w_pad=0.5, h_pad=1.0)\n', (12022, 12051), True, 'import matplotlib.pyplot as plt\n'), ((12056, 12149), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'bottom': '(0.1)', 'right': '(0.98)', 'top': '(0.95)', 'wspace': '(0.33)', 'hspace': '(0.25)'}), '(left=0.1, bottom=0.1, right=0.98, top=0.95, wspace=0.33,\n hspace=0.25)\n', (12075, 12149), True, 'import matplotlib.pyplot as plt\n'), ((13533, 13585), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./figure/pdf/chinese_' + name + '.pdf')"], {}), "('./figure/pdf/chinese_' + name + '.pdf')\n", (13544, 13585), True, 'import matplotlib.pyplot as plt\n'), ((14190, 14204), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (14201, 14204), False, 'import pickle\n'), ((14209, 14247), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 12)', 'dpi': '(1000)'}), '(figsize=(20, 12), dpi=1000)\n', (14219, 14247), True, 'import matplotlib.pyplot as plt\n'), ((14252, 14279), 'matplotlib.pyplot.title', 'plt.title', (['"""Episode Reward"""'], {}), "('Episode Reward')\n", (14261, 14279), True, 'import matplotlib.pyplot as plt\n'), ((14284, 14329), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(3)', 'w_pad': '(1.0)', 'h_pad': '(1.0)'}), '(pad=3, w_pad=1.0, h_pad=1.0)\n', (14300, 14329), True, 'import matplotlib.pyplot as plt\n'), ((14334, 14427), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'bottom': '(0.15)', 'right': '(0.98)', 'top': '(0.9)', 
'wspace': '(0.23)', 'hspace': '(0.23)'}), '(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23,\n hspace=0.23)\n', (14353, 14427), True, 'import matplotlib.pyplot as plt\n'), ((15716, 15776), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./figure/pdf/chinese_different_policies_b.pdf"""'], {}), "('./figure/pdf/chinese_different_policies_b.pdf')\n", (15727, 15776), True, 'import matplotlib.pyplot as plt\n'), ((2558, 2578), 'numpy.array', 'np.array', (['reward[1:]'], {}), '(reward[1:])\n', (2566, 2578), True, 'import numpy as np\n'), ((4356, 4380), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(j + 1)'], {}), '(2, 3, j + 1)\n', (4367, 4380), True, 'import matplotlib.pyplot as plt\n'), ((4389, 4435), 'matplotlib.pyplot.plot', 'plt.plot', (['(data[:, j] * scale[j])'], {'linewidth': '(2.5)'}), '(data[:, j] * scale[j], linewidth=2.5)\n', (4397, 4435), True, 'import matplotlib.pyplot as plt\n'), ((4548, 4581), 'matplotlib.pyplot.title', 'plt.title', (['YLABEL[j]'], {'fontsize': '(30)'}), '(YLABEL[j], fontsize=30)\n', (4557, 4581), True, 'import matplotlib.pyplot as plt\n'), ((4590, 4613), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(25)'}), '(fontsize=25)\n', (4600, 4613), True, 'import matplotlib.pyplot as plt\n'), ((4622, 4645), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(25)'}), '(fontsize=25)\n', (4632, 4645), True, 'import matplotlib.pyplot as plt\n'), ((6789, 6813), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(j + 1)'], {}), '(2, 2, j + 1)\n', (6800, 6813), True, 'import matplotlib.pyplot as plt\n'), ((7665, 7698), 'matplotlib.pyplot.title', 'plt.title', (['titles[j]'], {'fontsize': '(30)'}), '(titles[j], fontsize=30)\n', (7674, 7698), True, 'import matplotlib.pyplot as plt\n'), ((7770, 7812), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of steps"""'], {'fontsize': '(30)'}), "('Number of steps', fontsize=30)\n", (7780, 7812), True, 'import matplotlib.pyplot as plt\n'), ((7821, 7844), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(25)'}), '(fontsize=25)\n', (7831, 7844), True, 'import matplotlib.pyplot as plt\n'), ((7853, 7876), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(25)'}), '(fontsize=25)\n', (7863, 7876), True, 'import matplotlib.pyplot as plt\n'), ((9001, 9025), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(j + 1)'], {}), '(2, 2, j + 1)\n', (9012, 9025), True, 'import matplotlib.pyplot as plt\n'), ((9878, 9911), 'matplotlib.pyplot.title', 'plt.title', (['titles[j]'], {'fontsize': '(36)'}), '(titles[j], fontsize=36)\n', (9887, 9911), True, 'import matplotlib.pyplot as plt\n'), ((9983, 10014), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""预测周期"""'], {'fontsize': '(36)'}), "('预测周期', fontsize=36)\n", (9993, 10014), True, 'import matplotlib.pyplot as plt\n'), ((10023, 10070), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 50, 100, 150, 200]'], {'fontsize': '(36)'}), '([0, 50, 100, 150, 200], fontsize=36)\n', (10033, 10070), True, 'import matplotlib.pyplot as plt\n'), ((10079, 10102), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(36)'}), '(fontsize=36)\n', (10089, 10102), True, 'import matplotlib.pyplot as plt\n'), ((10776, 10800), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(j + 1)'], {}), '(2, 3, j + 1)\n', (10787, 10800), True, 'import matplotlib.pyplot as plt\n'), ((10809, 10874), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:100, j]'], {'linewidth': '(2.5)', 'color': '"""r"""', 'linestyle': '"""--"""'}), 
"(data[:100, j], linewidth=2.5, color='r', linestyle='--')\n", (10817, 10874), True, 'import matplotlib.pyplot as plt\n'), ((10883, 10934), 'matplotlib.pyplot.plot', 'plt.plot', (['data_1[:100, j]'], {'linewidth': '(2.5)', 'color': '"""b"""'}), "(data_1[:100, j], linewidth=2.5, color='b')\n", (10891, 10934), True, 'import matplotlib.pyplot as plt\n'), ((11048, 11081), 'matplotlib.pyplot.title', 'plt.title', (['YLABEL[j]'], {'fontsize': '(38)'}), '(YLABEL[j], fontsize=38)\n', (11057, 11081), True, 'import matplotlib.pyplot as plt\n'), ((11090, 11113), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(38)'}), '(fontsize=38)\n', (11100, 11113), True, 'import matplotlib.pyplot as plt\n'), ((11122, 11145), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(38)'}), '(fontsize=38)\n', (11132, 11145), True, 'import matplotlib.pyplot as plt\n'), ((12427, 12451), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(j + 1)'], {}), '(2, 3, j + 1)\n', (12438, 12451), True, 'import matplotlib.pyplot as plt\n'), ((13344, 13377), 'matplotlib.pyplot.title', 'plt.title', (['YLABEL[j]'], {'fontsize': '(38)'}), '(YLABEL[j], fontsize=38)\n', (13353, 13377), True, 'import matplotlib.pyplot as plt\n'), ((13448, 13495), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 50, 100, 150, 200]'], {'fontsize': '(38)'}), '([0, 50, 100, 150, 200], fontsize=38)\n', (13458, 13495), True, 'import matplotlib.pyplot as plt\n'), ((13504, 13527), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(38)'}), '(fontsize=38)\n', (13514, 13527), True, 'import matplotlib.pyplot as plt\n'), ((14608, 14632), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(j + 1)'], {}), '(2, 3, j + 1)\n', (14619, 14632), True, 'import matplotlib.pyplot as plt\n'), ((15526, 15559), 'matplotlib.pyplot.title', 'plt.title', (['YLABEL[j]'], {'fontsize': '(30)'}), '(YLABEL[j], fontsize=30)\n', (15535, 15559), True, 'import matplotlib.pyplot as plt\n'), ((15631, 15678), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 50, 100, 150, 200]'], {'fontsize': '(25)'}), '([0, 50, 100, 150, 200], fontsize=25)\n', (15641, 15678), True, 'import matplotlib.pyplot as plt\n'), ((15687, 15710), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(25)'}), '(fontsize=25)\n', (15697, 15710), True, 'import matplotlib.pyplot as plt\n'), ((1363, 1377), 'matplotlib.pyplot.plot', 'plt.plot', (['line'], {}), '(line)\n', (1371, 1377), True, 'import matplotlib.pyplot as plt\n'), ((1390, 1411), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['YLABEL[j]'], {}), '(YLABEL[j])\n', (1400, 1411), True, 'import matplotlib.pyplot as plt\n'), ((1424, 1443), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {}), "('steps')\n", (1434, 1443), True, 'import matplotlib.pyplot as plt\n'), ((1456, 1474), 'matplotlib.pyplot.legend', 'plt.legend', (['YLABEL'], {}), '(YLABEL)\n', (1466, 1474), True, 'import matplotlib.pyplot as plt\n'), ((4507, 4539), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {'fontsize': '(30)'}), "('steps', fontsize=30)\n", (4517, 4539), True, 'import matplotlib.pyplot as plt\n'), ((7729, 7761), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {'fontsize': '(30)'}), "('steps', fontsize=30)\n", (7739, 7761), True, 'import matplotlib.pyplot as plt\n'), ((9943, 9974), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""搜索步数"""'], {'fontsize': '(36)'}), "('搜索步数', fontsize=36)\n", (9953, 9974), True, 'import matplotlib.pyplot as plt\n'), ((11008, 11039), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""搜索步数"""'], {'fontsize': '(38)'}), "('搜索步数', fontsize=38)\n", (11018, 11039), True, 'import matplotlib.pyplot as plt\n'), ((13408, 13439), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""搜索步数"""'], {'fontsize': '(38)'}), "('搜索步数', fontsize=38)\n", (13418, 13439), True, 'import matplotlib.pyplot as plt\n'), ((15590, 15622), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {'fontsize': '(30)'}), "('steps', fontsize=30)\n", (15600, 15622), True, 'import matplotlib.pyplot as plt\n'), ((3288, 3312), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(j + 1)'], {}), '(2, 3, j + 1)\n', (3299, 3312), True, 'import matplotlib.pyplot as plt\n'), ((3329, 3349), 'matplotlib.pyplot.plot', 'plt.plot', (['line[:, j]'], {}), '(line[:, j])\n', (3337, 3349), True, 'import matplotlib.pyplot as plt\n'), ((5307, 5321), 'copy.deepcopy', 'cp.deepcopy', (['G'], {}), '(G)\n', (5318, 5321), True, 'import copy as cp\n'), ((5523, 5537), 'copy.deepcopy', 'cp.deepcopy', (['G'], {}), '(G)\n', (5534, 5537), True, 'import copy as cp\n'), ((7028, 7068), 'numpy.array', 'np.array', (["raw_data['GTD(0)', 'UDE'][key]"], {}), "(raw_data['GTD(0)', 'UDE'][key])\n", (7036, 7068), True, 'import numpy as np\n'), ((7375, 7422), 'numpy.array', 'np.array', (["raw_data['GTD(0)', 'Prediction'][key]"], {}), "(raw_data['GTD(0)', 'Prediction'][key])\n", (7383, 7422), True, 'import numpy as np\n'), ((9240, 9280), 'numpy.array', 'np.array', (["raw_data['GTD(0)', 'UDE'][key]"], {}), "(raw_data['GTD(0)', 'UDE'][key])\n", (9248, 9280), True, 'import numpy as np\n'), ((9587, 9634), 'numpy.array', 'np.array', (["raw_data['GTD(0)', 'Prediction'][key]"], {}), "(raw_data['GTD(0)', 'Prediction'][key])\n", (9595, 9634), True, 'import numpy as np\n'), ((3437, 3473), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['YLABEL[j]'], {'fontsize': '(17.5)'}), '(YLABEL[j], fontsize=17.5)\n', (3447, 3473), True, 'import matplotlib.pyplot as plt\n'), ((3494, 3526), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {'fontsize': '(20)'}), "('steps', fontsize=20)\n", (3504, 3526), True, 'import matplotlib.pyplot as plt\n'), ((3547, 3570), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (3557, 3570), True, 'import matplotlib.pyplot as plt\n'), ((3591, 3614), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (3601, 3614), True, 'import matplotlib.pyplot as plt\n'), ((3657, 3691), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['YLABEL[j]'], {'fontsize': '(20)'}), '(YLABEL[j], fontsize=20)\n', (3667, 3691), True, 'import matplotlib.pyplot as plt\n'), ((3712, 3744), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""steps"""'], {'fontsize': '(20)'}), "('steps', fontsize=20)\n", (3722, 3744), True, 'import matplotlib.pyplot as plt\n'), ((3765, 3788), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (3775, 3788), True, 'import matplotlib.pyplot as plt\n'), ((3809, 3832), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (3819, 3832), True, 'import matplotlib.pyplot as plt\n'), ((12678, 12718), 'numpy.array', 'np.array', (["raw_data['GTD(1)', 'UDE'][key]"], {}), "(raw_data['GTD(1)', 'UDE'][key])\n", (12686, 12718), True, 'import numpy as np\n'), ((13045, 13092), 'numpy.array', 'np.array', (["raw_data['GTD(1)', 'Prediction'][key]"], {}), "(raw_data['GTD(1)', 'Prediction'][key])\n", (13053, 13092), True, 'import numpy as np\n'), ((14859, 14899), 'numpy.array', 'np.array', (["raw_data['GTD(1)', 
'UDE'][key]"], {}), "(raw_data['GTD(1)', 'UDE'][key])\n", (14867, 14899), True, 'import numpy as np\n'), ((15226, 15273), 'numpy.array', 'np.array', (["raw_data['GTD(1)', 'Prediction'][key]"], {}), "(raw_data['GTD(1)', 'Prediction'][key])\n", (15234, 15273), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
from math import factorial
import numpy as np
# test passed
def generate_poly(max_exponent,max_diff,symbol):
f=np.zeros((max_diff+1, max_exponent+1), dtype=float)
for k in range(max_diff+1):
for i in range(max_exponent+1):
if (i - k) >= 0:
f[k,i] = factorial(i)*symbol**(i-k)/factorial(i-k)
else:
f[k,i] = 0
return f
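# Worked example (values chosen for illustration): generate_poly(2, 1, 2.0) returns
#   [[1., 2., 4.],    # row k=0: the monomials symbol**i evaluated at symbol=2
#    [0., 1., 4.]]    # row k=1: their first derivatives i*symbol**(i-1) at symbol=2
# i.e. f[k, i] = i!/(i-k)! * symbol**(i-k), the k-th derivative of symbol**i.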
|
[
"math.factorial",
"numpy.zeros"
] |
[((137, 192), 'numpy.zeros', 'np.zeros', (['(max_diff + 1, max_exponent + 1)'], {'dtype': 'float'}), '((max_diff + 1, max_exponent + 1), dtype=float)\n', (145, 192), True, 'import numpy as np\n'), ((315, 331), 'math.factorial', 'factorial', (['(i - k)'], {}), '(i - k)\n', (324, 331), False, 'from math import factorial\n'), ((288, 300), 'math.factorial', 'factorial', (['i'], {}), '(i)\n', (297, 300), False, 'from math import factorial\n')]
|
from dask.distributed import Client
import dask.dataframe as dd
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from IPython.display import display, HTML
from sklearn.cluster import KMeans
import plotly
import plotly.graph_objs as go
import plotly.io as pio
from functools import partial
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, recall_score
def make_groundtruth_figures(data_folder, update_figs=False, no_labels=False):
vectors = pd.read_csv(os.path.join(data_folder, 'features.csv'), index_col='app')
if no_labels: # mostly for testing
all_apps = vectors.assign(
label=['app', 'app'],
category=['app', 'app']
)
else:
all_apps = pd.read_csv("data/out/all-apps/all_apps.csv", index_col='app')
        all_apps['label'] = all_apps[all_apps.category=='malware'].app_dir.str.split('/').apply(lambda parts: parts[5])  # avoid shadowing the built-in 'list'
top_9_malware = all_apps.label.value_counts().sort_values(ascending=False)[:9]
top_9_min = top_9_malware.min()
other_mal_map = {key: "Other malware" for key, value in all_apps.label.value_counts().items() if value <= top_9_min}
# other_mal_map = {key: key for key, value in all_apps.label.value_counts().items() if value <= 200}
all_apps.label = all_apps.label.map(other_mal_map).fillna(all_apps.label)
all_apps.label.fillna(all_apps.category, inplace=True)
vectors = vectors.assign(
label=all_apps.label,
category=all_apps.category
)
labels = vectors.label
# Retrieve node embeddings and corresponding subjects
node_ids = list(vectors.uid) # list of node IDs
node_embeddings = vectors.drop(columns=['uid', 'category', 'label'])
node_targets = labels
transform = TSNE # Dimensionality reduction transformer
# 2D plot -- matplotlib
print('Making 2D plot...')
plt.rcParams.update({'font.size': 14})
trans = transform(n_components=2)
node_embeddings_2d = trans.fit_transform(node_embeddings)
label_map = {l: i for i, l in enumerate(np.unique(node_targets))}
node_colours = [label_map[target] for target in node_targets]
plt.figure(figsize=(10, 8))
plt.axes().set(aspect="equal")
scatter = plt.scatter(
node_embeddings_2d[:, 0],
node_embeddings_2d[:, 1],
c=node_colours,
cmap='tab20',
alpha=1,
s=5
)
plt.title("2D {} visualization of node embeddings".format(transform.__name__))
legend1 = plt.legend(scatter.legend_elements()[0], pd.Series(label_map.keys()).str.replace('-', ' ').str.title(),
loc='center left', bbox_to_anchor=(1, 0.5), title="App Type", markerscale=1.5)
# order labels (https://stackoverflow.com/a/46160465/13710014)
# handles, g_labels = plt.gca().get_legend_handles_labels()
# print(handles, labels)
# if not no_labels:
# order = ['Popular Apps', 'Random Apps']
# order += list(top_9_malware.index)
# plt.legend([handles[idx] for idx in order],[labels[idx] for idx in order])
plt.savefig(os.path.join(data_folder, '2D-plot.png'), bbox_inches='tight')
# 3D plot - using plotly
print('Making 3D plot...')
trans3d = transform(n_components=3)
node_embeddings_3d = trans3d.fit_transform(node_embeddings)
data_3d = pd.DataFrame(node_embeddings_3d, index=vectors.index)
data_3d['malware'] = vectors['category']=='malware'
data_3d['type'] = vectors.label
type_chart = data_3d[['malware', 'type']].drop_duplicates()
type_chart['num'] = type_chart.type.map(label_map)
layout = go.Layout(
title="Interactive 3D TNSE representation of node embeddings",
margin={'l': 0, 'r': 0, 'b': 0, 't': 30},
legend=dict(y=0.5, itemsizing='constant'),
scene={
'xaxis': {
'showspikes': False,
'showgrid': False,
'zeroline': False,
'visible': False
},
'yaxis': {
'showspikes': False,
'showgrid': False,
'zeroline': False,
'visible': False
},
'zaxis': {
'showspikes': False,
'showgrid': False,
'zeroline': False,
'visible': False
}
}
)
fig = go.Figure(layout=layout)
# add invisible bounding trace to keep axes' scale constant
fig.add_trace(
go.Scatter3d(
x=[data_3d[0].min(), data_3d[0].max()],
y=[data_3d[1].min(), data_3d[1].max()],
z=[data_3d[2].min(), data_3d[2].max()],
mode='markers',
marker={
'color':'rgba(0,0,0,0)',
'opacity': 0,
},
showlegend=False
)
)
for index, row in type_chart.sort_values('num', ascending=False).iterrows():
if row['malware']:
symbol = 'circle'
group='Malware'
size = 2
else:
symbol = 'x'
group='Unlabeled'
size = 1.5
name = f"{group}, {row['type'].replace('-', ' ').title()}"
if row['type']=='Other malware':
name=row['type']
df = data_3d[data_3d.type==row['type']]
        rgb = tuple([255*val for val in cm.tab20(row['num'])[:3]])
        color = f"rgb{rgb}"
trace = go.Scatter3d(
name=name,
x=df[0],
y=df[1],
z=df[2],
customdata=list(df.index),
hovertemplate=
"<b>%{customdata}</b><br>" +
f"{name}" +
"<extra></extra>",
mode='markers',
marker={
'size': size,
'opacity': 1,
'color': color,
'symbol': symbol,
},
)
fig.add_trace(trace)
# Save the plot.
pio.write_html(fig, file=os.path.join(data_folder, '3D-plot.html'), auto_open=True)
if update_figs:
pio.write_html(fig, file=os.path.join('docs', '_includes', '3D-plot.html'), auto_open=True)
def compute_model_performance_statistics(pred, true):
'''
Returns a series with the f1-score, accuracy, recall, and confusion counts (TP, TN, FP, FN).
'''
TN, FP, FN, TP = confusion_matrix(true, pred).ravel()
return pd.Series({
'ACC': accuracy_score(true, pred),
'TPR': recall_score(true, pred),
'F1': f1_score(true, pred),
'TP': TP,
'TN': TN,
'FP': FP,
'FN': FN
})
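# --- Illustrative usage (hedged sketch; the labels below are made up) ---
# Shows the Series layout returned above: accuracy (ACC), recall (TPR),
# f1-score (F1) and the raw confusion-matrix counts. Not called anywhere.
def _example_performance_statistics():
    example_true = pd.Series([1, 0, 1, 1, 0, 0])
    example_pred = pd.Series([1, 0, 0, 1, 0, 1])
    return compute_model_performance_statistics(example_pred, example_true)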
def create_performance_table(m2v_results_path, hindroid_results_path, outpath=None):
results = pd.read_csv(m2v_results_path, index_col='app', usecols=['app', 'm2vDroid', 'true'])
if 'true' in results.columns:
results = results.drop(columns=['true'])
results = results.join(pd.read_csv(hindroid_results_path, index_col='app'))
y_true = results.true
table = results.drop(columns=['true']).apply(partial(compute_model_performance_statistics, true=y_true)).T
table = table.astype({col: int for col in ['TP', 'TN', 'FP', 'FN']})
if outpath is not None:
table.to_csv(outpath)
return table
def generate_analysis(data_path, jobs={}):
"Generates plots, aggregates, and statistical analysis on app data located in `data_path`"
# load data
# app_data_path = os.path.join(data_path, 'app_data.csv')
# app_data = dd.read_csv(app_data_path)
# os.makedirs(out_folder, exist_ok=True)
if "plots" in jobs:
make_groundtruth_figures(data_path, **jobs['plots'])
|
[
"sklearn.metrics.f1_score",
"numpy.unique",
"pandas.read_csv",
"os.path.join",
"sklearn.metrics.recall_score",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"functools.partial",
"matplotlib.pyplot.scatter",
"matplotlib.cm.tab20",
"pandas.DataFrame",
"plotly.graph_objs.Figure",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.confusion_matrix"
] |
[((2021, 2059), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 14}"], {}), "({'font.size': 14})\n", (2040, 2059), True, 'import matplotlib.pyplot as plt\n'), ((2306, 2333), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (2316, 2333), True, 'import matplotlib.pyplot as plt\n'), ((2383, 2495), 'matplotlib.pyplot.scatter', 'plt.scatter', (['node_embeddings_2d[:, 0]', 'node_embeddings_2d[:, 1]'], {'c': 'node_colours', 'cmap': '"""tab20"""', 'alpha': '(1)', 's': '(5)'}), "(node_embeddings_2d[:, 0], node_embeddings_2d[:, 1], c=\n node_colours, cmap='tab20', alpha=1, s=5)\n", (2394, 2495), True, 'import matplotlib.pyplot as plt\n'), ((3486, 3539), 'pandas.DataFrame', 'pd.DataFrame', (['node_embeddings_3d'], {'index': 'vectors.index'}), '(node_embeddings_3d, index=vectors.index)\n', (3498, 3539), True, 'import pandas as pd\n'), ((4530, 4554), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'layout': 'layout'}), '(layout=layout)\n', (4539, 4554), True, 'import plotly.graph_objs as go\n'), ((6864, 6951), 'pandas.read_csv', 'pd.read_csv', (['m2v_results_path'], {'index_col': '"""app"""', 'usecols': "['app', 'm2vDroid', 'true']"}), "(m2v_results_path, index_col='app', usecols=['app', 'm2vDroid',\n 'true'])\n", (6875, 6951), True, 'import pandas as pd\n'), ((612, 653), 'os.path.join', 'os.path.join', (['data_folder', '"""features.csv"""'], {}), "(data_folder, 'features.csv')\n", (624, 653), False, 'import os\n'), ((860, 922), 'pandas.read_csv', 'pd.read_csv', (['"""data/out/all-apps/all_apps.csv"""'], {'index_col': '"""app"""'}), "('data/out/all-apps/all_apps.csv', index_col='app')\n", (871, 922), True, 'import pandas as pd\n'), ((3240, 3280), 'os.path.join', 'os.path.join', (['data_folder', '"""2D-plot.png"""'], {}), "(data_folder, '2D-plot.png')\n", (3252, 3280), False, 'import os\n'), ((7058, 7109), 'pandas.read_csv', 'pd.read_csv', (['hindroid_results_path'], {'index_col': '"""app"""'}), "(hindroid_results_path, index_col='app')\n", (7069, 7109), True, 'import pandas as pd\n'), ((2338, 2348), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (2346, 2348), True, 'import matplotlib.pyplot as plt\n'), ((6120, 6161), 'os.path.join', 'os.path.join', (['data_folder', '"""3D-plot.html"""'], {}), "(data_folder, '3D-plot.html')\n", (6132, 6161), False, 'import os\n'), ((6497, 6525), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['true', 'pred'], {}), '(true, pred)\n', (6513, 6525), False, 'from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, recall_score\n'), ((6572, 6598), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['true', 'pred'], {}), '(true, pred)\n', (6586, 6598), False, 'from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, recall_score\n'), ((6615, 6639), 'sklearn.metrics.recall_score', 'recall_score', (['true', 'pred'], {}), '(true, pred)\n', (6627, 6639), False, 'from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, recall_score\n'), ((6655, 6675), 'sklearn.metrics.f1_score', 'f1_score', (['true', 'pred'], {}), '(true, pred)\n', (6663, 6675), False, 'from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, recall_score\n'), ((7186, 7244), 'functools.partial', 'partial', (['compute_model_performance_statistics'], {'true': 'y_true'}), '(compute_model_performance_statistics, true=y_true)\n', (7193, 7244), False, 'from functools import partial\n'), ((2209, 2232), 'numpy.unique', 'np.unique', (['node_targets'], {}), 
'(node_targets)\n', (2218, 2232), True, 'import numpy as np\n'), ((6237, 6286), 'os.path.join', 'os.path.join', (['"""docs"""', '"""_includes"""', '"""3D-plot.html"""'], {}), "('docs', '_includes', '3D-plot.html')\n", (6249, 6286), False, 'import os\n'), ((5505, 5525), 'matplotlib.cm.tab20', 'cm.tab20', (["row['num']"], {}), "(row['num'])\n", (5513, 5525), True, 'import matplotlib.cm as cm\n')]
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for ShowerConditionProbability plugin"""
from typing import Dict, List, Tuple, Union
import numpy as np
import pytest
from iris.cube import CubeList
from numpy import ndarray
from improver.metadata.constants import FLOAT_DTYPE
from improver.precipitation_type.shower_condition_probability import (
ShowerConditionProbability,
)
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
ATTRIBUTES = {
"institution": "Met Office",
"mosg__model_configuration": "gl_ens",
"source": "Met Office Unified Model",
"title": "MOGREPS-G Forecast on UK 2 km Standard Grid",
}
EXPECTED_ATTRIBUTES = {
"institution": "Met Office",
"source": "Met Office Unified Model",
"title": "Post-Processed MOGREPS-G Forecast on UK 2 km Standard Grid",
}
MODEL_ID_ATTR_ATTRIBUTES = EXPECTED_ATTRIBUTES.copy()
MODEL_ID_ATTR_ATTRIBUTES.update({"mosg__model_configuration": "gl_ens"})
@pytest.fixture(name="test_cubes")
def cube_fixture(cube_properties: Tuple[Dict[str, Dict[str, Union[List, ndarray]]]]):
"""Create a test cube"""
cubes = CubeList()
for name, values in cube_properties.items():
cubes.append(
set_up_variable_cube(
values["data"],
name=name,
units=1,
realizations=values["realizations"],
attributes=ATTRIBUTES,
)
)
return cubes
@pytest.mark.parametrize(
"cube_properties, kwargs, expected",
(
# Simple case with one realization, cloud dominates returned
# probabilities (i.e. clear skies).
(
{
"low_and_medium_type_cloud_area_fraction": {
"data": np.zeros((2, 2)).astype(FLOAT_DTYPE),
"realizations": [0],
},
"convective_ratio": {
"data": np.zeros((2, 2)).astype(FLOAT_DTYPE),
"realizations": [0],
},
},
# Other plugin kwargs
{"cloud_threshold": 0.5, "convection_threshold": 0.5},
# Expected result
(np.ones((2, 2)).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES),
),
# As above, but using the model_id_attr keyword to preserve the model
# information.
(
{
"low_and_medium_type_cloud_area_fraction": {
"data": np.zeros((2, 2)).astype(FLOAT_DTYPE),
"realizations": [0],
},
"convective_ratio": {
"data": np.zeros((2, 2)).astype(FLOAT_DTYPE),
"realizations": [0],
},
},
# Other plugin kwargs
{
"model_id_attr": "mosg__model_configuration",
"cloud_threshold": 0.5,
"convection_threshold": 0.5,
},
# Expected result
(np.ones((2, 2)).astype(FLOAT_DTYPE), MODEL_ID_ATTR_ATTRIBUTES),
),
# Simple case with one realization, convection dominates returned
# probabilities.
(
{
"low_and_medium_type_cloud_area_fraction": {
"data": np.ones((2, 2)).astype(FLOAT_DTYPE),
"realizations": [0],
},
"convective_ratio": {
"data": np.ones((2, 2)).astype(FLOAT_DTYPE),
"realizations": [0],
},
},
# Other plugin kwargs
{"cloud_threshold": 0.5, "convection_threshold": 0.5},
# Expected result
(np.ones((2, 2)).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES),
),
# As above, but the convective_ratio includes masked values. This test
# checks that they are ignored in setting the resulting probabilities
        # and that the output is not masked. One resulting value differs from
        # the one above, corresponding to the masked point.
(
{
"low_and_medium_type_cloud_area_fraction": {
"data": np.ones((2, 2)).astype(FLOAT_DTYPE),
"realizations": [0],
},
"convective_ratio": {
"data": np.ma.masked_array(
np.ones((2, 2)).astype(FLOAT_DTYPE),
mask=np.array([[0, 0], [0, 1]]),
),
"realizations": [0],
},
},
# Other plugin kwargs
{"cloud_threshold": 0.5, "convection_threshold": 0.5},
# Expected result
(np.array([[1, 1], [1, 0]]).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES),
),
# Multi-realization case with a range of probabilities returned due
# to variable cloud.
(
{
"low_and_medium_type_cloud_area_fraction": {
"data": np.array(
[[[0.4, 0.6], [0.4, 0.6]], [[0.6, 0.6], [0.6, 0.6]]]
).astype(FLOAT_DTYPE),
"realizations": [0, 1],
},
"convective_ratio": {
"data": np.zeros((2, 2, 2)).astype(FLOAT_DTYPE),
"realizations": [0, 1],
},
},
# Other plugin kwargs
{"cloud_threshold": 0.5, "convection_threshold": 0.5},
# Expected result
(np.array([[0.5, 0], [0.5, 0]]).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES),
),
# Same as above, but with different threshold values applied.
# Cloud =< 0.7, which will result in probabilities all equal to 1.
(
{
"low_and_medium_type_cloud_area_fraction": {
"data": np.array(
[[[0.4, 0.6], [0.4, 0.6]], [[0.6, 0.6], [0.6, 0.6]]]
).astype(FLOAT_DTYPE),
"realizations": [0, 1],
},
"convective_ratio": {
"data": np.zeros((2, 2, 2)).astype(FLOAT_DTYPE),
"realizations": [0, 1],
},
},
# Other plugin kwargs
{"cloud_threshold": 0.7, "convection_threshold": 0.5},
# Expected result
(np.ones((2, 2)).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES),
),
# Multi-realization case with cloud and convection both providing a
# showery probability of 1.
(
{
"low_and_medium_type_cloud_area_fraction": {
"data": np.array([[[0, 1], [1, 1]], [[0, 1], [1, 1]]]).astype(
FLOAT_DTYPE
),
"realizations": [0, 1],
},
"convective_ratio": {
"data": np.array([[[0, 0], [0, 1]], [[0, 0], [0, 1]]]).astype(
FLOAT_DTYPE
),
"realizations": [0, 1],
},
},
# Other plugin kwargs
{"cloud_threshold": 0.5, "convection_threshold": 0.5},
# Expected result
(np.array([[1, 0], [0, 1]]).astype(FLOAT_DTYPE), EXPECTED_ATTRIBUTES),
),
),
)
def test_scenarios(test_cubes, kwargs, expected):
"""Test output type and metadata"""
expected_shape = test_cubes[0].shape[-2:]
result = ShowerConditionProbability(**kwargs)(test_cubes)
assert result.name() == "probability_of_shower_condition_above_threshold"
assert result.units == "1"
assert result.shape == expected_shape
assert result.data.dtype == FLOAT_DTYPE
assert (result.data == expected[0]).all()
assert result.attributes == expected[1]
assert result.coord(var_name="threshold").name() == "shower_condition"
assert result.coord(var_name="threshold").points == 1.0
def test_incorrect_inputs_exception():
"""Tests that the expected exception is raised for incorrectly named
input cubes."""
temperature = set_up_variable_cube(np.ones((2, 2)).astype(FLOAT_DTYPE))
expected = (
"A cloud area fraction and convective ratio are required, "
f"but the inputs were: {temperature.name()}, {temperature.name()}"
)
with pytest.raises(ValueError, match=expected):
ShowerConditionProbability()(CubeList([temperature, temperature]))
def test_mismatched_shape_exception():
"""Tests that the expected exception is raised for cloud and convection
cubes of different shapes."""
cloud = set_up_variable_cube(
np.ones((2, 2)).astype(FLOAT_DTYPE),
name="low_and_medium_type_cloud_area_fraction",
)
convection = set_up_variable_cube(
np.ones((3, 3)).astype(FLOAT_DTYPE), name="convective_ratio"
)
expected = (
"The cloud area fraction and convective ratio cubes are not the same "
"shape and cannot be combined to generate a shower probability"
)
with pytest.raises(ValueError, match=expected):
ShowerConditionProbability()(CubeList([cloud, convection]))
|
[
"iris.cube.CubeList",
"numpy.ones",
"improver.synthetic_data.set_up_test_cubes.set_up_variable_cube",
"improver.precipitation_type.shower_condition_probability.ShowerConditionProbability",
"numpy.array",
"numpy.zeros",
"pytest.raises",
"pytest.fixture"
] |
[((2584, 2617), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""test_cubes"""'}), "(name='test_cubes')\n", (2598, 2617), False, 'import pytest\n'), ((2745, 2755), 'iris.cube.CubeList', 'CubeList', ([], {}), '()\n', (2753, 2755), False, 'from iris.cube import CubeList\n'), ((9135, 9171), 'improver.precipitation_type.shower_condition_probability.ShowerConditionProbability', 'ShowerConditionProbability', ([], {}), '(**kwargs)\n', (9161, 9171), False, 'from improver.precipitation_type.shower_condition_probability import ShowerConditionProbability\n'), ((9991, 10032), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'expected'}), '(ValueError, match=expected)\n', (10004, 10032), False, 'import pytest\n'), ((10700, 10741), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'expected'}), '(ValueError, match=expected)\n', (10713, 10741), False, 'import pytest\n'), ((2839, 2960), 'improver.synthetic_data.set_up_test_cubes.set_up_variable_cube', 'set_up_variable_cube', (["values['data']"], {'name': 'name', 'units': '(1)', 'realizations': "values['realizations']", 'attributes': 'ATTRIBUTES'}), "(values['data'], name=name, units=1, realizations=\n values['realizations'], attributes=ATTRIBUTES)\n", (2859, 2960), False, 'from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube\n'), ((10042, 10070), 'improver.precipitation_type.shower_condition_probability.ShowerConditionProbability', 'ShowerConditionProbability', ([], {}), '()\n', (10068, 10070), False, 'from improver.precipitation_type.shower_condition_probability import ShowerConditionProbability\n'), ((10071, 10107), 'iris.cube.CubeList', 'CubeList', (['[temperature, temperature]'], {}), '([temperature, temperature])\n', (10079, 10107), False, 'from iris.cube import CubeList\n'), ((10751, 10779), 'improver.precipitation_type.shower_condition_probability.ShowerConditionProbability', 'ShowerConditionProbability', ([], {}), '()\n', (10777, 10779), False, 'from improver.precipitation_type.shower_condition_probability import ShowerConditionProbability\n'), ((10780, 10809), 'iris.cube.CubeList', 'CubeList', (['[cloud, convection]'], {}), '([cloud, convection])\n', (10788, 10809), False, 'from iris.cube import CubeList\n'), ((9778, 9793), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (9785, 9793), True, 'import numpy as np\n'), ((10302, 10317), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (10309, 10317), True, 'import numpy as np\n'), ((10448, 10463), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (10455, 10463), True, 'import numpy as np\n'), ((3800, 3815), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (3807, 3815), True, 'import numpy as np\n'), ((4614, 4629), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (4621, 4629), True, 'import numpy as np\n'), ((5320, 5335), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (5327, 5335), True, 'import numpy as np\n'), ((6335, 6361), 'numpy.array', 'np.array', (['[[1, 1], [1, 0]]'], {}), '([[1, 1], [1, 0]])\n', (6343, 6361), True, 'import numpy as np\n'), ((7156, 7186), 'numpy.array', 'np.array', (['[[0.5, 0], [0.5, 0]]'], {}), '([[0.5, 0], [0.5, 0]])\n', (7164, 7186), True, 'import numpy as np\n'), ((8021, 8036), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (8028, 8036), True, 'import numpy as np\n'), ((8895, 8921), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (8903, 8921), True, 'import numpy as np\n'), ((3379, 3395), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), 
'((2, 2))\n', (3387, 3395), True, 'import numpy as np\n'), ((3543, 3559), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (3551, 3559), True, 'import numpy as np\n'), ((4084, 4100), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (4092, 4100), True, 'import numpy as np\n'), ((4248, 4264), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (4256, 4264), True, 'import numpy as np\n'), ((4901, 4916), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (4908, 4916), True, 'import numpy as np\n'), ((5064, 5079), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (5071, 5079), True, 'import numpy as np\n'), ((5792, 5807), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (5799, 5807), True, 'import numpy as np\n'), ((6065, 6091), 'numpy.array', 'np.array', (['[[0, 0], [0, 1]]'], {}), '([[0, 0], [0, 1]])\n', (6073, 6091), True, 'import numpy as np\n'), ((6634, 6696), 'numpy.array', 'np.array', (['[[[0.4, 0.6], [0.4, 0.6]], [[0.6, 0.6], [0.6, 0.6]]]'], {}), '([[[0.4, 0.6], [0.4, 0.6]], [[0.6, 0.6], [0.6, 0.6]]])\n', (6642, 6696), True, 'import numpy as np\n'), ((6893, 6912), 'numpy.zeros', 'np.zeros', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (6901, 6912), True, 'import numpy as np\n'), ((7499, 7561), 'numpy.array', 'np.array', (['[[[0.4, 0.6], [0.4, 0.6]], [[0.6, 0.6], [0.6, 0.6]]]'], {}), '([[[0.4, 0.6], [0.4, 0.6]], [[0.6, 0.6], [0.6, 0.6]]])\n', (7507, 7561), True, 'import numpy as np\n'), ((7758, 7777), 'numpy.zeros', 'np.zeros', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (7766, 7777), True, 'import numpy as np\n'), ((8316, 8362), 'numpy.array', 'np.array', (['[[[0, 1], [1, 1]], [[0, 1], [1, 1]]]'], {}), '([[[0, 1], [1, 1]], [[0, 1], [1, 1]]])\n', (8324, 8362), True, 'import numpy as np\n'), ((8559, 8605), 'numpy.array', 'np.array', (['[[[0, 0], [0, 1]], [[0, 0], [0, 1]]]'], {}), '([[[0, 0], [0, 1]], [[0, 0], [0, 1]]])\n', (8567, 8605), True, 'import numpy as np\n'), ((5999, 6014), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (6006, 6014), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
from tqdm.notebook import tqdm
class System():
def __init__(self,
num_part,
dim,
Ansatz=None,
External=None,
Internal=None,
Sampler=None
):
self.num_part = num_part
self.dim = dim
self.Ansatz = Ansatz
self.External = External
self.Internal = Internal
self.Sampler = Sampler
self.Ansatz.system = self
self.Sampler.system = self
class Metropolis():
def __init__(self, step_length, steps):
self.step_length = step_length
self.steps = steps
def __call__(self, batch_size):
total_accepted = 0
dim = self.system.dim
# inital position for walkers
x_old = tf.random.uniform(
(batch_size, dim), minval=-2, maxval=2, dtype=tf.dtypes.float64)
psi_old = self.system.Ansatz(x_old).numpy()
# thermalizing steps
for i in range(self.steps):
x_new = x_old + self.step_length * \
tf.random.uniform((batch_size, dim), minval=-1, maxval=1,
dtype=tf.dtypes.float64)
psi_new = self.system.Ansatz(x_new).numpy()
U = np.random.uniform(0, 1, (batch_size, 1))
# vectorized acceptance criterion
mask = ((psi_new / psi_old)**2 > U)[:, 0]
x_old = x_old.numpy()
x_new = x_new.numpy()
# update walkers
x_old[mask] = x_new[mask]
psi_old[mask] = psi_new[mask]
x_old = tf.convert_to_tensor(x_old, dtype=tf.dtypes.float64)
total_accepted += np.sum(mask)
return x_old, total_accepted
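# --- Hedged sketch (not part of the original sampler) ---
# The acceptance step above is vectorized: a move x_old -> x_new is accepted
# for every walker where (psi_new / psi_old)**2 > U with U ~ Uniform(0, 1),
# so all walkers are updated at once via a boolean mask.
def _example_acceptance_mask(psi_old, psi_new):
    U = np.random.uniform(0, 1, psi_old.shape)
    return (psi_new / psi_old) ** 2 > U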
class HarmonicOsc():
def __init__(self, omega):
self.omega = omega
    def __call__(self, x):
        # Isotropic harmonic trap: V = 0.5 * omega^2 * sum of squared coordinates
        V = 0.5 * self.omega**2 * \
tf.reshape(tf.reduce_sum(x**2, axis=1), (-1, 1))
return V
class Coulomb():
def __init__(self, alpha, beta):
self.alpha = alpha
self.beta = beta
    def __call__(self, x, num_part, dim):
        # Softened pairwise Coulomb interaction: V = sum_{i<j} alpha / sqrt(r_ij^2 + beta^2)
        V = 0
for i in range(num_part):
for j in range(i):
r12 = tf.norm(x[:, i * dim:(i + 1) * dim] -
x[:, j * dim:(j + 1) * dim], axis=1)
r12 = tf.reshape(r12, (-1, 1))
V += self.alpha / tf.math.sqrt(r12**2 + self.beta**2)
return V
def oneBodyDensity(pos, bins, mode="radial"):
if mode == "radial1D":
density = np.zeros(bins.shape[0])
r_min = bins[0]
dr = bins[1] - bins[0]
rPos = np.linalg.norm(pos, axis=1)
for r in tqdm(rPos):
try:
density[int((r - r_min) // dr)] += 1 / dr
except:
pass
return density
if mode == "radial2D":
density = np.zeros(bins.shape[0])
r_min = bins[0]
dr = bins[1] - bins[0]
rPos = np.linalg.norm(pos, axis=1)
for r in tqdm(rPos):
try:
density[int((r - r_min) // dr)] += 1 / (2 * np.pi * dr * r)
except:
pass
return density
if mode == "radial3D":
density = np.zeros(bins.shape[0])
r_min = bins[0]
dr = bins[1] - bins[0]
rPos = np.linalg.norm(pos, axis=1)
for r in tqdm(rPos):
try:
density[int((r - r_min) // dr)] += 1 / (4 * np.pi * dr * r**2)
except:
pass
return density
if mode == "1D":
density = np.zeros(bins.shape[0])
x_min = bins[0]
dx = bins[1] - bins[0]
for x in tqdm(pos):
try:
density[int((x - x_min) // dx)] += 1
except:
pass
return density / dx
if mode == "2D":
density = np.zeros((bins.shape[0], bins.shape[0]))
y_min = x_min = bins[0]
dy = dx = bins[1] - bins[0]
for x, y in tqdm(pos):
try:
density[int((x - x_min) // dx), int((y - y_min) // dy)] += 1
except:
pass
return density / pos.shape[0]
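# --- Hedged usage sketch (the positions below are random placeholders) ---
# Bins sampled particle positions into radial shells; the "radial3D" mode
# divides each count by the shell volume 4*pi*r^2*dr so the histogram
# approximates the one-body density rho(r).
def _example_one_body_density():
    positions = np.random.uniform(-2, 2, size=(1000, 3))
    bins = np.linspace(0, 3.5, 50)
    return oneBodyDensity(positions, bins, mode="radial3D")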
|
[
"tensorflow.random.uniform",
"tensorflow.reshape",
"tensorflow.reduce_sum",
"tensorflow.math.sqrt",
"numpy.linalg.norm",
"numpy.sum",
"numpy.zeros",
"numpy.random.uniform",
"tensorflow.convert_to_tensor",
"tqdm.notebook.tqdm",
"tensorflow.norm"
] |
[((842, 929), 'tensorflow.random.uniform', 'tf.random.uniform', (['(batch_size, dim)'], {'minval': '(-2)', 'maxval': '(2)', 'dtype': 'tf.dtypes.float64'}), '((batch_size, dim), minval=-2, maxval=2, dtype=tf.dtypes.\n float64)\n', (859, 929), True, 'import tensorflow as tf\n'), ((2594, 2617), 'numpy.zeros', 'np.zeros', (['bins.shape[0]'], {}), '(bins.shape[0])\n', (2602, 2617), True, 'import numpy as np\n'), ((2688, 2715), 'numpy.linalg.norm', 'np.linalg.norm', (['pos'], {'axis': '(1)'}), '(pos, axis=1)\n', (2702, 2715), True, 'import numpy as np\n'), ((2733, 2743), 'tqdm.notebook.tqdm', 'tqdm', (['rPos'], {}), '(rPos)\n', (2737, 2743), False, 'from tqdm.notebook import tqdm\n'), ((2931, 2954), 'numpy.zeros', 'np.zeros', (['bins.shape[0]'], {}), '(bins.shape[0])\n', (2939, 2954), True, 'import numpy as np\n'), ((3025, 3052), 'numpy.linalg.norm', 'np.linalg.norm', (['pos'], {'axis': '(1)'}), '(pos, axis=1)\n', (3039, 3052), True, 'import numpy as np\n'), ((3070, 3080), 'tqdm.notebook.tqdm', 'tqdm', (['rPos'], {}), '(rPos)\n', (3074, 3080), False, 'from tqdm.notebook import tqdm\n'), ((3286, 3309), 'numpy.zeros', 'np.zeros', (['bins.shape[0]'], {}), '(bins.shape[0])\n', (3294, 3309), True, 'import numpy as np\n'), ((3380, 3407), 'numpy.linalg.norm', 'np.linalg.norm', (['pos'], {'axis': '(1)'}), '(pos, axis=1)\n', (3394, 3407), True, 'import numpy as np\n'), ((3425, 3435), 'tqdm.notebook.tqdm', 'tqdm', (['rPos'], {}), '(rPos)\n', (3429, 3435), False, 'from tqdm.notebook import tqdm\n'), ((3638, 3661), 'numpy.zeros', 'np.zeros', (['bins.shape[0]'], {}), '(bins.shape[0])\n', (3646, 3661), True, 'import numpy as np\n'), ((3734, 3743), 'tqdm.notebook.tqdm', 'tqdm', (['pos'], {}), '(pos)\n', (3738, 3743), False, 'from tqdm.notebook import tqdm\n'), ((3925, 3965), 'numpy.zeros', 'np.zeros', (['(bins.shape[0], bins.shape[0])'], {}), '((bins.shape[0], bins.shape[0]))\n', (3933, 3965), True, 'import numpy as np\n'), ((4054, 4063), 'tqdm.notebook.tqdm', 'tqdm', (['pos'], {}), '(pos)\n', (4058, 4063), False, 'from tqdm.notebook import tqdm\n'), ((1311, 1351), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(batch_size, 1)'], {}), '(0, 1, (batch_size, 1))\n', (1328, 1351), True, 'import numpy as np\n'), ((1651, 1703), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x_old'], {'dtype': 'tf.dtypes.float64'}), '(x_old, dtype=tf.dtypes.float64)\n', (1671, 1703), True, 'import tensorflow as tf\n'), ((1734, 1746), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (1740, 1746), True, 'import numpy as np\n'), ((1953, 1982), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(x ** 2)'], {'axis': '(1)'}), '(x ** 2, axis=1)\n', (1966, 1982), True, 'import tensorflow as tf\n'), ((2260, 2334), 'tensorflow.norm', 'tf.norm', (['(x[:, i * dim:(i + 1) * dim] - x[:, j * dim:(j + 1) * dim])'], {'axis': '(1)'}), '(x[:, i * dim:(i + 1) * dim] - x[:, j * dim:(j + 1) * dim], axis=1)\n', (2267, 2334), True, 'import tensorflow as tf\n'), ((2387, 2411), 'tensorflow.reshape', 'tf.reshape', (['r12', '(-1, 1)'], {}), '(r12, (-1, 1))\n', (2397, 2411), True, 'import tensorflow as tf\n'), ((1121, 1208), 'tensorflow.random.uniform', 'tf.random.uniform', (['(batch_size, dim)'], {'minval': '(-1)', 'maxval': '(1)', 'dtype': 'tf.dtypes.float64'}), '((batch_size, dim), minval=-1, maxval=1, dtype=tf.dtypes.\n float64)\n', (1138, 1208), True, 'import tensorflow as tf\n'), ((2446, 2485), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['(r12 ** 2 + self.beta ** 2)'], {}), '(r12 ** 2 + self.beta ** 2)\n', (2458, 2485), True, 'import 
tensorflow as tf\n')]
|
"""
Demonstrates the hover functionality of mpldatacursor as well as point labels
and a custom formatting function. Notice that overlapping points have both
labels displayed.
"""
import string
import matplotlib.pyplot as plt
import numpy as np
from mpldatacursor import datacursor
np.random.seed(1977)
x, y = np.random.random((2, 26))
labels = string.ascii_lowercase
fig, ax = plt.subplots()
ax.scatter(x, y, s=200)
ax.set_title('Mouse over a point')
# Show only the point label and allow nicer formatting if points overlap
formatter = lambda **kwargs: ', '.join(kwargs['point_label'])
datacursor(hover=True, formatter=formatter, point_labels=labels)
plt.show()
|
[
"numpy.random.random",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"mpldatacursor.datacursor",
"matplotlib.pyplot.show"
] |
[((281, 301), 'numpy.random.seed', 'np.random.seed', (['(1977)'], {}), '(1977)\n', (295, 301), True, 'import numpy as np\n'), ((310, 335), 'numpy.random.random', 'np.random.random', (['(2, 26)'], {}), '((2, 26))\n', (326, 335), True, 'import numpy as np\n'), ((379, 393), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (391, 393), True, 'import matplotlib.pyplot as plt\n'), ((589, 653), 'mpldatacursor.datacursor', 'datacursor', ([], {'hover': '(True)', 'formatter': 'formatter', 'point_labels': 'labels'}), '(hover=True, formatter=formatter, point_labels=labels)\n', (599, 653), False, 'from mpldatacursor import datacursor\n'), ((655, 665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (663, 665), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
# import matplotlib.pyplot as plt
from scipy.cluster.vq import kmeans
# def plothist(x):
# vmin = x.min()-1
# vmax = x.max()+1
# bins = np.arange(vmin, vmax, (vmax - vmin)/50)
# plt.hist(x, bins=bins)
# plt.show()
# def scatterpred(pred):
# plt.scatter(pred[:,0], pred[:,1])
# plt.show()
# def scatter_kmeans(pred):
# plt.scatter(pred[:,0], pred[:,1], color='b')
# c,v = kmeans(pred, 8)
# plt.scatter(c[:,0], c[:,1], color='r')
# plt.show()
def most_assigned(x, c):
nb_c = len(c)
assign = np.zeros(nb_c)
for i in range(len(x)):
y = x[i].reshape((1,2))
d = np.sqrt(np.sum(np.power(y.repeat(nb_c, axis=0) - c, 2), axis=1))
assign[d.argmin()] += 1
return assign.argmax()
def mean_on_most_assigned(x, c):
nb_c = len(c)
assign = np.zeros(nb_c)
mean = np.zeros(c.shape)
for i in range(len(x)):
y = x[i].reshape((1,2))
d = np.sqrt(np.sum(np.power(y.repeat(nb_c, axis=0) - c, 2), axis=1))
idx = d.argmin()
assign[idx] += 1
mean[idx,:] += x[i]
idx = assign.argmax()
return mean[idx,:] / assign[idx]
# def best_kmeans(pred):
# plt.scatter(pred[:,0], pred[:,1], color='b')
# c,v = kmeans(pred, 3)
# plt.scatter(c[:,0], c[:,1], color='g')
# n = most_assigned(pred, c)
# plt.scatter(c[n,0], c[n,1], color='r')
# plt.show()
def clustering_joints(y_pred, k=3):
_,nb_spl,nb_joints,dim = y_pred.shape
y = np.zeros((nb_spl, nb_joints, dim))
for s in range(nb_spl):
for j in range(nb_joints):
d = y_pred[:,s,j]
c,v = kmeans(d, k)
n = most_assigned(d, c)
y[s,j,:] = c[n]
return y
def clustering_grid(y_pred, size=10):
_, nb_spl, nb_joints, dim = y_pred.shape
assert dim == 2
yp = np.zeros((nb_spl, nb_joints, dim))
for s in range(nb_spl):
for j in range(nb_joints):
d = y_pred[:,s,j,:]
xmin = d[:,0].min()
ymin = d[:,1].min()
xmax = d[:,0].max()
ymax = d[:,1].max()
xstep = (xmax - xmin) / size
ystep = (ymax - ymin) / size
c = np.zeros((size * size, dim))
for x in range(size):
for y in range(size):
c[x + size*y, 0] = xmin + (x + 0.5) * xstep
c[x + size*y, 1] = ymin + (y + 0.5) * ystep
yp[s,j,:] = mean_on_most_assigned(d, c)
return yp
def mean_joints(y_pred):
_, nb_spl, dim, nb_joints = y_pred.shape
assert dim == 2
yp = np.zeros((nb_spl, dim, nb_joints))
for s in range(nb_spl):
for j in range(nb_joints):
d = y_pred[:,s,:,j]
yp[s, 0, j] = d[:,0].mean()
yp[s, 1, j] = d[:,1].mean()
return yp
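# --- Hedged usage sketch (random data; shapes follow the functions above) ---
# y_pred stacks several stochastic predictions per sample and joint; for each
# joint, clustering_joints keeps the k-means centre that most predictions fall
# closest to.
def _example_clustering():
    y_pred = np.random.randn(20, 4, 16, 2)  # (predictions, samples, joints, xy)
    return clustering_joints(y_pred, k=3)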
|
[
"numpy.zeros",
"scipy.cluster.vq.kmeans"
] |
[((564, 578), 'numpy.zeros', 'np.zeros', (['nb_c'], {}), '(nb_c)\n', (572, 578), True, 'import numpy as np\n'), ((840, 854), 'numpy.zeros', 'np.zeros', (['nb_c'], {}), '(nb_c)\n', (848, 854), True, 'import numpy as np\n'), ((866, 883), 'numpy.zeros', 'np.zeros', (['c.shape'], {}), '(c.shape)\n', (874, 883), True, 'import numpy as np\n'), ((1494, 1528), 'numpy.zeros', 'np.zeros', (['(nb_spl, nb_joints, dim)'], {}), '((nb_spl, nb_joints, dim))\n', (1502, 1528), True, 'import numpy as np\n'), ((1843, 1877), 'numpy.zeros', 'np.zeros', (['(nb_spl, nb_joints, dim)'], {}), '((nb_spl, nb_joints, dim))\n', (1851, 1877), True, 'import numpy as np\n'), ((2594, 2628), 'numpy.zeros', 'np.zeros', (['(nb_spl, dim, nb_joints)'], {}), '((nb_spl, dim, nb_joints))\n', (2602, 2628), True, 'import numpy as np\n'), ((1640, 1652), 'scipy.cluster.vq.kmeans', 'kmeans', (['d', 'k'], {}), '(d, k)\n', (1646, 1652), False, 'from scipy.cluster.vq import kmeans\n'), ((2199, 2227), 'numpy.zeros', 'np.zeros', (['(size * size, dim)'], {}), '((size * size, dim))\n', (2207, 2227), True, 'import numpy as np\n')]
|
# Import required libraries
import cv2
from os.path import os, dirname
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import random
# List of categories (directories names)
CATEGORIES = ["bad_apple", "bad_grape", "bad_pear", "cherry", "good_apple", "good_avocado", "good_grape", "good_pear", "ripe_avocado"]
# Level 2 - display information about errors only
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Commented line = use GPU
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# Source folder path
main_dir = dirname(os.path.abspath(__file__))
# Paths to image database (train, test, all)
training_dir = os.path.join(main_dir, 'database', 'training')
testing_dir = os.path.join(main_dir, 'database', 'testing')
all_dir = os.path.join(main_dir, 'database', 'all')
# Currently used path
DATADIR = testing_dir
# Read a single sample image to check that the dataset can be loaded
# (both loops break after the first image)
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path, img))
break
break
# Variable to store training data
testing_data = []
# Build the testing data: a list of [image array, class index] pairs
def create_testing_data():
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
class_num = CATEGORIES.index(category)
for img in tqdm(os.listdir(path)):
try:
img_array = cv2.imread(os.path.join(path, img))
testing_data.append([img_array, class_num])
            except Exception:
                # Skip images that cannot be read
                pass
# Call the function
create_testing_data()
# Shuffle test data
random.shuffle(testing_data)
# Create array variables to store objects and labels
X = []
y = []
# Save objects and labels to arrays
for features, label in testing_data:
X.append(features)
y.append(label)
# Convert arrays to NumPy matrices
X = np.array(X)
y = np.array(y)
# Change the value range from 0-255 to 0-1
X = X / 255.0
# Load the trained model from given path
keras_model_path = os.path.join(main_dir, 'models', 'test')
model = tf.keras.models.load_model(keras_model_path)
# Display model summary
model.summary()
# Display information about the effectiveness of test data classification
loss, acc = model.evaluate(X, y, verbose=2)
print('Accuracy: {:5.2f}%'.format(100 * acc))
print('Loss: {:5.2f}'.format(loss))
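# Added example (not in the original script): inspect a few individual
# predictions, assuming the test set contains at least five images.
predictions = model.predict(X[:5])
for probs, true_label in zip(predictions, y[:5]):
    print('Predicted: {} | actual: {}'.format(CATEGORIES[np.argmax(probs)], CATEGORIES[true_label]))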
|
[
"random.shuffle",
"os.path.os.listdir",
"numpy.array",
"tensorflow.keras.models.load_model",
"os.path.os.path.join",
"os.path.os.path.abspath"
] |
[((618, 664), 'os.path.os.path.join', 'os.path.join', (['main_dir', '"""database"""', '"""training"""'], {}), "(main_dir, 'database', 'training')\n", (630, 664), False, 'from os.path import os, dirname\n'), ((679, 724), 'os.path.os.path.join', 'os.path.join', (['main_dir', '"""database"""', '"""testing"""'], {}), "(main_dir, 'database', 'testing')\n", (691, 724), False, 'from os.path import os, dirname\n'), ((735, 776), 'os.path.os.path.join', 'os.path.join', (['main_dir', '"""database"""', '"""all"""'], {}), "(main_dir, 'database', 'all')\n", (747, 776), False, 'from os.path import os, dirname\n'), ((1646, 1674), 'random.shuffle', 'random.shuffle', (['testing_data'], {}), '(testing_data)\n', (1660, 1674), False, 'import random\n'), ((1900, 1911), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1908, 1911), True, 'import numpy as np\n'), ((1916, 1927), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1924, 1927), True, 'import numpy as np\n'), ((2047, 2087), 'os.path.os.path.join', 'os.path.join', (['main_dir', '"""models"""', '"""test"""'], {}), "(main_dir, 'models', 'test')\n", (2059, 2087), False, 'from os.path import os, dirname\n'), ((2096, 2140), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['keras_model_path'], {}), '(keras_model_path)\n', (2122, 2140), True, 'import tensorflow as tf\n'), ((530, 555), 'os.path.os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (545, 555), False, 'from os.path import os, dirname\n'), ((912, 943), 'os.path.os.path.join', 'os.path.join', (['DATADIR', 'category'], {}), '(DATADIR, category)\n', (924, 943), False, 'from os.path import os, dirname\n'), ((959, 975), 'os.path.os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (969, 975), False, 'from os.path import os, dirname\n'), ((1262, 1293), 'os.path.os.path.join', 'os.path.join', (['DATADIR', 'category'], {}), '(DATADIR, category)\n', (1274, 1293), False, 'from os.path import os, dirname\n'), ((1008, 1031), 'os.path.os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (1020, 1031), False, 'from os.path import os, dirname\n'), ((1365, 1381), 'os.path.os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1375, 1381), False, 'from os.path import os, dirname\n'), ((1440, 1463), 'os.path.os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (1452, 1463), False, 'from os.path import os, dirname\n')]
|
"""Covers import of data downloaded from the
`Meadows online behavior platform <https://meadows-research.com/>`_.
For information on available file types see the meadows
`documentation on downloads <https://meadows-research.com/documentation\
/researcher/downloads/>`_.
"""
from os.path import basename
import numpy
from scipy.io import loadmat
from pyrsa.rdm.rdms import RDMs
def load_rdms(fpath, sort=True):
"""Read a Meadows results file and return any RDMs as a pyrsa object
Args:
fpath (str): path to .mat Meadows results file
sort (bool): whether to sort the RDM based on the stimulus names
Raises:
ValueError: Will raise an error if the file is missing an expected
variable. This can happen if the file does not contain MA task
data.
Returns:
RDMs: All rdms found in the data file as an RDMs object
"""
info = extract_filename_segments(fpath)
data = loadmat(fpath)
if info['participant_scope'] == 'single':
for var in ('stimuli', 'rdmutv'):
if var not in data:
raise ValueError(f'File missing variable: {var}')
utvs = data['rdmutv']
stimuli_fnames = data['stimuli']
pnames = [info['participant']]
else:
stim_vars = [v for v in data.keys() if v[:7] == 'stimuli']
stimuli_fnames = data[stim_vars[0]]
pnames = ['-'.join(v.split('_')[1:]) for v in stim_vars]
utv_vars = ['rdmutv_' + p.replace('-', '_') for p in pnames]
utvs = numpy.squeeze(numpy.stack([data[v] for v in utv_vars]))
desc_info_keys = (
'participant',
'task_index',
'task_name',
'experiment_name'
)
conds = [f.split('.')[0] for f in stimuli_fnames]
rdms = RDMs(
utvs,
dissimilarity_measure='euclidean',
descriptors={k: info[k] for k in desc_info_keys if k in info},
rdm_descriptors=dict(participants=pnames),
pattern_descriptors=dict(conds=conds),
)
if sort:
rdms.sort_by(conds='alpha')
return rdms
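# --- Hedged usage sketch; the file path below is hypothetical and the helper
# is never called. Reading a single-participant download returns one RDMs
# object whose participant nicknames and condition names are stored as
# descriptors.
def _example_load(fpath='Meadows_myExp_v_v2_profound-mammal_3_rdmutv.mat'):
    rdms = load_rdms(fpath)
    return rdms.rdm_descriptors['participants'], rdms.pattern_descriptors['conds']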
def extract_filename_segments(fpath):
"""Get information from the name of a downloaded results file
Will determine:
* participant_scope: 'single' or 'multiple', how many participant
sessions this file covers.
* task_scope: 'single' or 'multiple', how many experiment tasks this
file covers.
* participant: the Meadows nickname of the participant, if this is a
single participation file.
* task_index: the 1-based index of the task in the experiment, if
this is a single participant file.
* task_name: the name of the task in the experiment, if
this is not a single participant file.
* version: the experiment version as a string.
* experiment_name: name of the experiment on Meadows.
* structure: the structure of the data contained, one of 'tree',
'events', '1D', '2D', etc.
* filetype: the file extension and file format used to serialize the
data.
Args:
fpath (str): File system path to downloaded file
Returns:
dict: Dictionary with the fields described above.
"""
fname, ext = basename(fpath).split('.')
segments = fname.split('_')
info = dict(
task_scope='single',
version=segments[3].replace('v', ''),
experiment_name=segments[1],
structure=segments[-1],
filetype=ext
)
if segments[-2].isdigit():
info['participant_scope'] = 'single'
info['participant'] = segments[-3]
info['task_index'] = int(segments[-2])
else:
info['participant_scope'] = 'multiple'
info['task_name'] = segments[-2]
return info
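# --- Illustrative usage (hedged; the file name below is hypothetical) ---
# For 'Meadows_myExp_v_v2_profound-mammal_3_1D.mat' the segments map to:
# experiment_name='myExp', version='2', participant='profound-mammal',
# task_index=3, structure='1D', participant_scope='single'.
def _example_filename_segments():
    return extract_filename_segments('Meadows_myExp_v_v2_profound-mammal_3_1D.mat')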
|
[
"numpy.stack",
"scipy.io.loadmat",
"os.path.basename"
] |
[((948, 962), 'scipy.io.loadmat', 'loadmat', (['fpath'], {}), '(fpath)\n', (955, 962), False, 'from scipy.io import loadmat\n'), ((1543, 1583), 'numpy.stack', 'numpy.stack', (['[data[v] for v in utv_vars]'], {}), '([data[v] for v in utv_vars])\n', (1554, 1583), False, 'import numpy\n'), ((3258, 3273), 'os.path.basename', 'basename', (['fpath'], {}), '(fpath)\n', (3266, 3273), False, 'from os.path import basename\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @File : Qrbar_test.py
import cv2
import numpy as np
from pyzbar.pyzbar import decode
img = cv2.imread('qrcode.png')
for barcode in decode(img):
print(barcode.data.decode('utf-8'))
print(barcode.data)
pts = np.array([barcode.polygon], np.int32)
pts = pts.reshape((-1, 1, 2))
print(pts)
print(barcode.rect)
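    # Added example (not in the original): outline the detected code and label it
    cv2.polylines(img, [pts], True, (255, 0, 255), 3)
    cv2.putText(img, barcode.data.decode('utf-8'), (barcode.rect[0], barcode.rect[1] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 255), 2)
# Uncomment to display the annotated image:
# cv2.imshow('Result', img)
# cv2.waitKey(0)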
|
[
"pyzbar.pyzbar.decode",
"numpy.array",
"cv2.imread"
] |
[((141, 165), 'cv2.imread', 'cv2.imread', (['"""qrcode.png"""'], {}), "('qrcode.png')\n", (151, 165), False, 'import cv2\n'), ((181, 192), 'pyzbar.pyzbar.decode', 'decode', (['img'], {}), '(img)\n', (187, 192), False, 'from pyzbar.pyzbar import decode\n'), ((268, 305), 'numpy.array', 'np.array', (['[barcode.polygon]', 'np.int32'], {}), '([barcode.polygon], np.int32)\n', (276, 305), True, 'import numpy as np\n')]
|
# pommerman/cli/run_battle.py
# pommerman/agents/TensorFlowAgent/pit.py
import atexit
from datetime import datetime
import os
import random
import sys
import time
import argparse
import numpy as np
from pommerman import helpers, make
from TensorFlowAgent import TensorFlowAgent
from pommerman import utility
import tensorflow as tf
class Pit(object):
def __init__(self, tfa, saver, game_nums=2):
self.tfa = tfa
self.saver = saver
self.game_nums = game_nums
def launch_games(self, sess, render=True):
sess.run(tf.global_variables_initializer())
self.tfa.restore_weigths(sess, self.saver)
env = self.tfa.getEnv()
reward_board = np.zeros((1, 4))
for i in range(self.game_nums):
curr_state = env.reset()
while True:
if render:
env.render()
all_actions = env.act(curr_state)
next_state, reward, terminal, _ = env.step(all_actions)
if terminal:
reward_board += np.array(reward)
print("Game #{0}, rewards = {1}, reward agent = {2}".format(i, "".join(str(i) + " " for i in reward), reward[self.tfa.agent_id]))
break
def main(args):
tf.reset_default_graph()
with tf.Session() as sess:
tfa = TensorFlowAgent(name="TFA", args=args, sess=sess)
saver = tf.train.Saver(allow_empty=True)
pit = Pit(tfa, saver, game_nums=2)
pit.launch_games(sess)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--environment", type=str, default="pommerman")
parser.add_argument("--policy", type=str, default="MlpPolicy")
parser.add_argument("--checkpoint_dir", type=str, default="./save_model")
parser.add_argument("--a_learning_rate", type=float, default=0.0001)
parser.add_argument("--c_learning_rate", type=float, default=0.0002)
parser.add_argument('--summary_dir', type=str, default='./summary_log')
parser.add_argument("--cliprange", type=float, default=0.2)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--training_step", type=int, default=10)
parser.add_argument("--gamma", type=float, default=0.9)
parser.add_argument("--train", type=str, default="False", choices=["False"])
parser.add_argument("--type", type=str, default="Simple", choices=["Simple, CNN"])
args = parser.parse_args()
main(args)
|
[
"tensorflow.reset_default_graph",
"argparse.ArgumentParser",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"numpy.array",
"TensorFlowAgent.TensorFlowAgent",
"numpy.zeros"
] |
[((1290, 1314), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1312, 1314), True, 'import tensorflow as tf\n'), ((1580, 1605), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1603, 1605), False, 'import argparse\n'), ((701, 717), 'numpy.zeros', 'np.zeros', (['(1, 4)'], {}), '((1, 4))\n', (709, 717), True, 'import numpy as np\n'), ((1325, 1337), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1335, 1337), True, 'import tensorflow as tf\n'), ((1362, 1411), 'TensorFlowAgent.TensorFlowAgent', 'TensorFlowAgent', ([], {'name': '"""TFA"""', 'args': 'args', 'sess': 'sess'}), "(name='TFA', args=args, sess=sess)\n", (1377, 1411), False, 'from TensorFlowAgent import TensorFlowAgent\n'), ((1428, 1460), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'allow_empty': '(True)'}), '(allow_empty=True)\n', (1442, 1460), True, 'import tensorflow as tf\n'), ((559, 592), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (590, 592), True, 'import tensorflow as tf\n'), ((1073, 1089), 'numpy.array', 'np.array', (['reward'], {}), '(reward)\n', (1081, 1089), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
import unittest
from xcenternet.model.evaluation.overlap import compute_overlap
from xcenternet.model.evaluation.mean_average_precision import MAP
class TestMeanAveragePrecision(unittest.TestCase):
def setUp(self):
self.map_bboxes = np.array(
[
[[20, 10, 80, 60], [10, 40, 40, 90], [0, 0, 100, 100]],
[[0, 0, 10, 10], [20, 20, 40, 90], [80, 20, 100, 50]],
],
dtype=np.float64,
)
self.map_labels = np.array([[0, 0, 1], [0, 0, 0]])
self.map_predictions = np.array(
[
[
[10, 40, 40, 90, 0.1, 0], # overlap 1.00 with bbox #2, low prob
[60, 10, 90, 60, 0.5, 0], # overlap 0.29 with bbox #1
[10, 30, 50, 90, 0.7, 0], # overlap 0.625 with bbox #2
[0, 0, 100, 90, 0.7, 1], # overlap 0.9 with bbox #3
[0, 0, 100, 80, 0.7, 1], # overlap 0.8 with bbox #3
],
[
[20, 20, 30, 50, 0.6, 0], # 0.21 overlap with #2
[2, 0, 10, 11, 0.8, 0], # overlap with #1
[0, 2, 14, 10, 0.9, 0], # overlap with #1
[0, 0, 10, 10, 0.7, 1], # no ground truth for 1
[80, 20, 100, 50, 0.1, 1], # no ground truth for 1
],
],
dtype=np.float32,
)
self.map_masks = np.array([[1, 1, 1], [1, 1, 1]], dtype=np.float32)
self.result_1 = {"overall": 3 / 4, "weighted": 2 / 3, "per_class": {0: (0.5, 2), 1: (1.0, 1)}}
self.result_both = {"overall": 2 / 3, "weighted": 4 / 9, "per_class": {0: (1 / 3, 5), 1: (1.0, 1)}}
def test_compute_overlap(self):
boxes1 = np.array([[10, 10, 30, 50], [10, 10, 30, 30]], dtype=np.float64)
boxes2 = np.array([[10, 10, 30, 50], [10, 10, 40, 40], [100, 70, 110, 90]], dtype=np.float64)
overlap = compute_overlap(boxes1, boxes2)
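        # Worked check of one expected value: boxes [10, 10, 30, 50] and
        # [10, 10, 40, 40] intersect over [10, 10, 30, 40] (area 600); the
        # union is 800 + 900 - 600 = 1100, giving an overlap of 6/11.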
self.assertAlmostEqual(1.0, overlap[0][0])
self.assertAlmostEqual(6 / 11, overlap[0][1])
self.assertAlmostEqual(0.0, overlap[0][2])
self.assertAlmostEqual(0.5, overlap[1][0])
self.assertAlmostEqual(4 / 9, overlap[1][1])
self.assertAlmostEqual(0.0, overlap[1][2])
def test_map_update_one(self):
mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
mean_average_precision.update_state(self.map_predictions[0], self.map_bboxes[0], self.map_labels[0])
result = mean_average_precision.result()
self._assert_map(result, self.result_1)
def test_map_update_both(self):
mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
mean_average_precision.update_state(self.map_predictions[0], self.map_bboxes[0], self.map_labels[0])
mean_average_precision.update_state(self.map_predictions[1], self.map_bboxes[1], self.map_labels[1])
result = mean_average_precision.result()
self._assert_map(result, self.result_both)
def test_map_update_batch_one(self):
mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
mean_average_precision.update_state_batch(
tf.constant([self.map_predictions[0]]),
tf.constant([self.map_bboxes[0]]),
tf.constant([self.map_labels[0]]),
tf.constant([self.map_masks[0]]),
)
result = mean_average_precision.result()
self._assert_map(result, self.result_1)
def test_map_update_batch_both(self):
mean_average_precision = MAP(2, iou_threshold=0.5, score_threshold=0.3)
mean_average_precision.update_state_batch(
tf.constant(self.map_predictions),
tf.constant(self.map_bboxes),
tf.constant(self.map_labels),
tf.constant(self.map_masks),
)
result = mean_average_precision.result()
self._assert_map(result, self.result_both)
def _assert_map(self, first, second):
self.assertAlmostEqual(first["overall"], second["overall"])
self.assertAlmostEqual(first["weighted"], second["weighted"])
self.assertAlmostEqual(first["per_class"][0][0], second["per_class"][0][0]) # mAP
self.assertAlmostEqual(first["per_class"][0][1], second["per_class"][0][1]) # num objects
self.assertAlmostEqual(first["per_class"][1][0], second["per_class"][1][0]) # mAP
self.assertAlmostEqual(first["per_class"][1][1], second["per_class"][1][1]) # num objects
if __name__ == "__main__":
unittest.main()
|
[
"numpy.array",
"tensorflow.constant",
"unittest.main",
"xcenternet.model.evaluation.overlap.compute_overlap",
"xcenternet.model.evaluation.mean_average_precision.MAP"
] |
[((4629, 4644), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4642, 4644), False, 'import unittest\n'), ((291, 435), 'numpy.array', 'np.array', (['[[[20, 10, 80, 60], [10, 40, 40, 90], [0, 0, 100, 100]], [[0, 0, 10, 10], [\n 20, 20, 40, 90], [80, 20, 100, 50]]]'], {'dtype': 'np.float64'}), '([[[20, 10, 80, 60], [10, 40, 40, 90], [0, 0, 100, 100]], [[0, 0, \n 10, 10], [20, 20, 40, 90], [80, 20, 100, 50]]], dtype=np.float64)\n', (299, 435), True, 'import numpy as np\n'), ((539, 571), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, 0, 0]]'], {}), '([[0, 0, 1], [0, 0, 0]])\n', (547, 571), True, 'import numpy as np\n'), ((603, 901), 'numpy.array', 'np.array', (['[[[10, 40, 40, 90, 0.1, 0], [60, 10, 90, 60, 0.5, 0], [10, 30, 50, 90, 0.7,\n 0], [0, 0, 100, 90, 0.7, 1], [0, 0, 100, 80, 0.7, 1]], [[20, 20, 30, 50,\n 0.6, 0], [2, 0, 10, 11, 0.8, 0], [0, 2, 14, 10, 0.9, 0], [0, 0, 10, 10,\n 0.7, 1], [80, 20, 100, 50, 0.1, 1]]]'], {'dtype': 'np.float32'}), '([[[10, 40, 40, 90, 0.1, 0], [60, 10, 90, 60, 0.5, 0], [10, 30, 50,\n 90, 0.7, 0], [0, 0, 100, 90, 0.7, 1], [0, 0, 100, 80, 0.7, 1]], [[20, \n 20, 30, 50, 0.6, 0], [2, 0, 10, 11, 0.8, 0], [0, 2, 14, 10, 0.9, 0], [0,\n 0, 10, 10, 0.7, 1], [80, 20, 100, 50, 0.1, 1]]], dtype=np.float32)\n', (611, 901), True, 'import numpy as np\n'), ((1500, 1550), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 1, 1]]'], {'dtype': 'np.float32'}), '([[1, 1, 1], [1, 1, 1]], dtype=np.float32)\n', (1508, 1550), True, 'import numpy as np\n'), ((1817, 1881), 'numpy.array', 'np.array', (['[[10, 10, 30, 50], [10, 10, 30, 30]]'], {'dtype': 'np.float64'}), '([[10, 10, 30, 50], [10, 10, 30, 30]], dtype=np.float64)\n', (1825, 1881), True, 'import numpy as np\n'), ((1899, 1988), 'numpy.array', 'np.array', (['[[10, 10, 30, 50], [10, 10, 40, 40], [100, 70, 110, 90]]'], {'dtype': 'np.float64'}), '([[10, 10, 30, 50], [10, 10, 40, 40], [100, 70, 110, 90]], dtype=np\n .float64)\n', (1907, 1988), True, 'import numpy as np\n'), ((2003, 2034), 'xcenternet.model.evaluation.overlap.compute_overlap', 'compute_overlap', (['boxes1', 'boxes2'], {}), '(boxes1, boxes2)\n', (2018, 2034), False, 'from xcenternet.model.evaluation.overlap import compute_overlap\n'), ((2415, 2461), 'xcenternet.model.evaluation.mean_average_precision.MAP', 'MAP', (['(2)'], {'iou_threshold': '(0.5)', 'score_threshold': '(0.3)'}), '(2, iou_threshold=0.5, score_threshold=0.3)\n', (2418, 2461), False, 'from xcenternet.model.evaluation.mean_average_precision import MAP\n'), ((2739, 2785), 'xcenternet.model.evaluation.mean_average_precision.MAP', 'MAP', (['(2)'], {'iou_threshold': '(0.5)', 'score_threshold': '(0.3)'}), '(2, iou_threshold=0.5, score_threshold=0.3)\n', (2742, 2785), False, 'from xcenternet.model.evaluation.mean_average_precision import MAP\n'), ((3180, 3226), 'xcenternet.model.evaluation.mean_average_precision.MAP', 'MAP', (['(2)'], {'iou_threshold': '(0.5)', 'score_threshold': '(0.3)'}), '(2, iou_threshold=0.5, score_threshold=0.3)\n', (3183, 3226), False, 'from xcenternet.model.evaluation.mean_average_precision import MAP\n'), ((3654, 3700), 'xcenternet.model.evaluation.mean_average_precision.MAP', 'MAP', (['(2)'], {'iou_threshold': '(0.5)', 'score_threshold': '(0.3)'}), '(2, iou_threshold=0.5, score_threshold=0.3)\n', (3657, 3700), False, 'from xcenternet.model.evaluation.mean_average_precision import MAP\n'), ((3290, 3328), 'tensorflow.constant', 'tf.constant', (['[self.map_predictions[0]]'], {}), '([self.map_predictions[0]])\n', (3301, 3328), True, 'import tensorflow as tf\n'), ((3342, 3375), 
'tensorflow.constant', 'tf.constant', (['[self.map_bboxes[0]]'], {}), '([self.map_bboxes[0]])\n', (3353, 3375), True, 'import tensorflow as tf\n'), ((3389, 3422), 'tensorflow.constant', 'tf.constant', (['[self.map_labels[0]]'], {}), '([self.map_labels[0]])\n', (3400, 3422), True, 'import tensorflow as tf\n'), ((3436, 3468), 'tensorflow.constant', 'tf.constant', (['[self.map_masks[0]]'], {}), '([self.map_masks[0]])\n', (3447, 3468), True, 'import tensorflow as tf\n'), ((3764, 3797), 'tensorflow.constant', 'tf.constant', (['self.map_predictions'], {}), '(self.map_predictions)\n', (3775, 3797), True, 'import tensorflow as tf\n'), ((3811, 3839), 'tensorflow.constant', 'tf.constant', (['self.map_bboxes'], {}), '(self.map_bboxes)\n', (3822, 3839), True, 'import tensorflow as tf\n'), ((3853, 3881), 'tensorflow.constant', 'tf.constant', (['self.map_labels'], {}), '(self.map_labels)\n', (3864, 3881), True, 'import tensorflow as tf\n'), ((3895, 3922), 'tensorflow.constant', 'tf.constant', (['self.map_masks'], {}), '(self.map_masks)\n', (3906, 3922), True, 'import tensorflow as tf\n')]
|
import os.path
from typing import Sequence, Optional, Dict
import numpy as np
import pandas as pd
from nk_sent2vec import Sent2Vec as _Sent2Vec
from d3m import container, utils
from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase
from d3m.primitive_interfaces.base import CallResult
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import hyperparams, base as metadata_base, params
__author__ = "Distil"
__version__ = "1.3.0"
__contact__ = "mailto:<EMAIL>"
Inputs = container.pandas.DataFrame
Outputs = container.pandas.DataFrame
class Hyperparams(hyperparams.Hyperparams):
use_columns = hyperparams.Set(
elements=hyperparams.Hyperparameter[int](-1),
default=(),
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
)
class Sent2VecPrimitive(TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
"""
Produce numerical representations (features) for short texts or sentences.
Parameters
----------
inputs : Input pandas dataframe
Returns
-------
Outputs
The output is a pandas dataframe
"""
metadata = metadata_base.PrimitiveMetadata(
{
            # Simply a UUID generated once and fixed forever. Generated using "uuid.uuid4()".
"id": "cf450079-9333-4a3f-aed4-b77a4e8c7be7",
"version": __version__,
"name": "sent2vec_wrapper",
# Keywords do not have a controlled vocabulary. Authors can put here whatever they find suitable.
"keywords": ["Sent2Vec", "Embedding", "NLP", "Natural Language Processing"],
"source": {
"name": __author__,
"contact": __contact__,
"uris": [
# Unstructured URIs.
"https://github.com/kungfuai/d3m-primitives"
],
},
# A list of dependencies in order. These can be Python packages, system packages, or Docker images.
# Of course Python packages can also have their own dependencies, but sometimes it is necessary to
# install a Python package first to be even able to run setup.py of another package. Or you have
# a dependency which is not on PyPi.
"installation": [
{"type": "PIP", "package": "cython", "version": "0.29.16"},
{
"type": metadata_base.PrimitiveInstallationType.PIP,
"package_uri": "git+https://github.com/kungfuai/d3m-primitives.git@{git_commit}#egg=kf-d3m-primitives".format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)),
),
},
{
"type": "FILE",
"key": "sent2vec_model",
"file_uri": "http://public.datadrivendiscovery.org/twitter_bigrams.bin",
"file_digest": "9e8ccfea2aaa4435ca61b05b11b60e1a096648d56fff76df984709339f423dd6",
},
],
# The same path the primitive is registered with entry points in setup.py.
"python_path": "d3m.primitives.feature_extraction.nk_sent2vec.Sent2Vec",
# Choose these from a controlled vocabulary in the schema. If anything is missing which would
# best describe the primitive, make a merge request.
"algorithm_types": [metadata_base.PrimitiveAlgorithmType.VECTORIZATION],
"primitive_family": metadata_base.PrimitiveFamily.FEATURE_EXTRACTION,
}
)
# class instance to avoid unnecessary re-init on subsequent produce calls
_vectorizer: Optional[_Sent2Vec] = None
def __init__(
self,
*,
hyperparams: Hyperparams,
random_seed: int = 0,
volumes: Dict[str, str] = None
) -> None:
super().__init__(
hyperparams=hyperparams, random_seed=random_seed, volumes=volumes
)
self.volumes = volumes
def produce(
self, *, inputs: Inputs, timeout: float = None, iterations: int = None
) -> CallResult[Outputs]:
"""
Produce numerical representations (features) for short texts or sentences.
Parameters
----------
inputs : Input pandas dataframe
Returns
-------
Outputs
The output is a pandas dataframe
"""
# figure out columns to operate on
cols = self._get_operating_columns(inputs, self.hyperparams['use_columns'], ('http://schema.org/Text',))
frame = inputs.iloc[:, cols]
outputs = inputs.copy()
try:
# lazy load the model and keep it around for subsequent produce calls
if Sent2VecPrimitive._vectorizer is None:
Sent2VecPrimitive._vectorizer = _Sent2Vec(path=self.volumes["sent2vec_model"])
output_vectors = []
for col in range(frame.shape[1]):
text = frame.iloc[:, col].tolist()
embedded_sentences = Sent2VecPrimitive._vectorizer.embed_sentences(sentences=text)
output_vectors.append(embedded_sentences)
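            # stack the per-column embeddings and flatten them into one row per input sentence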
embedded_df = pd.DataFrame(np.array(output_vectors).reshape(len(embedded_sentences), -1))
except ValueError:
# just return inputs with file names deleted if vectorizing fails
return CallResult(outputs)
# create df with vectorized columns and append to input df
embedded_df = d3m_DataFrame(embedded_df)
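        # mark every embedding column as a float Attribute so downstream primitives treat it as a feature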
for col in range(embedded_df.shape[1]):
col_dict = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS, col)))
col_dict['structural_type'] = type(1.0)
col_dict['name'] = "vector_" + str(col)
col_dict["semantic_types"] = (
"http://schema.org/Float",
"https://metadata.datadrivendiscovery.org/types/Attribute",
)
embedded_df.metadata = embedded_df.metadata.update(
(metadata_base.ALL_ELEMENTS, col), col_dict
)
df_dict = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS, )))
df_dict_1 = dict(embedded_df.metadata.query((metadata_base.ALL_ELEMENTS, )))
df_dict['dimension'] = df_dict_1
df_dict_1['name'] = 'columns'
df_dict_1['semantic_types'] = ('https://metadata.datadrivendiscovery.org/types/TabularColumn',)
df_dict_1['length'] = embedded_df.shape[1]
embedded_df.metadata = embedded_df.metadata.update((metadata_base.ALL_ELEMENTS,), df_dict)
return CallResult(outputs.append_columns(embedded_df))
@classmethod
def _get_operating_columns(cls, inputs: container.DataFrame, use_columns: Sequence[int],
semantic_types: Sequence[str], require_attribute: bool = True) -> Sequence[int]:
# use caller supplied columns if supplied
cols = set(use_columns)
type_cols = set(inputs.metadata.list_columns_with_semantic_types(semantic_types))
if require_attribute:
attributes = set(inputs.metadata.list_columns_with_semantic_types(('https://metadata.datadrivendiscovery.org/types/Attribute',)))
type_cols = type_cols & attributes
if len(cols) > 0:
cols = type_cols & cols
else:
cols = type_cols
return list(cols)
|
[
"d3m.primitive_interfaces.base.CallResult",
"numpy.array",
"nk_sent2vec.Sent2Vec",
"d3m.container.DataFrame"
] |
[((5704, 5730), 'd3m.container.DataFrame', 'd3m_DataFrame', (['embedded_df'], {}), '(embedded_df)\n', (5717, 5730), True, 'from d3m.container import DataFrame as d3m_DataFrame\n'), ((5034, 5080), 'nk_sent2vec.Sent2Vec', '_Sent2Vec', ([], {'path': "self.volumes['sent2vec_model']"}), "(path=self.volumes['sent2vec_model'])\n", (5043, 5080), True, 'from nk_sent2vec import Sent2Vec as _Sent2Vec\n'), ((5594, 5613), 'd3m.primitive_interfaces.base.CallResult', 'CallResult', (['outputs'], {}), '(outputs)\n', (5604, 5613), False, 'from d3m.primitive_interfaces.base import CallResult\n'), ((5407, 5431), 'numpy.array', 'np.array', (['output_vectors'], {}), '(output_vectors)\n', (5415, 5431), True, 'import numpy as np\n')]
|
import torch
from torch.optim import lr_scheduler
from tqdm import tqdm
from torchsummary import summary
from torch.utils.tensorboard import SummaryWriter
from apex import amp
from loss import dice
from pathlib import Path
from data import CaseDataset, load_case, save_pred, \
orient_crop_case, regions_crop_case, resample_normalize_case
import nibabel as nib
import numpy as np
import scipy.special as spe
from transform import pad, crop_pad, to_numpy, to_tensor, resize
def predict_per_patch(input,
model,
num_classes=3,
patch_size=(96, 96, 96),
step_per_patch=4,
verbose=True,
one_hot=False):
device = next(model.parameters()).device
# add padding if patch is larger than input shape
origial_shape = input.shape[:3]
input = pad(input, patch_size)
padding_shape = input.shape[:3]
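    # patch centres are laid out on a regular grid roughly patch_size / step_per_patch apart,
    # so neighbouring patches overlap and their predictions are averaged via result / result_n below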
coord_start = np.array([i // 2 for i in patch_size])
coord_end = np.array([padding_shape[i] - patch_size[i] // 2
for i in range(len(patch_size))])
num_steps = np.ceil([(coord_end[i] - coord_start[i]) / (patch_size[i] / step_per_patch)
for i in range(3)])
step_size = np.array([(coord_end[i] - coord_start[i]) / (num_steps[i] + 1e-8)
for i in range(3)])
step_size[step_size == 0] = 9999999
x_steps = np.arange(coord_start[0], coord_end[0] + 1e-8, step_size[0], dtype=np.int)
y_steps = np.arange(coord_start[1], coord_end[1] + 1e-8, step_size[1], dtype=np.int)
z_steps = np.arange(coord_start[2], coord_end[2] + 1e-8, step_size[2], dtype=np.int)
result = torch.zeros([num_classes] + list(padding_shape)).to(device)
result_n = torch.zeros_like(result).to(device)
if verbose:
print('Image Shape: {} Patch Size: {}'.format(padding_shape, patch_size))
print('X step: %d Y step: %d Z step: %d' %
(len(x_steps), len(y_steps), len(z_steps)))
# W H D C => C W H D => N C W H D for model input
input = torch.from_numpy(to_tensor(input)[None]).to(device)
patchs_slices = []
for x in x_steps:
        x_min = x - patch_size[0] // 2
x_max = x + patch_size[0] // 2
for y in y_steps:
y_min = y - patch_size[1] // 2
y_max = y + patch_size[1] // 2
for z in z_steps:
z_min = z - patch_size[2] // 2
z_max = z + patch_size[2] // 2
                patchs_slices.append([slice(x_min, x_max),
slice(y_min, y_max),
slice(z_min, z_max)])
# predict loop
predict_loop = tqdm(patchs_slices) if verbose else patchs_slices
model.eval()
with torch.no_grad():
for slices in predict_loop:
output = model(input[[slice(None), slice(None)]+slices])
if num_classes == 1:
output = torch.sigmoid(output)
else:
output = torch.softmax(output, dim=1)
result[[slice(None)]+slices] += output[0]
result_n[[slice(None)]+slices] += 1
    # merge all patches
    if verbose:
        print('Merging all patches...')
result = result / result_n
if one_hot:
result = to_numpy(result.cpu().numpy()).astype(np.float32)
else:
if num_classes == 1:
result = torch.squeeze(result, dim=0)
else:
result = torch.softmax(result, dim=0)
result = torch.argmax(result, axis=0)
result = np.round(result.cpu().numpy()).astype(np.uint8)
return crop_pad(result, origial_shape)
def predict_case(case,
model,
target_spacing,
normalize_stats,
num_classes=3,
patch_size=(96, 96, 96),
step_per_patch=4,
verbose=True,
one_hot=False):
orig_shape = case['image'].shape[:-1]
affine = case['affine']
# resample case for predict
if verbose:
print('Resampling the case for prediction...')
case_ = resample_normalize_case(case, target_spacing, normalize_stats)
if verbose:
print('Predicting the case...')
pred = predict_per_patch(case_['image'],
model,
num_classes,
patch_size,
step_per_patch,
verbose,
one_hot)
if verbose:
        print('Resizing the case to original shape...')
case['pred'] = resize(pred, orig_shape, is_label=one_hot is False)
case['affine'] = affine
if verbose:
print('All done!')
return case
def batch_predict_case(load_dir,
save_dir,
model,
target_spacing,
normalize_stats,
num_classes=3,
patch_size=(240, 240, 80),
step_per_patch=4,
data_range=None):
load_dir = Path(load_dir)
cases = CaseDataset(load_dir, load_meta=True)
if data_range is None:
data_range = range(len(cases))
for i in tqdm(data_range):
case = predict_case(cases[i],
model,
target_spacing,
normalize_stats,
num_classes,
patch_size,
step_per_patch,
False)
save_pred(case, save_dir)
def cascade_predict_case(case,
coarse_model,
coarse_target_spacing,
coarse_normalize_stats,
coarse_patch_size,
detail_model,
detail_target_spacing,
detail_normalize_stats,
detail_patch_size,
num_classes=3,
step_per_patch=4,
region_threshold=10000,
crop_padding=20,
verbose=True):
if verbose:
print('Predicting the rough shape for further prediction...')
case = predict_case(case,
coarse_model,
coarse_target_spacing,
coarse_normalize_stats,
1,
coarse_patch_size,
step_per_patch,
verbose=verbose)
regions = regions_crop_case(case, region_threshold, crop_padding, 'pred')
num_classes = detail_model.out_channels
orig_shape = case['image'].shape[:-1]
result = np.zeros(list(orig_shape)+[num_classes])
result_n = np.zeros_like(result)
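    # result accumulates each cropped region's one-hot prediction in the full volume;
    # result_n counts how many regions cover each voxel so overlapping predictions can be averaged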
if verbose:
print('Cropping regions (%d)...' % len(regions))
for idx, region in enumerate(regions):
bbox = region['bbox']
shape = region['image'].shape[:-1]
if verbose:
print('Region {} {} predicting...'.format(idx, shape))
region = predict_case(region,
detail_model,
detail_target_spacing,
detail_normalize_stats,
num_classes,
detail_patch_size,
step_per_patch,
verbose=verbose,
one_hot=True)
region_slices = []
result_slices = []
for i in range(len(bbox)):
region_slice_min = 0 + max(0 - bbox[i][0], 0)
region_slice_max = shape[i] - max(bbox[i][1] - orig_shape[i], 0)
region_slices.append(slice(region_slice_min, region_slice_max))
origin_slice_min = max(bbox[i][0], 0)
origin_slice_max = min(bbox[i][1], orig_shape[i])
result_slices.append(slice(origin_slice_min, origin_slice_max))
region_slices.append(slice(None))
result_slices.append(slice(None))
result[result_slices] += region['pred'][region_slices]
result_n[result_slices] += 1
if verbose:
print('Merging all regions...')
# avoid orig_pred_n = 0
mask = np.array(result_n > 0)
result[mask] = result[mask] / result_n[mask]
if num_classes == 1:
result = np.squeeze(result, axis=-1)
result = np.around(result)
else:
result = spe.softmax(result, axis=-1)
result = np.argmax(result, axis=-1)
case['pred'] = result.astype(np.uint8)
if verbose:
print('All done!')
return case
def cascade_predict(image_file,
coarse_model,
coarse_target_spacing,
coarse_normalize_stats,
coarse_patch_size,
detail_model,
detail_target_spacing,
detail_normalize_stats,
detail_patch_size,
air=-200,
num_classes=3,
step_per_patch=4,
region_threshold=10000,
crop_padding=20,
label_file=None,
verbose=True):
orig_case = load_case(image_file, label_file)
case = orient_crop_case(orig_case, air)
case = cascade_predict_case(case,
coarse_model,
coarse_target_spacing,
coarse_normalize_stats,
coarse_patch_size,
detail_model,
detail_target_spacing,
detail_normalize_stats,
detail_patch_size,
num_classes,
step_per_patch,
region_threshold,
crop_padding,
verbose)
orient = nib.orientations.io_orientation(orig_case['affine'])
indices = orient[:, 0].astype(np.int)
orig_shape = np.array(orig_case['image'].shape[:3])
orig_shape = np.take(orig_shape, indices)
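    # permute the stored image shape by the orientation mapping from the affine
    # so the full-size prediction volume is allocated in the reoriented axis order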
bbox = case['bbox']
orig_pred = np.zeros(orig_shape, dtype=np.uint8)
result_slices = []
for i in range(len(bbox)):
orig_slice_min = max(bbox[i][0], 0)
orig_slice_max = min(bbox[i][1], orig_shape[i])
result_slices.append(slice(orig_slice_min, orig_slice_max))
orig_pred[result_slices] = case['pred']
# orient
orig_case['pred'] = nib.orientations.apply_orientation(orig_pred, orient)
if len(orig_case['image'].shape) == 3:
orig_case['image'] = np.expand_dims(orig_case['image'], -1)
return orig_case
def batch_cascade_predict(image_dir,
save_dir,
coarse_model,
coarse_target_spacing,
coarse_normalize_stats,
coarse_patch_size,
detail_model,
detail_target_spacing,
detail_normalize_stats,
detail_patch_size,
air=-200,
num_classes=3,
step_per_patch=4,
region_threshold=10000,
crop_padding=20,
data_range=None):
image_dir = Path(image_dir)
image_files = [path for path in sorted(image_dir.iterdir()) if path.is_file()]
if data_range is None:
data_range = range(len(image_files))
for i in tqdm(data_range):
case = cascade_predict(image_files[i],
coarse_model,
coarse_target_spacing,
coarse_normalize_stats,
coarse_patch_size,
detail_model,
detail_target_spacing,
detail_normalize_stats,
detail_patch_size,
air,
num_classes,
step_per_patch,
region_threshold,
crop_padding,
None,
False)
save_pred(case, save_dir)
def evaluate_case(case):
num_classes = case['label'].max()
evaluate_result = []
for c in range(num_classes):
pred = np.array(case['pred'] == c+1).astype(np.float32)
label = np.array(case['label'] == c+1).astype(np.float32)
dsc = dice(torch.tensor(pred), torch.tensor(label)).item()
evaluate_result.append(dsc)
return evaluate_result
def evaluate(label_file, pred_file):
label_nib = nib.load(str(label_file))
pred_nib = nib.load(str(pred_file))
case = {}
case['label'] = label_nib.get_fdata().astype(np.uint8)
case['pred'] = pred_nib.get_fdata().astype(np.uint8)
evaluate_result = evaluate_case(case)
return evaluate_result
def batch_evaluate(label_dir, pred_dir, data_range=None):
label_dir = Path(label_dir)
pred_dir = Path(pred_dir)
label_files = sorted(list(label_dir.glob('*.nii.gz')))
pred_files = sorted(list(pred_dir.glob('*.nii.gz')))
if data_range is None:
data_range = range(len(label_files))
evaluate_results = []
par = tqdm(data_range)
for i in par:
evaluate_result = evaluate(label_files[i], pred_files[i])
evaluate_results.append(evaluate_result)
evaluate_dict = {}
for idx, e in enumerate(evaluate_result):
evaluate_dict["label_%d" % (idx+1)] = e
par.set_description("Case %d" % i)
par.set_postfix(evaluate_dict)
print('\nThe mean dsc of each label:')
means = np.array(evaluate_results).mean(axis=0)
for i, mean in enumerate(means):
print("label_%d: %f" % (i+1, mean))
return evaluate_results
class Subset(torch.utils.data.Subset):
def __init__(self, dataset, indices, transform):
super(Subset, self).__init__(dataset, indices)
self.transform = transform
def __getitem__(self, idx):
case = self.dataset[self.indices[idx]]
if self.transform:
case = self.transform(case)
return case
class Trainer():
def __init__(self,
model,
optimizer,
loss,
dataset,
batch_size=10,
dataloader_kwargs={'num_workers': 2,
'pin_memory': True},
valid_split=0.2,
num_samples=None,
metrics=None,
scheduler=None,
train_transform=None,
valid_transform=None):
self.model = model
self.optimizer = optimizer
self.loss = loss
self.dataset = dataset
self.metrics = metrics
self.scheduler = scheduler
self.train_transform = train_transform
self.valid_transform = valid_transform
dataset_size = len(self.dataset)
indices = list(range(dataset_size))
split = int(np.floor(valid_split * dataset_size))
np.random.shuffle(indices)
self.train_indices = indices[split:]
self.valid_indices = indices[:split]
self.dataloader_kwargs = {'batch_size': batch_size, **dataloader_kwargs}
self.num_samples = num_samples
self.valid_split = valid_split
self.device = next(model.parameters()).device
self.best_result = {'loss': float('inf')}
self.current_epoch = 0
self.patience_counter = 0
self.amp_state_dict = None
def get_lr(self, idx=0):
return self.optimizer.param_groups[idx]['lr']
def set_lr(self, lr, idx=0):
self.optimizer.param_groups[idx]['lr'] = lr
def summary(self, input_shape):
return summary(self.model, input_shape)
def batch_loop(self, data_loader, is_train=True):
results = []
self.progress_bar.reset(len(data_loader))
desc = "Epoch %d/%d (LR %.2g)" % (self.current_epoch+1,
self.num_epochs,
self.get_lr())
self.progress_bar.set_description(desc)
for batch_idx, batch in enumerate(data_loader):
x = batch['image'].to(self.device)
y = batch['label'].to(self.device)
# forward
if is_train:
self.model.train()
y_pred = self.model(x)
else:
self.model.eval()
with torch.no_grad():
y_pred = self.model(x)
loss = self.loss(y_pred, y)
# backward
if is_train:
self.optimizer.zero_grad()
if self.use_amp:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
self.optimizer.step()
result = {'loss': loss.item()}
# calc the other metrics
if self.metrics is not None:
for key, metric_fn in self.metrics.items():
result[key] = metric_fn(y_pred, y).item()
if not torch.isnan(loss):
results.append(result)
self.progress_bar.set_postfix(result)
self.progress_bar.update()
mean_result = {}
for key in results[0].keys():
mean_result[key] = np.mean(np.array([x[key] for x in results]))
name = 'train' if is_train else 'valid'
if self.save_dir is not None:
writer = SummaryWriter(self.save_dir)
for key in mean_result.keys():
writer.add_scalar('%s/%s' % (key, name),
mean_result[key],
self.current_epoch)
writer.close()
return mean_result
def fit(self,
num_epochs=10,
save_dir=None,
use_amp=False,
opt_level='O1'):
# ----------------------
# initialize
# ----------------------
self.num_epochs = num_epochs
self.use_amp = use_amp
self.save_dir = save_dir
if use_amp:
self.model, self.optimizer = amp.initialize(
self.model, self.optimizer, opt_level=opt_level)
if self.amp_state_dict is not None:
amp.load_state_dict(self.amp_state_dict)
self.progress_bar = tqdm(total=0)
# ----------------------
# prepare data
# ----------------------
train_set = Subset(self.dataset, self.train_indices, self.train_transform)
if self.num_samples is not None:
sampler = torch.utils.data.RandomSampler(train_set, True, self.num_samples)
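            # sampling with replacement: every epoch draws exactly num_samples cases, independent of subset size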
train_loader = torch.utils.data.DataLoader(train_set,
sampler=sampler,
**self.dataloader_kwargs)
else:
train_loader = torch.utils.data.DataLoader(train_set,
shuffle=True,
**self.dataloader_kwargs)
if len(self.valid_indices) > 0:
valid_set = Subset(self.dataset, self.valid_indices, self.valid_transform)
if self.num_samples is not None:
num_samples = round(self.num_samples * self.valid_split)
sampler = torch.utils.data.RandomSampler(valid_set, True, num_samples)
valid_loader = torch.utils.data.DataLoader(valid_set,
sampler=sampler,
**self.dataloader_kwargs)
else:
valid_loader = torch.utils.data.DataLoader(valid_set,
**self.dataloader_kwargs)
else:
valid_loader = None
# ----------------------
# main loop
# ----------------------
for epoch in range(self.current_epoch, num_epochs):
self.current_epoch = epoch
# train loop
result = self.batch_loop(train_loader, is_train=True)
            # valid loop
if valid_loader is not None:
result = self.batch_loop(valid_loader, is_train=False)
            # built-in fn: lr_scheduler
if self.scheduler is not None:
if isinstance(self.scheduler, lr_scheduler.ReduceLROnPlateau):
self.scheduler.step(result['loss'])
else:
self.scheduler.step()
# save best
if result['loss'] < self.best_result['loss']-1e-3:
self.best_result = result
if save_dir is not None:
self.save_checkpoint(save_dir+'-best.pt')
if save_dir is not None:
self.save_checkpoint(save_dir+'-last.pt')
self.progress_bar.close()
def save_checkpoint(self, file_path):
checkpoint = {'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'current_epoch': self.current_epoch,
'train_indices': self.train_indices,
'valid_indices': self.valid_indices,
'best_result': self.best_result}
if self.scheduler is not None:
checkpoint['scheduler_state_dict'] = self.scheduler.state_dict()
if self.use_amp:
checkpoint['amp_state_dict'] = amp.state_dict()
torch.save(checkpoint, file_path)
def load_checkpoint(self, file_path):
checkpoint = torch.load(file_path)
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.current_epoch = checkpoint['current_epoch']+1
self.train_indices = checkpoint['train_indices']
self.valid_indices = checkpoint['valid_indices']
self.best_result = checkpoint['best_result']
if 'amp_state_dict' in checkpoint:
self.amp_state_dict = checkpoint['amp_state_dict']
if 'scheduler_state_dict' in checkpoint and self.scheduler is not None:
self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
# cross valid
# elif num_folds > 1:
# # split the dataset into k-fold
# fold_len = len(dataset) // num_folds
# fold_len_list = []
# for i in range(num_folds-1):
# fold_len_list.append(fold_len)
# fold_len_list.append(len(dataset)-fold_len * (num_folds-1))
# fold_subsets = torch.utils.data.random_split(dataset, fold_len_list)
# fold_metrics = []
# avg_metrics = {}
# self.save('init.pt')
# for i, fold_subset in enumerate(fold_subsets):
# train_subsets = fold_subsets.copy()
# train_subsets.remove(fold_subset)
# train_subset = torch.utils.data.ConcatDataset(train_subsets)
# train_set = DatasetFromSubset(train_subset, tr_transform)
# valid_set = DatasetFromSubset(fold_subset, vd_transform)
# print('Fold %d/%d:' % (i+1, num_folds))
# self.load('init.pt')
# train_kwargs['log_dir'] = '%s_%d' % (log_dir, i)
# metrics = self.train(train_set, valid_set, **train_kwargs)
# fold_metrics.append(metrics)
# # calc the avg
# for name in fold_metrics[0].keys():
# sum_metric = 0
# for fold_metric in fold_metrics:
# sum_metric += fold_metric[name]
# avg_metrics[name] = sum_metric / num_folds
# for i, fold_metric in enumerate(fold_metrics):
# print('Fold %d metrics:\t%s' %
# (i+1, self.metrics_stringify(fold_metric)))
# print('Avg metrics:\t%s' % self.metrics_stringify(avg_metrics))
# manual ctrl @lr_factor @min_lr @patience
# if metrics['Loss'] < best_metrics['Loss']-1e-4:
# if save_dir and save_best:
# self.save(save_dir+'-best.pt')
# best_metrics = metrics
# patience_counter = 0
# elif patience > 0:
# patience_counter += 1
# if patience_counter > patience:
# print("│\n├Loss stopped improving for %d num_epochs." %
# patience_counter)
# patience_counter = 0
# lr = self.get_lr() * lr_factor
# if min_lr and lr < min_lr:
# print("│LR below the min LR, stop training.")
# break
# else:
# print('│Reduce LR to %.3g' % lr)
# self.set_lr(lr)
# def get_lr(self):
# for param_group in self.optimizer.param_groups:
# return param_group['lr']
# def set_lr(self, lr):
# for param_group in self.optimizer.param_groups:
# param_group['lr'] = lr
# # save best & early_stop_patience counter
# if result['loss'] < self.best_result['loss']-1e-3:
# self.best_result = result
# self.patience_counter = 0
# if save_dir and save_best:
# self.save_checkpoint(save_dir+'-best.pt')
# elif early_stop_patience > 0:
# self.patience_counter += 1
# if self.patience_counter > early_stop_patience:
# print(("\nLoss stopped improving for %d num_epochs. "
# "stop training.") % self.patience_counter)
# self.patience_counter = 0
# break
|
[
"apex.amp.scale_loss",
"torch.softmax",
"numpy.array",
"apex.amp.initialize",
"transform.crop_pad",
"torch.squeeze",
"transform.pad",
"numpy.arange",
"torch.isnan",
"torch.utils.tensorboard.SummaryWriter",
"apex.amp.load_state_dict",
"pathlib.Path",
"data.resample_normalize_case",
"numpy.take",
"data.save_pred",
"torch.zeros_like",
"torchsummary.summary",
"apex.amp.state_dict",
"torch.argmax",
"nibabel.orientations.apply_orientation",
"transform.resize",
"numpy.random.shuffle",
"numpy.floor",
"numpy.argmax",
"numpy.squeeze",
"numpy.around",
"torch.save",
"data.load_case",
"transform.to_tensor",
"data.orient_crop_case",
"nibabel.orientations.io_orientation",
"data.regions_crop_case",
"tqdm.tqdm",
"torch.load",
"torch.sigmoid",
"torch.utils.data.RandomSampler",
"torch.tensor",
"numpy.zeros",
"numpy.expand_dims",
"torch.utils.data.DataLoader",
"data.CaseDataset",
"torch.no_grad",
"numpy.zeros_like",
"scipy.special.softmax"
] |
[((881, 903), 'transform.pad', 'pad', (['input', 'patch_size'], {}), '(input, patch_size)\n', (884, 903), False, 'from transform import pad, crop_pad, to_numpy, to_tensor, resize\n'), ((958, 998), 'numpy.array', 'np.array', (['[(i // 2) for i in patch_size]'], {}), '([(i // 2) for i in patch_size])\n', (966, 998), True, 'import numpy as np\n'), ((1441, 1516), 'numpy.arange', 'np.arange', (['coord_start[0]', '(coord_end[0] + 1e-08)', 'step_size[0]'], {'dtype': 'np.int'}), '(coord_start[0], coord_end[0] + 1e-08, step_size[0], dtype=np.int)\n', (1450, 1516), True, 'import numpy as np\n'), ((1530, 1605), 'numpy.arange', 'np.arange', (['coord_start[1]', '(coord_end[1] + 1e-08)', 'step_size[1]'], {'dtype': 'np.int'}), '(coord_start[1], coord_end[1] + 1e-08, step_size[1], dtype=np.int)\n', (1539, 1605), True, 'import numpy as np\n'), ((1619, 1694), 'numpy.arange', 'np.arange', (['coord_start[2]', '(coord_end[2] + 1e-08)', 'step_size[2]'], {'dtype': 'np.int'}), '(coord_start[2], coord_end[2] + 1e-08, step_size[2], dtype=np.int)\n', (1628, 1694), True, 'import numpy as np\n'), ((3653, 3684), 'transform.crop_pad', 'crop_pad', (['result', 'origial_shape'], {}), '(result, origial_shape)\n', (3661, 3684), False, 'from transform import pad, crop_pad, to_numpy, to_tensor, resize\n'), ((4160, 4222), 'data.resample_normalize_case', 'resample_normalize_case', (['case', 'target_spacing', 'normalize_stats'], {}), '(case, target_spacing, normalize_stats)\n', (4183, 4222), False, 'from data import CaseDataset, load_case, save_pred, orient_crop_case, regions_crop_case, resample_normalize_case\n'), ((4655, 4706), 'transform.resize', 'resize', (['pred', 'orig_shape'], {'is_label': '(one_hot is False)'}), '(pred, orig_shape, is_label=one_hot is False)\n', (4661, 4706), False, 'from transform import pad, crop_pad, to_numpy, to_tensor, resize\n'), ((5157, 5171), 'pathlib.Path', 'Path', (['load_dir'], {}), '(load_dir)\n', (5161, 5171), False, 'from pathlib import Path\n'), ((5185, 5222), 'data.CaseDataset', 'CaseDataset', (['load_dir'], {'load_meta': '(True)'}), '(load_dir, load_meta=True)\n', (5196, 5222), False, 'from data import CaseDataset, load_case, save_pred, orient_crop_case, regions_crop_case, resample_normalize_case\n'), ((5303, 5319), 'tqdm.tqdm', 'tqdm', (['data_range'], {}), '(data_range)\n', (5307, 5319), False, 'from tqdm import tqdm\n'), ((6698, 6761), 'data.regions_crop_case', 'regions_crop_case', (['case', 'region_threshold', 'crop_padding', '"""pred"""'], {}), "(case, region_threshold, crop_padding, 'pred')\n", (6715, 6761), False, 'from data import CaseDataset, load_case, save_pred, orient_crop_case, regions_crop_case, resample_normalize_case\n'), ((6917, 6938), 'numpy.zeros_like', 'np.zeros_like', (['result'], {}), '(result)\n', (6930, 6938), True, 'import numpy as np\n'), ((8407, 8429), 'numpy.array', 'np.array', (['(result_n > 0)'], {}), '(result_n > 0)\n', (8415, 8429), True, 'import numpy as np\n'), ((9415, 9448), 'data.load_case', 'load_case', (['image_file', 'label_file'], {}), '(image_file, label_file)\n', (9424, 9448), False, 'from data import CaseDataset, load_case, save_pred, orient_crop_case, regions_crop_case, resample_normalize_case\n'), ((9460, 9492), 'data.orient_crop_case', 'orient_crop_case', (['orig_case', 'air'], {}), '(orig_case, air)\n', (9476, 9492), False, 'from data import CaseDataset, load_case, save_pred, orient_crop_case, regions_crop_case, resample_normalize_case\n'), ((10192, 10244), 'nibabel.orientations.io_orientation', 'nib.orientations.io_orientation', 
(["orig_case['affine']"], {}), "(orig_case['affine'])\n", (10223, 10244), True, 'import nibabel as nib\n'), ((10304, 10342), 'numpy.array', 'np.array', (["orig_case['image'].shape[:3]"], {}), "(orig_case['image'].shape[:3])\n", (10312, 10342), True, 'import numpy as np\n'), ((10360, 10388), 'numpy.take', 'np.take', (['orig_shape', 'indices'], {}), '(orig_shape, indices)\n', (10367, 10388), True, 'import numpy as np\n'), ((10430, 10466), 'numpy.zeros', 'np.zeros', (['orig_shape'], {'dtype': 'np.uint8'}), '(orig_shape, dtype=np.uint8)\n', (10438, 10466), True, 'import numpy as np\n'), ((10771, 10824), 'nibabel.orientations.apply_orientation', 'nib.orientations.apply_orientation', (['orig_pred', 'orient'], {}), '(orig_pred, orient)\n', (10805, 10824), True, 'import nibabel as nib\n'), ((11676, 11691), 'pathlib.Path', 'Path', (['image_dir'], {}), '(image_dir)\n', (11680, 11691), False, 'from pathlib import Path\n'), ((11862, 11878), 'tqdm.tqdm', 'tqdm', (['data_range'], {}), '(data_range)\n', (11866, 11878), False, 'from tqdm import tqdm\n'), ((13446, 13461), 'pathlib.Path', 'Path', (['label_dir'], {}), '(label_dir)\n', (13450, 13461), False, 'from pathlib import Path\n'), ((13477, 13491), 'pathlib.Path', 'Path', (['pred_dir'], {}), '(pred_dir)\n', (13481, 13491), False, 'from pathlib import Path\n'), ((13719, 13735), 'tqdm.tqdm', 'tqdm', (['data_range'], {}), '(data_range)\n', (13723, 13735), False, 'from tqdm import tqdm\n'), ((2724, 2743), 'tqdm.tqdm', 'tqdm', (['patchs_slices'], {}), '(patchs_slices)\n', (2728, 2743), False, 'from tqdm import tqdm\n'), ((2800, 2815), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2813, 2815), False, 'import torch\n'), ((5651, 5676), 'data.save_pred', 'save_pred', (['case', 'save_dir'], {}), '(case, save_dir)\n', (5660, 5676), False, 'from data import CaseDataset, load_case, save_pred, orient_crop_case, regions_crop_case, resample_normalize_case\n'), ((8522, 8549), 'numpy.squeeze', 'np.squeeze', (['result'], {'axis': '(-1)'}), '(result, axis=-1)\n', (8532, 8549), True, 'import numpy as np\n'), ((8567, 8584), 'numpy.around', 'np.around', (['result'], {}), '(result)\n', (8576, 8584), True, 'import numpy as np\n'), ((8612, 8640), 'scipy.special.softmax', 'spe.softmax', (['result'], {'axis': '(-1)'}), '(result, axis=-1)\n', (8623, 8640), True, 'import scipy.special as spe\n'), ((8658, 8684), 'numpy.argmax', 'np.argmax', (['result'], {'axis': '(-1)'}), '(result, axis=-1)\n', (8667, 8684), True, 'import numpy as np\n'), ((10897, 10935), 'numpy.expand_dims', 'np.expand_dims', (["orig_case['image']", '(-1)'], {}), "(orig_case['image'], -1)\n", (10911, 10935), True, 'import numpy as np\n'), ((12639, 12664), 'data.save_pred', 'save_pred', (['case', 'save_dir'], {}), '(case, save_dir)\n', (12648, 12664), False, 'from data import CaseDataset, load_case, save_pred, orient_crop_case, regions_crop_case, resample_normalize_case\n'), ((15569, 15595), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (15586, 15595), True, 'import numpy as np\n'), ((16272, 16304), 'torchsummary.summary', 'summary', (['self.model', 'input_shape'], {}), '(self.model, input_shape)\n', (16279, 16304), False, 'from torchsummary import summary\n'), ((19003, 19016), 'tqdm.tqdm', 'tqdm', ([], {'total': '(0)'}), '(total=0)\n', (19007, 19016), False, 'from tqdm import tqdm\n'), ((22253, 22286), 'torch.save', 'torch.save', (['checkpoint', 'file_path'], {}), '(checkpoint, file_path)\n', (22263, 22286), False, 'import torch\n'), ((22351, 22372), 'torch.load', 
'torch.load', (['file_path'], {}), '(file_path)\n', (22361, 22372), False, 'import torch\n'), ((1783, 1807), 'torch.zeros_like', 'torch.zeros_like', (['result'], {}), '(result)\n', (1799, 1807), False, 'import torch\n'), ((3432, 3460), 'torch.squeeze', 'torch.squeeze', (['result'], {'dim': '(0)'}), '(result, dim=0)\n', (3445, 3460), False, 'import torch\n'), ((3496, 3524), 'torch.softmax', 'torch.softmax', (['result'], {'dim': '(0)'}), '(result, dim=0)\n', (3509, 3524), False, 'import torch\n'), ((3546, 3574), 'torch.argmax', 'torch.argmax', (['result'], {'axis': '(0)'}), '(result, axis=0)\n', (3558, 3574), False, 'import torch\n'), ((14140, 14166), 'numpy.array', 'np.array', (['evaluate_results'], {}), '(evaluate_results)\n', (14148, 14166), True, 'import numpy as np\n'), ((15523, 15559), 'numpy.floor', 'np.floor', (['(valid_split * dataset_size)'], {}), '(valid_split * dataset_size)\n', (15531, 15559), True, 'import numpy as np\n'), ((18113, 18141), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['self.save_dir'], {}), '(self.save_dir)\n', (18126, 18141), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((18788, 18851), 'apex.amp.initialize', 'amp.initialize', (['self.model', 'self.optimizer'], {'opt_level': 'opt_level'}), '(self.model, self.optimizer, opt_level=opt_level)\n', (18802, 18851), False, 'from apex import amp\n'), ((19258, 19323), 'torch.utils.data.RandomSampler', 'torch.utils.data.RandomSampler', (['train_set', '(True)', 'self.num_samples'], {}), '(train_set, True, self.num_samples)\n', (19288, 19323), False, 'import torch\n'), ((19351, 19437), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_set'], {'sampler': 'sampler'}), '(train_set, sampler=sampler, **self.\n dataloader_kwargs)\n', (19378, 19437), False, 'import torch\n'), ((19584, 19662), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_set'], {'shuffle': '(True)'}), '(train_set, shuffle=True, **self.dataloader_kwargs)\n', (19611, 19662), False, 'import torch\n'), ((22228, 22244), 'apex.amp.state_dict', 'amp.state_dict', ([], {}), '()\n', (22242, 22244), False, 'from apex import amp\n'), ((2981, 3002), 'torch.sigmoid', 'torch.sigmoid', (['output'], {}), '(output)\n', (2994, 3002), False, 'import torch\n'), ((3046, 3074), 'torch.softmax', 'torch.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (3059, 3074), False, 'import torch\n'), ((12803, 12834), 'numpy.array', 'np.array', (["(case['pred'] == c + 1)"], {}), "(case['pred'] == c + 1)\n", (12811, 12834), True, 'import numpy as np\n'), ((12868, 12900), 'numpy.array', 'np.array', (["(case['label'] == c + 1)"], {}), "(case['label'] == c + 1)\n", (12876, 12900), True, 'import numpy as np\n'), ((17717, 17734), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (17728, 17734), False, 'import torch\n'), ((17968, 18003), 'numpy.array', 'np.array', (['[x[key] for x in results]'], {}), '([x[key] for x in results])\n', (17976, 18003), True, 'import numpy as np\n'), ((18933, 18973), 'apex.amp.load_state_dict', 'amp.load_state_dict', (['self.amp_state_dict'], {}), '(self.amp_state_dict)\n', (18952, 18973), False, 'from apex import amp\n'), ((20045, 20105), 'torch.utils.data.RandomSampler', 'torch.utils.data.RandomSampler', (['valid_set', '(True)', 'num_samples'], {}), '(valid_set, True, num_samples)\n', (20075, 20105), False, 'import torch\n'), ((20137, 20223), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_set'], {'sampler': 'sampler'}), '(valid_set, sampler=sampler, 
**self.\n dataloader_kwargs)\n', (20164, 20223), False, 'import torch\n'), ((20386, 20450), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_set'], {}), '(valid_set, **self.dataloader_kwargs)\n', (20413, 20450), False, 'import torch\n'), ((2112, 2128), 'transform.to_tensor', 'to_tensor', (['input'], {}), '(input)\n', (2121, 2128), False, 'from transform import pad, crop_pad, to_numpy, to_tensor, resize\n'), ((12937, 12955), 'torch.tensor', 'torch.tensor', (['pred'], {}), '(pred)\n', (12949, 12955), False, 'import torch\n'), ((12957, 12976), 'torch.tensor', 'torch.tensor', (['label'], {}), '(label)\n', (12969, 12976), False, 'import torch\n'), ((17005, 17020), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17018, 17020), False, 'import torch\n'), ((17256, 17292), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'self.optimizer'], {}), '(loss, self.optimizer)\n', (17270, 17292), False, 'from apex import amp\n')]
|
from contextlib import closing
import h5py
import numpy as np
def save_h5(outfile, dictionary):
""" Saves passed dictionary to an h5 file
Parameters
----------
outfile : string
Name of output h5 file
dictionary : dictionary
Dictionary that will be saved
"""
def save_layer(f, seed, dictionary):
for key, value in dictionary.items():
fullKey = f"{seed}/{key}"
if type(dictionary[key]) == dict:
f = save_layer(f, fullKey, value)
else:
f[fullKey] = dictionary[key]
return f
with closing(h5py.File(outfile, 'w')) as f:
for key, value in dictionary.items():
if type(dictionary[key]) == dict:
f = save_layer(f, key, value)
else:
f[key] = dictionary[key]
def load_h5(feature_file):
"""
Loads h5 contents to dictionary.
Single level dictionary with keys being full h5 paths.
Parameters
----------
feature_file : string
Name of input h5 file
Returns
-------
dictionary : dictionary
Dictionary of h5 contents
"""
def load_layer(f, seed, dictionary):
for key in f[seed].keys():
fullKey = f"{seed}/{key}"
if isinstance(f[fullKey], h5py.Dataset):
if (seed in dictionary.keys()):
dictionary[seed][key] = np.asarray(f[fullKey])
else:
dictionary[seed] = {key: np.asarray(f[fullKey])}
else:
dictionary = load_layer(f, fullKey, dictionary)
return dictionary
with h5py.File(feature_file, 'r') as f:
dictionary = {}
for key in f.keys():
if isinstance(f[key], h5py.Dataset):
dictionary[key] = np.asarray(f[key])
else:
dictionary = load_layer(f, key, dictionary)
return dictionary
|
[
"numpy.asarray",
"h5py.File"
] |
[((1659, 1687), 'h5py.File', 'h5py.File', (['feature_file', '"""r"""'], {}), "(feature_file, 'r')\n", (1668, 1687), False, 'import h5py\n'), ((622, 645), 'h5py.File', 'h5py.File', (['outfile', '"""w"""'], {}), "(outfile, 'w')\n", (631, 645), False, 'import h5py\n'), ((1830, 1848), 'numpy.asarray', 'np.asarray', (['f[key]'], {}), '(f[key])\n', (1840, 1848), True, 'import numpy as np\n'), ((1426, 1448), 'numpy.asarray', 'np.asarray', (['f[fullKey]'], {}), '(f[fullKey])\n', (1436, 1448), True, 'import numpy as np\n'), ((1516, 1538), 'numpy.asarray', 'np.asarray', (['f[fullKey]'], {}), '(f[fullKey])\n', (1526, 1538), True, 'import numpy as np\n')]
|
import numpy
from fdm.geometry import create_close_point_finder
def create_weights_distributor(close_point_finder):
def distribute(point, value):
close_points = close_point_finder(point)
distance_sum = sum(close_points.values())
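        # inverse-distance style split: closer points receive a larger share of value (weight = 1 - d / sum_d)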
return dict(
{p: (1. - distance/distance_sum)*value for p, distance in close_points.items()},
)
return distribute
def apply_statics_bc(variables, matrix, vector, bcs):
extra_bcs = extract_extra_bcs(bcs)
replace_bcs = extract_replace_bcs(bcs)
extra_bcs_number = len(extra_bcs)
_matrix = numpy.copy(matrix)
_vector = numpy.copy(vector)
    assert _rows_number(_matrix) == len(variables), 'Number of BCs must be equal "vars_number" - "real_nodes_number"'
points = list(variables)
matrix_bc_applicator = create_matrix_bc_applicator(_matrix, points, variables)
vector_bc_applicator = create_vector_bc_applicator(_vector)
for i, (scheme, value, replace) in enumerate(replace_bcs):
matrix_bc_applicator(variables[replace], scheme)
vector_bc_applicator(variables[replace], value)
initial_idx = _rows_number(matrix) - extra_bcs_number
for i, (scheme, value, _) in enumerate(extra_bcs):
matrix_bc_applicator(initial_idx + i, scheme)
vector_bc_applicator(initial_idx + i, value)
return _matrix, _vector
def apply_dynamics_bc(variables, matrix_a, matrix_b, bcs):
extra_bcs = extract_extra_bcs(bcs)
replace_bcs = extract_replace_bcs(bcs)
extra_bcs_number = len(extra_bcs)
_matrix_a = numpy.copy(matrix_a)
_matrix_b = numpy.copy(matrix_b)
assert _rows_number(_matrix_a) == len(variables), 'Number of BCs must be equal "vars_number" - "real_nodes_number"'
points = list(variables)
matrix_a_bc_applicator = create_matrix_bc_applicator(_matrix_a, points, variables)
matrix_b_bc_applicator = create_matrix_bc_applicator(_matrix_b, points, variables)
for i, (scheme_a, scheme_b, replace) in enumerate(replace_bcs):
matrix_a_bc_applicator(variables[replace], scheme_a)
matrix_b_bc_applicator(variables[replace], scheme_b)
initial_idx = _rows_number(_matrix_a) - extra_bcs_number
for i, (scheme_a, scheme_b, _) in enumerate(extra_bcs):
matrix_a_bc_applicator(initial_idx + i, scheme_a)
matrix_b_bc_applicator(initial_idx + i, scheme_b)
return _matrix_a, _matrix_b
def extract_extra_bcs(bcs):
return [bc for bc in bcs if bc.replace is None]
def extract_replace_bcs(bcs):
return [bc for bc in bcs if bc.replace is not None]
def create_matrix_bc_applicator(matrix, points, variables, tol=1e-6):
def apply(row_idx, scheme):
matrix[row_idx, :] = 0.
if len(scheme):
distributor = SchemeToNodesDistributor(points)
scheme = distributor(scheme)
scheme = scheme.drop(tol)
for p, weight in scheme.items():
col_idx = variables[p]
matrix[row_idx, col_idx] = weight
return apply
def create_vector_bc_applicator(vector):
def apply(row_idx, value):
vector[row_idx] = value
return apply
def _zero_vector_last_rows(vector, number):
_vector = numpy.zeros(vector.shape)
_vector[:-number] = vector[:-number]
return _vector
def _zero_matrix_last_rows(matrix, number):
_matrix = numpy.zeros(matrix.shape)
_matrix[:-number, :] = matrix[:-number, :]
return _matrix
def _rows_number(matrix):
return matrix.shape[0]
def _cols_number(matrix):
return matrix.shape[1]
class SchemeToNodesDistributor(object):
def __init__(self, nodes):
self._distributor = WeightsDistributor(nodes)
def __call__(self, scheme):
return scheme.distribute(self._distributor)
class WeightsDistributor(object):
def __init__(self, nodes):
self._distributor = create_weights_distributor(
create_close_point_finder(nodes)
)
def __call__(self, point, weight):
return self._distributor(point, weight)
|
[
"fdm.geometry.create_close_point_finder",
"numpy.copy",
"numpy.zeros"
] |
[((589, 607), 'numpy.copy', 'numpy.copy', (['matrix'], {}), '(matrix)\n', (599, 607), False, 'import numpy\n'), ((622, 640), 'numpy.copy', 'numpy.copy', (['vector'], {}), '(vector)\n', (632, 640), False, 'import numpy\n'), ((1565, 1585), 'numpy.copy', 'numpy.copy', (['matrix_a'], {}), '(matrix_a)\n', (1575, 1585), False, 'import numpy\n'), ((1602, 1622), 'numpy.copy', 'numpy.copy', (['matrix_b'], {}), '(matrix_b)\n', (1612, 1622), False, 'import numpy\n'), ((3201, 3226), 'numpy.zeros', 'numpy.zeros', (['vector.shape'], {}), '(vector.shape)\n', (3212, 3226), False, 'import numpy\n'), ((3347, 3372), 'numpy.zeros', 'numpy.zeros', (['matrix.shape'], {}), '(matrix.shape)\n', (3358, 3372), False, 'import numpy\n'), ((3896, 3928), 'fdm.geometry.create_close_point_finder', 'create_close_point_finder', (['nodes'], {}), '(nodes)\n', (3921, 3928), False, 'from fdm.geometry import create_close_point_finder\n')]
|
from timebox.timebox import TimeBox
from timebox.utils.exceptions import InvalidPandasIndexError
import pandas as pd
import numpy as np
import unittest
import os
import logging
class TestTimeBoxPandas(unittest.TestCase):
def test_save_pandas(self):
file_name = 'save_pandas.npb'
df = pd.read_csv('timebox/tests/data/ETH-USD_combined_utc.csv', index_col=0)
tb = TimeBox.save_pandas(df, file_name)
self.assertTrue(os.path.exists(file_name))
tb_read = TimeBox(file_name)
df2 = tb_read.to_pandas()
df_columns = list(df)
df_columns.sort()
df2_columns = list(df2)
df2_columns.sort()
self.assertListEqual(df_columns, df2_columns)
os.remove(file_name)
return
def test_pandas_errors(self):
df = pd.DataFrame.from_dict(
{
'value_1': np.array([0, 1, 2], dtype=np.uint8)
},
orient='columns'
)
with self.assertRaises(InvalidPandasIndexError):
TimeBox.save_pandas(df, 'not_going_to_save.npb')
return
def test_io_pandas(self):
file_name = 'save_pandas.npb'
df = pd.read_csv('timebox/tests/data/test1.csv').set_index('date')
logging.debug('Starting test_io_pandas with df\n{}'.format(df))
tb = TimeBox.save_pandas(df, file_name)
tb_read = TimeBox(file_name)
df2 = tb_read.to_pandas()
self.assertListEqual(list(df.columns.sort_values()), list(df2.columns.sort_values()))
df = df.sort_index()
# ensure index is same
for i in range(0, len(df.index)):
self.assertEqual(pd.to_datetime(df.index[i]), pd.to_datetime(df2.index[i]))
# ensure each value is the same
columns = df.columns
for c in columns:
logging.debug('Testing column: {}'.format(c))
logging.debug('Original frame:{}'.format(df[c]))
logging.debug('TB frame:{}'.format(df2[c]))
self.assertEqual(df[c].sum(), df2[c].sum())
os.remove(file_name)
return
if __name__ == '__main__':
unittest.main()
|
[
"os.path.exists",
"pandas.read_csv",
"timebox.timebox.TimeBox.save_pandas",
"timebox.timebox.TimeBox",
"numpy.array",
"unittest.main",
"pandas.to_datetime",
"os.remove"
] |
[((2124, 2139), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2137, 2139), False, 'import unittest\n'), ((306, 377), 'pandas.read_csv', 'pd.read_csv', (['"""timebox/tests/data/ETH-USD_combined_utc.csv"""'], {'index_col': '(0)'}), "('timebox/tests/data/ETH-USD_combined_utc.csv', index_col=0)\n", (317, 377), True, 'import pandas as pd\n'), ((391, 425), 'timebox.timebox.TimeBox.save_pandas', 'TimeBox.save_pandas', (['df', 'file_name'], {}), '(df, file_name)\n', (410, 425), False, 'from timebox.timebox import TimeBox\n'), ((496, 514), 'timebox.timebox.TimeBox', 'TimeBox', (['file_name'], {}), '(file_name)\n', (503, 514), False, 'from timebox.timebox import TimeBox\n'), ((728, 748), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (737, 748), False, 'import os\n'), ((1329, 1363), 'timebox.timebox.TimeBox.save_pandas', 'TimeBox.save_pandas', (['df', 'file_name'], {}), '(df, file_name)\n', (1348, 1363), False, 'from timebox.timebox import TimeBox\n'), ((1382, 1400), 'timebox.timebox.TimeBox', 'TimeBox', (['file_name'], {}), '(file_name)\n', (1389, 1400), False, 'from timebox.timebox import TimeBox\n'), ((2056, 2076), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (2065, 2076), False, 'import os\n'), ((450, 475), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (464, 475), False, 'import os\n'), ((1036, 1084), 'timebox.timebox.TimeBox.save_pandas', 'TimeBox.save_pandas', (['df', '"""not_going_to_save.npb"""'], {}), "(df, 'not_going_to_save.npb')\n", (1055, 1084), False, 'from timebox.timebox import TimeBox\n'), ((877, 912), 'numpy.array', 'np.array', (['[0, 1, 2]'], {'dtype': 'np.uint8'}), '([0, 1, 2], dtype=np.uint8)\n', (885, 912), True, 'import numpy as np\n'), ((1182, 1225), 'pandas.read_csv', 'pd.read_csv', (['"""timebox/tests/data/test1.csv"""'], {}), "('timebox/tests/data/test1.csv')\n", (1193, 1225), True, 'import pandas as pd\n'), ((1661, 1688), 'pandas.to_datetime', 'pd.to_datetime', (['df.index[i]'], {}), '(df.index[i])\n', (1675, 1688), True, 'import pandas as pd\n'), ((1690, 1718), 'pandas.to_datetime', 'pd.to_datetime', (['df2.index[i]'], {}), '(df2.index[i])\n', (1704, 1718), True, 'import pandas as pd\n')]
|
#!/usr/bin/env python3
"""
Build the demos
Usage: python setup.py build_ext -i
"""
import numpy as np
from distutils.core import setup
from Cython.Build import cythonize
from setuptools.extension import Extension
from os.path import join
extending = Extension("extending",
sources=['extending.pyx'],
include_dirs=[np.get_include()])
distributions = Extension("extending_distributions",
sources=['extending_distributions.pyx',
join('..', '..', 'src',
'distributions', 'distributions.c')],
include_dirs=[np.get_include()])
extensions = [extending, distributions]
setup(
ext_modules=cythonize(extensions)
)
|
[
"Cython.Build.cythonize",
"os.path.join",
"numpy.get_include"
] |
[((760, 781), 'Cython.Build.cythonize', 'cythonize', (['extensions'], {}), '(extensions)\n', (769, 781), False, 'from Cython.Build import cythonize\n'), ((361, 377), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (375, 377), True, 'import numpy as np\n'), ((534, 593), 'os.path.join', 'join', (['""".."""', '""".."""', '"""src"""', '"""distributions"""', '"""distributions.c"""'], {}), "('..', '..', 'src', 'distributions', 'distributions.c')\n", (538, 593), False, 'from os.path import join\n'), ((676, 692), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (690, 692), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import sys
def set_path(path: str):
try:
sys.path.index(path)
except ValueError:
sys.path.insert(0, path)
# programmatically set the path to the 'sim-environment' directory (alternatively, set PYTHONPATH)
set_path('/media/suresh/research/awesome-robotics/active-slam/catkin_ws/src/sim-environment/src')
import measurement as m
import utils.constants as constants
import numpy as np
import torch
import random
from pathlib import Path
np.random.seed(constants.RANDOM_SEED)
random.seed(constants.RANDOM_SEED)
torch.cuda.manual_seed(constants.RANDOM_SEED)
torch.manual_seed(constants.RANDOM_SEED)
np.set_printoptions(precision=3)
Path("saved_models").mkdir(parents=True, exist_ok=True)
Path("best_models").mkdir(parents=True, exist_ok=True)
if __name__ == '__main__':
print('running measurement model training')
measurement = m.Measurement(render=False, pretrained=False)
train_epochs = 500
eval_epochs = 5
measurement.train(train_epochs, eval_epochs)
# file_name = '../bckp/dec_13/best_models/likelihood_mse_best.pth'
# measurement.test(file_name)
del measurement
|
[
"torch.manual_seed",
"sys.path.insert",
"pathlib.Path",
"random.seed",
"measurement.Measurement",
"sys.path.index",
"numpy.random.seed",
"torch.cuda.manual_seed",
"numpy.set_printoptions"
] |
[((486, 523), 'numpy.random.seed', 'np.random.seed', (['constants.RANDOM_SEED'], {}), '(constants.RANDOM_SEED)\n', (500, 523), True, 'import numpy as np\n'), ((524, 558), 'random.seed', 'random.seed', (['constants.RANDOM_SEED'], {}), '(constants.RANDOM_SEED)\n', (535, 558), False, 'import random\n'), ((559, 604), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['constants.RANDOM_SEED'], {}), '(constants.RANDOM_SEED)\n', (581, 604), False, 'import torch\n'), ((605, 645), 'torch.manual_seed', 'torch.manual_seed', (['constants.RANDOM_SEED'], {}), '(constants.RANDOM_SEED)\n', (622, 645), False, 'import torch\n'), ((646, 678), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (665, 678), True, 'import numpy as np\n'), ((885, 930), 'measurement.Measurement', 'm.Measurement', ([], {'render': '(False)', 'pretrained': '(False)'}), '(render=False, pretrained=False)\n', (898, 930), True, 'import measurement as m\n'), ((77, 97), 'sys.path.index', 'sys.path.index', (['path'], {}), '(path)\n', (91, 97), False, 'import sys\n'), ((680, 700), 'pathlib.Path', 'Path', (['"""saved_models"""'], {}), "('saved_models')\n", (684, 700), False, 'from pathlib import Path\n'), ((736, 755), 'pathlib.Path', 'Path', (['"""best_models"""'], {}), "('best_models')\n", (740, 755), False, 'from pathlib import Path\n'), ((129, 153), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (144, 153), False, 'import sys\n')]
|
import argparse
import os
import json
from torch.utils.tensorboard import SummaryWriter
import random
import numpy as np
import zipfile
import torch
from transformers import AdamW, get_linear_schedule_with_warmup
from LAUG.nlu.jointBERT_new.dataloader import Dataloader
from LAUG.nlu.jointBERT_new.jointBERT import JointBERT
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
parser = argparse.ArgumentParser(description="Train a model.")
parser.add_argument('--config_path',
help='path to config file')
if __name__ == '__main__':
args = parser.parse_args()
config = json.load(open(args.config_path))
data_dir = config['data_dir']
output_dir = config['output_dir']
log_dir = config['log_dir']
DEVICE = config['DEVICE']
set_seed(config['seed'])
if 'multiwoz' in data_dir:
print('-'*20 + 'dataset:multiwoz' + '-'*20)
from LAUG.nlu.jointBERT_new.multiwoz.postprocess import is_slot_da, calculateF1, recover_intent
elif 'camrest' in data_dir:
print('-' * 20 + 'dataset:camrest' + '-' * 20)
from LAUG.nlu.jointBERT_new.camrest.postprocess import is_slot_da, calculateF1, recover_intent
elif 'crosswoz' in data_dir:
print('-' * 20 + 'dataset:crosswoz' + '-' * 20)
from LAUG.nlu.jointBERT_new.crosswoz.postprocess import is_slot_da, calculateF1, recover_intent
elif 'frames' in data_dir:
print('-' * 20 + 'dataset:frames' + '-' * 20)
from LAUG.nlu.jointBERT_new.frames.postprocess import is_slot_da, calculateF1, recover_intent
intent_vocab = json.load(open(os.path.join(data_dir, 'intent_vocab.json')))
tag_vocab = json.load(open(os.path.join(data_dir, 'tag_vocab.json')))
req_vocab = json.load(open(os.path.join(data_dir, 'req_vocab.json')))
req_slot_vocab = json.load(open(os.path.join(data_dir, 'req_slot_vocab.json')))
slot_intent_vocab = json.load(open(os.path.join(data_dir,'slot_intent_vocab.json')))
print('intent_vocab = ',intent_vocab)
print('tag_vocab = ', tag_vocab)
print('req_vocab = ', req_vocab)
print('req_slot_vocab = ', req_slot_vocab)
print('='*100)
dataloader = Dataloader(intent_vocab=intent_vocab, tag_vocab=tag_vocab, req_vocab=req_vocab, req_slot_vocab=req_slot_vocab, slot_intent_vocab=slot_intent_vocab,
pretrained_weights=config['model']['pretrained_weights'])
print('intent num:', len(intent_vocab))
print('tag num:', len(tag_vocab))
print('req num:', len(req_vocab))
for data_key in ['train', 'val', 'test']:
dataloader.load_data(json.load(open(os.path.join(data_dir, '{}_data.json'.format(data_key)))), data_key,
cut_sen_len=config['cut_sen_len'], use_bert_tokenizer=config['use_bert_tokenizer'])
print('{} set size: {}'.format(data_key, len(dataloader.data[data_key])))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
writer = SummaryWriter(log_dir)
model = JointBERT(config['model'], DEVICE, dataloader.tag_dim, dataloader.intent_dim, dataloader.req_dim, dataloader, dataloader.intent_weight, dataloader.req_weight)
model.to(DEVICE)
if config['model']['finetune']:
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if
not any(nd in n for nd in no_decay) and p.requires_grad],
'weight_decay': config['model']['weight_decay']},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad],
'weight_decay': 0.0}
]
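        # bias and LayerNorm parameters are excluded from weight decay, the usual convention for BERT fine-tuning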
optimizer = AdamW(optimizer_grouped_parameters, lr=config['model']['learning_rate'],
eps=config['model']['adam_epsilon'])
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=config['model']['warmup_steps'],
num_training_steps=config['model']['max_step'])
else:
for n, p in model.named_parameters():
if 'bert' in n:
p.requires_grad = False
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
lr=config['model']['learning_rate'])
for name, param in model.named_parameters():
print(name, param.shape, param.device, param.requires_grad)
max_step = config['model']['max_step']
check_step = config['model']['check_step']
batch_size = config['model']['batch_size']
print('check_step = {}, batch_size = {}'.format(check_step, batch_size))
model.zero_grad()
train_slot_loss, train_intent_loss, train_req_loss = 0, 0, 0
best_val_f1 = 0.
writer.add_text('config', json.dumps(config))
for step in range(1, max_step + 1):
model.train()
batched_data = dataloader.get_train_batch(batch_size)
batched_data = tuple(t.to(DEVICE) for t in batched_data)
word_seq_tensor, tag_seq_tensor, intent_tensor, req_tensor, req_mask_tensor, word_mask_tensor, tag_mask_tensor,base_tag_mask_tensor, context_seq_tensor, context_mask_tensor = batched_data
if not config['model']['context']:
context_seq_tensor, context_mask_tensor = None, None
_, _, _, slot_loss, intent_loss, req_loss = model.forward(word_seq_tensor, word_mask_tensor, tag_seq_tensor, tag_mask_tensor,
intent_tensor, req_tensor, req_mask_tensor, context_seq_tensor, context_mask_tensor)
train_slot_loss += slot_loss.item()
train_intent_loss += intent_loss.item()
train_req_loss += req_loss.item()
loss = slot_loss + intent_loss + req_loss
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
if config['model']['finetune']:
scheduler.step() # Update learning rate schedule
model.zero_grad()
if step % check_step == 0:
train_slot_loss = train_slot_loss / check_step
train_intent_loss = train_intent_loss / check_step
train_req_loss = train_req_loss / check_step
print('[%d|%d] step' % (step, max_step))
print('\t slot loss:', train_slot_loss)
print('\t intent loss:', train_intent_loss)
print('\t request loss:', train_req_loss)
predict_golden = {'intent': [], 'slot': [], 'req':[],'overall': []}
val_slot_loss, val_intent_loss,val_req_loss = 0, 0,0
model.eval()
for pad_batch, ori_batch, real_batch_size in dataloader.yield_batches(batch_size, data_key='val'):
pad_batch = tuple(t.to(DEVICE) for t in pad_batch)
word_seq_tensor, tag_seq_tensor, intent_tensor, req_tensor, req_mask_tensor, word_mask_tensor, tag_mask_tensor, base_tag_mask_tensor, context_seq_tensor, context_mask_tensor = pad_batch
if not config['model']['context']:
context_seq_tensor, context_mask_tensor = None, None
with torch.no_grad():
slot_logits, intent_logits, req_logits,slot_loss, intent_loss,req_loss = model.forward(word_seq_tensor,
word_mask_tensor,
tag_seq_tensor,
tag_mask_tensor,
intent_tensor,
req_tensor,
req_mask_tensor,
context_seq_tensor,
context_mask_tensor)
val_slot_loss += slot_loss.item() * real_batch_size
val_intent_loss += intent_loss.item() * real_batch_size
val_req_loss += req_loss.item() * real_batch_size
for j in range(real_batch_size):
predict_intent, predict_req, predict_slot, predict_overall = recover_intent(dataloader, intent_logits[j], req_logits[j*dataloader.req_dim: (j+1)*dataloader.req_dim], slot_logits[j*dataloader.slot_intent_dim:(j+1)*dataloader.slot_intent_dim], base_tag_mask_tensor[j*dataloader.slot_intent_dim:(j+1)*dataloader.slot_intent_dim],
ori_batch[j][0], ori_batch[j][-4])
#assert(ori_batch[j][3] != [])
predict_golden['overall'].append({
'predict': predict_overall,
'golden': ori_batch[j][3]
})
predict_golden['req'].append({
'predict':predict_req,
'golden':ori_batch[j][5] #req
})
'''
predict_golden['slot'].append({
'predict': predict_slot,#[x for x in predicts if is_slot_da(x)],
'golden': ori_batch[j][1]#tag
})
'''
predict_golden['intent'].append({
'predict': predict_intent,
'golden': ori_batch[j][2]#intent
})
for j in range(10):
writer.add_text('val_sample_{}'.format(j),
json.dumps(predict_golden['overall'][j], indent=2, ensure_ascii=False),
global_step=step)
total = len(dataloader.data['val'])
val_slot_loss /= total
val_intent_loss /= total
val_req_loss /= total
print('%d samples val' % total)
print('\t slot loss:', val_slot_loss)
print('\t intent loss:', val_intent_loss)
print('\t req loss:', val_req_loss)
writer.add_scalar('intent_loss/train', train_intent_loss, global_step=step)
writer.add_scalar('intent_loss/val', val_intent_loss, global_step=step)
writer.add_scalar('req_loss/train', train_req_loss, global_step=step)
writer.add_scalar('req_loss/val', val_req_loss, global_step=step)
writer.add_scalar('slot_loss/train', train_slot_loss, global_step=step)
writer.add_scalar('slot_loss/val', val_slot_loss, global_step=step)
for x in ['intent','req','overall']:
#for x in ['intent', 'slot', 'req','overall']:# pass slot
precision, recall, F1 = calculateF1(predict_golden[x], x=='overall')
print('-' * 20 + x + '-' * 20)
print('\t Precision: %.2f' % (100 * precision))
print('\t Recall: %.2f' % (100 * recall))
print('\t F1: %.2f' % (100 * F1))
writer.add_scalar('val_{}/precision'.format(x), precision, global_step=step)
writer.add_scalar('val_{}/recall'.format(x), recall, global_step=step)
writer.add_scalar('val_{}/F1'.format(x), F1, global_step=step)
if F1 > best_val_f1:
best_val_f1 = F1
torch.save(model.state_dict(), os.path.join(output_dir, 'pytorch_model.bin'))
print('best val F1 %.4f' % best_val_f1)
print('save on', output_dir)
            train_slot_loss, train_intent_loss, train_req_loss = 0, 0, 0
writer.add_text('val overall F1', '%.2f' % (100 * best_val_f1))
writer.close()
model_path = os.path.join(output_dir, 'pytorch_model.bin')
zip_path = config['zipped_model_path']
print('zip model to', zip_path)
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zf:
zf.write(model_path)
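    # ------------------------------------------------------------------
    # Hedged usage sketch (added for illustration, not part of the original
    # training script): how the zipped checkpoint written above could be
    # restored later. Because zf.write(model_path) keeps the file's relative
    # path, extracting the archive typically recreates `output_dir`; the
    # JointBERT constructor arguments are assumed to match the training run.
    #
    #   with zipfile.ZipFile(config['zipped_model_path']) as zf:
    #       zf.extractall('.')
    #   model.load_state_dict(
    #       torch.load(os.path.join(output_dir, 'pytorch_model.bin'),
    #                  map_location='cpu'))
    #   model.eval()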
|
[
"torch.manual_seed",
"torch.utils.tensorboard.SummaryWriter",
"os.path.exists",
"LAUG.nlu.jointBERT_new.frames.postprocess.calculateF1",
"argparse.ArgumentParser",
"os.makedirs",
"zipfile.ZipFile",
"transformers.AdamW",
"transformers.get_linear_schedule_with_warmup",
"json.dumps",
"os.path.join",
"LAUG.nlu.jointBERT_new.frames.postprocess.recover_intent",
"random.seed",
"LAUG.nlu.jointBERT_new.jointBERT.JointBERT",
"LAUG.nlu.jointBERT_new.dataloader.Dataloader",
"numpy.random.seed",
"torch.no_grad"
] |
[((452, 505), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train a model."""'}), "(description='Train a model.')\n", (475, 505), False, 'import argparse\n'), ((365, 382), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (376, 382), False, 'import random\n'), ((388, 408), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (402, 408), True, 'import numpy as np\n'), ((414, 437), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (431, 437), False, 'import torch\n'), ((2259, 2479), 'LAUG.nlu.jointBERT_new.dataloader.Dataloader', 'Dataloader', ([], {'intent_vocab': 'intent_vocab', 'tag_vocab': 'tag_vocab', 'req_vocab': 'req_vocab', 'req_slot_vocab': 'req_slot_vocab', 'slot_intent_vocab': 'slot_intent_vocab', 'pretrained_weights': "config['model']['pretrained_weights']"}), "(intent_vocab=intent_vocab, tag_vocab=tag_vocab, req_vocab=\n req_vocab, req_slot_vocab=req_slot_vocab, slot_intent_vocab=\n slot_intent_vocab, pretrained_weights=config['model']['pretrained_weights']\n )\n", (2269, 2479), False, 'from LAUG.nlu.jointBERT_new.dataloader import Dataloader\n'), ((3133, 3155), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['log_dir'], {}), '(log_dir)\n', (3146, 3155), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((3171, 3338), 'LAUG.nlu.jointBERT_new.jointBERT.JointBERT', 'JointBERT', (["config['model']", 'DEVICE', 'dataloader.tag_dim', 'dataloader.intent_dim', 'dataloader.req_dim', 'dataloader', 'dataloader.intent_weight', 'dataloader.req_weight'], {}), "(config['model'], DEVICE, dataloader.tag_dim, dataloader.\n intent_dim, dataloader.req_dim, dataloader, dataloader.intent_weight,\n dataloader.req_weight)\n", (3180, 3338), False, 'from LAUG.nlu.jointBERT_new.jointBERT import JointBERT\n'), ((12210, 12255), 'os.path.join', 'os.path.join', (['output_dir', '"""pytorch_model.bin"""'], {}), "(output_dir, 'pytorch_model.bin')\n", (12222, 12255), False, 'import os\n'), ((2989, 3015), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (3003, 3015), False, 'import os\n'), ((3026, 3049), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (3037, 3049), False, 'import os\n'), ((3062, 3085), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (3076, 3085), False, 'import os\n'), ((3096, 3116), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (3107, 3116), False, 'import os\n'), ((3884, 3997), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': "config['model']['learning_rate']", 'eps': "config['model']['adam_epsilon']"}), "(optimizer_grouped_parameters, lr=config['model']['learning_rate'],\n eps=config['model']['adam_epsilon'])\n", (3889, 3997), False, 'from transformers import AdamW, get_linear_schedule_with_warmup\n'), ((4042, 4187), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': "config['model']['warmup_steps']", 'num_training_steps': "config['model']['max_step']"}), "(optimizer, num_warmup_steps=config['model']\n ['warmup_steps'], num_training_steps=config['model']['max_step'])\n", (4073, 4187), False, 'from transformers import AdamW, get_linear_schedule_with_warmup\n'), ((5019, 5037), 'json.dumps', 'json.dumps', (['config'], {}), '(config)\n', (5029, 5037), False, 'import json\n'), ((12349, 12401), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_path', '"""w"""', 'zipfile.ZIP_DEFLATED'], {}), "(zip_path, 'w', 
zipfile.ZIP_DEFLATED)\n", (12364, 12401), False, 'import zipfile\n'), ((1683, 1726), 'os.path.join', 'os.path.join', (['data_dir', '"""intent_vocab.json"""'], {}), "(data_dir, 'intent_vocab.json')\n", (1695, 1726), False, 'import os\n'), ((1761, 1801), 'os.path.join', 'os.path.join', (['data_dir', '"""tag_vocab.json"""'], {}), "(data_dir, 'tag_vocab.json')\n", (1773, 1801), False, 'import os\n'), ((1836, 1876), 'os.path.join', 'os.path.join', (['data_dir', '"""req_vocab.json"""'], {}), "(data_dir, 'req_vocab.json')\n", (1848, 1876), False, 'import os\n'), ((1916, 1961), 'os.path.join', 'os.path.join', (['data_dir', '"""req_slot_vocab.json"""'], {}), "(data_dir, 'req_slot_vocab.json')\n", (1928, 1961), False, 'import os\n'), ((2004, 2052), 'os.path.join', 'os.path.join', (['data_dir', '"""slot_intent_vocab.json"""'], {}), "(data_dir, 'slot_intent_vocab.json')\n", (2016, 2052), False, 'import os\n'), ((11242, 11288), 'LAUG.nlu.jointBERT_new.frames.postprocess.calculateF1', 'calculateF1', (['predict_golden[x]', "(x == 'overall')"], {}), "(predict_golden[x], x == 'overall')\n", (11253, 11288), False, 'from LAUG.nlu.jointBERT_new.frames.postprocess import is_slot_da, calculateF1, recover_intent\n'), ((7397, 7412), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7410, 7412), False, 'import torch\n'), ((8720, 9055), 'LAUG.nlu.jointBERT_new.frames.postprocess.recover_intent', 'recover_intent', (['dataloader', 'intent_logits[j]', 'req_logits[j * dataloader.req_dim:(j + 1) * dataloader.req_dim]', 'slot_logits[j * dataloader.slot_intent_dim:(j + 1) * dataloader.slot_intent_dim\n ]', 'base_tag_mask_tensor[j * dataloader.slot_intent_dim:(j + 1) * dataloader.\n slot_intent_dim]', 'ori_batch[j][0]', 'ori_batch[j][-4]'], {}), '(dataloader, intent_logits[j], req_logits[j * dataloader.\n req_dim:(j + 1) * dataloader.req_dim], slot_logits[j * dataloader.\n slot_intent_dim:(j + 1) * dataloader.slot_intent_dim],\n base_tag_mask_tensor[j * dataloader.slot_intent_dim:(j + 1) *\n dataloader.slot_intent_dim], ori_batch[j][0], ori_batch[j][-4])\n', (8734, 9055), False, 'from LAUG.nlu.jointBERT_new.frames.postprocess import is_slot_da, calculateF1, recover_intent\n'), ((10075, 10145), 'json.dumps', 'json.dumps', (["predict_golden['overall'][j]"], {'indent': '(2)', 'ensure_ascii': '(False)'}), "(predict_golden['overall'][j], indent=2, ensure_ascii=False)\n", (10085, 10145), False, 'import json\n'), ((11892, 11937), 'os.path.join', 'os.path.join', (['output_dir', '"""pytorch_model.bin"""'], {}), "(output_dir, 'pytorch_model.bin')\n", (11904, 11937), False, 'import os\n')]
|
import pandas as pd
import numpy as np
import yaml
import os
import argparse
from sklearn.impute import KNNImputer
from logger import App_Logger
file_object=open("application_logging/Loggings.txt", 'a+')
logger_object=App_Logger()
def read_params(config_path):
with open(config_path) as yaml_file:
config=yaml.safe_load(yaml_file)
return config
def preprocessing(config_path):
config= read_params(config_path)
train_data_path= config["split_data"]["train_path"]
test_data_path= config["split_data"]["test_path"]
train_processed_path= config["processed"]["train_path"]
test_processed_path= config["processed"]["test_path"]
"""
Method Name: preprocessing
    Description: This method replaces the missing ('?') values with NaN values and applies the KNN imputer to both the train and test sets.
    Output: pandas DataFrames written to .csv files.
On Failure: Raise Exception
"""
logger_object.log(file_object,'Entered the preprocessing')
try:
train_data=pd.read_csv(train_data_path)
test_data=pd.read_csv(test_data_path)
# ? is replaced with np.nan in train data
for column in train_data.columns:
count = train_data[column][ train_data[column]=='?'].count()
if count!=0:
train_data[column] = train_data[column].replace('?',np.nan)
train_data['sex'] = train_data ['sex'].replace({'F' : 0, 'M' : 1})
for column in train_data.columns:
if len(train_data[column].unique())==2:
train_data[column] = train_data[column].replace({'f' : 0, 't' : 1})
elif len(train_data[column].unique())==1:
train_data[column] = train_data[column].replace({'f' : 0})
train_data['Class'] = train_data['Class'].replace({'negative' : 0, 'compensated_hypothyroid' : 1,'primary_hypothyroid' :2,'secondary_hypothyroid':3})
train_data["Class"] = train_data["Class"].apply(lambda value : 1 if value >=1 else 0)
imputer=KNNImputer(n_neighbors=3, weights='uniform',missing_values=np.nan)
new_array=imputer.fit_transform(train_data)
train_impu_data=pd.DataFrame(data=np.round(new_array), columns=train_data.columns)
train_impu_data.to_csv(train_processed_path,index=False)
#############################################################################################################################################
# ? is replaced with np.nan in test data
for column in test_data.columns:
count = test_data[column][ test_data[column]=='?'].count()
if count!=0:
test_data[column] = test_data[column].replace('?',np.nan)
test_data['sex'] = test_data['sex'].replace({'F' : 0, 'M' : 1})
for column in test_data.columns:
if len(test_data[column].unique())==2:
test_data[column] = test_data[column].replace({'f' : 0, 't' : 1})
elif len(test_data[column].unique())==1:
test_data[column] = test_data[column].replace({'f' : 0})
test_data['Class'] = test_data['Class'].replace({'negative' : 0, 'compensated_hypothyroid' : 1,'primary_hypothyroid' :2,'secondary_hypothyroid':3})
test_data["Class"] = test_data["Class"].apply(lambda value : 1 if value >=1 else 0)
imputer=KNNImputer(n_neighbors=3, weights='uniform',missing_values=np.nan)
new_array=imputer.fit_transform(test_data)
test_impu_data=pd.DataFrame(data=np.round(new_array), columns=test_data.columns)
test_impu_data.to_csv(test_processed_path,index=False)
logger_object.log(file_object,'preprocessing was done Successful and Exited')
except Exception as e:
logger_object.log(file_object,'Exception occured in preprocessing . Exception message: '+str(e))
logger_object.log(file_object,'preprocessing Unsuccessful')
raise Exception()
if __name__=="__main__":
args = argparse.ArgumentParser()
args.add_argument("--config", default="params.yaml")
parsed_args = args.parse_args()
data = preprocessing(config_path=parsed_args.config)
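    # Hedged sketch (added for illustration) of the params.yaml layout this
    # script assumes; only the keys actually read above are shown and the file
    # names are placeholders:
    #
    #   split_data:
    #     train_path: data/raw/train.csv
    #     test_path: data/raw/test.csv
    #   processed:
    #     train_path: data/processed/train.csv
    #     test_path: data/processed/test.csv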
|
[
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.impute.KNNImputer",
"yaml.safe_load",
"logger.App_Logger",
"numpy.round"
] |
[((228, 240), 'logger.App_Logger', 'App_Logger', ([], {}), '()\n', (238, 240), False, 'from logger import App_Logger\n'), ((4068, 4093), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4091, 4093), False, 'import argparse\n'), ((332, 357), 'yaml.safe_load', 'yaml.safe_load', (['yaml_file'], {}), '(yaml_file)\n', (346, 357), False, 'import yaml\n'), ((1029, 1057), 'pandas.read_csv', 'pd.read_csv', (['train_data_path'], {}), '(train_data_path)\n', (1040, 1057), True, 'import pandas as pd\n'), ((1077, 1104), 'pandas.read_csv', 'pd.read_csv', (['test_data_path'], {}), '(test_data_path)\n', (1088, 1104), True, 'import pandas as pd\n'), ((2064, 2131), 'sklearn.impute.KNNImputer', 'KNNImputer', ([], {'n_neighbors': '(3)', 'weights': '"""uniform"""', 'missing_values': 'np.nan'}), "(n_neighbors=3, weights='uniform', missing_values=np.nan)\n", (2074, 2131), False, 'from sklearn.impute import KNNImputer\n'), ((3425, 3492), 'sklearn.impute.KNNImputer', 'KNNImputer', ([], {'n_neighbors': '(3)', 'weights': '"""uniform"""', 'missing_values': 'np.nan'}), "(n_neighbors=3, weights='uniform', missing_values=np.nan)\n", (3435, 3492), False, 'from sklearn.impute import KNNImputer\n'), ((2227, 2246), 'numpy.round', 'np.round', (['new_array'], {}), '(new_array)\n', (2235, 2246), True, 'import numpy as np\n'), ((3586, 3605), 'numpy.round', 'np.round', (['new_array'], {}), '(new_array)\n', (3594, 3605), True, 'import numpy as np\n')]
|
import numpy as np
import csv as csv
from clean_data import clean_data
from join_columns import join_columns
from fix_decimals import add_int, cut_decimals
def preprocess_dataset():
preprocess_data('train', False)
preprocess_data('test', False)
preprocess_data('train', True)
preprocess_data('test', True)
def preprocess_data(data_name, encode_features):
name = data_name
raw = list()
with open("./data/raw_" + data_name + ".csv") as f:
raw_reader = csv.reader(f, delimiter=",")
for row in raw_reader:
raw.append(row)
raw = np.array(raw)
raw = clean_data(raw)
if encode_features:
raw = join_columns(raw, ["sanitario1", "sanitario2", "sanitario3", "sanitario5", "sanitario6"], ["c","c","c","c","o1"], "sanitario", [1,2,3,4], {"o1":"sanioth"})
raw = join_columns(raw, ["energcocinar1", "energcocinar2", "energcocinar3", "energcocinar4"], ["c","c","c","c"], "energcocinar", [1,4,2,3])
raw = join_columns(raw, ["elimbasu1", "elimbasu2", "elimbasu3", "elimbasu4", "elimbasu6"], ["c","c","c","c","o1"], "elimbasu", [4,3,2,1], {"o1":"elimoth"})
#raw = np.delete(raw, np.where(raw[0,:] == "elimbasu5")[0][0], axis=1) #this column has been removed inside the clean_data function since it has 0 mean and 0 variance
raw = join_columns(raw, ["epared1", "epared2", "epared3"], ["c","c","c"], "epared", [1,2,3])
raw = join_columns(raw, ["etecho1", "etecho2", "etecho3"], ["c","c","c"], "etecho", [1,2,3])
raw = join_columns(raw, ["eviv1", "eviv2", "eviv3"], ["c","c","c"], "eviv", [1,2,3])
raw = join_columns(raw, ["female", "male"], ["c","c"], "gender", [0,1])
raw = join_columns(raw, ["parentesco1", "parentesco2", "parentesco3", "parentesco4", "parentesco5", "parentesco6", "parentesco7", "parentesco8", "parentesco9", "parentesco10", "parentesco11", "parentesco12"], ["c","c","c","c","c","c","c","c","c","c","c","c"], "parentesco", [1,2,3,4,5,6,7,8,9,10,11,12])
raw = join_columns(raw, ["instlevel1", "instlevel2", "instlevel3", "instlevel4", "instlevel5", "instlevel6", "instlevel7", "instlevel8", "instlevel9"], ["c","c","c","c","c","c","c","c","c"], "instlevel", [1,2,3,4,5,6,7,8,9])
raw = join_columns(raw, ["tipovivi1", "tipovivi2", "tipovivi3", "tipovivi4", "tipovivi5"], ["c","c","c","c","o1"], "tipovivi", [1,2,3,4], {"o1":"tipooth"})
raw = join_columns(raw, ["area2", "area1"], ["c","c"], "area", [0,1])
name = name + '_enc'
raw = add_int(raw, 0)
raw = cut_decimals(raw, 2)
#saving new dataset
print('exporting ' + name + '.csv')
np.savetxt('./data/' + name + '.csv', raw, delimiter=';', fmt='%s')
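# Hedged example entry point (added for illustration; the original module only
# defines the functions above). Running the file directly would regenerate all
# four CSV variants, assuming the ./data/raw_train.csv and ./data/raw_test.csv
# inputs exist.
if __name__ == '__main__':
    preprocess_dataset()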
|
[
"fix_decimals.cut_decimals",
"fix_decimals.add_int",
"numpy.array",
"numpy.savetxt",
"clean_data.clean_data",
"csv.reader",
"join_columns.join_columns"
] |
[((586, 599), 'numpy.array', 'np.array', (['raw'], {}), '(raw)\n', (594, 599), True, 'import numpy as np\n'), ((610, 625), 'clean_data.clean_data', 'clean_data', (['raw'], {}), '(raw)\n', (620, 625), False, 'from clean_data import clean_data\n'), ((2508, 2523), 'fix_decimals.add_int', 'add_int', (['raw', '(0)'], {}), '(raw, 0)\n', (2515, 2523), False, 'from fix_decimals import add_int, cut_decimals\n'), ((2534, 2554), 'fix_decimals.cut_decimals', 'cut_decimals', (['raw', '(2)'], {}), '(raw, 2)\n', (2546, 2554), False, 'from fix_decimals import add_int, cut_decimals\n'), ((2624, 2691), 'numpy.savetxt', 'np.savetxt', (["('./data/' + name + '.csv')", 'raw'], {'delimiter': '""";"""', 'fmt': '"""%s"""'}), "('./data/' + name + '.csv', raw, delimiter=';', fmt='%s')\n", (2634, 2691), True, 'import numpy as np\n'), ((488, 516), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (498, 516), True, 'import csv as csv\n'), ((664, 836), 'join_columns.join_columns', 'join_columns', (['raw', "['sanitario1', 'sanitario2', 'sanitario3', 'sanitario5', 'sanitario6']", "['c', 'c', 'c', 'c', 'o1']", '"""sanitario"""', '[1, 2, 3, 4]', "{'o1': 'sanioth'}"], {}), "(raw, ['sanitario1', 'sanitario2', 'sanitario3', 'sanitario5',\n 'sanitario6'], ['c', 'c', 'c', 'c', 'o1'], 'sanitario', [1, 2, 3, 4], {\n 'o1': 'sanioth'})\n", (676, 836), False, 'from join_columns import join_columns\n'), ((834, 977), 'join_columns.join_columns', 'join_columns', (['raw', "['energcocinar1', 'energcocinar2', 'energcocinar3', 'energcocinar4']", "['c', 'c', 'c', 'c']", '"""energcocinar"""', '[1, 4, 2, 3]'], {}), "(raw, ['energcocinar1', 'energcocinar2', 'energcocinar3',\n 'energcocinar4'], ['c', 'c', 'c', 'c'], 'energcocinar', [1, 4, 2, 3])\n", (846, 977), False, 'from join_columns import join_columns\n'), ((982, 1148), 'join_columns.join_columns', 'join_columns', (['raw', "['elimbasu1', 'elimbasu2', 'elimbasu3', 'elimbasu4', 'elimbasu6']", "['c', 'c', 'c', 'c', 'o1']", '"""elimbasu"""', '[4, 3, 2, 1]', "{'o1': 'elimoth'}"], {}), "(raw, ['elimbasu1', 'elimbasu2', 'elimbasu3', 'elimbasu4',\n 'elimbasu6'], ['c', 'c', 'c', 'c', 'o1'], 'elimbasu', [4, 3, 2, 1], {\n 'o1': 'elimoth'})\n", (994, 1148), False, 'from join_columns import join_columns\n'), ((1321, 1415), 'join_columns.join_columns', 'join_columns', (['raw', "['epared1', 'epared2', 'epared3']", "['c', 'c', 'c']", '"""epared"""', '[1, 2, 3]'], {}), "(raw, ['epared1', 'epared2', 'epared3'], ['c', 'c', 'c'],\n 'epared', [1, 2, 3])\n", (1333, 1415), False, 'from join_columns import join_columns\n'), ((1422, 1516), 'join_columns.join_columns', 'join_columns', (['raw', "['etecho1', 'etecho2', 'etecho3']", "['c', 'c', 'c']", '"""etecho"""', '[1, 2, 3]'], {}), "(raw, ['etecho1', 'etecho2', 'etecho3'], ['c', 'c', 'c'],\n 'etecho', [1, 2, 3])\n", (1434, 1516), False, 'from join_columns import join_columns\n'), ((1523, 1609), 'join_columns.join_columns', 'join_columns', (['raw', "['eviv1', 'eviv2', 'eviv3']", "['c', 'c', 'c']", '"""eviv"""', '[1, 2, 3]'], {}), "(raw, ['eviv1', 'eviv2', 'eviv3'], ['c', 'c', 'c'], 'eviv', [1,\n 2, 3])\n", (1535, 1609), False, 'from join_columns import join_columns\n'), ((1616, 1683), 'join_columns.join_columns', 'join_columns', (['raw', "['female', 'male']", "['c', 'c']", '"""gender"""', '[0, 1]'], {}), "(raw, ['female', 'male'], ['c', 'c'], 'gender', [0, 1])\n", (1628, 1683), False, 'from join_columns import join_columns\n'), ((1696, 2031), 'join_columns.join_columns', 'join_columns', (['raw', "['parentesco1', 'parentesco2', 
'parentesco3', 'parentesco4', 'parentesco5',\n 'parentesco6', 'parentesco7', 'parentesco8', 'parentesco9',\n 'parentesco10', 'parentesco11', 'parentesco12']", "['c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c']", '"""parentesco"""', '[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]'], {}), "(raw, ['parentesco1', 'parentesco2', 'parentesco3',\n 'parentesco4', 'parentesco5', 'parentesco6', 'parentesco7',\n 'parentesco8', 'parentesco9', 'parentesco10', 'parentesco11',\n 'parentesco12'], ['c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c',\n 'c'], 'parentesco', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])\n", (1708, 2031), False, 'from join_columns import join_columns\n'), ((2008, 2254), 'join_columns.join_columns', 'join_columns', (['raw', "['instlevel1', 'instlevel2', 'instlevel3', 'instlevel4', 'instlevel5',\n 'instlevel6', 'instlevel7', 'instlevel8', 'instlevel9']", "['c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c']", '"""instlevel"""', '[1, 2, 3, 4, 5, 6, 7, 8, 9]'], {}), "(raw, ['instlevel1', 'instlevel2', 'instlevel3', 'instlevel4',\n 'instlevel5', 'instlevel6', 'instlevel7', 'instlevel8', 'instlevel9'],\n ['c', 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c'], 'instlevel', [1, 2, 3, 4,\n 5, 6, 7, 8, 9])\n", (2020, 2254), False, 'from join_columns import join_columns\n'), ((2241, 2407), 'join_columns.join_columns', 'join_columns', (['raw', "['tipovivi1', 'tipovivi2', 'tipovivi3', 'tipovivi4', 'tipovivi5']", "['c', 'c', 'c', 'c', 'o1']", '"""tipovivi"""', '[1, 2, 3, 4]', "{'o1': 'tipooth'}"], {}), "(raw, ['tipovivi1', 'tipovivi2', 'tipovivi3', 'tipovivi4',\n 'tipovivi5'], ['c', 'c', 'c', 'c', 'o1'], 'tipovivi', [1, 2, 3, 4], {\n 'o1': 'tipooth'})\n", (2253, 2407), False, 'from join_columns import join_columns\n'), ((2405, 2470), 'join_columns.join_columns', 'join_columns', (['raw', "['area2', 'area1']", "['c', 'c']", '"""area"""', '[0, 1]'], {}), "(raw, ['area2', 'area1'], ['c', 'c'], 'area', [0, 1])\n", (2417, 2470), False, 'from join_columns import join_columns\n')]
|
import numpy as np
from napari_plugin_engine import napari_hook_implementation
from napari_tools_menu import register_function
from napari_time_slicer import time_slicer, slice_by_slice
import mahotas as mh  # module-level import: connected_component_labeling below uses mh without a local import
import napari
from napari.types import ImageData, LabelsData
@napari_hook_implementation
def napari_experimental_provide_function():
return [
gaussian_blur,
threshold_otsu,
connected_component_labeling,
sobel_edge_detector,
binary_fill_holes,
seeded_watershed,
split_touching_objects,
euclidean_distance_map
]
@register_function(menu="Filtering / noise removal > Gaussian (n-mahotas)")
@time_slicer
def gaussian_blur(image:ImageData, sigma: float = 1, viewer: napari.Viewer = None) -> ImageData:
"""
Filters an image using a Gaussian kernel with a given sigma.
See also
--------
..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.gaussian_filter
"""
import mahotas as mh
return mh.gaussian_filter(image, sigma)
def _8bit(image):
return (image / image.max() * 255).astype(np.uint8)
@register_function(menu="Segmentation / binarization > Threshold (Otsu et al 1979, n-mahotas)")
@time_slicer
def threshold_otsu(image:ImageData, viewer: napari.Viewer = None) -> LabelsData:
"""
Thresholds an image using Otsu's technique
See also
--------
..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.otsu
"""
import mahotas as mh
image_8bit = _8bit(image)
t = mh.otsu(image_8bit)
return image_8bit > t
@register_function(menu="Segmentation / labeling > Connected component labeling (n-mahotas)")
@time_slicer
def connected_component_labeling(binary_image: LabelsData, viewer: napari.Viewer = None) -> LabelsData:
"""
Label connected regions in a binary image
See also
--------
..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.label
"""
labeled, nr_objects = mh.label(binary_image)
return labeled
@register_function(menu="Filtering / edge enhancement > Sobel edge detection (slice-by-slice, n-mahotas)")
@time_slicer
def sobel_edge_detector(image:ImageData, viewer: napari.Viewer = None) -> ImageData:
"""
Enhances edges using a sobel operator
See also
--------
..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.sobel
"""
import mahotas as mh
return mh.sobel(image, just_filter=True)
@register_function(menu="Segmentation post-processing > Binary fill holes (slice_by_slice, n-mahotas)")
@slice_by_slice
@time_slicer
def binary_fill_holes(binary_image:LabelsData, viewer: napari.Viewer = None) -> LabelsData:
"""
Fill holes in a binary image
See also
--------
..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.close_holes
"""
import mahotas as mh
return mh.close_holes(binary_image)
@register_function(menu="Segmentation / labeling > Seeded watershed (n-mahotas)")
@time_slicer
def seeded_watershed(image:ImageData, labeled_seeds:LabelsData, viewer: napari.Viewer = None) -> LabelsData:
"""
Labels all pixels in an image by flooding intensity valleys in a given image starting from labeled region seeds.
See also
--------
..[0] https://mahotas.readthedocs.io/en/latest/api.html#mahotas.cwatershed
"""
import mahotas as mh
labels = mh.cwatershed(image, labeled_seeds)
return labels
@register_function(menu="Measurement > Euclidean distance map (n-mahotas)")
@time_slicer
def euclidean_distance_map(binary_image:LabelsData, viewer: napari.Viewer = None) -> LabelsData:
"""
    Draws a Euclidean distance map from a binary image. Non-zero values in the binary image will be
replaced by the distance to the next zero pixel.
See also
--------
..[0] https://en.wikipedia.org/wiki/Distance_transform
"""
import mahotas as mh
return mh.distance(binary_image)
def _sobel_3d(image):
from scipy import ndimage as ndi
kernel = np.asarray([
[
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], [
[0, 1, 0],
[1, -6, 1],
[0, 1, 0]
], [
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
]
])
return ndi.convolve(image, kernel)
@register_function(menu="Segmentation post-processing > Split touching objects (n-mahotas)")
@time_slicer
def split_touching_objects(binary:LabelsData, sigma:float=3.5, viewer: napari.Viewer = None) -> LabelsData:
"""
Takes a binary image and draws cuts in the objects similar to the ImageJ watershed algorithm.
See also
--------
.. [0] https://imagej.nih.gov/ij/docs/menus/process.html#watershed
"""
import mahotas as mh
binary = _8bit(np.asarray(binary))
# typical way of using scikit-image watershed
distance = mh.distance(binary)
blurred_distance = mh.gaussian_filter(distance, sigma=sigma)
fp = np.ones((3,) * binary.ndim)
markers, num_labels = mh.label(mh.regmax(blurred_distance, Bc=fp))
labels = mh.cwatershed(-blurred_distance, markers)
# identify label-cutting edges
if len(binary.shape) == 2:
edges = mh.sobel(labels, just_filter=True)
edges2 = mh.sobel(binary, just_filter=True)
else: # assuming 3D
edges = _sobel_3d(labels)
edges2 = _sobel_3d(binary)
almost = np.logical_not(np.logical_xor(edges != 0, edges2 != 0)) * binary
return mh.open(almost) != 0
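# Hedged usage sketch (added for illustration, not part of the plugin): the
# functions above are plain Python functions underneath the napari/time_slicer
# decorators, so they can typically be chained directly on a NumPy array outside
# napari as well; the random image below is only a placeholder.
def _demo_pipeline():
    image = np.random.random((64, 64))
    binary = threshold_otsu(gaussian_blur(image, sigma=2))
    return connected_component_labeling(binary)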
|
[
"mahotas.label",
"mahotas.distance",
"numpy.ones",
"mahotas.close_holes",
"numpy.asarray",
"mahotas.cwatershed",
"scipy.ndimage.convolve",
"numpy.logical_xor",
"mahotas.gaussian_filter",
"mahotas.regmax",
"napari_tools_menu.register_function",
"mahotas.open",
"mahotas.sobel",
"mahotas.otsu"
] |
[((574, 648), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Filtering / noise removal > Gaussian (n-mahotas)"""'}), "(menu='Filtering / noise removal > Gaussian (n-mahotas)')\n", (591, 648), False, 'from napari_tools_menu import register_function\n'), ((1099, 1198), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Segmentation / binarization > Threshold (Otsu et al 1979, n-mahotas)"""'}), "(menu=\n 'Segmentation / binarization > Threshold (Otsu et al 1979, n-mahotas)')\n", (1116, 1198), False, 'from napari_tools_menu import register_function\n'), ((1562, 1659), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Segmentation / labeling > Connected component labeling (n-mahotas)"""'}), "(menu=\n 'Segmentation / labeling > Connected component labeling (n-mahotas)')\n", (1579, 1659), False, 'from napari_tools_menu import register_function\n'), ((2005, 2120), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Filtering / edge enhancement > Sobel edge detection (slice-by-slice, n-mahotas)"""'}), "(menu=\n 'Filtering / edge enhancement > Sobel edge detection (slice-by-slice, n-mahotas)'\n )\n", (2022, 2120), False, 'from napari_tools_menu import register_function\n'), ((2440, 2552), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Segmentation post-processing > Binary fill holes (slice_by_slice, n-mahotas)"""'}), "(menu=\n 'Segmentation post-processing > Binary fill holes (slice_by_slice, n-mahotas)'\n )\n", (2457, 2552), False, 'from napari_tools_menu import register_function\n'), ((2888, 2973), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Segmentation / labeling > Seeded watershed (n-mahotas)"""'}), "(menu='Segmentation / labeling > Seeded watershed (n-mahotas)'\n )\n", (2905, 2973), False, 'from napari_tools_menu import register_function\n'), ((3424, 3498), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Measurement > Euclidean distance map (n-mahotas)"""'}), "(menu='Measurement > Euclidean distance map (n-mahotas)')\n", (3441, 3498), False, 'from napari_tools_menu import register_function\n'), ((4312, 4408), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Segmentation post-processing > Split touching objects (n-mahotas)"""'}), "(menu=\n 'Segmentation post-processing > Split touching objects (n-mahotas)')\n", (4329, 4408), False, 'from napari_tools_menu import register_function\n'), ((987, 1019), 'mahotas.gaussian_filter', 'mh.gaussian_filter', (['image', 'sigma'], {}), '(image, sigma)\n', (1005, 1019), True, 'import mahotas as mh\n'), ((1514, 1533), 'mahotas.otsu', 'mh.otsu', (['image_8bit'], {}), '(image_8bit)\n', (1521, 1533), True, 'import mahotas as mh\n'), ((1961, 1983), 'mahotas.label', 'mh.label', (['binary_image'], {}), '(binary_image)\n', (1969, 1983), True, 'import mahotas as mh\n'), ((2404, 2437), 'mahotas.sobel', 'mh.sobel', (['image'], {'just_filter': '(True)'}), '(image, just_filter=True)\n', (2412, 2437), True, 'import mahotas as mh\n'), ((2856, 2884), 'mahotas.close_holes', 'mh.close_holes', (['binary_image'], {}), '(binary_image)\n', (2870, 2884), True, 'import mahotas as mh\n'), ((3368, 3403), 'mahotas.cwatershed', 'mh.cwatershed', (['image', 'labeled_seeds'], {}), '(image, labeled_seeds)\n', (3381, 3403), True, 'import mahotas as mh\n'), ((3899, 3924), 'mahotas.distance', 'mh.distance', (['binary_image'], {}), '(binary_image)\n', (3910, 
3924), True, 'import mahotas as mh\n'), ((3999, 4122), 'numpy.asarray', 'np.asarray', (['[[[0, 0, 0], [0, 1, 0], [0, 0, 0]], [[0, 1, 0], [1, -6, 1], [0, 1, 0]], [[0,\n 0, 0], [0, 1, 0], [0, 0, 0]]]'], {}), '([[[0, 0, 0], [0, 1, 0], [0, 0, 0]], [[0, 1, 0], [1, -6, 1], [0, \n 1, 0]], [[0, 0, 0], [0, 1, 0], [0, 0, 0]]])\n', (4009, 4122), True, 'import numpy as np\n'), ((4281, 4308), 'scipy.ndimage.convolve', 'ndi.convolve', (['image', 'kernel'], {}), '(image, kernel)\n', (4293, 4308), True, 'from scipy import ndimage as ndi\n'), ((4868, 4887), 'mahotas.distance', 'mh.distance', (['binary'], {}), '(binary)\n', (4879, 4887), True, 'import mahotas as mh\n'), ((4911, 4952), 'mahotas.gaussian_filter', 'mh.gaussian_filter', (['distance'], {'sigma': 'sigma'}), '(distance, sigma=sigma)\n', (4929, 4952), True, 'import mahotas as mh\n'), ((4962, 4989), 'numpy.ones', 'np.ones', (['((3,) * binary.ndim)'], {}), '((3,) * binary.ndim)\n', (4969, 4989), True, 'import numpy as np\n'), ((5074, 5115), 'mahotas.cwatershed', 'mh.cwatershed', (['(-blurred_distance)', 'markers'], {}), '(-blurred_distance, markers)\n', (5087, 5115), True, 'import mahotas as mh\n'), ((4782, 4800), 'numpy.asarray', 'np.asarray', (['binary'], {}), '(binary)\n', (4792, 4800), True, 'import numpy as np\n'), ((5025, 5059), 'mahotas.regmax', 'mh.regmax', (['blurred_distance'], {'Bc': 'fp'}), '(blurred_distance, Bc=fp)\n', (5034, 5059), True, 'import mahotas as mh\n'), ((5199, 5233), 'mahotas.sobel', 'mh.sobel', (['labels'], {'just_filter': '(True)'}), '(labels, just_filter=True)\n', (5207, 5233), True, 'import mahotas as mh\n'), ((5251, 5285), 'mahotas.sobel', 'mh.sobel', (['binary'], {'just_filter': '(True)'}), '(binary, just_filter=True)\n', (5259, 5285), True, 'import mahotas as mh\n'), ((5470, 5485), 'mahotas.open', 'mh.open', (['almost'], {}), '(almost)\n', (5477, 5485), True, 'import mahotas as mh\n'), ((5409, 5448), 'numpy.logical_xor', 'np.logical_xor', (['(edges != 0)', '(edges2 != 0)'], {}), '(edges != 0, edges2 != 0)\n', (5423, 5448), True, 'import numpy as np\n')]
|
from __future__ import print_function, division, absolute_import, unicode_literals
from numbers import Number
import numpy as np
from voluptuous import Schema, Required, Any, Range
from mitxgraders.comparers.baseclasses import CorrelatedComparer
from mitxgraders.helpers.calc.mathfuncs import is_nearly_zero
from mitxgraders.helpers.validatorfuncs import text_string
from mitxgraders.exceptions import ConfigError
def get_linear_fit_error(x, y):
"""
Get total error in a linear regression y = ax + b between samples x and y.
If x is constant, returns the result of get_offset_fit_error(x, y).
Arguments:
x, y: flat numpy array
Usage
=====
Zero error in a linear relationship:
>>> x = np.array([2, 5, 8])
>>> result = get_linear_fit_error(x, 2*x + 1)
>>> round(result, 6)
0.0
If x is constant and y is constant, they are considered linearly related
>>> x = np.array([1, 1, 1])
>>> result = get_linear_fit_error(x, 2*x + 1)
>>> round(result, 6)
0.0
If x is constant but y is not, the error associated with the best fit of a constant is computed
>>> x = np.array([1, 1, 1])
>>> y = np.array([0, 1, 2])
>>> result = get_linear_fit_error(x, y)
>>> result == np.sqrt(2)
True
"""
A = np.vstack([x, np.ones(len(x))]).T
coeffs, residuals, rank, singular_vals = np.linalg.lstsq(A, y, rcond=-1)
if rank == 1:
# The input values x are constant. Return the linear offset error.
return get_offset_fit_error(x, y)
return np.sqrt(residuals.item())
def get_proportional_fit_error(x, y):
"""
Get total error in a linear regression y = ax between samples x and y, with
zero constant term.
Arguments:
x, y: flat numpy array
Usage
=====
Reveals error if relationship is not proportional:
>>> x = np.array([2, 5, 8])
>>> result = get_proportional_fit_error(x, 2*x + 1)
>>> result # doctest: +ELLIPSIS
0.76200...
Zero error in a proportional relationship:
>>> result = get_proportional_fit_error(x, 2*x)
>>> round(result, 6)
0.0
If x is constant and y is constant, they are considered proportional
>>> x = np.array([1, 1, 1])
>>> result = get_proportional_fit_error(x, 2*x)
>>> round(result, 6)
0.0
If x is constant but y is not, the error associated with the best fit of a constant is computed
>>> x = np.array([1, 1, 1])
>>> y = np.array([0, 1, 2])
>>> result = get_proportional_fit_error(x, y)
>>> result == np.sqrt(2)
True
"""
A = np.vstack(x)
coeffs, residuals, rank, singular_vals = np.linalg.lstsq(A, y, rcond=-1)
return np.sqrt(residuals.item())
def get_offset_fit_error(x, y):
"""
Get total error in a linear regression y = x + b between samples x and y,
with slope term equal to 1.
Arguments:
x, y: flat numpy array
Usage
=====
Reveals error if relationship is not constant-offset:
>>> x = np.array([2, 5, 8])
>>> result = get_offset_fit_error(x, 2*x + 1)
>>> result # doctest: +ELLIPSIS
4.242640...
Zero error in a constant-offset relationship:
>>> result = get_offset_fit_error(x, x + 5)
>>> round(result, 6)
0.0
"""
mean = np.mean(y - x)
return np.sqrt(sum(np.square(x + mean - y)))
def get_equals_fit_error(x, y):
"""
Get total error in the difference between two samples.
Arguments:
x, y: compatible numpy arrays
"""
return np.sqrt(sum(np.square(x - y)))
class LinearComparer(CorrelatedComparer):
"""
Used to check that there is an linear relationship between student's input
and the expected answer.
The general linear relationship is expected = a * student + b. The comparer
can check for four subtypes:
equals: (a, b) = (1, 0)
proportional: b = 0
offset: a = 1
linear: neither a nor b fixed
Configuration
=============
The first four configuration keys determine the amount of partial credit
given for a specific type of linear relationship. If set to None, the
relationship is not checked.
equals (None | number): defaults to 1.0
proportional (None | number): defaults to 0.5
offset (None | number): defaults to None
linear (None | number): defaults to None
The remaining configuration keys specify a feedback message to be given
in each case:
equals_msg (str): defaults to ''
proportional_msg (str): defaults to 'The submitted answer differs from
an expected answer by a constant factor.'
offset_msg (str): defaults to ''
linear_msg (str): defaults to ''
NOTE:
LinearComparer can be used with MatrixGrader, but the linear
relationship must be the same for all entries. Essentially, this means
we test for
        expected_array = scalar_a * student_array + scalar_b * ONES
where ONES is a matrix of all ones.
The ONES offset works as expected for vectors, but is probably not what
you want for matrices.
"""
schema_config = Schema({
Required('equals', default=1.0): Any(None, Range(0, 1)),
Required('proportional', default=0.5): Any(None, Range(0, 1)),
Required('offset', default=None): Any(None, Range(0, 1)),
Required('linear', default=None): Any(None, Range(0, 1)),
Required('equals_msg', default=''): text_string,
Required('proportional_msg', default=(
'The submitted answer differs from an expected answer by a '
'constant factor.'
)): text_string,
Required('offset_msg', default=''): text_string,
Required('linear_msg', default=''): text_string,
})
all_modes = ('equals', 'proportional', 'offset', 'linear')
zero_compatible_modes = ('equals', 'offset')
def __init__(self, config=None, **kwargs):
super(LinearComparer, self).__init__(config, **kwargs)
self.modes = tuple(mode for mode in self.all_modes if self.config[mode] is not None)
error_calculators = {
'equals': get_equals_fit_error,
'proportional': get_proportional_fit_error,
'offset': get_offset_fit_error,
'linear': get_linear_fit_error,
}
@staticmethod
def check_comparing_zero(comparer_params_evals, student_evals, tolerance):
"""
Check whether student input is nearly zero, or author input is exactly zero
"""
student_zero = all([
is_nearly_zero(x, tolerance, reference=y)
for x, y in zip(student_evals, comparer_params_evals)
])
expected_zero = all(np.all(x == 0.0) for [x] in comparer_params_evals)
return student_zero or expected_zero
def get_valid_modes(self, is_comparing_zero):
"""
Returns a copy of self.modes, first removing 'proportional' and 'linear'
when is_comparing_zero is truthy.
"""
if is_comparing_zero:
return tuple(mode for mode in self.modes
if mode in self.zero_compatible_modes)
return self.modes
def __call__(self, comparer_params_evals, student_evals, utils):
student_evals_norm = np.linalg.norm(student_evals)
# Validate student input shape...only needed for MatrixGrader
if hasattr(utils, 'validate_shape'):
# in numpy, scalars have empty tuples as their shapes
expected_0 = comparer_params_evals[0][0]
scalar_expected = isinstance(expected_0, Number)
shape = tuple() if scalar_expected else expected_0.shape
utils.validate_shape(student_evals[0], shape)
# Raise an error if there is less than 3 samples
if len(student_evals) < 3:
msg = 'Cannot perform linear comparison with less than 3 samples'
raise ConfigError(msg)
is_comparing_zero = self.check_comparing_zero(comparer_params_evals,
student_evals, utils.tolerance)
filtered_modes = self.get_valid_modes(is_comparing_zero)
# Get the result for each mode
# flatten in case individual evals are arrays (as in MatrixGrader)
student = np.array(student_evals).flatten()
expected = np.array(comparer_params_evals).flatten()
errors = [self.error_calculators[mode](student, expected) for mode in filtered_modes]
results = [
{'grade_decimal': self.config[mode], 'msg': self.config[mode+'_msg']}
if is_nearly_zero(error, utils.tolerance, reference=student_evals_norm)
else
{'grade_decimal': 0, 'msg': ''}
for mode, error in zip(filtered_modes, errors)
]
# Get the best result using max.
# For a list of pairs, max compares by 1st index and uses 2nd to break ties
key = lambda result: (result['grade_decimal'], result['msg'])
return max(results, key=key)
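# Hedged configuration sketch (added for illustration): the keyword arguments
# below are the keys validated by schema_config above; wiring the comparer into
# a particular grader (typically via an answer's `comparer` / `comparer_params`
# entries) is assumed to follow the mitxgraders documentation and is not shown
# here.
def _example_linear_comparer():
    return LinearComparer(
        equals=1.0,        # full credit for an exactly matching answer
        proportional=0.5,  # half credit if answers differ by a constant factor
        offset=0.3,        # partial credit for a constant additive offset
        linear=None,       # no credit for a general linear relationship
        proportional_msg='Your answer is off by a constant factor.',
    )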
|
[
"numpy.mean",
"voluptuous.Required",
"mitxgraders.exceptions.ConfigError",
"numpy.all",
"numpy.square",
"numpy.array",
"numpy.vstack",
"numpy.linalg.lstsq",
"numpy.linalg.norm",
"mitxgraders.helpers.calc.mathfuncs.is_nearly_zero",
"voluptuous.Range"
] |
[((1370, 1401), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'y'], {'rcond': '(-1)'}), '(A, y, rcond=-1)\n', (1385, 1401), True, 'import numpy as np\n'), ((2612, 2624), 'numpy.vstack', 'np.vstack', (['x'], {}), '(x)\n', (2621, 2624), True, 'import numpy as np\n'), ((2670, 2701), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'y'], {'rcond': '(-1)'}), '(A, y, rcond=-1)\n', (2685, 2701), True, 'import numpy as np\n'), ((3346, 3360), 'numpy.mean', 'np.mean', (['(y - x)'], {}), '(y - x)\n', (3353, 3360), True, 'import numpy as np\n'), ((7327, 7356), 'numpy.linalg.norm', 'np.linalg.norm', (['student_evals'], {}), '(student_evals)\n', (7341, 7356), True, 'import numpy as np\n'), ((3384, 3407), 'numpy.square', 'np.square', (['(x + mean - y)'], {}), '(x + mean - y)\n', (3393, 3407), True, 'import numpy as np\n'), ((3594, 3610), 'numpy.square', 'np.square', (['(x - y)'], {}), '(x - y)\n', (3603, 3610), True, 'import numpy as np\n'), ((5231, 5262), 'voluptuous.Required', 'Required', (['"""equals"""'], {'default': '(1.0)'}), "('equals', default=1.0)\n", (5239, 5262), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5296, 5333), 'voluptuous.Required', 'Required', (['"""proportional"""'], {'default': '(0.5)'}), "('proportional', default=0.5)\n", (5304, 5333), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5367, 5399), 'voluptuous.Required', 'Required', (['"""offset"""'], {'default': 'None'}), "('offset', default=None)\n", (5375, 5399), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5433, 5465), 'voluptuous.Required', 'Required', (['"""linear"""'], {'default': 'None'}), "('linear', default=None)\n", (5441, 5465), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5499, 5533), 'voluptuous.Required', 'Required', (['"""equals_msg"""'], {'default': '""""""'}), "('equals_msg', default='')\n", (5507, 5533), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5556, 5680), 'voluptuous.Required', 'Required', (['"""proportional_msg"""'], {'default': '"""The submitted answer differs from an expected answer by a constant factor."""'}), "('proportional_msg', default=\n 'The submitted answer differs from an expected answer by a constant factor.'\n )\n", (5564, 5680), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5732, 5766), 'voluptuous.Required', 'Required', (['"""offset_msg"""'], {'default': '""""""'}), "('offset_msg', default='')\n", (5740, 5766), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5789, 5823), 'voluptuous.Required', 'Required', (['"""linear_msg"""'], {'default': '""""""'}), "('linear_msg', default='')\n", (5797, 5823), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((7969, 7985), 'mitxgraders.exceptions.ConfigError', 'ConfigError', (['msg'], {}), '(msg)\n', (7980, 7985), False, 'from mitxgraders.exceptions import ConfigError\n'), ((5274, 5285), 'voluptuous.Range', 'Range', (['(0)', '(1)'], {}), '(0, 1)\n', (5279, 5285), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5345, 5356), 'voluptuous.Range', 'Range', (['(0)', '(1)'], {}), '(0, 1)\n', (5350, 5356), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5411, 5422), 'voluptuous.Range', 'Range', (['(0)', '(1)'], {}), '(0, 1)\n', (5416, 5422), False, 'from voluptuous import Schema, Required, Any, Range\n'), ((5477, 5488), 'voluptuous.Range', 'Range', (['(0)', '(1)'], {}), '(0, 1)\n', (5482, 5488), False, 'from voluptuous import Schema, Required, Any, Range\n'), 
((6614, 6655), 'mitxgraders.helpers.calc.mathfuncs.is_nearly_zero', 'is_nearly_zero', (['x', 'tolerance'], {'reference': 'y'}), '(x, tolerance, reference=y)\n', (6628, 6655), False, 'from mitxgraders.helpers.calc.mathfuncs import is_nearly_zero\n'), ((6761, 6777), 'numpy.all', 'np.all', (['(x == 0.0)'], {}), '(x == 0.0)\n', (6767, 6777), True, 'import numpy as np\n'), ((8348, 8371), 'numpy.array', 'np.array', (['student_evals'], {}), '(student_evals)\n', (8356, 8371), True, 'import numpy as np\n'), ((8401, 8432), 'numpy.array', 'np.array', (['comparer_params_evals'], {}), '(comparer_params_evals)\n', (8409, 8432), True, 'import numpy as np\n'), ((8655, 8723), 'mitxgraders.helpers.calc.mathfuncs.is_nearly_zero', 'is_nearly_zero', (['error', 'utils.tolerance'], {'reference': 'student_evals_norm'}), '(error, utils.tolerance, reference=student_evals_norm)\n', (8669, 8723), False, 'from mitxgraders.helpers.calc.mathfuncs import is_nearly_zero\n')]
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import typing
import numpy as np
import jax.numpy as jnp
import xarray as xr
import seaborn as sns
from jax_cfd.data import xarray_utils as xru
import jax_cfd.base as cfd
from dynamical_system import Lorenz96, KolmogorovFlow
from util import jnp_to_aa_tuple, aa_tuple_to_jnp
plot_colors = {
'b': '#5A7D9F',
'r': '#c23b22',
'y': '#ffdb58',
}
def load_da_results(
filenames: list,
retained_variables: list,
retained_attrs: list,
) -> xr.Dataset:
"""
Loads data assimilations for analysis.
Args:
filenames: list of files that contain the four the computed setups.
retained_variables: variables to keep in the dataset for analysis.
retained_attrs: attributes to keep in the dataset for analysis.
Returns:
Data assimilation data for analysis.
"""
ds_list = []
initialization_coords = set()
optspace_coords = set()
# get all data and extract relevant variables
for fname in filenames:
data = xr.open_dataset(fname)
initialization_coords.add(data.attrs['da_init'])
optspace_coords.add(data.attrs['opt_space'])
ds_list.append(data[retained_variables])
initialization_coords = list(initialization_coords)
optspace_coords = list(optspace_coords)
# organize data in nested data structure
num_init = len(initialization_coords)
num_optspace = len(optspace_coords)
ds_grid = np.empty((num_init, num_optspace), dtype=object)
for ds in ds_list:
i = initialization_coords.index(ds.attrs['da_init'])
j = optspace_coords.index(ds.attrs['opt_space'])
ds.attrs = {attr: ds.attrs[attr] for attr in retained_attrs}
ds_grid[i][j] = ds
ds = (
xr.combine_nested(
ds_grid.tolist(),
concat_dim=['init', 'opt_space'],
combine_attrs='identical',
)
.assign_coords(
{'init': initialization_coords, 'opt_space':optspace_coords},
)
)
return ds
def compute_vorticity(ds: xr.Dataset, grid: cfd.grids.Grid) -> xr.Dataset:
"""
Computes vorticity of a dataset containing Kolmogorov flow trajectories.
Args:
    ds: dataset containing variables with Kolmogorov flow trajectories.
grid: grid over which to compute vorticity.
Returns:
Vorticity of the Kolmogorov flow trajectories.
"""
coords = xru.construct_coords(grid)
ds = ds.assign_coords(coords)
dy = ds.y[1] - ds.y[0]
dx = ds.x[1] - ds.x[0]
dv_dx = (ds.sel(v=1).roll(x=-1, roll_coords=False) - ds.sel(v=1)) / dx
du_dy = (ds.sel(v=0).roll(y=-1, roll_coords=False) - ds.sel(v=0)) / dy
return (dv_dx - du_dy)
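# (added note, hedged) compute_vorticity above approximates the 2D vorticity
# w = dv/dx - du/dy with forward differences; roll() wraps the last grid point
# around to the first, i.e. periodic boundaries are assumed.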
def integrate_kolmogorov_xr(
dyn_sys: KolmogorovFlow,
X0_da: xr.DataArray,
n_steps: int,
) -> xr.DataArray:
"""
Integrates Kolmogorov flow from and to an `xarray.DataArray`.
Args:
dyn_sys: Kolmogorov flow dynamical system.
X0_da: initial states.
n_steps: number of integration steps.
Returns:
Integrated trajectories.
"""
X0 = jnp.asarray(X0_da.data)
batch_dimensions = X0.shape[:-3]
state_dimensions = X0.shape[-3:]
final_shape = batch_dimensions + (n_steps,) + state_dimensions
X0_flat = X0.reshape((-1,) + X0.shape[-3:])
X = dyn_sys.batch_integrate(X0_flat, n_steps, None, True).reshape(final_shape)
dims = list(X0_da.dims)
dims.insert(-3, 't')
X_da = xr.DataArray(X, dims=dims, coords=X0_da.coords)
return X_da
def compute_l1_error_kolmogorov(
X: xr.Dataset,
comparison_var: str,
scale: float = 1,
) -> xr.Dataset:
"""
Computes the scaled L1 error for Kolmogorov flow.
Args:
X: data to compute L1 error of.
comparison_var: base variable to compute deviation from.
scale: error scale.
Returns:
Scaled L1 error.
"""
data_types = list(X.data_type.values)
data_types.remove(comparison_var)
l1_error = np.abs(
X - X.sel(data_type=comparison_var)
).sum(dim=['x', 'y']) / scale
return l1_error.sel(data_type=data_types, drop=True)
def integrate_lorenz96_xr(
dyn_sys: Lorenz96,
X0_da: xr.DataArray,
n_steps: int,
) -> xr.DataArray:
"""
Integrates the Lorenz96 model from and to an `xarray.DataArray`.
Args:
dyn_sys: Lorenz96 dynamical system.
X0_da: initial states.
n_steps: number of integration steps.
Returns:
Integrated trajectories.
"""
X0_jnp = X0_da.data
grid_size = X0_jnp.shape[-1]
batch_dimensions = X0_jnp.shape[:-1]
final_shape = list(batch_dimensions) + [n_steps, grid_size]
X0_jnp_flat = X0_jnp.reshape(-1, grid_size)
X_jnp_flat = dyn_sys.batch_integrate(X0_jnp_flat, n_steps)
X_jnp = X_jnp_flat.reshape(final_shape)
dims = list(X0_da.dims)
dims.insert(-1, 't')
X_da = xr.DataArray(X_jnp, dims=dims, coords=X0_da.coords)
return X_da
def compute_l1_error_lorenz96(
X: xr.Dataset,
comparison_var: str,
scale: float = 1,
) -> xr.Dataset:
"""
Computes the scaled L1 error for the Lorenz96 model.
Args:
X: data to compute L1 error of.
comparison_var: base variable to compute deviation from.
scale: error scale.
Returns:
Scaled L1 error.
"""
data_types = list(X.data_type.values)
data_types.remove(comparison_var)
l1_error = np.abs(X - X.sel(data_type=comparison_var)).sum(dim=['x']) / scale
return l1_error.sel(data_type=data_types, drop=True)
def adjust_row_labels(g: sns.FacetGrid, labels: list):
"""
Adjust row `labels` of a seaborn FaceGrid object `g`.
"""
for ax in g.axes.flat:
if ax.texts:
# ylabel text on the right side
txt = ax.texts[0]
ax.text(txt.get_unitless_position()[0], txt.get_unitless_position()[1],
labels.pop(0),
transform=ax.transAxes,
va='center',
rotation=-90)
# remove original text
ax.texts[0].remove()
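# Hedged usage sketch (added for illustration): adjust_row_labels expects a
# FacetGrid whose row labels were drawn as right-hand margin texts (e.g. with
# margin_titles=True); the column names and labels below are placeholders.
def _relabel_example(tidy_df):
    g = sns.relplot(
        data=tidy_df, x='t', y='error', row='init', col='opt_space',
        kind='line', facet_kws={'margin_titles': True},
    )
    adjust_row_labels(g, ['interpolation init', 'baseline init'])
    return g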
|
[
"jax_cfd.data.xarray_utils.construct_coords",
"jax.numpy.asarray",
"numpy.empty",
"xarray.DataArray",
"xarray.open_dataset"
] |
[((2013, 2061), 'numpy.empty', 'np.empty', (['(num_init, num_optspace)'], {'dtype': 'object'}), '((num_init, num_optspace), dtype=object)\n', (2021, 2061), True, 'import numpy as np\n'), ((2931, 2957), 'jax_cfd.data.xarray_utils.construct_coords', 'xru.construct_coords', (['grid'], {}), '(grid)\n', (2951, 2957), True, 'from jax_cfd.data import xarray_utils as xru\n'), ((3591, 3614), 'jax.numpy.asarray', 'jnp.asarray', (['X0_da.data'], {}), '(X0_da.data)\n', (3602, 3614), True, 'import jax.numpy as jnp\n'), ((3935, 3982), 'xarray.DataArray', 'xr.DataArray', (['X'], {'dims': 'dims', 'coords': 'X0_da.coords'}), '(X, dims=dims, coords=X0_da.coords)\n', (3947, 3982), True, 'import xarray as xr\n'), ((5302, 5353), 'xarray.DataArray', 'xr.DataArray', (['X_jnp'], {'dims': 'dims', 'coords': 'X0_da.coords'}), '(X_jnp, dims=dims, coords=X0_da.coords)\n', (5314, 5353), True, 'import xarray as xr\n'), ((1614, 1636), 'xarray.open_dataset', 'xr.open_dataset', (['fname'], {}), '(fname)\n', (1629, 1636), True, 'import xarray as xr\n')]
|
# -*- coding: utf-8 -*-
from numpy import log2
from pickle import load
"""
* Clase que se encarga de ver la información mutua que hay entre dos tokens
* sirve para determinar si es colocación o no
"""
class MI:
def __init__(self):
self.words = load(open("./models/words.d",'r'))
self.ngrams = load(open("./models/ngrams.d","r"))
self.count = self.count()
def count(self):
cnt = 0
for i in self.words:
cnt += self.words[i]
return cnt
def eval(self,str1,str2):
try:
sup = float(self.ngrams[str1+"_"+str2])/float(self.count)
inf = float(self.words[str1]) * float(self.words[str2])
if inf <= 0 or sup <= 0:
return 0
else:
inf = inf/(float(self.count)*float(self.count))
return log2(sup/inf)
except:
return 0
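# Hedged usage sketch (added for illustration): eval() above computes the
# pointwise mutual information PMI(w1, w2) = log2(P(w1 w2) / (P(w1) * P(w2))),
# so large positive scores suggest the bigram is a collocation. The threshold
# below is a placeholder, not a value from the original project.
def looks_like_collocation(mi_model, first_token, second_token, threshold=3.0):
    return mi_model.eval(first_token, second_token) >= threshold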
|
[
"numpy.log2"
] |
[((722, 737), 'numpy.log2', 'log2', (['(sup / inf)'], {}), '(sup / inf)\n', (726, 737), False, 'from numpy import log2\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##
# @file predict_bw_lstm1.py
# @author <NAME> (<NAME> <<EMAIL>>
# @date 2019-04-22
# 2022-03-23 - updated for TensorFlow version 2.6
#
# @brief Predict channel bandwidth.
#
# @remarks This code is based on the nice sample code from:
# https://machinelearningmastery.com/how-to-develop-lstm-models-for-time-series-forecasting/
# import modules
import numpy as np
import tensorflow as tf
import tensorflow.keras # required for TF ver. 2.6
from skimage.util import view_as_windows
# define dataset
bws = np.load('bandwidths.npy')
X = view_as_windows(bws, 3, step=1)[:-1] # 3-sample sliding window over bws (except the last one, i.e., '[:-1]')
y = bws[3:]
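# Illustration of the windowing above (added comment): for a toy array,
#   view_as_windows(np.array([0, 1, 2, 3, 4, 5]), 3, step=1)
#   -> [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5]]
# so X[i] holds three consecutive bandwidth samples and y[i] = bws[i + 3] is the
# value to be predicted one step ahead.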
# reshape from [samples, timesteps] into [samples, timesteps, features]
X = X.reshape((X.shape[0], X.shape[1], 1))
# define model
model = tf.keras.Sequential()
# model.add(tf.keras.layers.LSTM(units=50, activation='relu', input_shape=(3, 1)))
model.add(tf.keras.layers.LSTM(units=50, activation='relu'))
model.add(tf.keras.layers.Dense(1))
model.compile(optimizer='adam', loss='mse')
# fit model
model.fit(X, y, epochs=1000, verbose=0)
# demonstrate prediction
for i in range(10):
x_input = X[i]
x_input = x_input.reshape((1, 3, 1))
yhat = model.predict(x_input, verbose=0)
print(f"{','.join([str(int(i)) for i in x_input.flatten()])} -> {yhat.flatten()[0]:.2e} (true value: {int(y[i]):d})")
|
[
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Dense",
"numpy.load",
"skimage.util.view_as_windows"
] |
[((594, 619), 'numpy.load', 'np.load', (['"""bandwidths.npy"""'], {}), "('bandwidths.npy')\n", (601, 619), True, 'import numpy as np\n'), ((885, 906), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (904, 906), True, 'import tensorflow as tf\n'), ((624, 655), 'skimage.util.view_as_windows', 'view_as_windows', (['bws', '(3)'], {'step': '(1)'}), '(bws, 3, step=1)\n', (639, 655), False, 'from skimage.util import view_as_windows\n'), ((1000, 1049), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', ([], {'units': '(50)', 'activation': '"""relu"""'}), "(units=50, activation='relu')\n", (1020, 1049), True, 'import tensorflow as tf\n'), ((1061, 1085), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (1082, 1085), True, 'import tensorflow as tf\n')]
|
# author: <NAME>, <NAME>
# title: occasionally trivial support functions for aggregating data for python 2/3 [only numpy as dependency]
# NOTE: these functions are generally written and tested for 1D input, although they may apply or be easily extended to nd
# license: 3-clause BSD
import numpy as np
flat_max = np.max
flat_min = np.min
flat_percentile = np.percentile
flat_mean = np.average
def flat_abs_maximum(data, preserve_sign=True):
"""
Function to return the absolute maximum value in an array. By default,
this function will preserve the sign, meaning that if an array contains [-75, -25, 0, 25, 50]
then the function will return -75 because that value has the highest magnitude but it will return
the original value (preserving the sign).
Removing the sign preservation basically makes this function a composite of abs and max.
:param data: data array source
:param preserve_sign: whether or not to preserve the sign of the output, default is True
:return: largest absolute value in the data array
"""
data = np.asarray(data)
abs_data = np.abs(data)
subset = np.unravel_index(np.argmax(abs_data), data.shape)
return data[subset] if preserve_sign else abs_data[subset]
def flat_abs_minimum(data, preserve_sign=True):
"""
Function to return the absolute minimum value in an array. Note that, by default, this function will
    preserve the sign.
For example, if an array contains [-100, -24, 1, 2] then the function will return 1 because that value
    has the smallest magnitude. If an array contained [-100, -50, -2, -1] then the function would return -1
because that value has the smallest magnitude; however, the sign would preserved (by default).
Removing the sign preservation basically makes this function a composite of abs and min.
:param data: data array source
:param preserve_sign: whether or not to preserve the sign of the output, default is True
:return: smallest absolute value in the data array
"""
data = np.asarray(data)
abs_data = np.abs(data)
subset = np.unravel_index(np.argmin(abs_data), data.shape)
return data[subset] if preserve_sign else abs_data[subset]
def partition_top(data, n, return_indices=False):
"""
    Function to return the top n values in an array
:param data: data array source
:param n: the number of values of interest (n)
:param return_indices: whether to return the indices array
:return: top n values if n < data.size or all values if n is None, <=0 or >= data.size, also index array if `return_indices`
"""
data = np.asarray(data)
if n is None or n <= 0 or n >= data.size:
return data
    # select the indices of the n largest values (order within the result is not guaranteed)
    idx = np.argpartition(data, -n)[-n:]
result = data[idx]
if return_indices:
return result, idx
return result
def flat_top_average(data, n):
"""
Function to return the average of the top n values in an array
:param data: data array source
:param n: the number of values of interest (n)
    :return: average of the top n values if 0 < n < data.size, otherwise the average of the whole array
"""
return np.average(partition_top(data, n, return_indices=False))
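# Example: flat_top_average([1, 5, 3, 9, 7], n=2) averages the two largest values,
# (9 + 7) / 2 = 8.0; with n=None, n <= 0 or n >= data.size the mean of the whole
# array is returned instead.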
|
[
"numpy.abs",
"numpy.argpartition",
"numpy.asarray",
"numpy.argmax",
"numpy.argmin"
] |
[((1057, 1073), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (1067, 1073), True, 'import numpy as np\n'), ((1089, 1101), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (1095, 1101), True, 'import numpy as np\n'), ((2024, 2040), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (2034, 2040), True, 'import numpy as np\n'), ((2056, 2068), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (2062, 2068), True, 'import numpy as np\n'), ((2621, 2637), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (2631, 2637), True, 'import numpy as np\n'), ((1132, 1151), 'numpy.argmax', 'np.argmax', (['abs_data'], {}), '(abs_data)\n', (1141, 1151), True, 'import numpy as np\n'), ((2099, 2118), 'numpy.argmin', 'np.argmin', (['abs_data'], {}), '(abs_data)\n', (2108, 2118), True, 'import numpy as np\n'), ((2744, 2768), 'numpy.argpartition', 'np.argpartition', (['data', 'n'], {}), '(data, n)\n', (2759, 2768), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('begineer_tutorial')
import sys
import rospy
import cv2
import numpy as np
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class image_converter:
def __init__(self):
#self.image_pub = rospy.Publisher("image_topic_2",Image,queue_size=10)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/camera/rgb/image_color",Image,self.callback)
def callback(self,data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
#cv2.imshow('cv_image', cv_image)
image = cv2.cvtColor(cv_image , cv2.COLOR_BGR2HSV)
lower_range = np.array([30,150,50])
upper_range = np.array([255,255,180])
mask = cv2.inRange(image , lower_range, upper_range)
res = cv2.bitwise_and(cv_image, cv_image, mask=mask)
cv2.imshow("Image window", res)
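    # NOTE: cv2.imshow only refreshes the window once cv2.waitKey is called
    # (e.g. cv2.waitKey(3)); without it the "Image window" may never be drawn
    # from inside this ROS callback.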
def main(args):
ic = image_converter()
rospy.init_node('image_converter', anonymous=True)
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
|
[
"rospy.init_node",
"cv2.inRange",
"cv2.bitwise_and",
"roslib.load_manifest",
"cv_bridge.CvBridge",
"numpy.array",
"cv2.imshow",
"cv2.destroyAllWindows",
"rospy.spin",
"cv2.cvtColor",
"rospy.Subscriber"
] |
[((75, 116), 'roslib.load_manifest', 'roslib.load_manifest', (['"""begineer_tutorial"""'], {}), "('begineer_tutorial')\n", (95, 116), False, 'import roslib\n'), ((1035, 1085), 'rospy.init_node', 'rospy.init_node', (['"""image_converter"""'], {'anonymous': '(True)'}), "('image_converter', anonymous=True)\n", (1050, 1085), False, 'import rospy\n'), ((1167, 1190), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1188, 1190), False, 'import cv2\n'), ((423, 433), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (431, 433), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((455, 520), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/camera/rgb/image_color"""', 'Image', 'self.callback'], {}), "('/camera/rgb/image_color', Image, self.callback)\n", (471, 520), False, 'import rospy\n'), ((715, 756), 'cv2.cvtColor', 'cv2.cvtColor', (['cv_image', 'cv2.COLOR_BGR2HSV'], {}), '(cv_image, cv2.COLOR_BGR2HSV)\n', (727, 756), False, 'import cv2\n'), ((776, 799), 'numpy.array', 'np.array', (['[30, 150, 50]'], {}), '([30, 150, 50])\n', (784, 799), True, 'import numpy as np\n'), ((816, 841), 'numpy.array', 'np.array', (['[255, 255, 180]'], {}), '([255, 255, 180])\n', (824, 841), True, 'import numpy as np\n'), ((851, 895), 'cv2.inRange', 'cv2.inRange', (['image', 'lower_range', 'upper_range'], {}), '(image, lower_range, upper_range)\n', (862, 895), False, 'import cv2\n'), ((907, 953), 'cv2.bitwise_and', 'cv2.bitwise_and', (['cv_image', 'cv_image'], {'mask': 'mask'}), '(cv_image, cv_image, mask=mask)\n', (922, 953), False, 'import cv2\n'), ((958, 989), 'cv2.imshow', 'cv2.imshow', (['"""Image window"""', 'res'], {}), "('Image window', res)\n", (968, 989), False, 'import cv2\n'), ((1097, 1109), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1107, 1109), False, 'import rospy\n')]
|
import numpy
import numbers
import math
import struct
from six.moves import zip
from .. import SetIntersectionIndexBase, SearchResults, EmptySearchResults
def _check_numpy ():
missing = []
for fn in ("zeros", "empty", "digitize", "resize", "concatenate", "unique", "bincount", "argsort"):
if not getattr (numpy, fn, False):
missing.append (fn)
if missing:
raise ImportError ("setix.backends.numpy: required functions not provided by installed numpy: " + ", ".join(missing))
_check_numpy ()
class SetIntersectionIndex (SetIntersectionIndexBase):
def __init__ (self,
max_sets=2**32,
max_symbols=2**16,
init_bucket_size=16,
support_most_frequent=True,
support_find_similar=True):
self._sets = numpy.empty (64, dtype="object")
self._num_sets = 0
self._symbols = []
self._index = {}
self._sets_by_sig = {}
self._init_bs = init_bucket_size
self._packers = {}
self._support_most_frequent = bool (support_most_frequent)
self._support_find_similar = bool (support_find_similar)
if not isinstance (max_sets, numbers.Number):
raise TypeError ("max_sets")
if not isinstance (max_symbols, numbers.Number):
raise TypeError ("max_symbols")
if not isinstance (init_bucket_size, numbers.Number):
raise TypeError ("init_bucket_size")
if max_sets < 1 or max_sets >= 2**64:
raise ValueError ("max_sets")
if max_symbols < 1 or max_symbols >= 2**64:
raise ValueError ("max_sets")
if init_bucket_size < 4:
raise ValueError ("init_bucket_size")
set_bits = int (round (math.log (max_sets, 2)))
symbol_bits = int (round (math.log (max_symbols, 2)))
sz = (9, 17, 33, 65)
dt = (numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64)
sf = ("B", "H", "I", "L")
x = numpy.digitize([set_bits], sz)[0]
self._dtype_sets = dt[x]
self._max_sets = 2 ** (sz[x]-1)
x = numpy.digitize([symbol_bits], sz)[0]
self._dtype_symbols = dt[x]
self._max_symbols = 2 ** (sz[x]-1)
self._struct_symbols = sf[x]
if support_find_similar:
self._set_sizes = numpy.zeros (8 * init_bucket_size, dtype=self._dtype_symbols)
if support_most_frequent:
self._symbol_counts = numpy.zeros (8 * init_bucket_size, dtype=self._dtype_sets)
@property
def symbol_count (self):
return len (self._symbols)
@property
def set_count (self):
return self._num_sets
@property
def symbols (self):
return tuple (self._symbols)
@property
def payloads (self):
for s in self._sets:
for pl in s:
yield pl
@property
def supports_most_frequent (self):
return self._support_most_frequent
@property
def supports_find_similar (self):
return self._support_find_similar
@property
def max_sets (self):
return self._max_sets
@property
def max_symbols (self):
return self._max_symbols
def __getstate__ (self):
state = dict (self.__dict__)
del state["_packers"]
return state
def __setstate__ (self, state):
self.__dict__ = state
state["_packers"] = {}
def add (self, iterable, payload=SetIntersectionIndexBase._SENTINEL):
if payload is self._SENTINEL:
payload = iterable
max_sets = self._max_sets
max_symbols = self._max_symbols
init_bs = self._init_bs
symbols = self._symbols
index = self._index
buckets = [] # list of per-symbol buckets this set belongs in
sig = set() # set of symbol ids for identifying the set
num_syms = len (symbols)
for symbol in iterable:
bucket = index.get (symbol)
if bucket is None:
# register new symbol
id = len (symbols)
if id >= max_symbols:
raise RuntimeError ("index full: maximum number of symbols reached")
bucket = index[symbol] = [id, 0, numpy.zeros (init_bs, dtype=self._dtype_sets)]
symbols.append (symbol)
buckets.append (bucket)
sig.add (bucket[0])
sig = sorted (sig)
# packed signature used as a key in self._sets
# this saves memory compared to a tuple of ints
lsig = len (sig)
packer = self._packers[lsig] = self._packers.get(lsig) or struct.Struct(self._struct_symbols * lsig).pack
ssig = packer (*sig)
S = self._sets_by_sig.get (ssig)
if S is None:
# register new set
sid = self._num_sets
if sid >= max_sets:
raise RuntimeError ("index full: maximum number of sets reached")
self._num_sets += 1
sets = self._sets
if sid >= sets.size:
sets = self._sets = numpy.resize (sets, int(sid * 1.25))
S = self._sets_by_sig[ssig] = []
sets[sid] = S
if self._support_find_similar:
if self._set_sizes.size <= sid:
self._set_sizes = numpy.resize (self._set_sizes, int(sid * 1.25))
self._set_sizes[sid] = len (buckets)
# add set to per-symbol buckets
for bucket in buckets:
arr = bucket[2]
idx = bucket[1]
if arr.size <= idx:
arr = bucket[2] = numpy.resize (arr, int(idx * 1.25))
arr[idx] = sid
bucket[1] += 1
if self._support_most_frequent:
# update counts of symbol occurrences
symbol_counts = self._symbol_counts
new_syms = len (symbols)
if new_syms > num_syms and new_syms >= symbol_counts.size:
self._symbol_counts = symbol_counts = numpy.resize (symbol_counts, int(new_syms * 1.25))
symbol_counts[num_syms:new_syms] = 0
if len (sig) == len (buckets): #no repetitions
symbol_counts[ numpy.array (sig, dtype=self._dtype_symbols) ] += 1
else:
for bucket in buckets:
symbol_counts[bucket[0]] += 1
S.append (payload)
def _find (self, iterable):
buckets = []
sig = set()
occurrences = []
L = 0
for symbol in iterable:
L += 1
bucket = self._index.get (symbol)
if bucket is not None:
buckets.append (bucket)
sig.add (bucket[0])
if bucket[1]:
occurrences.append (bucket[2][0:bucket[1]])
if occurrences:
sids, indices = numpy.unique (numpy.concatenate (occurrences), return_inverse=True)
counts = numpy.bincount (indices)
return L, sids, indices, counts
else:
return L, [], [], []
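    # _find returns (query length, candidate set ids, inverse indices, counts):
    # numpy.unique(..., return_inverse=True) maps every per-symbol occurrence back to
    # its set id, so numpy.bincount over the inverse indices yields, for each candidate
    # set, how many of the query's symbols it contains (the intersection size).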
class SearchResults (SearchResults):
def __init__ (self, sids, scores, sets):
self._sids = sids
self._scores = scores
self._sets = sets
self._sort = None
self._list = None
self._list_for = None
def get (self, max_results=None):
scores = self._scores
sort = self._sort = self._sort or numpy.argsort (scores)
if max_results is not None:
sort = sort[-max_results:]
sort = sort[::-1]
r_sids = self._sids[sort]
r_counts = scores[sort]
return zip (r_counts, self._sets[r_sids])
def __len__ (self):
return self._scores.size
def find (self, iterable, threshold=1, max_results=None):
if not isinstance (threshold, numbers.Number):
raise TypeError ("threshold")
if threshold < 1 and threshold >= 0:
raise ValueError ("threshold")
L, sids, indices, counts = self._find (iterable)
if threshold < 0:
threshold = L + threshold
if threshold < 1:
raise ValueError ("threshold")
if len (counts) == 0:
return EmptySearchResults ()
mask = counts >= threshold
counts = counts[mask]
sids = sids[mask]
return self.SearchResults (sids, counts, self._sets)
def find_similar (self, iterable, threshold=0.3):
if not isinstance (threshold, numbers.Number):
raise TypeError ("threshold")
if threshold > 1 or not (threshold > 0):
raise ValueError ("threshold")
if not self._support_find_similar:
raise RuntimeError ("find_similar support disabled")
L, sids, indices, counts = self._find (iterable)
if len (counts) == 0:
return EmptySearchResults ()
smls = counts / (self._set_sizes[sids] + (L * 1.0) - counts)
mask = smls >= threshold
smls = smls[mask]
sids = sids[mask]
return self.SearchResults (sids, smls, self._sets)
def most_frequent (self, threshold=2.0/3.0, max_results=None, with_counts=False):
if not self._support_most_frequent:
raise RuntimeError ("most_frequent support disabled")
counts = self._symbol_counts
if self._num_sets == 0:
return
sort = numpy.argsort (counts[0:len(self._symbols)])
limit = counts[sort[-1]] * 1.0 * threshold
symbols = self._symbols
if max_results:
sort = sort[-max_results:]
if with_counts:
for x in sort[::-1]:
count = counts[x]
if count < limit:
break
yield (symbols[x], count)
else:
for x in sort[::-1]:
count = counts[x]
if count < limit:
break
yield symbols[x]
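    # most_frequent yields symbols in descending order of occurrence count, stopping
    # once a symbol's count drops below `threshold` times the count of the single
    # most frequent symbol.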
|
[
"numpy.digitize",
"math.log",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"numpy.concatenate",
"struct.Struct",
"numpy.bincount",
"six.moves.zip"
] |
[((844, 875), 'numpy.empty', 'numpy.empty', (['(64)'], {'dtype': '"""object"""'}), "(64, dtype='object')\n", (855, 875), False, 'import numpy\n'), ((2111, 2141), 'numpy.digitize', 'numpy.digitize', (['[set_bits]', 'sz'], {}), '([set_bits], sz)\n', (2125, 2141), False, 'import numpy\n'), ((2239, 2272), 'numpy.digitize', 'numpy.digitize', (['[symbol_bits]', 'sz'], {}), '([symbol_bits], sz)\n', (2253, 2272), False, 'import numpy\n'), ((2464, 2524), 'numpy.zeros', 'numpy.zeros', (['(8 * init_bucket_size)'], {'dtype': 'self._dtype_symbols'}), '(8 * init_bucket_size, dtype=self._dtype_symbols)\n', (2475, 2524), False, 'import numpy\n'), ((2603, 2660), 'numpy.zeros', 'numpy.zeros', (['(8 * init_bucket_size)'], {'dtype': 'self._dtype_sets'}), '(8 * init_bucket_size, dtype=self._dtype_sets)\n', (2614, 2660), False, 'import numpy\n'), ((7443, 7466), 'numpy.bincount', 'numpy.bincount', (['indices'], {}), '(indices)\n', (7457, 7466), False, 'import numpy\n'), ((8254, 8287), 'six.moves.zip', 'zip', (['r_counts', 'self._sets[r_sids]'], {}), '(r_counts, self._sets[r_sids])\n', (8257, 8287), False, 'from six.moves import zip\n'), ((1862, 1883), 'math.log', 'math.log', (['max_sets', '(2)'], {}), '(max_sets, 2)\n', (1870, 1883), False, 'import math\n'), ((1921, 1945), 'math.log', 'math.log', (['max_symbols', '(2)'], {}), '(max_symbols, 2)\n', (1929, 1945), False, 'import math\n'), ((4952, 4994), 'struct.Struct', 'struct.Struct', (['(self._struct_symbols * lsig)'], {}), '(self._struct_symbols * lsig)\n', (4965, 4994), False, 'import struct\n'), ((7368, 7398), 'numpy.concatenate', 'numpy.concatenate', (['occurrences'], {}), '(occurrences)\n', (7385, 7398), False, 'import numpy\n'), ((7986, 8007), 'numpy.argsort', 'numpy.argsort', (['scores'], {}), '(scores)\n', (7999, 8007), False, 'import numpy\n'), ((4537, 4581), 'numpy.zeros', 'numpy.zeros', (['init_bs'], {'dtype': 'self._dtype_sets'}), '(init_bs, dtype=self._dtype_sets)\n', (4548, 4581), False, 'import numpy\n'), ((6670, 6713), 'numpy.array', 'numpy.array', (['sig'], {'dtype': 'self._dtype_symbols'}), '(sig, dtype=self._dtype_symbols)\n', (6681, 6713), False, 'import numpy\n')]
|
import numpy as np
from myutils import *
from easydict import EasyDict as edict
def dcg_at_k(r, k, method=1):
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0.
def ndcg_at_k(r, k, method=0):
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
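# Worked example (method=1): for a hit list r = [1, 0, 1] and k = 3,
#   dcg       = 1/log2(2) + 0/log2(3) + 1/log2(4) = 1.5
#   ideal dcg = 1/log2(2) + 1/log2(3) + 0/log2(4) ~= 1.631   (r sorted descending)
#   ndcg_at_k(r, 3, method=1) ~= 1.5 / 1.631 ~= 0.92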
def measure_rec_quality(path_data):
    # Evaluate only the attributes that have been chosen and are available in the chosen dataset
flags = path_data.sens_attribute_flags
attribute_list = get_attribute_list(path_data.dataset_name, flags)
metrics_names = ["ndcg", "hr", "recall", "precision"]
metrics = edict()
for metric in metrics_names:
metrics[metric] = {"Overall": []}
for values in attribute_list.values():
if len(attribute_list) == 1: break
attribute_to_name = values[1]
for _, name in attribute_to_name.items():
metrics[metric][name] = []
topk_matches = path_data.uid_topk
test_labels = path_data.test_labels
test_user_idxs = list(test_labels.keys())
invalid_users = []
for uid in test_user_idxs:
if uid not in topk_matches: continue
if len(topk_matches[uid]) < 10:
invalid_users.append(uid)
continue
pred_list, rel_set = topk_matches[uid], test_labels[uid]
if len(pred_list) == 0:
continue
k = 0
hit_num = 0.0
hit_list = []
for pid in pred_list:
k += 1
if pid in rel_set:
hit_num += 1
hit_list.append(1)
else:
hit_list.append(0)
ndcg = ndcg_at_k(hit_list, k)
recall = hit_num / len(rel_set)
precision = hit_num / len(pred_list)
hit = 1.0 if hit_num > 0.0 else 0.0
# Based on attribute
for attribute in attribute_list.keys():
if uid not in attribute_list[attribute][0]: continue
attr_value = attribute_list[attribute][0][uid]
if attr_value not in attribute_list[attribute][1]: continue #Few users may have the attribute missing (LASTFM)
attr_name = attribute_list[attribute][1][attr_value]
metrics["ndcg"][attr_name].append(ndcg)
metrics["recall"][attr_name].append(recall)
metrics["precision"][attr_name].append(precision)
metrics["hr"][attr_name].append(hit)
metrics["ndcg"]["Overall"].append(ndcg)
metrics["recall"]["Overall"].append(recall)
metrics["precision"]["Overall"].append(precision)
metrics["hr"]["Overall"].append(hit)
return metrics
def print_rec_metrics(dataset_name, flags, metrics):
attribute_list = get_attribute_list(dataset_name, flags)
print("\n---Recommandation Quality---")
print("Average for the entire user base:", end=" ")
for metric, values in metrics.items():
print("{}: {:.3f}".format(metric, np.array(values["Overall"]).mean()), end=" | ")
print("")
for attribute_category, values in attribute_list.items():
print("\n-Statistic with user grouped by {} attribute".format(attribute_category))
for attribute in values[1].values():
print("{} group".format(attribute), end=" ")
for metric_name, groups_values in metrics.items():
print("{}: {:.3f}".format(metric_name, np.array(groups_values[attribute]).mean()), end=" | ")
print("")
print("\n")
"""
Explanation metrics
"""
def topk_ETV(path_data):
dataset_name = path_data.dataset_name
def simpson_index(topk):
n_path_for_patterns = {k: 0 for k in set(PATH_TYPES[dataset_name])}
N = 0
for path in topk:
path = path
path_type = get_path_type(path)
if path_type == 'self_loop':
path_type = 'described_as'
n_path_for_patterns[path_type] += 1
N += 1
numerator = 0
for path_type, n_path_type_ith in n_path_for_patterns.items():
numerator += n_path_type_ith * (n_path_type_ith - 1)
# N = 0
# for item_path in pred_uv_paths.items():
# N += len(item_path[1])
if N * (N - 1) == 0:
return 0
return 1 - (numerator / (N * (N - 1)))
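    # simpson_index above is the Gini-Simpson diversity of path types in a top-k list:
    # 1 - sum_i n_i*(n_i - 1) / (N*(N - 1)), where n_i counts paths of type i and N is
    # the total number of paths; 0 means a single explanation type, values close to 1
    # mean the types are evenly spread.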
ETVs = {}
for uid, topk in path_data.uid_topk.items():
if uid not in path_data.test_labels: continue
ETV = simpson_index([path_data.uid_pid_explanation[uid][pid] for pid in topk])
ETVs[uid] = ETV
return ETVs
def avg_ETV(path_data):
uid_ETVs = topk_ETV(path_data)
    # Evaluate only the attributes that have been chosen and are available in the chosen dataset
flags = path_data.sens_attribute_flags
attribute_list = get_attribute_list(path_data.dataset_name, flags)
avg_groups_ETV = {}
groups_ETV_scores = {}
# Initialize group scores with empty list
for attribute in attribute_list.keys():
for _, attribute_label in attribute_list[attribute][1].items():
groups_ETV_scores[attribute_label] = []
if "Overall" not in groups_ETV_scores:
groups_ETV_scores["Overall"] = []
for uid, ETV in uid_ETVs.items():
for attribute in attribute_list.keys():
if uid not in attribute_list[attribute][0]: continue
attr_value = attribute_list[attribute][0][uid]
if attr_value not in attribute_list[attribute][1]: continue # Few users may have the attribute missing (LASTFM)
attr_name = attribute_list[attribute][1][attr_value]
groups_ETV_scores[attr_name].append(ETV)
groups_ETV_scores["Overall"].append(ETV)
for attribute_label, group_scores in groups_ETV_scores.items():
avg_groups_ETV[attribute_label] = np.array(group_scores).mean()
explanation_type_variety = edict(
avg_groups_ETV=avg_groups_ETV,
groups_ETV_scores=groups_ETV_scores
)
return explanation_type_variety
def avg_LID(path_data):
uid_LIDs = topk_LID(path_data)
    # Evaluate only the attributes that have been chosen and are available in the chosen dataset
flags = path_data.sens_attribute_flags
attribute_list = get_attribute_list(path_data.dataset_name, flags)
avg_groups_LID = {}
groups_LID_scores = {}
# Initialize group scores with empty list
for attribute in attribute_list.keys():
for _, attribute_label in attribute_list[attribute][1].items():
groups_LID_scores[attribute_label] = []
if "Overall" not in groups_LID_scores:
groups_LID_scores["Overall"] = []
for uid, LID in uid_LIDs.items():
for attribute in attribute_list.keys():
if uid not in attribute_list[attribute][0]: continue
attr_value = attribute_list[attribute][0][uid]
if attr_value not in attribute_list[attribute][1]: continue #Few users may have the attribute missing (LASTFM)
attr_name = attribute_list[attribute][1][attr_value]
groups_LID_scores[attr_name].append(LID)
groups_LID_scores["Overall"].append(LID)
for attribute_label, group_scores in groups_LID_scores.items():
avg_groups_LID[attribute_label] = np.array(group_scores).mean()
linked_interaction_diversity_results = edict(
avg_groups_LID=avg_groups_LID,
groups_LID_scores=groups_LID_scores
)
return linked_interaction_diversity_results
def topk_LID(path_data):
LIDs = {}
for uid, topk in path_data.uid_topk.items():
if uid not in path_data.test_labels: continue
unique_linked_interaction = set()
count = 0
for pid in topk:
if pid not in path_data.uid_pid_explanation[uid]:
continue
current_path = path_data.uid_pid_explanation[uid][pid]
li = get_linked_interaction_id(current_path)
if current_path[1][0] == "mention":
li += 10000 #pad in order to not make them overlap, this is a stupid workaround, fix it
unique_linked_interaction.add(li)
if len(topk) == 0 or len(unique_linked_interaction) == 0:
count += 1
LID = len(unique_linked_interaction) / len(topk)
LIDs[uid] = LID
print(count)
return LIDs
def avg_SED(path_data):
uid_SEDs = topk_SED(path_data)
    # Evaluate only the attributes that have been chosen and are available in the chosen dataset
flags = path_data.sens_attribute_flags
attribute_list = get_attribute_list(path_data.dataset_name, flags)
avg_groups_SED = {}
groups_SED_scores = {}
# Initialize group scores with empty list
for attribute in attribute_list.keys():
for _, attribute_label in attribute_list[attribute][1].items():
groups_SED_scores[attribute_label] = []
if "Overall" not in groups_SED_scores:
groups_SED_scores["Overall"] = []
for uid, SED in uid_SEDs.items():
for attribute in attribute_list.keys():
if uid not in attribute_list[attribute][0]: continue
attr_value = attribute_list[attribute][0][uid]
if attr_value not in attribute_list[attribute][1]: continue #Few users may have the attribute missing (LASTFM)
attr_name = attribute_list[attribute][1][attr_value]
groups_SED_scores[attr_name].append(SED)
groups_SED_scores["Overall"].append(SED)
for attribute_label, group_scores in groups_SED_scores.items():
avg_groups_SED[attribute_label] = np.array(group_scores).mean()
shared_entity_diversity_results = edict(
avg_groups_SED=avg_groups_SED,
groups_SED_scores=groups_SED_scores
)
return shared_entity_diversity_results
def topk_SED(path_data):
SEDs = {}
for uid, topk in path_data.uid_topk.items():
if uid not in path_data.test_labels: continue
unique_shared_entities = set()
for pid in topk:
if pid not in path_data.uid_pid_explanation[uid]:
continue
current_path = path_data.uid_pid_explanation[uid][pid]
se = get_shared_entity_id(current_path)
unique_shared_entities.add(se)
if len(topk) > 0:
SED = len(unique_shared_entities) / len(topk)
else:
SED = 1
SEDs[uid] = SED
return SEDs
def topk_ETD(path_data):
ETDs = {}
for uid, topk in path_data.uid_topk.items():
if uid not in path_data.test_labels: continue
unique_path_types = set()
for pid in topk:
if pid not in path_data.uid_pid_explanation[uid]:
continue
current_path = path_data.uid_pid_explanation[uid][pid]
path_type = get_path_type(current_path)
unique_path_types.add(path_type)
ETD = len(unique_path_types) / TOTAL_PATH_TYPES[path_data.dataset_name]
ETDs[uid] = ETD
return ETDs
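# ETD for a user is the number of distinct explanation path types in their top-k
# divided by the total number of path types defined for the dataset
# (TOTAL_PATH_TYPES[dataset_name]).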
def get_attribute_list(dataset_name, flags):
attribute_list = {}
for attribute, flag in flags.items():
if flag and DATASET_SENSIBLE_ATTRIBUTE_MATRIX[dataset_name][attribute]:
attribute_list[attribute] = []
for attribute in attribute_list.keys():
if attribute == "Gender":
user2attribute, attribute2name = get_kg_uid_to_gender_map(dataset_name)
elif attribute == "Age":
user2attribute, attribute2name = get_kg_uid_to_age_map(dataset_name)
elif attribute == "Occupation":
user2attribute, attribute2name = get_kg_uid_to_occupation_map(dataset_name)
elif attribute == "Country":
pass #implement country
else:
print("Unknown attribute")
attribute_list[attribute] = [user2attribute, attribute2name]
return attribute_list
def avg_ETD(path_data):
uid_ETDs = topk_ETD(path_data)
    # Evaluate only the attributes that have been chosen and are available in the chosen dataset
flags = path_data.sens_attribute_flags
attribute_list = get_attribute_list(path_data.dataset_name, flags)
avg_groups_ETD = {}
groups_ETD_scores = {}
# Initialize group scores with empty list
for attribute in attribute_list.keys():
for _, attribute_label in attribute_list[attribute][1].items():
groups_ETD_scores[attribute_label] = []
if "Overall" not in groups_ETD_scores:
groups_ETD_scores["Overall"] = []
for uid, ETD in uid_ETDs.items():
for attribute in attribute_list.keys():
if uid not in attribute_list[attribute][0]: continue
attr_value = attribute_list[attribute][0][uid]
if attr_value not in attribute_list[attribute][1]: continue #Few users may have the attribute missing (LASTFM)
attr_name = attribute_list[attribute][1][attr_value]
groups_ETD_scores[attr_name].append(ETD)
groups_ETD_scores["Overall"].append(ETD)
for attribute_label, group_scores in groups_ETD_scores.items():
avg_groups_ETD[attribute_label] = np.array(group_scores).mean()
diversity_results = edict(
avg_groups_ETD=avg_groups_ETD,
groups_ETD_scores=groups_ETD_scores
)
return diversity_results
#Extract the value of LIR for the given user item path from the LIR_matrix
def LIR_single(path_data, path):
uid = int(path[0][-1])
if uid not in path_data.uid_timestamp or uid not in path_data.LIR_matrix or len(path_data.uid_timestamp[uid]) <= 1: return 0. #Should not enter there
predicted_path = path
linked_interaction = int(get_interaction_id(predicted_path))
linked_interaction_type = get_interaction_type(predicted_path)
#Handle the case of Amazon Dataset where a path may have different interaction types
if linked_interaction_type == "mentions":
LIR = path_data.LIR_matrix_words[uid][linked_interaction]
elif linked_interaction_type == "watched" or linked_interaction_type == "listened" or linked_interaction_type == "purchase":
LIR = path_data.LIR_matrix[uid][linked_interaction]
else:
LIR = 0.
return LIR
# Returns a dict mapping each uid to a LIR value calculated from that user's top-k
def topk_LIR(path_data):
LIR_topk = {}
# Precompute user timestamps weigths
LIR_matrix = path_data.LIR_matrix
for uid in path_data.test_labels.keys(): #modified for pgpr labels
LIR_single_topk = []
if uid not in LIR_matrix or uid not in path_data.uid_topk:
continue
for pid in path_data.uid_topk[uid]:
predicted_path = path_data.uid_pid_explanation[uid][pid]
linked_interaction = int(get_interaction_id(predicted_path))
linked_interaction_type = get_interaction_type(predicted_path)
# Handle the case of Amazon Dataset where a path may have different interaction types
if linked_interaction_type == "mentions":
LIR = path_data.LIR_matrix_words[uid][linked_interaction]
elif linked_interaction_type == "purchase" or linked_interaction_type == "watched" or linked_interaction_type == "listened":
LIR = LIR_matrix[uid][linked_interaction]
else:
LIR = 0.
LIR_single_topk.append(LIR)
LIR_topk[uid] = np.array(LIR_single_topk).mean() if len(LIR_single_topk) != 0 else 0
return LIR_topk
# Returns an avg value for the LIR of a given group
def avg_LIR(path_data):
uid_LIR_score = topk_LIR(path_data)
avg_groups_LIR = {}
groups_LIR_scores = {}
    # Evaluate only the attributes that have been chosen and are available in the chosen dataset
flags = path_data.sens_attribute_flags
attribute_list = get_attribute_list(path_data.dataset_name, flags)
#Initialize group scores with empty list
for attribute in attribute_list.keys():
for _, attribute_label in attribute_list[attribute][1].items():
groups_LIR_scores[attribute_label] = []
if "Overall" not in groups_LIR_scores:
groups_LIR_scores["Overall"] = []
for uid, LIR_score in uid_LIR_score.items():
for attribute in attribute_list.keys():
if uid not in attribute_list[attribute][0]: continue
attr_value = attribute_list[attribute][0][uid]
if attr_value not in attribute_list[attribute][1]: continue #Few users may have the attribute missing (LASTFM)
attr_name = attribute_list[attribute][1][attr_value]
groups_LIR_scores[attr_name].append(LIR_score)
groups_LIR_scores["Overall"].append(LIR_score)
for attribute_label, group_scores in groups_LIR_scores.items():
avg_groups_LIR[attribute_label] = np.array(group_scores).mean()
LIR = edict(
avg_groups_LIR=avg_groups_LIR,
groups_LIR_scores=groups_LIR_scores,
)
return LIR
#Extract the value of SEP for the given user item path from the SEP_matrix
def SEP_single(path_data, path):
related_entity_type, related_entity_id = get_shared_entity(path)
SEP = path_data.SEP_matrix[related_entity_type][related_entity_id]
return SEP
def topks_SEP(path_data):
SEP_topk = {}
# Precompute entity distribution
exp_serendipity_matrix = path_data.SEP_matrix
#Measure explanation serendipity for topk
for uid in path_data.test_labels:
SEP_single_topk = []
if uid not in path_data.uid_topk: continue
for pid in path_data.uid_topk[uid]:
if pid not in path_data.uid_pid_explanation[uid]:
#print("strano 2")
continue
path = path_data.uid_pid_explanation[uid][pid]
related_entity_type, related_entity_id = get_shared_entity(path)
SEP = exp_serendipity_matrix[related_entity_type][related_entity_id]
SEP_single_topk.append(SEP)
if len(SEP_single_topk) == 0: continue
SEP_topk[uid] = np.array(SEP_single_topk).mean()
return SEP_topk
def avg_SEP(path_data):
uid_SEP = topks_SEP(path_data)
avg_groups_SEP = {}
groups_SEP_scores = {}
    # Evaluate only the attributes that have been chosen and are available in the chosen dataset
flags = path_data.sens_attribute_flags
attribute_list = get_attribute_list(path_data.dataset_name, flags)
# Initialize group scores with empty list
for attribute in attribute_list.keys():
for _, attribute_label in attribute_list[attribute][1].items():
groups_SEP_scores[attribute_label] = []
if "Overall" not in groups_SEP_scores:
groups_SEP_scores["Overall"] = []
for uid, SEP_score in uid_SEP.items():
for attribute in attribute_list.keys():
if uid not in attribute_list[attribute][0]: continue
attr_value = attribute_list[attribute][0][uid]
if attr_value not in attribute_list[attribute][1]: continue #Few users may have the attribute missing (LASTFM)
attr_name = attribute_list[attribute][1][attr_value]
groups_SEP_scores[attr_name].append(SEP_score)
groups_SEP_scores["Overall"].append(SEP_score)
for attribute_label, group_scores in groups_SEP_scores.items():
avg_groups_SEP[attribute_label] = np.array(group_scores).mean()
serendipity_results = edict(
avg_groups_SEP=avg_groups_SEP,
groups_SEP_scores=groups_SEP_scores,
)
return serendipity_results
def print_expquality_metrics(dataset_name, flags, metric_values):
attribute_list = get_attribute_list(dataset_name, flags)
print("\n---Explanation Quality---")
print("Average for the entire user base:", end=" ")
for metric, values in metric_values.items():
print("{}: {:.3f}".format(metric, values["Overall"]), end= " | ")
print("")
for attribute_category, values in attribute_list.items():
attributes = values[1].values()
print("\n-Statistic with user grouped by {} attribute".format(attribute_category))
for attribute in attributes:
print("{} group".format(attribute), end=" ")
for metric, values in metric_values.items():
print("{}: {:.3f}".format(metric, values[attribute]), end=" | ")
print("")
|
[
"numpy.asfarray",
"easydict.EasyDict",
"numpy.array",
"numpy.arange"
] |
[((920, 927), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (925, 927), True, 'from easydict import EasyDict as edict\n'), ((6127, 6200), 'easydict.EasyDict', 'edict', ([], {'avg_groups_ETV': 'avg_groups_ETV', 'groups_ETV_scores': 'groups_ETV_scores'}), '(avg_groups_ETV=avg_groups_ETV, groups_ETV_scores=groups_ETV_scores)\n', (6132, 6200), True, 'from easydict import EasyDict as edict\n'), ((7570, 7643), 'easydict.EasyDict', 'edict', ([], {'avg_groups_LID': 'avg_groups_LID', 'groups_LID_scores': 'groups_LID_scores'}), '(avg_groups_LID=avg_groups_LID, groups_LID_scores=groups_LID_scores)\n', (7575, 7643), True, 'from easydict import EasyDict as edict\n'), ((9860, 9933), 'easydict.EasyDict', 'edict', ([], {'avg_groups_SED': 'avg_groups_SED', 'groups_SED_scores': 'groups_SED_scores'}), '(avg_groups_SED=avg_groups_SED, groups_SED_scores=groups_SED_scores)\n', (9865, 9933), True, 'from easydict import EasyDict as edict\n'), ((13339, 13412), 'easydict.EasyDict', 'edict', ([], {'avg_groups_ETD': 'avg_groups_ETD', 'groups_ETD_scores': 'groups_ETD_scores'}), '(avg_groups_ETD=avg_groups_ETD, groups_ETD_scores=groups_ETD_scores)\n', (13344, 13412), True, 'from easydict import EasyDict as edict\n'), ((16994, 17067), 'easydict.EasyDict', 'edict', ([], {'avg_groups_LIR': 'avg_groups_LIR', 'groups_LIR_scores': 'groups_LIR_scores'}), '(avg_groups_LIR=avg_groups_LIR, groups_LIR_scores=groups_LIR_scores)\n', (16999, 17067), True, 'from easydict import EasyDict as edict\n'), ((19527, 19600), 'easydict.EasyDict', 'edict', ([], {'avg_groups_SEP': 'avg_groups_SEP', 'groups_SEP_scores': 'groups_SEP_scores'}), '(avg_groups_SEP=avg_groups_SEP, groups_SEP_scores=groups_SEP_scores)\n', (19532, 19600), True, 'from easydict import EasyDict as edict\n'), ((119, 133), 'numpy.asfarray', 'np.asfarray', (['r'], {}), '(r)\n', (130, 133), True, 'import numpy as np\n'), ((6065, 6087), 'numpy.array', 'np.array', (['group_scores'], {}), '(group_scores)\n', (6073, 6087), True, 'import numpy as np\n'), ((7495, 7517), 'numpy.array', 'np.array', (['group_scores'], {}), '(group_scores)\n', (7503, 7517), True, 'import numpy as np\n'), ((9790, 9812), 'numpy.array', 'np.array', (['group_scores'], {}), '(group_scores)\n', (9798, 9812), True, 'import numpy as np\n'), ((13283, 13305), 'numpy.array', 'np.array', (['group_scores'], {}), '(group_scores)\n', (13291, 13305), True, 'import numpy as np\n'), ((16953, 16975), 'numpy.array', 'np.array', (['group_scores'], {}), '(group_scores)\n', (16961, 16975), True, 'import numpy as np\n'), ((18164, 18189), 'numpy.array', 'np.array', (['SEP_single_topk'], {}), '(SEP_single_topk)\n', (18172, 18189), True, 'import numpy as np\n'), ((19470, 19492), 'numpy.array', 'np.array', (['group_scores'], {}), '(group_scores)\n', (19478, 19492), True, 'import numpy as np\n'), ((15547, 15572), 'numpy.array', 'np.array', (['LIR_single_topk'], {}), '(LIR_single_topk)\n', (15555, 15572), True, 'import numpy as np\n'), ((3234, 3261), 'numpy.array', 'np.array', (["values['Overall']"], {}), "(values['Overall'])\n", (3242, 3261), True, 'import numpy as np\n'), ((226, 250), 'numpy.arange', 'np.arange', (['(2)', '(r.size + 1)'], {}), '(2, r.size + 1)\n', (235, 250), True, 'import numpy as np\n'), ((317, 341), 'numpy.arange', 'np.arange', (['(2)', '(r.size + 2)'], {}), '(2, r.size + 2)\n', (326, 341), True, 'import numpy as np\n'), ((3670, 3704), 'numpy.array', 'np.array', (['groups_values[attribute]'], {}), '(groups_values[attribute])\n', (3678, 3704), True, 'import numpy as np\n')]
|
import numpy as np
#Simulator Settings
#------------------------------
MINUTES=60000000000
TIMESTEP = np.timedelta64(10*MINUTES)
PICKUPTIMEWINDOW = np.timedelta64(10*MINUTES)
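# 60000000000 is presumably the number of nanoseconds in one minute, so TIMESTEP and
# PICKUPTIMEWINDOW are 10-minute intervals when combined with datetime64[ns] timestamps.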
#When enabled, the neighbor-vehicle search system determines its search range from the configured search distance and the grid size.
#It uses DFS to find the nearest idle vehicles in the area.
NeighborCanServer = False
#You can adjust the size of the experimental area by entering latitude and longitude bounds.
#The orders, road network, and grid division will adapt to fit the selected area.
FocusOnLocalRegion = False
LocalRegionBound = (104.035,104.105,30.625,30.695)
if FocusOnLocalRegion == False:
LocalRegionBound = (104.011, 104.125, 30.618, 30.703)
#Input parameters
VehiclesNumber = 6000
SideLengthMeter = 800
VehiclesServiceMeter = 800
DispatchMode = "Simulation"
DemandPredictionMode = "None"
#["TransportationClustering","KmeansClustering","SpectralClustering"]
ClusterMode = "Grid"
|
[
"numpy.timedelta64"
] |
[((102, 130), 'numpy.timedelta64', 'np.timedelta64', (['(10 * MINUTES)'], {}), '(10 * MINUTES)\n', (116, 130), True, 'import numpy as np\n'), ((148, 176), 'numpy.timedelta64', 'np.timedelta64', (['(10 * MINUTES)'], {}), '(10 * MINUTES)\n', (162, 176), True, 'import numpy as np\n')]
|
import subprocess
from hop import Stream
from hop.auth import Auth
from hop import auth
from hop.io import StartPosition
from hop.models import GCNCircular
import argparse
import random
import threading
import time
from functools import wraps
import datetime
import numpy
import uuid
from dotenv import load_dotenv
import os
from unittest.mock import Mock
import unittest
from mongoengine import connect, disconnect
# from hypothesis import given
# from hypothesis.strategies import lists, integers
# from hop.apps.SNalert import model as M
# from hop.apps.SNalert import decider
# from hop.apps.SNalert import db_storage
# from . import demo
# from .. import test_anything
test_locations = ["Houston", "New York", "Boston", "Not Texas"]
# load environment variables
load_dotenv(dotenv_path='./../.env')
# for measuring function execution time
# https://stackoverflow.com/questions/3620943/measuring-elapsed-time-with-the-time-module
PROF_DATA = {}
def profile(fn):
@wraps(fn)
def with_profiling(*args, **kwargs):
start_time = time.time()
ret = fn(*args, **kwargs)
elapsed_time = time.time() - start_time
if fn.__name__ not in PROF_DATA:
PROF_DATA[fn.__name__] = [0, []]
PROF_DATA[fn.__name__][0] += 1
PROF_DATA[fn.__name__][1].append(elapsed_time)
return ret
return with_profiling
def print_prof_data():
for fname, data in PROF_DATA.items():
max_time = max(data[1])
avg_time = sum(data[1]) / len(data[1])
print("Function %s called %d times. " % (fname, data[0]))
print('Execution time max: %.3f, average: %.3f' % (max_time, avg_time))
def clear_prof_data():
global PROF_DATA
PROF_DATA = {}
def exponentialDistribution(mean):
"""
Produce exponential distribution data.
:param mean: Mean of exponential distribution.
:return:
"""
return numpy.random.exponential(mean)
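# e.g. exponentialDistribution(5.0) draws a single waiting time whose long-run average
# is 5.0; it is used below to space out the randomly published test messages.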
class integrationTest(object):
# @given(
# timeout=integers(min_value=1),
# mean=integers(min_value=1),
# totalTime=integers(min_value=1)
# )
def __init__(self, timeout, mean, totalTime):
"""
The constructor.
:param timeout: Time expiration parameter
:param mean:
:param totalTime:
"""
self.count = 0
self.topic = os.getenv("OBSERVATION_TOPIC")
self.mean = mean
self.totalTime = totalTime
# self.minTime = min
# self.maxTime = max
self.timeOut = timeout
self.auth = Auth(os.getenv("USERNAME"), os.getenv("PASSWORD"), method=auth.SASLMethod.PLAIN)
def run(self):
"""
Run the model for the integration test.
:return: none
"""
t1 = threading.Thread(target=self.readNumMsg, args=(self.topic,))
t1.start()
m = subprocess.Popen(['python3',
'../hop/apps/SNalert/model.py',
'--f',
'./../config.env',
'--no-auth'
])
startTime = time.monotonic()
# randomly publish messages
while time.monotonic() - startTime < self.totalTime:
# randomTime = random.randint(self.minTime, self.maxTime)
randomTime = exponentialDistribution(self.mean)
start2 = time.monotonic()
while True:
if time.monotonic() - start2 > randomTime:
break
# write message with current time
now = datetime.datetime.utcnow().strftime(os.getenv("TIME_STRING_FORMAT"))
# newFileName = self.writeMessage(now)
stream = Stream(auth=self.auth)
with stream.open(os.getenv("TESTING_TOPIC"), "w") as s:
s.write(self.writeMessage(now))
m.kill()
def readNumMsg(self, topic):
"""
Read the number of alert messages.
:param topic:
:param configFilePath:
:return:
"""
# gcnFormat = "json"
stream = Stream(persist=True, auth=self.auth)
# print("===")
# print(topic)
with stream.open(topic, "r") as s:
for msg in s: # set timeout=0 so it doesn't stop listening to the topic
print("====")
# if gcn_dict['header']['subject'] == "TEST":
# self.count += 1
self.count += 1
def getCount(self):
return self.count
def writeMessage(self, time):
msg = {}
msg["header"] = {}
msg["header"]["MESSAGE ID"] = str(uuid.uuid4())
msg["header"]["DETECTOR"] = "Test Detector"
msg["header"]["SUBJECT"] = "Test"
msg["header"]["MESSAGE SENT TIME"] = time
msg["header"]["NEUTRINO TIME"] = time
msg["header"]["LOCATION"] = test_locations[random.randint(0, 3)]
msg["header"]["P VALUE"] = "0.5"
msg["header"]["STATUS"] = "On"
msg["header"]["MESSAGE TYPE"] = "Observation"
msg["header"]["FROM"] = "<NAME> <<EMAIL>>"
msg["body"] = "This is an alert message generated at run time for testing purposes."
return msg
# def functionalTest():
#
# pass
class latencyTest(object):
def __init__(self, topic, numDetector=50, time=3000):
"""
The constructor.
"""
self.numMsgPublished = 0
self.numMsgReceived = 0
self.totalLatency = 0
self.numDetector = numDetector
self.detectorThreads = {}
self.countMsg = {}
self.totalTime = time
self.topic = topic
self.auth = Auth(os.getenv("USERNAME"), os.getenv("PASSWORD"), method=auth.SASLMethod.PLAIN)
self.idsWritten = set()
self.idsReceived = set()
self.lock = threading.Lock()
def oneDetectorThread(self, uuid):
# lock = threading.Lock()
print(uuid)
# print(timeout)
startTime = time.monotonic()
# randomly publish messages
while time.monotonic() - startTime < self.totalTime:
# print(time.monotonic() - startTime)
# print(self.totalTime)
# msg = self.writeMessage(uuid)
stream = Stream(auth=self.auth)
with stream.open(self.topic, "w") as s:
msg = self.writeMessage(uuid)
s.write(msg)
with self.lock:
self.numMsgPublished += 1
self.idsWritten.add(msg["header"]["MESSAGE ID"])
# def countWrittenMsgThread(self):
def runTest(self):
"""
Run the latency test.
:return:
"""
# create the topic if doesn't exist
stream = Stream(auth=self.auth)
# with stream.open(self.topic, "w") as s:
# s.write({"TEST": "TEST"})
# first run the thread that logs every message received
logThread = threading.Thread(target=self.logMsgs)
logThread.start()
# wait a few seconds
startTime = time.monotonic()
# randomly publish messages
while time.monotonic() - startTime < 10:
foo = 1
for i in range(self.numDetector):
# print(i)
id = uuid.uuid4()
# print(id)
t = threading.Thread(target=self.oneDetectorThread, args=(str(id),))
# self.oneDetectorThread(id)
self.detectorThreads[id] = t
t.start()
# # first run the thread that logs every message received
# logThread = threading.Thread(target=self.logMsgs)
# logThread.start()
def countMsgThread(self, msg_dict):
"""
A single thread for process the message received for Latency test.
:param msg_dict:
:return:
"""
# msg_dict = msg.asdict()['content']
id = msg_dict['header']['DETECTOR']
msg_id = msg_dict["header"]["MESSAGE ID"]
receivedTime = datetime.datetime.utcnow().strftime(os.getenv("TIME_STRING_FORMAT"))
sentTime = msg_dict['header']['MESSAGE SENT TIME']
timeDiff = datetime.datetime.strptime(receivedTime, os.getenv("TIME_STRING_FORMAT")) - datetime.datetime.strptime(sentTime, os.getenv("TIME_STRING_FORMAT"))
timeDiff_inSeconds = timeDiff.total_seconds()
# print("HERE")
with self.lock:
# print("____")
self.numMsgReceived += 1
self.totalLatency += timeDiff_inSeconds
self.idsReceived.add(msg_id)
def logMsgs(self):
# stream = Stream(persist=True, auth=self.auth, start_at=StartPosition.EARLIEST)
stream = Stream(persist=True, auth=self.auth)
with stream.open(self.topic, "r") as s:
for msg in s: # set timeout=0 so it doesn't stop listening to the topic
t = threading.Thread(target=self.countMsgThread, args=(msg.asdict()['content'],))
t.start()
def calculateAvgLatency(self):
"""
Calculate the latency.
:return:
"""
return self.totalLatency * 1.0 / self.numMsgReceived
def writeMessage(self, detector_id):
"""
Return a dictionary of the message in the required format.
:param uuid:
:return:
"""
now = datetime.datetime.utcnow().strftime(os.getenv("TIME_STRING_FORMAT"))
msg = {}
msg["header"] = {}
msg["header"]["MESSAGE ID"] = str(uuid.uuid4())
msg["header"]["DETECTOR"] = detector_id
msg["header"]["SUBJECT"] = "Test"
msg["header"]["MESSAGE SENT TIME"] = now
msg["header"]["NEUTRINO TIME"] = now
msg["header"]["LOCATION"] = test_locations[random.randint(0, 3)]
msg["header"]["P VALUE"] = "0.5"
msg["header"]["STATUS"] = "On"
msg["header"]["MESSAGE TYPE"] = "Latency Testing"
msg["header"]["FROM"] = "<NAME> <<EMAIL>>"
msg["body"] = "This is an alert message generated at run time for testing message latency."
return msg
def check(self):
assert self.numMsgReceived == self.numMsgPublished
if __name__ == '__main__':
print("Latency Test")
print("----------------------------------------")
print("Integration Test #1")
test = latencyTest("kafka://dev.hop.scimma.org:9092/snews-latencyTest", 5, 50)
print(test.totalTime)
test.runTest()
print("------")
startTime = time.monotonic()
# randomly publish messages
while time.monotonic() - startTime < 100:
foo = 1
# print(time.monotonic() - startTime)
print(test.calculateAvgLatency())
print(" %d messages written." % test.numMsgPublished)
print(" %d messages received and read." % test.numMsgReceived)
# print(" %d messages written." % len(test.idsWritten))
# print(" %d messages received and read." % len(test.idsReceived))
# print(" %d messages read in written." % len(test.idsReceived.intersection(test.idsWritten)))
assert test.numMsgPublished == test.numMsgReceived
|
[
"os.getenv",
"datetime.datetime.utcnow",
"subprocess.Popen",
"time.monotonic",
"threading.Lock",
"numpy.random.exponential",
"functools.wraps",
"dotenv.load_dotenv",
"hop.Stream",
"uuid.uuid4",
"threading.Thread",
"time.time",
"random.randint"
] |
[((774, 810), 'dotenv.load_dotenv', 'load_dotenv', ([], {'dotenv_path': '"""./../.env"""'}), "(dotenv_path='./../.env')\n", (785, 810), False, 'from dotenv import load_dotenv\n'), ((981, 990), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (986, 990), False, 'from functools import wraps\n'), ((1905, 1935), 'numpy.random.exponential', 'numpy.random.exponential', (['mean'], {}), '(mean)\n', (1929, 1935), False, 'import numpy\n'), ((10437, 10453), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (10451, 10453), False, 'import time\n'), ((1053, 1064), 'time.time', 'time.time', ([], {}), '()\n', (1062, 1064), False, 'import time\n'), ((2352, 2382), 'os.getenv', 'os.getenv', (['"""OBSERVATION_TOPIC"""'], {}), "('OBSERVATION_TOPIC')\n", (2361, 2382), False, 'import os\n'), ((2761, 2821), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.readNumMsg', 'args': '(self.topic,)'}), '(target=self.readNumMsg, args=(self.topic,))\n', (2777, 2821), False, 'import threading\n'), ((2854, 2958), 'subprocess.Popen', 'subprocess.Popen', (["['python3', '../hop/apps/SNalert/model.py', '--f', './../config.env',\n '--no-auth']"], {}), "(['python3', '../hop/apps/SNalert/model.py', '--f',\n './../config.env', '--no-auth'])\n", (2870, 2958), False, 'import subprocess\n'), ((3127, 3143), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3141, 3143), False, 'import time\n'), ((4097, 4133), 'hop.Stream', 'Stream', ([], {'persist': '(True)', 'auth': 'self.auth'}), '(persist=True, auth=self.auth)\n', (4103, 4133), False, 'from hop import Stream\n'), ((5831, 5847), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (5845, 5847), False, 'import threading\n'), ((5987, 6003), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (6001, 6003), False, 'import time\n'), ((6745, 6767), 'hop.Stream', 'Stream', ([], {'auth': 'self.auth'}), '(auth=self.auth)\n', (6751, 6767), False, 'from hop import Stream\n'), ((6943, 6980), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.logMsgs'}), '(target=self.logMsgs)\n', (6959, 6980), False, 'import threading\n'), ((7057, 7073), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (7071, 7073), False, 'import time\n'), ((8666, 8702), 'hop.Stream', 'Stream', ([], {'persist': '(True)', 'auth': 'self.auth'}), '(persist=True, auth=self.auth)\n', (8672, 8702), False, 'from hop import Stream\n'), ((1124, 1135), 'time.time', 'time.time', ([], {}), '()\n', (1133, 1135), False, 'import time\n'), ((2558, 2579), 'os.getenv', 'os.getenv', (['"""USERNAME"""'], {}), "('USERNAME')\n", (2567, 2579), False, 'import os\n'), ((2581, 2602), 'os.getenv', 'os.getenv', (['"""PASSWORD"""'], {}), "('PASSWORD')\n", (2590, 2602), False, 'import os\n'), ((3392, 3408), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3406, 3408), False, 'import time\n'), ((3723, 3745), 'hop.Stream', 'Stream', ([], {'auth': 'self.auth'}), '(auth=self.auth)\n', (3729, 3745), False, 'from hop import Stream\n'), ((4642, 4654), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4652, 4654), False, 'import uuid\n'), ((4897, 4917), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (4911, 4917), False, 'import random\n'), ((5669, 5690), 'os.getenv', 'os.getenv', (['"""USERNAME"""'], {}), "('USERNAME')\n", (5678, 5690), False, 'import os\n'), ((5692, 5713), 'os.getenv', 'os.getenv', (['"""PASSWORD"""'], {}), "('PASSWORD')\n", (5701, 5713), False, 'import os\n'), ((6252, 6274), 'hop.Stream', 'Stream', ([], {'auth': 'self.auth'}), '(auth=self.auth)\n', (6258, 6274), False, 'from hop 
import Stream\n'), ((7262, 7274), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7272, 7274), False, 'import uuid\n'), ((8019, 8050), 'os.getenv', 'os.getenv', (['"""TIME_STRING_FORMAT"""'], {}), "('TIME_STRING_FORMAT')\n", (8028, 8050), False, 'import os\n'), ((9350, 9381), 'os.getenv', 'os.getenv', (['"""TIME_STRING_FORMAT"""'], {}), "('TIME_STRING_FORMAT')\n", (9359, 9381), False, 'import os\n'), ((9469, 9481), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (9479, 9481), False, 'import uuid\n'), ((9718, 9738), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (9732, 9738), False, 'import random\n'), ((10496, 10512), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (10510, 10512), False, 'import time\n'), ((3194, 3210), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3208, 3210), False, 'import time\n'), ((3618, 3649), 'os.getenv', 'os.getenv', (['"""TIME_STRING_FORMAT"""'], {}), "('TIME_STRING_FORMAT')\n", (3627, 3649), False, 'import os\n'), ((6054, 6070), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (6068, 6070), False, 'import time\n'), ((7124, 7140), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (7138, 7140), False, 'import time\n'), ((7983, 8009), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (8007, 8009), False, 'import datetime\n'), ((8171, 8202), 'os.getenv', 'os.getenv', (['"""TIME_STRING_FORMAT"""'], {}), "('TIME_STRING_FORMAT')\n", (8180, 8202), False, 'import os\n'), ((8243, 8274), 'os.getenv', 'os.getenv', (['"""TIME_STRING_FORMAT"""'], {}), "('TIME_STRING_FORMAT')\n", (8252, 8274), False, 'import os\n'), ((9314, 9340), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (9338, 9340), False, 'import datetime\n'), ((3582, 3608), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3606, 3608), False, 'import datetime\n'), ((3775, 3801), 'os.getenv', 'os.getenv', (['"""TESTING_TOPIC"""'], {}), "('TESTING_TOPIC')\n", (3784, 3801), False, 'import os\n'), ((3452, 3468), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3466, 3468), False, 'import time\n')]
|
from ConfigSpace import ConfigurationSpace, CategoricalHyperparameter
import time
import warnings
import os
import numpy as np
import pickle as pkl
from sklearn.metrics.scorer import balanced_accuracy_scorer
from solnml.utils.logging_utils import get_logger
from solnml.components.evaluators.base_evaluator import _BaseEvaluator
from solnml.components.evaluators.evaluate_func import validation
from solnml.components.feature_engineering.task_space import get_task_hyperparameter_space
from solnml.components.feature_engineering.parse import parse_config, construct_node
from solnml.components.utils.topk_saver import CombinedTopKModelSaver
from solnml.components.utils.class_loader import get_combined_candidtates
from solnml.components.models.regression import _regressors, _addons
from solnml.components.utils.constants import *
def get_estimator(config, estimator_id):
regressor_type = estimator_id
config_ = config.copy()
config_['%s:random_state' % regressor_type] = 1
hpo_config = dict()
for key in config_:
key_name = key.split(':')[0]
if regressor_type == key_name:
act_key = key.split(':')[1]
hpo_config[act_key] = config_[key]
_candidates = get_combined_candidtates(_regressors, _addons)
estimator = _candidates[regressor_type](**hpo_config)
if hasattr(estimator, 'n_jobs'):
setattr(estimator, 'n_jobs', 1)
return regressor_type, estimator
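# Config keys are expected in the form "<estimator_id>:<hyperparameter>"; e.g. a
# (hypothetical) config {"algorithm": "liblinear_svr", "liblinear_svr:C": 1.0} would be
# unpacked into the liblinear_svr regressor constructed with C=1.0 and random_state=1.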
def get_hpo_cs(estimator_id, task_type=REGRESSION):
_candidates = get_combined_candidtates(_regressors, _addons)
if estimator_id in _candidates:
rgs_class = _candidates[estimator_id]
else:
raise ValueError("Algorithm %s not supported!" % estimator_id)
cs = rgs_class.get_hyperparameter_search_space()
return cs
def get_cash_cs(include_algorithms=None, task_type=REGRESSION):
_candidates = get_combined_candidtates(_regressors, _addons)
if include_algorithms is not None:
_candidates = set(include_algorithms).intersection(set(_candidates.keys()))
if len(_candidates) == 0:
raise ValueError("No algorithms included! Please check the spelling of the included algorithms!")
cs = ConfigurationSpace()
algo = CategoricalHyperparameter('algorithm', list(_candidates))
cs.add_hyperparameter(algo)
for estimator_id in _candidates:
estimator_cs = get_hpo_cs(estimator_id)
parent_hyperparameter = {'parent': algo,
'value': estimator_id}
cs.add_configuration_space(estimator_id, estimator_cs, parent_hyperparameter=parent_hyperparameter)
return cs
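# The resulting CASH space exposes a single categorical "algorithm" hyperparameter;
# each estimator's own search space is attached as a conditional sub-space that is
# only active when "algorithm" takes that estimator's value.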
def get_fe_cs(task_type=REGRESSION, include_image=False, include_text=False, include_preprocessors=None):
cs = get_task_hyperparameter_space(task_type=task_type, include_image=include_image, include_text=include_text,
include_preprocessors=include_preprocessors)
return cs
def get_combined_cs(task_type=REGRESSION, include_image=False, include_text=False,
include_preprocessors=None):
    cash_cs = get_cash_cs(task_type=task_type)
fe_cs = get_fe_cs(task_type,
include_image=include_image, include_text=include_text,
include_preprocessors=include_preprocessors)
for hp in fe_cs.get_hyperparameters():
cash_cs.add_hyperparameter(hp)
for cond in fe_cs.get_conditions():
cash_cs.add_condition(cond)
for bid in fe_cs.get_forbiddens():
cash_cs.add_forbidden_clause(bid)
return cash_cs
class RegressionEvaluator(_BaseEvaluator):
def __init__(self, fixed_config=None, scorer=None, data_node=None, task_type=REGRESSION, resampling_strategy='cv',
resampling_params=None, timestamp=None, output_dir=None, seed=1):
self.resampling_strategy = resampling_strategy
self.resampling_params = resampling_params
self.fixed_config = fixed_config
self.scorer = scorer if scorer is not None else balanced_accuracy_scorer
self.task_type = task_type
self.data_node = data_node
self.output_dir = output_dir
self.seed = seed
self.onehot_encoder = None
self.logger = get_logger(self.__module__ + "." + self.__class__.__name__)
self.continue_training = False
self.train_node = data_node.copy_()
self.val_node = data_node.copy_()
self.timestamp = timestamp
def __call__(self, config, **kwargs):
start_time = time.time()
return_dict = dict()
self.seed = 1
downsample_ratio = kwargs.get('resource_ratio', 1.0)
# Convert Configuration into dictionary
if not isinstance(config, dict):
config = config.get_dictionary().copy()
else:
config = config.copy()
if self.fixed_config is not None:
config.update(self.fixed_config)
self.estimator_id = config['algorithm']
if 'holdout' in self.resampling_strategy:
# Prepare data node.
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
if self.resampling_params is None or 'test_size' not in self.resampling_params:
test_size = 0.33
else:
test_size = self.resampling_params['test_size']
from sklearn.model_selection import ShuffleSplit
ss = ShuffleSplit(n_splits=1, test_size=test_size, random_state=self.seed)
for train_index, test_index in ss.split(self.data_node.data[0], self.data_node.data[1]):
_x_train, _x_val = self.data_node.data[0][train_index], self.data_node.data[0][test_index]
_y_train, _y_val = self.data_node.data[1][train_index], self.data_node.data[1][test_index]
self.train_node.data = [_x_train, _y_train]
self.val_node.data = [_x_val, _y_val]
data_node, op_list = parse_config(self.train_node, config, record=True)
_val_node = self.val_node.copy_()
_val_node = construct_node(_val_node, op_list)
_x_train, _y_train = data_node.data
_x_val, _y_val = _val_node.data
config_dict = config.copy()
# regression gadgets
regressor_id, clf = get_estimator(config_dict, self.estimator_id)
score = validation(clf, self.scorer, _x_train, _y_train, _x_val, _y_val,
random_state=self.seed)
if np.isfinite(score):
model_path = CombinedTopKModelSaver.get_path_by_config(self.output_dir, config, self.timestamp)
if not os.path.exists(model_path):
with open(model_path, 'wb') as f:
pkl.dump([op_list, clf, score], f)
else:
with open(model_path, 'rb') as f:
_, _, perf = pkl.load(f)
if score > perf:
with open(model_path, 'wb') as f:
pkl.dump([op_list, clf, score], f)
self.logger.info("Model saved to %s" % model_path)
elif 'cv' in self.resampling_strategy:
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
if 'cv' in self.resampling_strategy:
if self.resampling_params is None or 'folds' not in self.resampling_params:
folds = 5
else:
folds = self.resampling_params['folds']
from sklearn.model_selection import KFold
kfold = KFold(n_splits=folds, random_state=self.seed, shuffle=False)
scores = list()
for train_index, test_index in kfold.split(self.data_node.data[0], self.data_node.data[1]):
_x_train, _x_val = self.data_node.data[0][train_index], self.data_node.data[0][test_index]
_y_train, _y_val = self.data_node.data[1][train_index], self.data_node.data[1][test_index]
self.train_node.data = [_x_train, _y_train]
self.val_node.data = [_x_val, _y_val]
data_node, op_list = parse_config(self.train_node, config, record=True)
_val_node = self.val_node.copy_()
_val_node = construct_node(_val_node, op_list)
_x_train, _y_train = data_node.data
_x_val, _y_val = _val_node.data
config_dict = config.copy()
# regressor gadgets
regressor_id, clf = get_estimator(config_dict, self.estimator_id)
_score = validation(clf, self.scorer, _x_train, _y_train, _x_val, _y_val,
random_state=self.seed)
scores.append(_score)
score = np.mean(scores)
elif 'partial' in self.resampling_strategy:
# Prepare data node.
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
if self.resampling_params is None or 'test_size' not in self.resampling_params:
test_size = 0.33
else:
test_size = self.resampling_params['test_size']
from sklearn.model_selection import ShuffleSplit
ss = ShuffleSplit(n_splits=1, test_size=test_size, random_state=self.seed)
for train_index, test_index in ss.split(self.data_node.data[0], self.data_node.data[1]):
_x_train, _x_val = self.data_node.data[0][train_index], self.data_node.data[0][test_index]
_y_train, _y_val = self.data_node.data[1][train_index], self.data_node.data[1][test_index]
self.train_node.data = [_x_train, _y_train]
self.val_node.data = [_x_val, _y_val]
data_node, op_list = parse_config(self.train_node, config, record=True)
_val_node = self.val_node.copy_()
_val_node = construct_node(_val_node, op_list)
_x_train, _y_train = data_node.data
if downsample_ratio != 1:
down_ss = ShuffleSplit(n_splits=1, test_size=downsample_ratio,
random_state=self.seed)
for _, _val_index in down_ss.split(_x_train, _y_train):
_act_x_train, _act_y_train = _x_train[_val_index], _y_train[_val_index]
else:
_act_x_train, _act_y_train = _x_train, _y_train
_val_index = list(range(len(_x_train)))
_x_val, _y_val = _val_node.data
config_dict = config.copy()
# Regressor gadgets
regressor_id, clf = get_estimator(config_dict, self.estimator_id)
score = validation(clf, self.scorer, _act_x_train, _act_y_train, _x_val, _y_val,
random_state=self.seed)
if np.isfinite(score) and downsample_ratio == 1:
model_path = CombinedTopKModelSaver.get_path_by_config(self.output_dir, config, self.timestamp)
if not os.path.exists(model_path):
with open(model_path, 'wb') as f:
pkl.dump([op_list, clf, score], f)
else:
with open(model_path, 'rb') as f:
_, _, perf = pkl.load(f)
if score > perf:
with open(model_path, 'wb') as f:
pkl.dump([op_list, clf, score], f)
self.logger.info("Model saved to %s" % model_path)
else:
raise ValueError('Invalid resampling strategy: %s!' % self.resampling_strategy)
try:
self.logger.info('Evaluation<%s> | Score: %.4f | Time cost: %.2f seconds | Shape: %s' %
(regressor_id,
self.scorer._sign * score,
time.time() - start_time, _x_train.shape))
except:
pass
# Turn it into a minimization problem.
return_dict['objective_value'] = -score
return -score
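# Minimal evaluation sketch (assumptions: `train_node` is a solnml DataNode
# wrapping the training data and `reg_scorer` is an sklearn scorer; all names
# below are illustrative):
#
#   cs = get_combined_cs(task_type=REGRESSION)
#   evaluator = RegressionEvaluator(scorer=reg_scorer, data_node=train_node,
#                                   resampling_strategy='holdout',
#                                   timestamp=time.time(), output_dir='./models')
#   config = cs.sample_configuration()
#   neg_score = evaluator(config)  # negated score, since this is a minimization objective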
|
[
"solnml.components.feature_engineering.task_space.get_task_hyperparameter_space",
"os.path.exists",
"solnml.components.feature_engineering.parse.parse_config",
"numpy.mean",
"pickle.dump",
"warnings.catch_warnings",
"pickle.load",
"sklearn.model_selection.ShuffleSplit",
"solnml.components.utils.class_loader.get_combined_candidtates",
"solnml.utils.logging_utils.get_logger",
"numpy.isfinite",
"solnml.components.evaluators.evaluate_func.validation",
"solnml.components.utils.topk_saver.CombinedTopKModelSaver.get_path_by_config",
"solnml.components.feature_engineering.parse.construct_node",
"sklearn.model_selection.KFold",
"ConfigSpace.ConfigurationSpace",
"time.time",
"warnings.filterwarnings"
] |
[((1220, 1266), 'solnml.components.utils.class_loader.get_combined_candidtates', 'get_combined_candidtates', (['_regressors', '_addons'], {}), '(_regressors, _addons)\n', (1244, 1266), False, 'from solnml.components.utils.class_loader import get_combined_candidtates\n'), ((1511, 1557), 'solnml.components.utils.class_loader.get_combined_candidtates', 'get_combined_candidtates', (['_regressors', '_addons'], {}), '(_regressors, _addons)\n', (1535, 1557), False, 'from solnml.components.utils.class_loader import get_combined_candidtates\n'), ((1872, 1918), 'solnml.components.utils.class_loader.get_combined_candidtates', 'get_combined_candidtates', (['_regressors', '_addons'], {}), '(_regressors, _addons)\n', (1896, 1918), False, 'from solnml.components.utils.class_loader import get_combined_candidtates\n'), ((2195, 2215), 'ConfigSpace.ConfigurationSpace', 'ConfigurationSpace', ([], {}), '()\n', (2213, 2215), False, 'from ConfigSpace import ConfigurationSpace, CategoricalHyperparameter\n'), ((2746, 2907), 'solnml.components.feature_engineering.task_space.get_task_hyperparameter_space', 'get_task_hyperparameter_space', ([], {'task_type': 'task_type', 'include_image': 'include_image', 'include_text': 'include_text', 'include_preprocessors': 'include_preprocessors'}), '(task_type=task_type, include_image=\n include_image, include_text=include_text, include_preprocessors=\n include_preprocessors)\n', (2775, 2907), False, 'from solnml.components.feature_engineering.task_space import get_task_hyperparameter_space\n'), ((4223, 4282), 'solnml.utils.logging_utils.get_logger', 'get_logger', (["(self.__module__ + '.' + self.__class__.__name__)"], {}), "(self.__module__ + '.' + self.__class__.__name__)\n", (4233, 4282), False, 'from solnml.utils.logging_utils import get_logger\n'), ((4509, 4520), 'time.time', 'time.time', ([], {}), '()\n', (4518, 4520), False, 'import time\n'), ((6427, 6519), 'solnml.components.evaluators.evaluate_func.validation', 'validation', (['clf', 'self.scorer', '_x_train', '_y_train', '_x_val', '_y_val'], {'random_state': 'self.seed'}), '(clf, self.scorer, _x_train, _y_train, _x_val, _y_val,\n random_state=self.seed)\n', (6437, 6519), False, 'from solnml.components.evaluators.evaluate_func import validation\n'), ((6563, 6581), 'numpy.isfinite', 'np.isfinite', (['score'], {}), '(score)\n', (6574, 6581), True, 'import numpy as np\n'), ((5060, 5085), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (5083, 5085), False, 'import warnings\n'), ((5103, 5136), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (5126, 5136), False, 'import warnings\n'), ((5448, 5517), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', ([], {'n_splits': '(1)', 'test_size': 'test_size', 'random_state': 'self.seed'}), '(n_splits=1, test_size=test_size, random_state=self.seed)\n', (5460, 5517), False, 'from sklearn.model_selection import ShuffleSplit\n'), ((5997, 6047), 'solnml.components.feature_engineering.parse.parse_config', 'parse_config', (['self.train_node', 'config'], {'record': '(True)'}), '(self.train_node, config, record=True)\n', (6009, 6047), False, 'from solnml.components.feature_engineering.parse import parse_config, construct_node\n'), ((6126, 6160), 'solnml.components.feature_engineering.parse.construct_node', 'construct_node', (['_val_node', 'op_list'], {}), '(_val_node, op_list)\n', (6140, 6160), False, 'from solnml.components.feature_engineering.parse import parse_config, construct_node\n'), ((6612, 6699), 
'solnml.components.utils.topk_saver.CombinedTopKModelSaver.get_path_by_config', 'CombinedTopKModelSaver.get_path_by_config', (['self.output_dir', 'config', 'self.timestamp'], {}), '(self.output_dir, config, self.\n timestamp)\n', (6653, 6699), False, 'from solnml.components.utils.topk_saver import CombinedTopKModelSaver\n'), ((6719, 6745), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (6733, 6745), False, 'import os\n'), ((7276, 7301), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (7299, 7301), False, 'import warnings\n'), ((7319, 7352), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (7342, 7352), False, 'import warnings\n'), ((7710, 7770), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'folds', 'random_state': 'self.seed', 'shuffle': '(False)'}), '(n_splits=folds, random_state=self.seed, shuffle=False)\n', (7715, 7770), False, 'from sklearn.model_selection import KFold\n'), ((8979, 8994), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (8986, 8994), True, 'import numpy as np\n'), ((10948, 11048), 'solnml.components.evaluators.evaluate_func.validation', 'validation', (['clf', 'self.scorer', '_act_x_train', '_act_y_train', '_x_val', '_y_val'], {'random_state': 'self.seed'}), '(clf, self.scorer, _act_x_train, _act_y_train, _x_val, _y_val,\n random_state=self.seed)\n', (10958, 11048), False, 'from solnml.components.evaluators.evaluate_func import validation\n'), ((6825, 6859), 'pickle.dump', 'pkl.dump', (['[op_list, clf, score]', 'f'], {}), '([op_list, clf, score], f)\n', (6833, 6859), True, 'import pickle as pkl\n'), ((6973, 6984), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (6981, 6984), True, 'import pickle as pkl\n'), ((8298, 8348), 'solnml.components.feature_engineering.parse.parse_config', 'parse_config', (['self.train_node', 'config'], {'record': '(True)'}), '(self.train_node, config, record=True)\n', (8310, 8348), False, 'from solnml.components.feature_engineering.parse import parse_config, construct_node\n'), ((8435, 8469), 'solnml.components.feature_engineering.parse.construct_node', 'construct_node', (['_val_node', 'op_list'], {}), '(_val_node, op_list)\n', (8449, 8469), False, 'from solnml.components.feature_engineering.parse import parse_config, construct_node\n'), ((8784, 8876), 'solnml.components.evaluators.evaluate_func.validation', 'validation', (['clf', 'self.scorer', '_x_train', '_y_train', '_x_val', '_y_val'], {'random_state': 'self.seed'}), '(clf, self.scorer, _x_train, _y_train, _x_val, _y_val,\n random_state=self.seed)\n', (8794, 8876), False, 'from solnml.components.evaluators.evaluate_func import validation\n'), ((9098, 9123), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (9121, 9123), False, 'import warnings\n'), ((9141, 9174), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (9164, 9174), False, 'import warnings\n'), ((9486, 9555), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', ([], {'n_splits': '(1)', 'test_size': 'test_size', 'random_state': 'self.seed'}), '(n_splits=1, test_size=test_size, random_state=self.seed)\n', (9498, 9555), False, 'from sklearn.model_selection import ShuffleSplit\n'), ((10035, 10085), 'solnml.components.feature_engineering.parse.parse_config', 'parse_config', (['self.train_node', 'config'], {'record': '(True)'}), '(self.train_node, config, record=True)\n', (10047, 10085), False, 'from solnml.components.feature_engineering.parse 
import parse_config, construct_node\n'), ((10164, 10198), 'solnml.components.feature_engineering.parse.construct_node', 'construct_node', (['_val_node', 'op_list'], {}), '(_val_node, op_list)\n', (10178, 10198), False, 'from solnml.components.feature_engineering.parse import parse_config, construct_node\n'), ((10313, 10389), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', ([], {'n_splits': '(1)', 'test_size': 'downsample_ratio', 'random_state': 'self.seed'}), '(n_splits=1, test_size=downsample_ratio, random_state=self.seed)\n', (10325, 10389), False, 'from sklearn.model_selection import ShuffleSplit\n'), ((11092, 11110), 'numpy.isfinite', 'np.isfinite', (['score'], {}), '(score)\n', (11103, 11110), True, 'import numpy as np\n'), ((11167, 11254), 'solnml.components.utils.topk_saver.CombinedTopKModelSaver.get_path_by_config', 'CombinedTopKModelSaver.get_path_by_config', (['self.output_dir', 'config', 'self.timestamp'], {}), '(self.output_dir, config, self.\n timestamp)\n', (11208, 11254), False, 'from solnml.components.utils.topk_saver import CombinedTopKModelSaver\n'), ((7108, 7142), 'pickle.dump', 'pkl.dump', (['[op_list, clf, score]', 'f'], {}), '([op_list, clf, score], f)\n', (7116, 7142), True, 'import pickle as pkl\n'), ((11274, 11300), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (11288, 11300), False, 'import os\n'), ((12118, 12129), 'time.time', 'time.time', ([], {}), '()\n', (12127, 12129), False, 'import time\n'), ((11380, 11414), 'pickle.dump', 'pkl.dump', (['[op_list, clf, score]', 'f'], {}), '([op_list, clf, score], f)\n', (11388, 11414), True, 'import pickle as pkl\n'), ((11528, 11539), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (11536, 11539), True, 'import pickle as pkl\n'), ((11663, 11697), 'pickle.dump', 'pkl.dump', (['[op_list, clf, score]', 'f'], {}), '([op_list, clf, score], f)\n', (11671, 11697), True, 'import pickle as pkl\n')]
|
#!/usr/bin/env python3
import argparse
import gc
import numpy as np
import os
import pandas as pd
import pysam
# Number of SVs to process before resetting pysam (close and re-open file). Avoids a memory leak in pysam.
PYSAM_RESET_INTERVAL = 1000
def get_read_depth(df_subset, bam_file_name, mapq, ref_filename=None):
"""
Get read depths over one or more breakpoints.
:param df_subset: Subset dataframe with a column for contigs (first column) and one or more columns for the
location of breakpoints to quantify.
:param bam_file_name: Name of alignment file to query.
    :param mapq: Minimum mapping quality.
    :param ref_filename: Optional reference FASTA passed to pysam (e.g. needed for CRAM input).
    :return: A Series with one element for each row of `df_subset` containing the average of read depths over
the breakpoints for each variant.
"""
# Init pysam query count (for memory leak prevention)
pysam_count = 0
bam_file = pysam.AlignmentFile(bam_file_name, 'r', reference_filename=ref_filename)
# Init dataframe
df_subset = df_subset.copy()
n_loc_cols = df_subset.shape[1] - 1 # Number of location columns; depth is averaged for each
df_subset.columns = ['CONTIG'] + ['LOC_{}'.format(col) for col in range(n_loc_cols)]
# Init count
df_subset['N'] = np.zeros(df_subset.shape[0], np.float64)
n_index = df_subset.shape[1] - 1
# Count
for subset_index in range(n_loc_cols):
# Use numeric index, skip chromosome column
subset_index += 1
for row_index in range(df_subset.shape[0]):
n_reads = 0
# Get position
contig = df_subset.iloc[row_index, 0]
pos = df_subset.iloc[row_index, subset_index]
# Reset pysam periodically (avoids memory leak)
pysam_count += 1
if pysam_count >= PYSAM_RESET_INTERVAL:
if bam_file is not None:
bam_file.close()
gc.collect()
bam_file = pysam.AlignmentFile(bam_file_name, 'r', reference_filename=ref_filename)
pysam_count = 0
# Count
for segment in bam_file.fetch(str(contig), pos, pos + 1):
if segment.mapping_quality >= mapq and segment.is_proper_pair:
n_reads += 1
df_subset.iloc[row_index, n_index] += n_reads
# Return mean of depths (divide by the number of locations)
return df_subset['N'] / n_loc_cols
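# Usage sketch (file names and coordinates are placeholders):
#
#   df_bp = pd.DataFrame({'CONTIG': ['chr1', 'chr2'], 'POS': [10000, 25000]})
#   depth = get_read_depth(df_bp, 'sample.bam', mapq=20, ref_filename='ref.fa')
#
# `depth` is a Series aligned to the rows of `df_bp`; with a single location
# column it is simply the proper-pair read count over each breakpoint.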
def get_ref_contig_sizes(altref_file):
"""
    Get a Series of contig lengths. Includes primary and alt contigs.
:param altref_file: BED file of contig information where each record spans the whole contig. Must contain
columns "#CHROM" and "END".
:return: Series of contig lengths indexed by the contig name.
"""
# Get reference chromosome sizes
ref_len_series = pd.read_table(altref_file, header=0)
ref_len_series.index = ref_len_series['#CHROM']
ref_len_series = ref_len_series['END']
return ref_len_series
def annotate_variant_info(variant_table, ref_len_series, flank):
"""
Annotate variant info with locations reads will be extracted from.
:param variant_table: Variant info table.
:param ref_len_series: Series of contig sizes.
:param flank: Number of bases from variant breakpoints.
:return: `variant_table` with additional fields.
"""
# Annotate variant info with flank locations
variant_table['FLANK_L_REF'] = variant_table['POS'] - flank
variant_table['FLANK_L_REF'] = variant_table['FLANK_L_REF'].apply(lambda pos: pos if pos > 0 else 0)
variant_table['FLANK_R_REF'] = variant_table['END'] + flank
variant_table['FLANK_R_REF'] = variant_table.apply(lambda row: min(row['FLANK_R_REF'], ref_len_series[row['#CHROM']]), axis=1)
variant_table['FLANK_L_CTG'] = variant_table['CONTIG_START'] - flank
variant_table['FLANK_L_CTG'] = variant_table['FLANK_L_CTG'].apply(lambda pos: pos if pos > 0 else 0)
variant_table['FLANK_R_CTG'] = variant_table['CONTIG_END'] + flank
variant_table['FLANK_R_CTG'] = variant_table.apply(lambda row: min(row['FLANK_R_CTG'], ref_len_series[row['CONTIG']]), axis=1)
# Annotate with the midpoint of the variant sequence
variant_table['VAR_CONTIG'] = variant_table.apply(lambda row: row['#CHROM'] if row['SVTYPE'] == 'DEL' else row['CONTIG'], axis=1)
variant_table['VAR_MIDPOINT'] = variant_table.apply(
lambda row:
(row['POS'] + row['END']) / 2 if row['SVTYPE'] == 'DEL' else (row['CONTIG_START'] + row['CONTIG_END']) / 2,
axis=1)
variant_table['VAR_MIDPOINT'] = variant_table['VAR_MIDPOINT'].astype(np.int64)
return variant_table
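# Worked example of the flank arithmetic with flank=100: a deletion with
# POS=5000 and END=5400 gets FLANK_L_REF=4900 and FLANK_R_REF=5500 (clamped to
# 0 on the left and to the contig length on the right), and its midpoint is
# VAR_MIDPOINT = (5000 + 5400) / 2 = 5200 on the reference; non-deletion calls
# use the contig coordinates (CONTIG_START/CONTIG_END) instead.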
# Main
if __name__ == '__main__':
# Get arguments
arg_parser = argparse.ArgumentParser(description='Get insert size deltas on the reference over the SV breakpoints.')
arg_parser.add_argument('bam', help='BAM file of short read alignments.')
arg_parser.add_argument('bed', help='SV info BED file with columns "#CHROM", "POS", "END", "SVTYPE", "CONTIG", '
'"CONTIG_START", and "CONTIG_END", including a header line.')
arg_parser.add_argument('alt_info', help='BED file of contigs in the reference.')
arg_parser.add_argument('out', help='Output file.')
arg_parser.add_argument('--out_stats',
help='Output depth distribution statistics.')
arg_parser.add_argument('--mapq', type=int, default=20,
help='Minimum mapping quality of aligned reads.')
arg_parser.add_argument('--flank', type=int, default=100,
help='Number of reference bases on each side of the SV for flanking regions.')
arg_parser.add_argument('--ref', nargs='?',
                            default=None, help='Reference FASTA that records are aligned against.')
args = arg_parser.parse_args()
# Check arguments
if not os.path.isfile(args.bam):
raise RuntimeError('Input BAM file does not exist or is not a regular file: {}'.format(args.bam))
if args.mapq < 0:
raise RuntimeError('Mapping quality is negative: {}'.format(args.mapq))
if args.flank < 0:
raise RuntimeError('Flank is negative: {}'.format(args.flank))
args.out = args.out.strip()
if not args.out:
raise RuntimeError('Output file name is empty.')
# Get variant info
df_bed = pd.read_table(args.bed, header=0)
# Get reference chromosome sizes
ref_len = get_ref_contig_sizes(args.alt_info)
# Annotate variant info with locations reads are extracted from
df_bed = annotate_variant_info(df_bed, ref_len, args.flank)
# Count reads over variant midpoint
df_bed['DP_N_VAR'] =\
get_read_depth(df_bed.loc[:, ['VAR_CONTIG', 'VAR_MIDPOINT']], args.bam, args.mapq, ref_filename=args.ref)
# Count reads over reference flank
df_bed['DP_N_PROX_REF'] =\
get_read_depth(df_bed.loc[:, ['#CHROM', 'FLANK_L_REF', 'FLANK_R_REF']], args.bam, args.mapq, ref_filename=args.ref)
# Count reads over contig flank
df_bed['DP_N_PROX_CTG'] =\
get_read_depth(df_bed.loc[:, ['CONTIG', 'FLANK_L_CTG', 'FLANK_R_CTG']], args.bam, args.mapq, ref_filename=args.ref)
# Get global stats
ref_mean = np.mean(df_bed['DP_N_PROX_REF'])
ref_sd = np.std(df_bed['DP_N_PROX_REF'])
if ref_mean == 0:
raise RuntimeError('Cannot compute global depth stats: Global mean of proximal reference breakpoint depths is 0')
# Combine total depths
df_bed['DP_N_VAR_PROX_REF'] = df_bed['DP_N_VAR'] + df_bed['DP_N_PROX_REF']
df_bed['DP_N_VAR_PROX_CTG'] = df_bed['DP_N_VAR'] + df_bed['DP_N_PROX_CTG']
# Set relative ratios
df_bed['DP_VAR_REF'] = df_bed.apply(
lambda row: row['DP_N_VAR'] / row['DP_N_VAR_PROX_REF'] if row['DP_N_VAR_PROX_REF'] > 0 else 0,
axis=1
)
df_bed['DP_VAR_CTG'] = df_bed.apply(
lambda row: row['DP_N_VAR'] / row['DP_N_VAR_PROX_CTG'] if row['DP_N_VAR_PROX_CTG'] > 0 else 0,
axis=1
)
df_bed['DP_VAR_GLOBAL'] = df_bed['DP_N_VAR'] / ref_mean
# Write
df_features = df_bed.loc[
:, ('INDEX', 'DP_VAR_REF', 'DP_VAR_CTG', 'DP_VAR_GLOBAL', 'DP_N_VAR', 'DP_N_PROX_REF', 'DP_N_PROX_CTG')
]
df_features.to_csv(
args.out, sep='\t', index=False
)
# Write stats
if args.out_stats:
with open(args.out_stats, 'w') as stats_out:
stats_out.write('ref_mean\t{:.6f}\n'.format(ref_mean))
stats_out.write('ref_sd\t{:.6f}\n'.format(ref_sd))
|
[
"numpy.mean",
"argparse.ArgumentParser",
"pysam.AlignmentFile",
"os.path.isfile",
"numpy.zeros",
"pandas.read_table",
"numpy.std",
"gc.collect"
] |
[((899, 971), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['bam_file_name', '"""r"""'], {'reference_filename': 'ref_filename'}), "(bam_file_name, 'r', reference_filename=ref_filename)\n", (918, 971), False, 'import pysam\n'), ((1255, 1295), 'numpy.zeros', 'np.zeros', (['df_subset.shape[0]', 'np.float64'], {}), '(df_subset.shape[0], np.float64)\n', (1263, 1295), True, 'import numpy as np\n'), ((2834, 2870), 'pandas.read_table', 'pd.read_table', (['altref_file'], {'header': '(0)'}), '(altref_file, header=0)\n', (2847, 2870), True, 'import pandas as pd\n'), ((4749, 4857), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get insert size deltas on the reference over the SV breakpoints."""'}), "(description=\n 'Get insert size deltas on the reference over the SV breakpoints.')\n", (4772, 4857), False, 'import argparse\n'), ((6414, 6447), 'pandas.read_table', 'pd.read_table', (['args.bed'], {'header': '(0)'}), '(args.bed, header=0)\n', (6427, 6447), True, 'import pandas as pd\n'), ((7276, 7308), 'numpy.mean', 'np.mean', (["df_bed['DP_N_PROX_REF']"], {}), "(df_bed['DP_N_PROX_REF'])\n", (7283, 7308), True, 'import numpy as np\n'), ((7322, 7353), 'numpy.std', 'np.std', (["df_bed['DP_N_PROX_REF']"], {}), "(df_bed['DP_N_PROX_REF'])\n", (7328, 7353), True, 'import numpy as np\n'), ((5935, 5959), 'os.path.isfile', 'os.path.isfile', (['args.bam'], {}), '(args.bam)\n', (5949, 5959), False, 'import os\n'), ((1920, 1932), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1930, 1932), False, 'import gc\n'), ((1961, 2033), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['bam_file_name', '"""r"""'], {'reference_filename': 'ref_filename'}), "(bam_file_name, 'r', reference_filename=ref_filename)\n", (1980, 2033), False, 'import pysam\n')]
|
import pandas as pd
import numpy as np
function2idx = {"negative": 0, "ferritin": 1, "gpcr": 2, "p450": 3, "protease": 4}
input_dir = '../data/raw/'
data_dir = '../data/processed/'
max_seq_len = 800
def read_and_concat_data():
df_cysteine = pd.read_csv(input_dir + 'uniprot-cysteine+protease+AND+reviewed_yes.tab', sep='\t', skiprows=(0),
header=(0))
df_cysteine.drop(['Entry name', "Status"], axis=1, inplace=True)
df_cysteine.columns = ['id', 'sequence']
df_cysteine['function'] = function2idx['protease']
df_serine = pd.read_csv(input_dir + 'uniprot-serine+protease+AND+reviewed_yes.tab', sep='\t', skiprows=(0),
header=(0))
df_serine.drop(['Entry name', "Status"], axis=1, inplace=True)
df_serine.columns = ['id', 'sequence']
df_serine['function'] = function2idx['protease']
df_gpcr = pd.read_csv(input_dir + 'uniprot-gpcr+AND+reviewed_yes.tab', sep='\t', skiprows=(0), header=(0))
df_gpcr.drop(['Entry name', "Status"], axis=1, inplace=True)
df_gpcr.columns = ['id', 'sequence']
df_gpcr['function'] = function2idx['gpcr']
df_p450 = pd.read_csv(input_dir + 'uniprot-p450+AND+reviewed_yes.tab', sep='\t', skiprows=(0), header=(0))
df_p450.drop(['Entry name', "Status"], axis=1, inplace=True)
df_p450.columns = ['id', 'sequence']
df_p450['function'] = function2idx['p450']
df_f = pd.read_csv(input_dir + 'uniprot-ferritin-filtered-reviewed_yes.tab', sep='\t', skiprows=(0), header=(0))
df_f.drop(['Entry name', "Status"], axis=1, inplace=True)
df_f.columns = ['id', 'sequence']
df_f['function'] = function2idx['ferritin']
df_positive = pd.concat([df_cysteine, df_serine, df_f, df_gpcr, df_p450], ignore_index=True)
duplicates = list(df_positive[df_positive.duplicated('id')].id)
df_uniprot = pd.read_csv(input_dir + 'uniprot-reviewed_yes.tab', sep='\t', skiprows=(0), header=(0))
df_uniprot = df_uniprot.drop(["Entry name", "Status", "Gene names", "Gene ontology (molecular function)",
"Gene ontology IDs", "Gene ontology (cellular component)",
"Gene ontology (biological process)", "Gene ontology (GO)"], axis=1)
df_uniprot['function'] = function2idx['negative']
df_uniprot.columns = ['id', 'sequence', 'function']
    df_uniprot = df_uniprot[~df_uniprot.id.isin(duplicates)]
df_all = pd.concat([df_uniprot, df_positive], ignore_index=True)
df_all.sort_values(by='function', inplace=True, ascending=False)
df_all = df_all.drop_duplicates(subset='id').reset_index(drop=True)
print("Finished reading raw data and concating")
return df_all
def clean_sequence_length(dataframe):
    # For proteins longer than max_seq_len, also keep the reversed sequence so the C-terminal residues survive the truncation below
reverse_rows = []
for index, row in dataframe[dataframe.sequence.apply(len) > max_seq_len].iterrows():
reverse_rows.append([row.id + '_r', row.sequence[::-1], row.function])
reverse_rows = pd.DataFrame(reverse_rows, columns=['id', 'sequence', 'function'])
dataframe = pd.concat([dataframe, reverse_rows], ignore_index=True)
# Cut all sequences to 800 char
dataframe['sequence'] = dataframe.sequence.apply(lambda x: x[:max_seq_len])
dataframe['length'] = dataframe.sequence.apply(len)
dataframe = dataframe.sort_values(by='length').reset_index(drop=True)
print("Finished cleaning sequences by length")
return dataframe
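# Worked example: a 1000-residue protein keeps its first 800 residues, and the
# added row with id suffix '_r' holds the reversed sequence truncated to 800,
# i.e. the last 800 residues read from the C-terminus.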
df = read_and_concat_data()
df = clean_sequence_length(df)
np.savetxt(data_dir + 'sequence.txt', df.sequence.values, fmt='%s')
np.savetxt(data_dir + 'function.txt', df.function.values, fmt='%s')
print("Saved sequence and function to txt")
|
[
"pandas.DataFrame",
"pandas.concat",
"numpy.savetxt",
"pandas.read_csv"
] |
[((3531, 3598), 'numpy.savetxt', 'np.savetxt', (["(data_dir + 'sequence.txt')", 'df.sequence.values'], {'fmt': '"""%s"""'}), "(data_dir + 'sequence.txt', df.sequence.values, fmt='%s')\n", (3541, 3598), True, 'import numpy as np\n'), ((3599, 3666), 'numpy.savetxt', 'np.savetxt', (["(data_dir + 'function.txt')", 'df.function.values'], {'fmt': '"""%s"""'}), "(data_dir + 'function.txt', df.function.values, fmt='%s')\n", (3609, 3666), True, 'import numpy as np\n'), ((249, 358), 'pandas.read_csv', 'pd.read_csv', (["(input_dir + 'uniprot-cysteine+protease+AND+reviewed_yes.tab')"], {'sep': '"""\t"""', 'skiprows': '(0)', 'header': '(0)'}), "(input_dir + 'uniprot-cysteine+protease+AND+reviewed_yes.tab',\n sep='\\t', skiprows=0, header=0)\n", (260, 358), True, 'import pandas as pd\n'), ((575, 683), 'pandas.read_csv', 'pd.read_csv', (["(input_dir + 'uniprot-serine+protease+AND+reviewed_yes.tab')"], {'sep': '"""\t"""', 'skiprows': '(0)', 'header': '(0)'}), "(input_dir + 'uniprot-serine+protease+AND+reviewed_yes.tab', sep\n ='\\t', skiprows=0, header=0)\n", (586, 683), True, 'import pandas as pd\n'), ((889, 985), 'pandas.read_csv', 'pd.read_csv', (["(input_dir + 'uniprot-gpcr+AND+reviewed_yes.tab')"], {'sep': '"""\t"""', 'skiprows': '(0)', 'header': '(0)'}), "(input_dir + 'uniprot-gpcr+AND+reviewed_yes.tab', sep='\\t',\n skiprows=0, header=0)\n", (900, 985), True, 'import pandas as pd\n'), ((1154, 1250), 'pandas.read_csv', 'pd.read_csv', (["(input_dir + 'uniprot-p450+AND+reviewed_yes.tab')"], {'sep': '"""\t"""', 'skiprows': '(0)', 'header': '(0)'}), "(input_dir + 'uniprot-p450+AND+reviewed_yes.tab', sep='\\t',\n skiprows=0, header=0)\n", (1165, 1250), True, 'import pandas as pd\n'), ((1416, 1522), 'pandas.read_csv', 'pd.read_csv', (["(input_dir + 'uniprot-ferritin-filtered-reviewed_yes.tab')"], {'sep': '"""\t"""', 'skiprows': '(0)', 'header': '(0)'}), "(input_dir + 'uniprot-ferritin-filtered-reviewed_yes.tab', sep=\n '\\t', skiprows=0, header=0)\n", (1427, 1522), True, 'import pandas as pd\n'), ((1689, 1767), 'pandas.concat', 'pd.concat', (['[df_cysteine, df_serine, df_f, df_gpcr, df_p450]'], {'ignore_index': '(True)'}), '([df_cysteine, df_serine, df_f, df_gpcr, df_p450], ignore_index=True)\n', (1698, 1767), True, 'import pandas as pd\n'), ((1854, 1941), 'pandas.read_csv', 'pd.read_csv', (["(input_dir + 'uniprot-reviewed_yes.tab')"], {'sep': '"""\t"""', 'skiprows': '(0)', 'header': '(0)'}), "(input_dir + 'uniprot-reviewed_yes.tab', sep='\\t', skiprows=0,\n header=0)\n", (1865, 1941), True, 'import pandas as pd\n'), ((2420, 2475), 'pandas.concat', 'pd.concat', (['[df_uniprot, df_positive]'], {'ignore_index': '(True)'}), '([df_uniprot, df_positive], ignore_index=True)\n', (2429, 2475), True, 'import pandas as pd\n'), ((3007, 3073), 'pandas.DataFrame', 'pd.DataFrame', (['reverse_rows'], {'columns': "['id', 'sequence', 'function']"}), "(reverse_rows, columns=['id', 'sequence', 'function'])\n", (3019, 3073), True, 'import pandas as pd\n'), ((3090, 3145), 'pandas.concat', 'pd.concat', (['[dataframe, reverse_rows]'], {'ignore_index': '(True)'}), '([dataframe, reverse_rows], ignore_index=True)\n', (3099, 3145), True, 'import pandas as pd\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 13 11:03:51 2019
@author: ivanpauno
"""
import matplotlib.pyplot as plt
import numpy as np
def main():
    # A = sqrt((10^(.1*alpha_min) - 1) / (10^(.1*alpha_max) - 1))
A = np.logspace(np.log10(2), np.log10(100), num=200)
ws_array = [1.1, 1.5, 2, 3]
n_butter = [np.log(A)/np.log(ws) for ws in ws_array]
n_cheby = [np.arccosh(A)/np.arccosh(ws) for ws in ws_array]
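    # The minimum orders follow from the standard design formulas
    #   n_butter >= log(A) / log(ws)            (Butterworth)
    #   n_cheby  >= arccosh(A) / arccosh(ws)    (Chebyshev)
    # e.g. for A = 10 and ws = 2: log(10)/log(2) ~ 3.32 -> n = 4, while
    # arccosh(10)/arccosh(2) ~ 2.27 -> n = 3.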
    # The next two lines round the orders up to integers; comment them out to see the raw (fractional) values.
n_butter = np.ceil(n_butter)
n_cheby = np.ceil(n_cheby)
for i in range(len(n_butter)):
fig, ax = plt.subplots()
ax.ticklabel_format(useOffset=False)
ax.set_xlabel('A')
ax.set_ylabel('n')
ax.grid(True)
ax.plot(A, n_butter[i], 'k')
ax.plot(A, n_cheby[i], 'r')
title = 'Order comparison ws={}'.format(ws_array[i])
fig.suptitle(title)
fig.canvas.set_window_title(title)
plt.show()
if __name__ == '__main__':
main()
|
[
"numpy.ceil",
"numpy.log10",
"numpy.log",
"numpy.arccosh",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((526, 543), 'numpy.ceil', 'np.ceil', (['n_butter'], {}), '(n_butter)\n', (533, 543), True, 'import numpy as np\n'), ((558, 574), 'numpy.ceil', 'np.ceil', (['n_cheby'], {}), '(n_cheby)\n', (565, 574), True, 'import numpy as np\n'), ((973, 983), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (981, 983), True, 'import matplotlib.pyplot as plt\n'), ((253, 264), 'numpy.log10', 'np.log10', (['(2)'], {}), '(2)\n', (261, 264), True, 'import numpy as np\n'), ((266, 279), 'numpy.log10', 'np.log10', (['(100)'], {}), '(100)\n', (274, 279), True, 'import numpy as np\n'), ((628, 642), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (640, 642), True, 'import matplotlib.pyplot as plt\n'), ((338, 347), 'numpy.log', 'np.log', (['A'], {}), '(A)\n', (344, 347), True, 'import numpy as np\n'), ((348, 358), 'numpy.log', 'np.log', (['ws'], {}), '(ws)\n', (354, 358), True, 'import numpy as np\n'), ((394, 407), 'numpy.arccosh', 'np.arccosh', (['A'], {}), '(A)\n', (404, 407), True, 'import numpy as np\n'), ((408, 422), 'numpy.arccosh', 'np.arccosh', (['ws'], {}), '(ws)\n', (418, 422), True, 'import numpy as np\n')]
|
import numpy as np
def eval_rerr(X, X_hat, X0=None):
"""
:param X: tensor, X0 or X0+noise
    :param X_hat: output of the approximation
    :param X0: true signal, tensor
    :return: the relative error ||X - X_hat||_F / ||X||_F, or ||X0 - X_hat||_F / ||X0||_F when X0 is given
"""
if X0 is not None:
error = X0 - X_hat
return np.linalg.norm(error.reshape(np.size(error), 1), 'fro') / \
np.linalg.norm(X0.reshape(np.size(X0), 1), 'fro')
error = X - X_hat
return np.linalg.norm(error.reshape(np.size(error), 1), 'fro') / \
           np.linalg.norm(X.reshape(np.size(X), 1), 'fro')
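# Usage sketch (shapes are illustrative):
#
#   X0 = np.random.rand(5, 5, 5)                # true signal
#   X = X0 + 0.01 * np.random.randn(5, 5, 5)    # noisy observation
#   X_hat = X                                   # some approximation
#   print(eval_rerr(X, X_hat, X0))              # ||X0 - X_hat||_F / ||X0||_F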
|
[
"numpy.size"
] |
[((493, 507), 'numpy.size', 'np.size', (['error'], {}), '(error)\n', (500, 507), True, 'import numpy as np\n'), ((561, 571), 'numpy.size', 'np.size', (['X'], {}), '(X)\n', (568, 571), True, 'import numpy as np\n'), ((339, 353), 'numpy.size', 'np.size', (['error'], {}), '(error)\n', (346, 353), True, 'import numpy as np\n'), ((407, 418), 'numpy.size', 'np.size', (['X0'], {}), '(X0)\n', (414, 418), True, 'import numpy as np\n')]
|
# Author: <NAME>, <NAME>, <NAME>
# Date: 2020/11/27
"""Compare the performance of different classifier and train the best model given cross_validate results .
Usage: src/clf_comparison.py <input_file> <input_file1> <output_file> <output_file1>
Options:
<input_file> Path (including filename and file extension) to transformed train file
<input_file1> Path (including filename and file extension) to transformed test file
<output_file> Path (including filename and file extension) to cross validate result file
<output_file1> Path (including filename and file extension) to store untuned model predictions
"""
#import packages
from docopt import docopt
import pandas as pd
import sys
import os
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import (
cross_validate,
GridSearchCV,
RandomizedSearchCV
)
from joblib import dump, load
from sklearn.metrics import f1_score, make_scorer
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
opt = docopt(__doc__)
def main(input_file, input_file1, output_file, output_file1):
# read train_df.csv
train = pd.read_csv(input_file)
test = pd.read_csv(input_file1)
# create split the train_df
X_train, y_train = train.drop(columns=["quality_level"]), train["quality_level"]
X_test, y_test = test.drop(columns=["quality_level"]), test["quality_level"]
# check if target folder exists
try:
os.makedirs(os.path.dirname(output_file))
except FileExistsError:
pass
# define classifiers
classifiers = {
"Logistic_Regression": LogisticRegression(random_state = 123, class_weight = 'balanced'),
"Random_Forest": RandomForestClassifier(random_state = 123, class_weight = 'balanced'),
"DummyClassifier": DummyClassifier(random_state = 123),
"SVC" : SVC(random_state = 123, class_weight = 'balanced'),
"K_Nearest_Neighbors": KNeighborsClassifier()
}
f1 = make_scorer(f1_score, average = 'weighted', labels = ['Excellent'])
def score_with_metrics(models, scoring=f1):
"""
Return cross-validation scores for given models as a dataframe.
Parameters
----------
models : dict
a dictionary with names and scikit-learn models
scoring : list/dict/string
scoring parameter values for cross-validation
Returns
----------
        pandas.DataFrame
            mean train and cross-validation scores, one column per model
"""
results_df = {}
for (name, model) in models.items():
clf = model
scores = cross_validate(
clf, X_train, y_train, return_train_score=True, scoring=scoring
)
df = pd.DataFrame(scores)
results_df[name] = df.mean()
clf.fit(X_train, y_train)
# save the model
dump(clf, 'results/'+name+'.joblib')
return pd.DataFrame(results_df)
res = score_with_metrics(classifiers)
res = res.transpose()
best_model = res.idxmax()['test_score']
best_clf = classifiers[best_model]
best_clf.fit(X_train, y_train)
pred = best_clf.predict(X_test)
test_scores = f1_score(y_test, pred, average = 'weighted', labels = ['Excellent'])
best_score = pd.DataFrame({'Model': [best_model], 'Test_Score':[test_scores]})
res.to_csv(output_file, index = True)
best_score.to_csv(output_file1, index = False)
# perform hyperparameter tuning on two of the best models
param_RF = {'n_estimators':[int(i) for i in np.linspace(start = 100, stop = 1000, num = 10).tolist()],
'max_depth':[int(i) for i in np.linspace(start = 10, stop = 1000, num = 100).tolist()]}
param_log = {
"C": [0.0001, 0.001, 0.01, 0.1, 1.0, 10, 100, 1000]}
rf_search = RandomizedSearchCV(classifiers['Random_Forest'],
param_RF, cv = 5,
n_jobs = -1,
scoring = f1,
n_iter = 20, random_state = 123)
log_search = GridSearchCV(classifiers['Logistic_Regression'],
param_log, cv = 5,
n_jobs = -1,
scoring = f1
)
rf_search.fit(X_train, y_train)
log_search.fit(X_train, y_train)
rf_best = rf_search.best_estimator_
log_best = log_search.best_estimator_
tuned_results = {}
rf_score = cross_validate(rf_best, X_train, y_train, return_train_score=True, scoring=f1)
log_score = cross_validate(log_best, X_train, y_train, return_train_score=True, scoring=f1)
tuned_results['Random Forest'] = pd.DataFrame(rf_score).mean()
tuned_results['Logistic Regression'] = pd.DataFrame(log_score).mean()
tuned_results = pd.DataFrame(tuned_results).transpose()
tuned_results.to_csv('results/tuned_cv_results.csv', index = True)
rf_best.fit(X_train, y_train)
dump(rf_best, 'results/Bestrfmodel.joblib')
pred = rf_best.predict(X_test)
best_f1 = f1_score(y_test, pred, average = 'weighted', labels = ['Excellent'])
best_tuned_model_test = pd.DataFrame({'Model': ['Random Forest'], 'Test_Score':[best_f1]})
best_tuned_model_test.to_csv('results/best_tuned_model.csv', index = False)
if __name__ == "__main__":
main(opt["<input_file>"], opt["<input_file1>"], opt["<output_file>"], opt["<output_file1>"])
|
[
"sklearn.model_selection.GridSearchCV",
"sklearn.svm.SVC",
"sklearn.metrics.f1_score",
"pandas.read_csv",
"pandas.DataFrame",
"sklearn.model_selection.cross_validate",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.metrics.make_scorer",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.linear_model.LogisticRegression",
"sklearn.dummy.DummyClassifier",
"os.path.dirname",
"numpy.linspace",
"warnings.simplefilter",
"joblib.dump",
"docopt.docopt",
"sklearn.model_selection.RandomizedSearchCV"
] |
[((1218, 1233), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (1224, 1233), False, 'from docopt import docopt\n'), ((1179, 1210), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1200, 1210), False, 'import warnings\n'), ((1341, 1364), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {}), '(input_file)\n', (1352, 1364), True, 'import pandas as pd\n'), ((1376, 1400), 'pandas.read_csv', 'pd.read_csv', (['input_file1'], {}), '(input_file1)\n', (1387, 1400), True, 'import pandas as pd\n'), ((2169, 2232), 'sklearn.metrics.make_scorer', 'make_scorer', (['f1_score'], {'average': '"""weighted"""', 'labels': "['Excellent']"}), "(f1_score, average='weighted', labels=['Excellent'])\n", (2180, 2232), False, 'from sklearn.metrics import f1_score, make_scorer\n'), ((3352, 3416), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'pred'], {'average': '"""weighted"""', 'labels': "['Excellent']"}), "(y_test, pred, average='weighted', labels=['Excellent'])\n", (3360, 3416), False, 'from sklearn.metrics import f1_score, make_scorer\n'), ((3438, 3504), 'pandas.DataFrame', 'pd.DataFrame', (["{'Model': [best_model], 'Test_Score': [test_scores]}"], {}), "({'Model': [best_model], 'Test_Score': [test_scores]})\n", (3450, 3504), True, 'import pandas as pd\n'), ((3966, 4086), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (["classifiers['Random_Forest']", 'param_RF'], {'cv': '(5)', 'n_jobs': '(-1)', 'scoring': 'f1', 'n_iter': '(20)', 'random_state': '(123)'}), "(classifiers['Random_Forest'], param_RF, cv=5, n_jobs=-1,\n scoring=f1, n_iter=20, random_state=123)\n", (3984, 4086), False, 'from sklearn.model_selection import cross_validate, GridSearchCV, RandomizedSearchCV\n'), ((4271, 4363), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (["classifiers['Logistic_Regression']", 'param_log'], {'cv': '(5)', 'n_jobs': '(-1)', 'scoring': 'f1'}), "(classifiers['Logistic_Regression'], param_log, cv=5, n_jobs=-1,\n scoring=f1)\n", (4283, 4363), False, 'from sklearn.model_selection import cross_validate, GridSearchCV, RandomizedSearchCV\n'), ((4716, 4794), 'sklearn.model_selection.cross_validate', 'cross_validate', (['rf_best', 'X_train', 'y_train'], {'return_train_score': '(True)', 'scoring': 'f1'}), '(rf_best, X_train, y_train, return_train_score=True, scoring=f1)\n', (4730, 4794), False, 'from sklearn.model_selection import cross_validate, GridSearchCV, RandomizedSearchCV\n'), ((4811, 4890), 'sklearn.model_selection.cross_validate', 'cross_validate', (['log_best', 'X_train', 'y_train'], {'return_train_score': '(True)', 'scoring': 'f1'}), '(log_best, X_train, y_train, return_train_score=True, scoring=f1)\n', (4825, 4890), False, 'from sklearn.model_selection import cross_validate, GridSearchCV, RandomizedSearchCV\n'), ((5201, 5244), 'joblib.dump', 'dump', (['rf_best', '"""results/Bestrfmodel.joblib"""'], {}), "(rf_best, 'results/Bestrfmodel.joblib')\n", (5205, 5244), False, 'from joblib import dump, load\n'), ((5294, 5358), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'pred'], {'average': '"""weighted"""', 'labels': "['Excellent']"}), "(y_test, pred, average='weighted', labels=['Excellent'])\n", (5302, 5358), False, 'from sklearn.metrics import f1_score, make_scorer\n'), ((5391, 5458), 'pandas.DataFrame', 'pd.DataFrame', (["{'Model': ['Random Forest'], 'Test_Score': [best_f1]}"], {}), "({'Model': ['Random Forest'], 'Test_Score': [best_f1]})\n", (5403, 5458), True, 'import pandas as pd\n'), ((1817, 1878), 
'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(123)', 'class_weight': '"""balanced"""'}), "(random_state=123, class_weight='balanced')\n", (1835, 1878), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1909, 1974), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(123)', 'class_weight': '"""balanced"""'}), "(random_state=123, class_weight='balanced')\n", (1931, 1974), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2003, 2036), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'random_state': '(123)'}), '(random_state=123)\n', (2018, 2036), False, 'from sklearn.dummy import DummyClassifier\n'), ((2052, 2098), 'sklearn.svm.SVC', 'SVC', ([], {'random_state': '(123)', 'class_weight': '"""balanced"""'}), "(random_state=123, class_weight='balanced')\n", (2055, 2098), False, 'from sklearn.svm import SVC\n'), ((2131, 2153), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (2151, 2153), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3084, 3108), 'pandas.DataFrame', 'pd.DataFrame', (['results_df'], {}), '(results_df)\n', (3096, 3108), True, 'import pandas as pd\n'), ((1670, 1698), 'os.path.dirname', 'os.path.dirname', (['output_file'], {}), '(output_file)\n', (1685, 1698), False, 'import os\n'), ((2764, 2843), 'sklearn.model_selection.cross_validate', 'cross_validate', (['clf', 'X_train', 'y_train'], {'return_train_score': '(True)', 'scoring': 'scoring'}), '(clf, X_train, y_train, return_train_score=True, scoring=scoring)\n', (2778, 2843), False, 'from sklearn.model_selection import cross_validate, GridSearchCV, RandomizedSearchCV\n'), ((2891, 2911), 'pandas.DataFrame', 'pd.DataFrame', (['scores'], {}), '(scores)\n', (2903, 2911), True, 'import pandas as pd\n'), ((3032, 3072), 'joblib.dump', 'dump', (['clf', "('results/' + name + '.joblib')"], {}), "(clf, 'results/' + name + '.joblib')\n", (3036, 3072), False, 'from joblib import dump, load\n'), ((4928, 4950), 'pandas.DataFrame', 'pd.DataFrame', (['rf_score'], {}), '(rf_score)\n', (4940, 4950), True, 'import pandas as pd\n'), ((5001, 5024), 'pandas.DataFrame', 'pd.DataFrame', (['log_score'], {}), '(log_score)\n', (5013, 5024), True, 'import pandas as pd\n'), ((5052, 5079), 'pandas.DataFrame', 'pd.DataFrame', (['tuned_results'], {}), '(tuned_results)\n', (5064, 5079), True, 'import pandas as pd\n'), ((3712, 3753), 'numpy.linspace', 'np.linspace', ([], {'start': '(100)', 'stop': '(1000)', 'num': '(10)'}), '(start=100, stop=1000, num=10)\n', (3723, 3753), True, 'import numpy as np\n'), ((3811, 3852), 'numpy.linspace', 'np.linspace', ([], {'start': '(10)', 'stop': '(1000)', 'num': '(100)'}), '(start=10, stop=1000, num=100)\n', (3822, 3852), True, 'import numpy as np\n')]
|
import glob
import os
import numpy as np
import nibabel as nb
import argparse
def get_dir_list(train_path):
fnames = glob.glob(train_path)
list_train = []
for k, f in enumerate(fnames):
list_train.append(os.path.split(f)[0])
return list_train
def ParseData(list_data):
'''
    Creates a list of (directory, orientation, slice index) entries for every slice that contains nonzero segmentation
'''
data_instance = []
for dir_name in list_data:
fname = glob.glob(os.path.join(dir_name, '*seg.nii.gz'))
f = nb.load(fname[0])
img = f.get_fdata().astype('float32')
h, w, d = f.shape # sag, cor, ax
for slc in range(h):
if np.sum(img[slc, :, :]) != 0:
data_instance.append([dir_name, 'sag', slc])
for slc in range(w):
if np.sum(img[:, slc, :]) != 0:
data_instance.append([dir_name, 'cor', slc])
for slc in range(d):
if np.sum(img[:, :, slc]) != 0:
data_instance.append([dir_name, 'ax', slc])
print('Number of images: ', len(data_instance))
return data_instance
def get_slice(dir_name, orient, slc, cont, isNorm=True):
'''
    takes the directory name, orientation, slice number and contrast file pattern and returns the corresponding 2D slice with a leading channel dimension
'''
# ---- get slice for given contrast image ---- #
fname = glob.glob(os.path.join(dir_name, cont))
f = nb.load(fname[0])
img = np.squeeze(f.get_fdata()).astype('float32')
if orient == 'sag':
x = img[slc, :, :]
elif orient == 'cor':
x = img[:, slc, :]
else:
x = img[:, :, slc]
return np.expand_dims(x, 0)
def get_batchsize_one(dir_name, orient, slc):
'''
    takes a directory name, orientation and slice index and generates one stacked sample of input data
'''
# ---- get images ---- #
x_t1 = get_slice(dir_name, orient, slc, '*flair.nii.gz')
x_t2 = get_slice(dir_name, orient, slc, '*t1.nii.gz')
x_t1ce = get_slice(dir_name, orient, slc, '*t2.nii.gz')
x_flair = get_slice(dir_name, orient, slc, '*t1ce.nii.gz')
x_seg = get_slice(dir_name, orient, slc, '*seg.nii.gz', isNorm=False).astype('int')
x_seg[x_seg==4] = 3
x_inp = np.concatenate((x_t1, x_t2, x_t1ce, x_flair, x_seg), 0)
    # channel order in x_inp: (flair, t1, t2, t1ce, seg)
return x_inp
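# The stacked sample therefore has shape (5, H, W) for the selected slice; for
# a typical BraTS volume of 240 x 240 x 155 voxels an axial slice gives a
# (5, 240, 240) array (the exact in-plane size depends on the orientation).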
def generate_data(src_path, dst_path):
data_instance = ParseData(get_dir_list(src_path))
for k, data in enumerate(data_instance):
print(k, ' of ', len(data_instance))
dir_name, orient, slc = data[0], data[1], data[2]
x_inp = get_batchsize_one(dir_name, orient, slc)
fname = os.path.join(dst_path, str(k)+'.npy')
np.save(fname, x_inp)
# ---- Arguments ---- #
ap = argparse.ArgumentParser()
ap.add_argument("-sp", "--src_path", type=str, default='./data/nifti/train/*/*seg.nii.gz')
ap.add_argument("-dp", "--dst_path", type=str, default='./data/np/train/')
args = vars(ap.parse_args())
if __name__ == '__main__':
'''
Script to convert nifti images to numpy array for faster loading
'''
src_path = args['src_path']
dst_path = args['dst_path']
generate_data(src_path, dst_path)
|
[
"argparse.ArgumentParser",
"nibabel.load",
"os.path.join",
"os.path.split",
"numpy.sum",
"numpy.concatenate",
"numpy.expand_dims",
"numpy.save",
"glob.glob"
] |
[((2640, 2665), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2663, 2665), False, 'import argparse\n'), ((122, 143), 'glob.glob', 'glob.glob', (['train_path'], {}), '(train_path)\n', (131, 143), False, 'import glob\n'), ((1359, 1376), 'nibabel.load', 'nb.load', (['fname[0]'], {}), '(fname[0])\n', (1366, 1376), True, 'import nibabel as nb\n'), ((1583, 1603), 'numpy.expand_dims', 'np.expand_dims', (['x', '(0)'], {}), '(x, 0)\n', (1597, 1603), True, 'import numpy as np\n'), ((2118, 2173), 'numpy.concatenate', 'np.concatenate', (['(x_t1, x_t2, x_t1ce, x_flair, x_seg)', '(0)'], {}), '((x_t1, x_t2, x_t1ce, x_flair, x_seg), 0)\n', (2132, 2173), True, 'import numpy as np\n'), ((484, 501), 'nibabel.load', 'nb.load', (['fname[0]'], {}), '(fname[0])\n', (491, 501), True, 'import nibabel as nb\n'), ((1321, 1349), 'os.path.join', 'os.path.join', (['dir_name', 'cont'], {}), '(dir_name, cont)\n', (1333, 1349), False, 'import os\n'), ((2588, 2609), 'numpy.save', 'np.save', (['fname', 'x_inp'], {}), '(fname, x_inp)\n', (2595, 2609), True, 'import numpy as np\n'), ((433, 470), 'os.path.join', 'os.path.join', (['dir_name', '"""*seg.nii.gz"""'], {}), "(dir_name, '*seg.nii.gz')\n", (445, 470), False, 'import os\n'), ((229, 245), 'os.path.split', 'os.path.split', (['f'], {}), '(f)\n', (242, 245), False, 'import os\n'), ((633, 655), 'numpy.sum', 'np.sum', (['img[slc, :, :]'], {}), '(img[slc, :, :])\n', (639, 655), True, 'import numpy as np\n'), ((767, 789), 'numpy.sum', 'np.sum', (['img[:, slc, :]'], {}), '(img[:, slc, :])\n', (773, 789), True, 'import numpy as np\n'), ((901, 923), 'numpy.sum', 'np.sum', (['img[:, :, slc]'], {}), '(img[:, :, slc])\n', (907, 923), True, 'import numpy as np\n')]
|
from anndata import AnnData
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from joblib import delayed
from tqdm import tqdm
import sys
import igraph
from .utils import ProgressParallel
from .. import logging as logg
from .. import settings
def pseudotime(adata: AnnData, n_jobs: int = 1, n_map: int = 1, copy: bool = False):
"""\
Compute pseudotime.
Projects cells onto the tree, and uses distance from the root as a pseudotime value.
Parameters
----------
adata
Annotated data matrix.
    n_jobs
        Number of cpu processes to use when performing multiple mappings.
    n_map
        Number of probabilistic mappings of cells onto the tree to use. If n_map=1, the likelihood cell mapping is used.
copy
Return a copy instead of writing to adata.
Returns
-------
adata : anndata.AnnData
        a copy of `adata` if `copy=True`; otherwise the following fields are added to `adata`:
`.obs['edge']`
assigned edge.
`.obs['t']`
assigned pseudotime value.
`.obs['seg']`
assigned segment of the tree.
`.obs['milestone']`
assigned region surrounding forks and tips.
`.uns['pseudotime_list']`
list of cell projection from all mappings.
"""
if "root" not in adata.uns["graph"]:
raise ValueError(
"You need to run `tl.root` or `tl.roots` before projecting cells."
)
adata = adata.copy() if copy else adata
graph = adata.uns["graph"]
reassign, recolor = False, False
if "milestones" in adata.obs:
if adata.obs.milestones.dtype.name == "category":
tmp_mil = adata.obs.milestones.cat.categories.copy()
reassign = True
if "milestones_colors" in adata.uns:
tmp_mil_col = adata.uns["milestones_colors"].copy()
recolor = True
logg.info("projecting cells onto the principal graph", reset=True)
if n_map == 1:
df_l = [map_cells(graph, multi=False)]
else:
df_l = ProgressParallel(
n_jobs=n_jobs, total=n_map, file=sys.stdout, desc=" mappings"
)(delayed(map_cells)(graph=graph, multi=True) for m in range(n_map))
# formatting cell projection data
df_summary = df_l[0]
df_summary["seg"] = df_summary["seg"].astype("category")
df_summary["edge"] = df_summary["edge"].astype("category")
# remove pre-existing palette to avoid errors with plotting
if "seg_colors" in adata.uns:
del adata.uns["seg_colors"]
if set(df_summary.columns.tolist()).issubset(adata.obs.columns):
adata.obs[df_summary.columns] = df_summary
else:
adata.obs = pd.concat([adata.obs, df_summary], axis=1)
# list(map(lambda x: x.column))
# todict=list(map(lambda x: dict(zip(["cells"]+["_"+s for s in x.columns.tolist()],
# [x.index.tolist()]+x.to_numpy().T.tolist())),df_l))
names = np.arange(len(df_l)).astype(str).tolist()
# vals = todict
dictionary = dict(zip(names, df_l))
adata.uns["pseudotime_list"] = dictionary
if n_map > 1:
adata.obs["t_sd"] = (
pd.concat(
list(
map(
lambda x: pd.Series(x["t"]),
list(adata.uns["pseudotime_list"].values()),
)
),
axis=1,
)
.apply(np.std, axis=1)
.values
)
milestones = pd.Series(index=adata.obs_names)
for seg in graph["pp_seg"].n:
cell_seg = adata.obs.loc[adata.obs["seg"] == seg, "t"]
if len(cell_seg) > 0:
milestones[
cell_seg.index[
(cell_seg - min(cell_seg) - (max(cell_seg - min(cell_seg)) / 2) < 0)
]
] = graph["pp_seg"].loc[int(seg), "from"]
milestones[
cell_seg.index[
(cell_seg - min(cell_seg) - (max(cell_seg - min(cell_seg)) / 2) > 0)
]
] = graph["pp_seg"].loc[int(seg), "to"]
adata.obs["milestones"] = milestones
adata.obs.milestones = (
adata.obs.milestones.astype(int).astype("str").astype("category")
)
adata.uns["graph"]["milestones"] = dict(
zip(
adata.obs.milestones.cat.categories,
adata.obs.milestones.cat.categories.astype(int),
)
)
while reassign:
if "tmp_mil_col" not in locals():
break
if len(tmp_mil_col) != len(adata.obs.milestones.cat.categories):
break
rename_milestones(adata, tmp_mil)
if recolor:
adata.uns["milestones_colors"] = tmp_mil_col
reassign = False
logg.info(" finished", time=True, end=" " if settings.verbosity > 2 else "\n")
logg.hint(
"added\n"
" .obs['edge'] assigned edge.\n"
" .obs['t'] pseudotime value.\n"
" .obs['seg'] segment of the tree assigned.\n"
" .obs['milestones'] milestone assigned.\n"
" .uns['pseudotime_list'] list of cell projection from all mappings."
)
return adata if copy else None
def map_cells(graph, multi=False):
import igraph
g = igraph.Graph.Adjacency((graph["B"] > 0).tolist(), mode="undirected")
# Add edge weights and node labels.
g.es["weight"] = graph["B"][graph["B"].nonzero()]
if multi:
rrm = (
np.apply_along_axis(
lambda x: np.random.choice(np.arange(len(x)), size=1, p=x),
axis=1,
arr=graph["R"],
)
).T.flatten()
else:
rrm = np.apply_along_axis(np.argmax, axis=1, arr=graph["R"])
def map_on_edges(v):
vcells = np.argwhere(rrm == v)
if vcells.shape[0] > 0:
nv = np.array(g.neighborhood(v, order=1))
nvd = np.array(g.shortest_paths(v, nv)[0])
spi = np.apply_along_axis(np.argmax, axis=1, arr=graph["R"][vcells, nv[1:]])
ndf = pd.DataFrame(
{
"cell": vcells.flatten(),
"v0": v,
"v1": nv[1:][spi],
"d": nvd[1:][spi],
}
)
p0 = graph["R"][vcells, v].flatten()
p1 = np.array(
list(
map(lambda x: graph["R"][vcells[x], ndf.v1[x]], range(len(vcells)))
)
).flatten()
alpha = np.random.uniform(size=len(vcells))
f = np.abs(
(np.sqrt(alpha * p1 ** 2 + (1 - alpha) * p0 ** 2) - p0) / (p1 - p0)
)
ndf["t"] = (
graph["pp_info"].loc[ndf.v0, "time"].values
+ (
graph["pp_info"].loc[ndf.v1, "time"].values
- graph["pp_info"].loc[ndf.v0, "time"].values
)
* alpha
)
ndf["seg"] = 0
isinfork = (graph["pp_info"].loc[ndf.v0, "PP"].isin(graph["forks"])).values
ndf.loc[isinfork, "seg"] = (
graph["pp_info"].loc[ndf.loc[isinfork, "v1"], "seg"].values
)
ndf.loc[~isinfork, "seg"] = (
graph["pp_info"].loc[ndf.loc[~isinfork, "v0"], "seg"].values
)
return ndf
else:
return None
df = list(map(map_on_edges, range(graph["B"].shape[1])))
df = pd.concat(df)
df.sort_values("cell", inplace=True)
df.index = graph["cells_fitted"]
df["edge"] = df.apply(lambda x: str(int(x[1])) + "|" + str(int(x[2])), axis=1)
df.drop(["cell", "v0", "v1", "d"], axis=1, inplace=True)
return df
def rename_milestones(adata, new, copy: bool = False):
adata = adata.copy() if copy else adata
adata.uns["graph"]["milestones"] = dict(
zip(new, list(adata.uns["graph"]["milestones"].values()))
)
adata.obs.milestones = adata.obs.milestones.cat.rename_categories(new)
return adata if copy else None
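# Usage sketch (a minimal, assumed workflow; module paths are illustrative):
# after fitting the principal graph and selecting a root,
#
#   tl.root(adata, root)                      # or tl.roots(...) for multiple roots
#   tl.pseudotime(adata, n_jobs=4, n_map=10)
#   adata.obs[["t", "seg", "milestones"]].head()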
|
[
"pandas.Series",
"numpy.sqrt",
"numpy.apply_along_axis",
"numpy.argwhere",
"joblib.delayed",
"pandas.concat"
] |
[((3533, 3565), 'pandas.Series', 'pd.Series', ([], {'index': 'adata.obs_names'}), '(index=adata.obs_names)\n', (3542, 3565), True, 'import pandas as pd\n'), ((7500, 7513), 'pandas.concat', 'pd.concat', (['df'], {}), '(df)\n', (7509, 7513), True, 'import pandas as pd\n'), ((2709, 2751), 'pandas.concat', 'pd.concat', (['[adata.obs, df_summary]'], {'axis': '(1)'}), '([adata.obs, df_summary], axis=1)\n', (2718, 2751), True, 'import pandas as pd\n'), ((5702, 5756), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.argmax'], {'axis': '(1)', 'arr': "graph['R']"}), "(np.argmax, axis=1, arr=graph['R'])\n", (5721, 5756), True, 'import numpy as np\n'), ((5800, 5821), 'numpy.argwhere', 'np.argwhere', (['(rrm == v)'], {}), '(rrm == v)\n', (5811, 5821), True, 'import numpy as np\n'), ((5983, 6053), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.argmax'], {'axis': '(1)', 'arr': "graph['R'][vcells, nv[1:]]"}), "(np.argmax, axis=1, arr=graph['R'][vcells, nv[1:]])\n", (6002, 6053), True, 'import numpy as np\n'), ((2166, 2184), 'joblib.delayed', 'delayed', (['map_cells'], {}), '(map_cells)\n', (2173, 2184), False, 'from joblib import delayed\n'), ((6616, 6664), 'numpy.sqrt', 'np.sqrt', (['(alpha * p1 ** 2 + (1 - alpha) * p0 ** 2)'], {}), '(alpha * p1 ** 2 + (1 - alpha) * p0 ** 2)\n', (6623, 6664), True, 'import numpy as np\n'), ((3283, 3300), 'pandas.Series', 'pd.Series', (["x['t']"], {}), "(x['t'])\n", (3292, 3300), True, 'import pandas as pd\n')]
|
# -*- coding: utf-8 -*-
"""
Independent model based on Geodesic Regression model R_G
"""
import torch
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch.nn.functional as F
from dataGenerators import ImagesAll, TestImages, my_collate
from axisAngle import get_error2, geodesic_loss
from poseModels import model_3layer
from helperFunctions import classes
from featureModels import resnet_model
import numpy as np
import scipy.io as spio
import gc
import os
import time
import progressbar
import argparse
from tensorboardX import SummaryWriter
parser = argparse.ArgumentParser(description='Pure Regression Models')
parser.add_argument('--gpu_id', type=str, default='0')
parser.add_argument('--render_path', type=str, default='data/renderforcnn/')
parser.add_argument('--augmented_path', type=str, default='data/augmented2/')
parser.add_argument('--pascal3d_path', type=str, default='data/flipped_new/test/')
parser.add_argument('--save_str', type=str)
parser.add_argument('--num_workers', type=int, default=4)
parser.add_argument('--feature_network', type=str, default='resnet')
parser.add_argument('--N0', type=int, default=2048)
parser.add_argument('--N1', type=int, default=1000)
parser.add_argument('--N2', type=int, default=500)
parser.add_argument('--init_lr', type=float, default=1e-4)
parser.add_argument('--num_epochs', type=int, default=3)
args = parser.parse_args()
print(args)
# assign GPU
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
# save stuff here
results_file = os.path.join('results', args.save_str)
model_file = os.path.join('models', args.save_str + '.tar')
plots_file = os.path.join('plots', args.save_str)
log_dir = os.path.join('logs', args.save_str)
# relevant variables
ydata_type = 'axis_angle'
ndim = 3
num_classes = len(classes)
mse_loss = nn.MSELoss().cuda()
gve_loss = geodesic_loss().cuda()
ce_loss = nn.CrossEntropyLoss().cuda()
# DATA
# datasets
real_data = ImagesAll(args.augmented_path, 'real', ydata_type)
render_data = ImagesAll(args.render_path, 'render', ydata_type)
test_data = TestImages(args.pascal3d_path, ydata_type)
# setup data loaders
real_loader = DataLoader(real_data, batch_size=args.num_workers, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)
render_loader = DataLoader(render_data, batch_size=args.num_workers, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)
test_loader = DataLoader(test_data, batch_size=32)
print('Real: {0} \t Render: {1} \t Test: {2}'.format(len(real_loader), len(render_loader), len(test_loader)))
max_iterations = min(len(real_loader), len(render_loader))
# my_model
class IndependentModel(nn.Module):
def __init__(self):
super().__init__()
self.num_classes = num_classes
self.feature_model = resnet_model('resnet50', 'layer4').cuda()
self.pose_model = model_3layer(args.N0, args.N1, args.N2, ndim).cuda()
def forward(self, x):
x = self.feature_model(x)
x = self.pose_model(x)
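		# tanh bounds each component to (-1, 1); scaling by pi keeps the axis-angle output within (-pi, pi)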
x = np.pi*F.tanh(x)
return x
model = IndependentModel()
# print(model)
# loss and optimizer
optimizer = optim.Adam(model.parameters(), lr=args.init_lr)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)
# store stuff
writer = SummaryWriter(log_dir)
count = 0
val_loss = []
# OPTIMIZATION functions
def training_init():
global count, val_loss
model.train()
bar = progressbar.ProgressBar(max_value=max_iterations)
for i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):
# forward steps
xdata_real = Variable(sample_real['xdata'].cuda())
ydata_real = Variable(sample_real['ydata'].cuda())
output_real = model(xdata_real)
xdata_render = Variable(sample_render['xdata'].cuda())
ydata_render = Variable(sample_render['ydata'].cuda())
output_render = model(xdata_render)
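		# concatenate the real and rendered batches so a single loss supervises both domains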
output_pose = torch.cat((output_real, output_render))
gt_pose = torch.cat((ydata_real, ydata_render))
loss = mse_loss(output_pose, gt_pose)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# store
count += 1
writer.add_scalar('train_loss', loss.item(), count)
if i % 1000 == 0:
ytest, yhat_test, test_labels = testing()
spio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})
tmp_val_loss = get_error2(ytest, yhat_test, test_labels, num_classes)
writer.add_scalar('val_loss', tmp_val_loss, count)
val_loss.append(tmp_val_loss)
# cleanup
del xdata_real, xdata_render, ydata_real, ydata_render
del output_real, output_render, sample_real, sample_render, loss, output_pose, gt_pose
bar.update(i)
# stop
if i == max_iterations:
break
render_loader.dataset.shuffle_images()
real_loader.dataset.shuffle_images()
def training():
global count, val_loss
model.train()
bar = progressbar.ProgressBar(max_value=max_iterations)
for i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):
# forward steps
xdata_real = Variable(sample_real['xdata'].cuda())
ydata_real = Variable(sample_real['ydata'].cuda())
output_real = model(xdata_real)
xdata_render = Variable(sample_render['xdata'].cuda())
ydata_render = Variable(sample_render['ydata'].cuda())
output_render = model(xdata_render)
output_pose = torch.cat((output_real, output_render))
gt_pose = torch.cat((ydata_real, ydata_render))
loss = gve_loss(output_pose, gt_pose)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# store
count += 1
writer.add_scalar('train_loss', loss.item(), count)
if i % 1000 == 0:
ytest, yhat_test, test_labels = testing()
spio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})
tmp_val_loss = get_error2(ytest, yhat_test, test_labels, num_classes)
writer.add_scalar('val_loss', tmp_val_loss, count)
val_loss.append(tmp_val_loss)
# cleanup
del xdata_real, xdata_render, ydata_real, ydata_render
del output_real, output_render, sample_real, sample_render, loss, output_pose, gt_pose
bar.update(i)
# stop
if i == max_iterations:
break
render_loader.dataset.shuffle_images()
real_loader.dataset.shuffle_images()
def testing():
model.eval()
ypred = []
ytrue = []
labels = []
for i, sample in enumerate(test_loader):
xdata = Variable(sample['xdata'].cuda())
label = Variable(sample['label'].cuda())
output = model(xdata)
ypred.append(output.data.cpu().numpy())
ytrue.append(sample['ydata'].numpy())
labels.append(sample['label'].numpy())
del xdata, label, output, sample
gc.collect()
ypred = np.concatenate(ypred)
ytrue = np.concatenate(ytrue)
labels = np.concatenate(labels)
model.train()
return ytrue, ypred, labels
def save_checkpoint(filename):
torch.save(model.state_dict(), filename)
# initialization
training_init()
ytest, yhat_test, test_labels = testing()
print('\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))
for epoch in range(args.num_epochs):
tic = time.time()
scheduler.step()
# training step
training()
# save model at end of epoch
save_checkpoint(model_file)
# validation
ytest, yhat_test, test_labels = testing()
print('\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))
# time and output
toc = time.time() - tic
print('Epoch: {0} done in time {1}s'.format(epoch, toc))
# cleanup
gc.collect()
writer.close()
val_loss = np.stack(val_loss)
spio.savemat(plots_file, {'val_loss': val_loss})
# evaluate the model
ytest, yhat_test, test_labels = testing()
print('\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))
spio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})
|
[
"poseModels.model_3layer",
"scipy.io.savemat",
"torch.nn.CrossEntropyLoss",
"torch.nn.MSELoss",
"progressbar.ProgressBar",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"axisAngle.geodesic_loss",
"numpy.stack",
"numpy.concatenate",
"featureModels.resnet_model",
"torch.nn.functional.tanh",
"gc.collect",
"time.time",
"torch.cat",
"dataGenerators.TestImages",
"axisAngle.get_error2",
"os.path.join",
"torch.optim.lr_scheduler.StepLR",
"dataGenerators.ImagesAll",
"torch.utils.data.DataLoader"
] |
[((620, 681), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Pure Regression Models"""'}), "(description='Pure Regression Models')\n", (643, 681), False, 'import argparse\n'), ((1552, 1590), 'os.path.join', 'os.path.join', (['"""results"""', 'args.save_str'], {}), "('results', args.save_str)\n", (1564, 1590), False, 'import os\n'), ((1604, 1650), 'os.path.join', 'os.path.join', (['"""models"""', "(args.save_str + '.tar')"], {}), "('models', args.save_str + '.tar')\n", (1616, 1650), False, 'import os\n'), ((1664, 1700), 'os.path.join', 'os.path.join', (['"""plots"""', 'args.save_str'], {}), "('plots', args.save_str)\n", (1676, 1700), False, 'import os\n'), ((1711, 1746), 'os.path.join', 'os.path.join', (['"""logs"""', 'args.save_str'], {}), "('logs', args.save_str)\n", (1723, 1746), False, 'import os\n'), ((1967, 2017), 'dataGenerators.ImagesAll', 'ImagesAll', (['args.augmented_path', '"""real"""', 'ydata_type'], {}), "(args.augmented_path, 'real', ydata_type)\n", (1976, 2017), False, 'from dataGenerators import ImagesAll, TestImages, my_collate\n'), ((2032, 2081), 'dataGenerators.ImagesAll', 'ImagesAll', (['args.render_path', '"""render"""', 'ydata_type'], {}), "(args.render_path, 'render', ydata_type)\n", (2041, 2081), False, 'from dataGenerators import ImagesAll, TestImages, my_collate\n'), ((2094, 2136), 'dataGenerators.TestImages', 'TestImages', (['args.pascal3d_path', 'ydata_type'], {}), '(args.pascal3d_path, ydata_type)\n', (2104, 2136), False, 'from dataGenerators import ImagesAll, TestImages, my_collate\n'), ((2172, 2310), 'torch.utils.data.DataLoader', 'DataLoader', (['real_data'], {'batch_size': 'args.num_workers', 'shuffle': '(True)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)', 'collate_fn': 'my_collate'}), '(real_data, batch_size=args.num_workers, shuffle=True,\n num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)\n', (2182, 2310), False, 'from torch.utils.data import DataLoader\n'), ((2323, 2463), 'torch.utils.data.DataLoader', 'DataLoader', (['render_data'], {'batch_size': 'args.num_workers', 'shuffle': '(True)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)', 'collate_fn': 'my_collate'}), '(render_data, batch_size=args.num_workers, shuffle=True,\n num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)\n', (2333, 2463), False, 'from torch.utils.data import DataLoader\n'), ((2474, 2510), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': '(32)'}), '(test_data, batch_size=32)\n', (2484, 2510), False, 'from torch.utils.data import DataLoader\n'), ((3188, 3248), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(1)', 'gamma': '(0.1)'}), '(optimizer, step_size=1, gamma=0.1)\n', (3213, 3248), False, 'from torch import nn, optim\n'), ((3272, 3294), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['log_dir'], {}), '(log_dir)\n', (3285, 3294), False, 'from tensorboardX import SummaryWriter\n'), ((7405, 7423), 'numpy.stack', 'np.stack', (['val_loss'], {}), '(val_loss)\n', (7413, 7423), True, 'import numpy as np\n'), ((7424, 7472), 'scipy.io.savemat', 'spio.savemat', (['plots_file', "{'val_loss': val_loss}"], {}), "(plots_file, {'val_loss': val_loss})\n", (7436, 7472), True, 'import scipy.io as spio\n'), ((7623, 7723), 'scipy.io.savemat', 'spio.savemat', (['results_file', "{'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels}"], {}), "(results_file, {'ytest': ytest, 'yhat_test': yhat_test,\n 'test_labels': 
test_labels})\n", (7635, 7723), True, 'import scipy.io as spio\n'), ((3413, 3462), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {'max_value': 'max_iterations'}), '(max_value=max_iterations)\n', (3436, 3462), False, 'import progressbar\n'), ((4826, 4875), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {'max_value': 'max_iterations'}), '(max_value=max_iterations)\n', (4849, 4875), False, 'import progressbar\n'), ((6577, 6598), 'numpy.concatenate', 'np.concatenate', (['ypred'], {}), '(ypred)\n', (6591, 6598), True, 'import numpy as np\n'), ((6608, 6629), 'numpy.concatenate', 'np.concatenate', (['ytrue'], {}), '(ytrue)\n', (6622, 6629), True, 'import numpy as np\n'), ((6640, 6662), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (6654, 6662), True, 'import numpy as np\n'), ((6990, 7001), 'time.time', 'time.time', ([], {}), '()\n', (6999, 7001), False, 'import time\n'), ((7366, 7378), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7376, 7378), False, 'import gc\n'), ((1843, 1855), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1853, 1855), False, 'from torch import nn, optim\n'), ((1874, 1889), 'axisAngle.geodesic_loss', 'geodesic_loss', ([], {}), '()\n', (1887, 1889), False, 'from axisAngle import get_error2, geodesic_loss\n'), ((1907, 1928), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1926, 1928), False, 'from torch import nn, optim\n'), ((3873, 3912), 'torch.cat', 'torch.cat', (['(output_real, output_render)'], {}), '((output_real, output_render))\n', (3882, 3912), False, 'import torch\n'), ((3925, 3962), 'torch.cat', 'torch.cat', (['(ydata_real, ydata_render)'], {}), '((ydata_real, ydata_render))\n', (3934, 3962), False, 'import torch\n'), ((5286, 5325), 'torch.cat', 'torch.cat', (['(output_real, output_render)'], {}), '((output_real, output_render))\n', (5295, 5325), False, 'import torch\n'), ((5338, 5375), 'torch.cat', 'torch.cat', (['(ydata_real, ydata_render)'], {}), '((ydata_real, ydata_render))\n', (5347, 5375), False, 'import torch\n'), ((6555, 6567), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6565, 6567), False, 'import gc\n'), ((6888, 6942), 'axisAngle.get_error2', 'get_error2', (['ytest', 'yhat_test', 'test_labels', 'num_classes'], {}), '(ytest, yhat_test, test_labels, num_classes)\n', (6898, 6942), False, 'from axisAngle import get_error2, geodesic_loss\n'), ((7278, 7289), 'time.time', 'time.time', ([], {}), '()\n', (7287, 7289), False, 'import time\n'), ((7566, 7620), 'axisAngle.get_error2', 'get_error2', (['ytest', 'yhat_test', 'test_labels', 'num_classes'], {}), '(ytest, yhat_test, test_labels, num_classes)\n', (7576, 7620), False, 'from axisAngle import get_error2, geodesic_loss\n'), ((3030, 3039), 'torch.nn.functional.tanh', 'F.tanh', (['x'], {}), '(x)\n', (3036, 3039), True, 'import torch.nn.functional as F\n'), ((4209, 4309), 'scipy.io.savemat', 'spio.savemat', (['results_file', "{'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels}"], {}), "(results_file, {'ytest': ytest, 'yhat_test': yhat_test,\n 'test_labels': test_labels})\n", (4221, 4309), True, 'import scipy.io as spio\n'), ((4324, 4378), 'axisAngle.get_error2', 'get_error2', (['ytest', 'yhat_test', 'test_labels', 'num_classes'], {}), '(ytest, yhat_test, test_labels, num_classes)\n', (4334, 4378), False, 'from axisAngle import get_error2, geodesic_loss\n'), ((5622, 5722), 'scipy.io.savemat', 'spio.savemat', (['results_file', "{'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels}"], {}), "(results_file, 
{'ytest': ytest, 'yhat_test': yhat_test,\n 'test_labels': test_labels})\n", (5634, 5722), True, 'import scipy.io as spio\n'), ((5737, 5791), 'axisAngle.get_error2', 'get_error2', (['ytest', 'yhat_test', 'test_labels', 'num_classes'], {}), '(ytest, yhat_test, test_labels, num_classes)\n', (5747, 5791), False, 'from axisAngle import get_error2, geodesic_loss\n'), ((7195, 7249), 'axisAngle.get_error2', 'get_error2', (['ytest', 'yhat_test', 'test_labels', 'num_classes'], {}), '(ytest, yhat_test, test_labels, num_classes)\n', (7205, 7249), False, 'from axisAngle import get_error2, geodesic_loss\n'), ((2826, 2860), 'featureModels.resnet_model', 'resnet_model', (['"""resnet50"""', '"""layer4"""'], {}), "('resnet50', 'layer4')\n", (2838, 2860), False, 'from featureModels import resnet_model\n'), ((2888, 2933), 'poseModels.model_3layer', 'model_3layer', (['args.N0', 'args.N1', 'args.N2', 'ndim'], {}), '(args.N0, args.N1, args.N2, ndim)\n', (2900, 2933), False, 'from poseModels import model_3layer\n')]
|
# from sklearn.cluster._kmeans import *
import copy
from typing import Union
import torch
import torch.nn as nn
from sklearn.cluster._robustq import *
from .quantizer import Quantizer
__all__ = ['MiniBatchRobustqTorch', 'RobustqTorch']
class ClusterQuantizerBase(Quantizer):
def __init__(self, n_feature=1, n_clusters=8, name='',
quant_fun=lambda x: x):
super(ClusterQuantizerBase, self).__init__()
self.n_clusters = n_clusters
self.name = name
        # empty placeholder; a non-empty labels_ buffer indicates the quantizer has been fitted or loaded
self.register_buffer("labels_", torch.zeros((0, ),dtype=torch.long))
        # zero-initialized centers; all-zero centers mark a quantizer that has not been initialized yet
self.register_buffer("cluster_centers_", torch.zeros(n_clusters, n_feature))
self.quant_fun = quant_fun
def reset(self):
super().reset()
# self.labels_.zero_()
self.register_buffer("labels_", torch.zeros((0, ),dtype=torch.long))
self.cluster_centers_.data.copy_(torch.linspace(-1, 1, steps=self.n_clusters).view(-1, 1))
def forward(self, inputs):
        output = self.quant_fun(inputs)
return output
def extra_repr(self) -> str:
return 'name={},cluster={}'.format(self.name, self.n_clusters)
@staticmethod
def quant_calib(net,wrapped_modules,calib_loader):
calib_layers=[]
n_calibration_steps=1
for name,module in wrapped_modules.items():
module.mode='calibration_forward'
calib_layers.append(name)
n_calibration_steps=max(n_calibration_steps,module.quantizer.n_calibration_steps)
print(f"prepare calibration for {calib_layers}\n n_calibration_steps={n_calibration_steps}")
for step in range(n_calibration_steps):
print(f"Start calibration step={step+1}")
for name,module in wrapped_modules.items():
module.quantizer.calibration_step=step+1
with torch.no_grad():
for inp,target in calib_loader:
inp=inp.cuda()
net(inp)
for name,module in wrapped_modules.items():
print(f"{name}: {module.quantizer}")
module.mode='qat_forward'
print("calibration finished")
class RobustqTorch(ClusterQuantizerBase):
def __init__(self, # data_or_size,
n_feature=1, n_clusters=8, name='',
alpha=0.1, gamma=1.0, q_level_init='uniform', **kwargs):
super(RobustqTorch, self).__init__(n_feature, n_clusters=n_clusters, name=name)
self.alpha = alpha
self.gamma = gamma
self.kmeans = RobustQ(n_clusters=n_clusters, **kwargs)
# if hasattr(data_or_size, '__array__'):
# data = data_or_size
# else:
# data = None
# # if isinstance(data, torch.Tensor):
# # data = data.detach().clone().cpu().view(-1, 1).numpy()
# if isinstance(data, np.ndarray):
# data = self.label_.new_tensor(torch.from_numpy(data))
# self.init_layer_cluster_center(data, n_clusters, q_level_init)
self.init_layer_cluster_center(None, n_clusters, q_level_init)
def init_layer_cluster_center(self, data, n_clusters, method="uniform"):
if method == "uniform" or data is None:
self.cluster_centers_.data.copy_(torch.linspace(-1, 1, steps=n_clusters).view(-1, 1))
self.kmeans.cluster_centers_ = self.cluster_centers_.data.cpu().numpy()
else:
self.fit(data, tol=1e-2)
def reset(self):
super().reset()
self.kmeans.cluster_centers_ = self.cluster_centers_.data.cpu().numpy()
def fit(self, X: torch.Tensor, y=None, sample_weight=None, n_init=None, init=None, tol=None):
# 210626 data copy optimization
# data = X.detach().clone().view(-1, 1)
data = X.view(-1, 1)
if X.requires_grad:
data = data.detach()
data = data.cpu().numpy()
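        # temporarily override n_init / init / tol for this call and restore the originals afterwards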
bak = copy.deepcopy([self.kmeans.n_init, self.kmeans.init, self.kmeans.tol])
self.kmeans.n_init, self.kmeans.init, self.kmeans.tol = [new if new is not None else old
for new, old in zip((n_init, init, tol), bak)]
self.kmeans.fit(data, y=y, sample_weight=sample_weight, var_std=self.alpha, var_weight=self.gamma)
# self.labels_.data.copy_(torch.from_numpy(self.kmeans.labels_))
self.register_buffer("labels_", torch.as_tensor(self.kmeans.labels_,dtype=torch.long))
self.cluster_centers_.data.copy_(torch.from_numpy(self.kmeans.cluster_centers_))
self.kmeans.n_init, self.kmeans.init, self.kmeans.tol = bak
def predict(self, X, sample_weight=None):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to predict.
sample_weight : array-like, shape (n_samples,), optional
The weights for each observation in X. If None, all observations
are assigned equal weight (default: None).
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
# 210626 data copy optimization
# data = X.detach().clone().view(-1, 1)
data = X.view(-1, 1)
if X.requires_grad:
data = data.detach()
data = data.cpu().numpy()
return self.kmeans.predict(data, sample_weight, var_std=self.alpha, var_weight=self.gamma)
def forward(self, inputs):
        # To avoid a faulty fit in the initial iterations
# if (self.cluster_centers_.data == 0).all():
# # use uniform quantization to avoid further fitness with bad data
# self.init_layer_cluster_center(inputs, self.weight_qbit)
if self.calibration and not self.calibrated:
self.fit(inputs)
labels = self.labels_
weight_quan = self.cluster_centers_[:, 0][labels].view(inputs.shape)
elif self.training:
# label should change as weights are updated
labels = self.predict(inputs)
weight_quan_temp = self.cluster_centers_[:, 0][labels].view(inputs.shape)
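            # straight-through estimator: the forward value equals the quantized weights
            # while gradients flow back to the full-precision inputs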
weight_quan = inputs - inputs.detach() + weight_quan_temp
else:
            # guard against loading a model that was never fitted
# if len(self.labels_.data) == 0:
# # self.labels_.data.copy_(torch.from_numpy(self.predict(inputs)).view(-1))
# self.register_buffer("labels_", torch.from_numpy(self.predict(inputs)).view(-1))
assert len(self.labels_.data)
labels = self.labels_
weight_quan_temp = self.cluster_centers_[:, 0][labels].view(inputs.shape)
weight_quan = weight_quan_temp
return weight_quan
def extra_repr(self) -> str:
return super(RobustqTorch, self).extra_repr() + " gamma:{}, alpha:{} )".format(self.gamma, self.alpha)
class MiniBatchRobustqTorch(RobustqTorch):
def __init__(self, # batch_size, # data_or_size,
n_feature=1, n_clusters=8, name='',
alpha=0.1, gamma=1.0, q_level_init='uniform', **kwargs):
        # drop batch_size before calling the parent constructor (its RobustQ backend
        # does not accept it), but keep the value for the mini-batch estimator below
        batch_size = kwargs.pop("batch_size", None)
        super().__init__(n_feature=n_feature, n_clusters=n_clusters, name=name,
                         alpha=alpha, gamma=gamma, q_level_init=q_level_init, **kwargs)
        if batch_size is not None:
            kwargs["batch_size"] = batch_size
        self.kmeans = MiniBatchRobustQ(n_clusters=n_clusters, **kwargs)
# if hasattr(data_or_size, '__array__'):
# data = data_or_size
# else:
# data = None
# # if isinstance(data, torch.Tensor):
# # data = data.detach().clone().cpu().view(-1, 1).numpy()
# if isinstance(data, np.ndarray):
# data = self.label_.new_tensor(torch.from_numpy(data))
# self.init_layer_cluster_center(data, n_clusters, q_level_init)
self.init_layer_cluster_center(None, n_clusters, q_level_init)
def partial_fit(self, X, y=None, sample_weight=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Coordinates of the data points to cluster. It must be noted that
X will be copied if it is not C-contiguous.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
The weights for each observation in X. If None, all observations
are assigned equal weight (default: None).
Returns
-------
self
"""
# 210626 data copy optimization
# data = X.detach().clone().view(-1, 1)
data = X.view(-1, 1)
if X.requires_grad:
data = data.detach()
data = data.cpu().numpy()
self.kmeans.partial_fit(data, y, sample_weight, var_std=self.alpha, var_weight=self.gamma)
# self.labels_.data.copy_(torch.from_numpy(self.kmeans.labels_))
self.register_buffer("labels_", torch.as_tensor(self.kmeans.labels_,dtype=torch.long))
self.cluster_centers_.data.copy_(torch.from_numpy(self.kmeans.cluster_centers_))
def extra_repr(self) -> str:
return super(MiniBatchRobustqTorch, self).extra_repr() + " gamma:{}, alpha:{} )".format(self.gamma, self.alpha)
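# Usage sketch (illustrative only; `w` stands for an arbitrary full-precision
# weight tensor and is not defined in this file):
#     q = RobustqTorch(n_feature=1, n_clusters=7, alpha=0.12, gamma=0.0)
#     q.fit(w)      # cluster the flattened weights once
#     w_q = q(w)    # training mode re-predicts labels and keeps straight-through gradients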
# TODO: Use close package
def insert_robust_quantizer(module: nn.Module, quantizer: Union[RobustqTorch, MiniBatchRobustqTorch], alpha, gamma):
for k, m in module.named_modules():
if isinstance(m, (nn.Conv2d, nn.Linear)):
n_samples = m.weight.numel()
            n_clusters = 2 ** m.quantizer.w_bit - 1
batch_factor = 800
# if q_type == 'robust_batch':
if isinstance(quantizer, MiniBatchRobustqTorch):
m.quantizer.w_quantizer = MiniBatchRobustqTorch(n_feature=1,
n_clusters=n_clusters,
alpha=alpha, gamma=gamma,
batch_size=n_clusters * batch_factor
if n_clusters * batch_factor < int(0.3 * n_samples)
else int(0.2 * n_samples),
n_init=1, max_iter=30, random_state=0,
q_level_init="uniform"
)
# elif q_type == 'robust':
elif isinstance(quantizer, RobustqTorch):
m.quantizer.w_quantizer = RobustqTorch(n_feature=1,
n_clusters=n_clusters,
alpha=alpha, gamma=gamma,
n_init=1, max_iter=30, random_state=0,
q_level_init="uniform"
)
if __name__ == '__main__':
import numpy as np
np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
torch.set_printoptions(3)
import sklearn
sklearn.show_versions()
a = {}
# vgg = models.vgg11(pretrained=True)
# if torch.cuda.is_available():
# vgg.cuda()
# a['state_dict'] = vgg.state_dict()
a = torch.load("plot/checkpoints/resnet18_batch256_imagenet_20200708-34ab8f90.pth",
map_location=torch.device('cpu') if not torch.cuda.is_available() else torch.device('cuda'))
num_class = 7
batch_factor = 800
gamma = 0.
train_flg = False
robustq_torch_batch = []
robustq_sklean_batch = []
robustq_torch = []
robustq_sklean = []
kmeans_sklean = []
kmeans_sklean_batch = []
for n, v in a['state_dict'].items():
if "weight" in n:
n_samples = v.numel()
if n_samples > 1024:
print(n_samples)
# from sklearn
kmeans_sklean.append(
KMeans(n_clusters=num_class, n_init=1, max_iter=30, random_state=0, algorithm="full"))
kmeans_sklean_batch.append(
MiniBatchKMeans(n_clusters=num_class, n_init=1, max_iter=30, random_state=0, # tol=1e-4,
batch_size=num_class * batch_factor if num_class * 300 < int(
0.3 * n_samples) else int(0.2 * n_samples)))
# from Robustq
robustq_sklean.append(
RobustQ(n_clusters=num_class, n_init=1, max_iter=30, random_state=0, algorithm="full"))
robustq_sklean_batch.append(MiniBatchRobustQ(n_clusters=num_class,
n_init=1, max_iter=30, random_state=0, # tol=1e-4,
batch_size=num_class * batch_factor
if num_class * batch_factor < int(0.3 * n_samples)
else int(0.2 * n_samples)))
# from clusterq
robustq_torch_batch_t = MiniBatchRobustqTorch(n_feature=1,
n_clusters=num_class,
alpha=0.12, gamma=gamma,
batch_size=num_class * batch_factor
if num_class * batch_factor < int(0.3 * n_samples)
else int(0.2 * n_samples),
n_init=1, max_iter=30, random_state=0,
q_level_init="uniform"
)
if not train_flg:
robustq_torch_batch_t.eval()
robustq_torch_t = RobustqTorch(n_feature=1,
n_clusters=num_class,
alpha=0.12, gamma=gamma,
n_init=1, max_iter=30, random_state=0,
q_level_init="uniform"
)
if not train_flg:
robustq_torch_t.eval()
if torch.cuda.is_available():
robustq_torch_batch_t.cuda()
robustq_torch_t.cuda()
robustq_torch.append(robustq_torch_t)
robustq_torch_batch.append(robustq_torch_batch_t)
import sys
sys.path.append("../")
from utee.misc import time_measurement
@time_measurement(False, 0, 0)
def f1(quantizer_list, is_np=False):
print("start\n")
ix = 0
for n, v in a['state_dict'].items():
if "weight" in n:
n_samples = v.numel()
if n_samples > 1024:
data_o = v.detach().view(-1, 1)
if is_np:
data = data_o.cpu().numpy()
else:
data = data_o.cuda()
quantizer_list[ix].fit(data)
data_o = v.detach().view(-1, 1)
if is_np:
datac = data_o.cpu().numpy()
t = (datac != data)
tt = t if not isinstance(t, np.ndarray) else t.any()
# print("data is modified:", tt)
else:
datac = data_o.cuda()
t = (datac != data)
tt = t.any().item()
# print("data is modified:", tt)
if tt:
print("max difference:", ((datac - data_o)[t]).max())
ix += 1
# import visdom
#
# vis = visdom.Visdom()
class Visdom():
def bar(self, *args, **kwargs):
pass
def line(self, *args, **kwargs):
pass
vis = Visdom()
def plot(quantizer, name="None", is_np=False):
print(quantizer.labels_)
print(quantizer.cluster_centers_)
# ------------- visdom draw --------------
# histogram of weight distribution
qw = quantizer.cluster_centers_[:, 0][quantizer.labels_] # .view(weight.shape)
qw_hist = []
if is_np:
qw_v = np.unique(qw)
for v in qw_v:
qw_hist.append((qw == v).sum())
else:
qw_v = qw.unique()
for v in qw_v:
qw_hist.append((qw == v).sum().item())
vis.bar(torch.tensor(qw_hist), qw_v, win=name + " hist",
opts=dict(title=name + " hist" + ' gamma={}'.format(gamma)))
# vis.histogram(qw, win=name+" hist",
# opts=dict(title=name+" hist"+' gamma={}'.format(gamma)))
# transform function
x = torch.arange(-1., 1., 0.01)
print(x.shape)
if is_np:
x = x.view(-1, 1).cpu().numpy()
elif torch.cuda.is_available():
x = x.view(-1, 1).cuda()
else:
x = x.view(-1, 1)
level1 = quantizer.cluster_centers_[:, 0][quantizer.predict(x)]
# print(level1.shape, x.shape)
vis.line(Y=level1, X=x.reshape(-1),
win=name,
opts=dict(title=name))
@time_measurement(False, 0, 0)
def get_q_loss(quantizer_list, is_np=False):
ix = 0
loss = 0
for n, v in a['state_dict'].items():
if "weight" in n:
n_samples = v.numel()
if n_samples > 1024:
if is_np:
data = v.detach().view(-1, 1)
data = data.cpu().numpy()
q_data = quantizer_list[ix].cluster_centers_[:, 0][quantizer_list[ix].predict(data)].reshape(
data.shape)
else:
data = v
q_data = quantizer_list[ix](data).reshape(data.shape)
loss += ((q_data - data) ** 2).sum()
# print(n)
ix += 1
print(loss)
print("=======test kmeans_sklean======\n")
f1(kmeans_sklean, True)
get_q_loss(kmeans_sklean, True)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# data = data.cpu().numpy()
# q_data = kmeans_sklean[ix].cluster_centers_[:, 0][kmeans_sklean[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
print("=======test kmeans_sklean_batch======\n")
f1(kmeans_sklean_batch, True)
get_q_loss(kmeans_sklean_batch, True)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# data = data.cpu().numpy()
# q_data = kmeans_sklean_batch[ix].cluster_centers_[:, 0][kmeans_sklean_batch[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
print("=======test robustq_sklean======\n")
f1(robustq_sklean, True)
get_q_loss(robustq_sklean, True)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# data = data.cpu().numpy()
# q_data = robustq_sklean[ix].cluster_centers_[:, 0][robustq_sklean[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
plot(robustq_sklean[0], 'robustq_sklean', True)
print("=======test robustq_sklean_batch======\n")
f1(robustq_sklean_batch, True)
get_q_loss(robustq_sklean_batch, True)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# data = data.cpu().numpy()
# q_data = robustq_sklean_batch[ix].cluster_centers_[:, 0][robustq_sklean_batch[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
plot(robustq_sklean_batch[0], 'robustq_sklean_batch', True)
print("=======test robustq_torch======\n")
f1(robustq_torch)
get_q_loss(robustq_torch)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v
# q_data = robustq_torch[ix].cluster_centers_[:, 0][robustq_torch[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
plot(robustq_torch[0], 'robustq_torch')
print("=======test robustq_torch_batch======\n")
f1(robustq_torch_batch)
get_q_loss(robustq_torch_batch)
# ix = 0
# loss = 0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v
# q_data = robustq_torch_batch[ix].cluster_centers_[:, 0][robustq_torch_batch[ix].predict(data)].reshape(
# data.shape)
# loss += ((q_data - data) ** 2).sum()
# # print(n)
# ix += 1
# print(loss)
plot(robustq_torch_batch[0], 'robustq_torch_batch')
# print("======= cudalib ======\n")
# from libKMCUDA import kmeans_cuda
# clq_temp = []
# import time
# t_s = time.monotonic()
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# samples = data.cpu().numpy()
# centroids, assignments = kmeans_cuda(samples, num_class, )
# clq_temp.append([centroids, assignments])
# t_e = time.monotonic()
# s, ms = divmod((t_e - t_s) * 1000, 1000)
# m, s = divmod(s, 60)
# h, m = divmod(m, 60)
# print("%d:%02d:%02d:%03d" % (h, m, s, ms))
#
# t_s = time.monotonic()
# ix = 0
# loss=0
# for n, v in a['state_dict'].items():
# if "weight" in n:
# n_samples = v.numel()
# if n_samples > 1024:
# data = v.detach().clone().view(-1, 1)
# data = data.cpu().numpy()
# centroids, assignments = clq_temp[ix]
# q_data = centroids[:, 0][assignments].reshape(data.shape)
# loss += ((q_data - data) ** 2).sum()
# ix +=1
# t_e = time.monotonic()
# s, ms = divmod((t_e - t_s) * 1000, 1000)
# m, s = divmod(s, 60)
# h, m = divmod(m, 60)
# print("%d:%02d:%02d:%03d" % (h, m, s, ms))
# print(loss)
print("=======test uniform======\n")
from module.quantization.quant_functions import linear_quantize, compute_integral_part
bits = 3
print("start\n")
ix = 0
q2_loss = 0
q2_list = []
for n, v in a['state_dict'].items():
if "weight" in n:
n_samples = v.numel()
if n_samples > 1024:
w = v.detach()
sf = bits - 1. - compute_integral_part(w, overflow_rate=0)
q2 = linear_quantize(w, sf, bits=bits)
q2_list.append(q2)
q2_loss += ((q2 - w)**2).sum()
ix += 1
print(q2_loss)
# vis.histogram(q2_list[0].view(-1), win='uniform'+" hist",
# opts=dict(title='uniform'+" hist"))
qw = q2_list[0]
qw_v = qw.unique()
qw_hist = []
for v in qw_v:
qw_hist.append((qw == v).sum().item())
vis.bar(torch.tensor(qw_hist), qw_v, win='uniform' + " hist",
opts=dict(title='uniform' + " hist"))
# 2021/08/31: removed duplicated code of MiniBatchRobustqTorch and RobustqTorch,
# 2021/08/31: MiniBatchRobustqTorch now inherits these functions from RobustqTorch.
|
[
"utee.misc.time_measurement",
"module.quantization.quant_functions.linear_quantize",
"module.quantization.quant_functions.compute_integral_part",
"torch.as_tensor",
"numpy.unique",
"torch.set_printoptions",
"torch.device",
"sklearn.show_versions",
"torch.from_numpy",
"torch.tensor",
"torch.cuda.is_available",
"torch.linspace",
"copy.deepcopy",
"torch.no_grad",
"sys.path.append",
"torch.zeros",
"torch.arange",
"numpy.set_printoptions"
] |
[((11625, 11684), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'formatter': "{'float': '{: 0.3f}'.format}"}), "(formatter={'float': '{: 0.3f}'.format})\n", (11644, 11684), True, 'import numpy as np\n'), ((11689, 11714), 'torch.set_printoptions', 'torch.set_printoptions', (['(3)'], {}), '(3)\n', (11711, 11714), False, 'import torch\n'), ((11740, 11763), 'sklearn.show_versions', 'sklearn.show_versions', ([], {}), '()\n', (11761, 11763), False, 'import sklearn\n'), ((15380, 15402), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (15395, 15402), False, 'import sys\n'), ((15453, 15482), 'utee.misc.time_measurement', 'time_measurement', (['(False)', '(0)', '(0)'], {}), '(False, 0, 0)\n', (15469, 15482), False, 'from utee.misc import time_measurement\n'), ((18207, 18236), 'utee.misc.time_measurement', 'time_measurement', (['(False)', '(0)', '(0)'], {}), '(False, 0, 0)\n', (18223, 18236), False, 'from utee.misc import time_measurement\n'), ((3984, 4054), 'copy.deepcopy', 'copy.deepcopy', (['[self.kmeans.n_init, self.kmeans.init, self.kmeans.tol]'], {}), '([self.kmeans.n_init, self.kmeans.init, self.kmeans.tol])\n', (3997, 4054), False, 'import copy\n'), ((17742, 17771), 'torch.arange', 'torch.arange', (['(-1.0)', '(1.0)', '(0.01)'], {}), '(-1.0, 1.0, 0.01)\n', (17754, 17771), False, 'import torch\n'), ((25415, 25436), 'torch.tensor', 'torch.tensor', (['qw_hist'], {}), '(qw_hist)\n', (25427, 25436), False, 'import torch\n'), ((594, 629), 'torch.zeros', 'torch.zeros', (['(0,)'], {'dtype': 'torch.long'}), '((0,), dtype=torch.long)\n', (605, 629), False, 'import torch\n'), ((738, 772), 'torch.zeros', 'torch.zeros', (['n_clusters', 'n_feature'], {}), '(n_clusters, n_feature)\n', (749, 772), False, 'import torch\n'), ((926, 961), 'torch.zeros', 'torch.zeros', (['(0,)'], {'dtype': 'torch.long'}), '((0,), dtype=torch.long)\n', (937, 961), False, 'import torch\n'), ((4484, 4538), 'torch.as_tensor', 'torch.as_tensor', (['self.kmeans.labels_'], {'dtype': 'torch.long'}), '(self.kmeans.labels_, dtype=torch.long)\n', (4499, 4538), False, 'import torch\n'), ((4580, 4626), 'torch.from_numpy', 'torch.from_numpy', (['self.kmeans.cluster_centers_'], {}), '(self.kmeans.cluster_centers_)\n', (4596, 4626), False, 'import torch\n'), ((9440, 9494), 'torch.as_tensor', 'torch.as_tensor', (['self.kmeans.labels_'], {'dtype': 'torch.long'}), '(self.kmeans.labels_, dtype=torch.long)\n', (9455, 9494), False, 'import torch\n'), ((9536, 9582), 'torch.from_numpy', 'torch.from_numpy', (['self.kmeans.cluster_centers_'], {}), '(self.kmeans.cluster_centers_)\n', (9552, 9582), False, 'import torch\n'), ((17216, 17229), 'numpy.unique', 'np.unique', (['qw'], {}), '(qw)\n', (17225, 17229), True, 'import numpy as np\n'), ((17448, 17469), 'torch.tensor', 'torch.tensor', (['qw_hist'], {}), '(qw_hist)\n', (17460, 17469), False, 'import torch\n'), ((17868, 17893), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (17891, 17893), False, 'import torch\n'), ((1953, 1968), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1966, 1968), False, 'import torch\n'), ((12036, 12055), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (12048, 12055), False, 'import torch\n'), ((12094, 12114), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (12106, 12114), False, 'import torch\n'), ((15121, 15146), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15144, 15146), False, 'import torch\n'), ((24998, 25031), 
'module.quantization.quant_functions.linear_quantize', 'linear_quantize', (['w', 'sf'], {'bits': 'bits'}), '(w, sf, bits=bits)\n', (25013, 25031), False, 'from module.quantization.quant_functions import linear_quantize, compute_integral_part\n'), ((1004, 1048), 'torch.linspace', 'torch.linspace', (['(-1)', '(1)'], {'steps': 'self.n_clusters'}), '(-1, 1, steps=self.n_clusters)\n', (1018, 1048), False, 'import torch\n'), ((12063, 12088), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12086, 12088), False, 'import torch\n'), ((24935, 24976), 'module.quantization.quant_functions.compute_integral_part', 'compute_integral_part', (['w'], {'overflow_rate': '(0)'}), '(w, overflow_rate=0)\n', (24956, 24976), False, 'from module.quantization.quant_functions import linear_quantize, compute_integral_part\n'), ((3345, 3384), 'torch.linspace', 'torch.linspace', (['(-1)', '(1)'], {'steps': 'n_clusters'}), '(-1, 1, steps=n_clusters)\n', (3359, 3384), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
import torch
import argparse
import numpy as np
from model import PointCloudNet
from code.utils import fp_sampling, knn_patch, helper_function
import os
parser = argparse.ArgumentParser()
parser.add_argument('--num_points', default=1024, type=int,
help='Number of points per patch')
parser.add_argument('--patch_num_ratio', default=4, type=int,
help='Number of points per patch')
parser.add_argument('--trained_model', type=str,
help='Trained model directory')
parser.add_argument('--test_file', type=str,
help='XYZ file for testing')
FLAGS = parser.parse_args()
if not os.path.exists("../results"):
os.mkdir("../results")
NUM_POINTS = FLAGS.num_points
PATCH_NUM_RATIO = FLAGS.patch_num_ratio
TRAINED_MODEL = FLAGS.trained_model
TEST_FILE = FLAGS.test_file
f_name = TEST_FILE.split("/")[-1]
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# normalize data and extract patches
pc = torch.tensor(np.loadtxt(TEST_FILE)).float().to(device)
num_patches = int(pc.shape[0] / NUM_POINTS * PATCH_NUM_RATIO)
fps_idx = fp_sampling.furthest_point_sample(torch.unsqueeze(pc[:, 0:3], dim=0).contiguous(), num_patches)
patches = torch.tensor(knn_patch.extract_knn_patch(pc[torch.squeeze(fps_idx, dim=0).cpu().numpy(), 0:3].cpu().numpy(), pc.cpu().numpy(), NUM_POINTS)).to(device)
print(patches.shape)
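# normalize each patch into a unit sphere centred at the origin: subtract the patch
# centroid, then divide by the distance to its furthest point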
centroid = torch.mean(patches[:, :, 0:3], dim=1, keepdim=True)
patches[:, :, 0:3] = patches[:, :, 0:3] - centroid
furthest_distance = torch.max(torch.sqrt(torch.sum(patches[:, :, 0:3] ** 2, dim=-1)), dim=1,keepdim=True).values
patches[:, :, 0:3] = patches[:, :, 0:3] / torch.unsqueeze(furthest_distance, dim=-1)
# read best epoch from trained model
trained_model_state = open("{0}/state.txt".format(TRAINED_MODEL), "r")
best_epoch, read_min_loss = helper_function.get_best_epoch(trained_model_state)
print(best_epoch, read_min_loss)
print("Best epoch selected (i.e., minimum loss = {0})".format(read_min_loss))
#initialize model
net = PointCloudNet(3, 6, True, NUM_POINTS).to(device)
model = torch.load("{0}/epoch_{1}.pt".format(TRAINED_MODEL, best_epoch))
net.load_state_dict(model["model_state_dict"])
net.eval()
up_patches = net(patches)
# denormalize and merge patches
up_patches[:, :, 0:3] = up_patches[:, :, 0:3] * torch.unsqueeze(furthest_distance, dim=-1) + centroid
up_points = torch.cat([p for p in up_patches], dim=0)
fps_idx = fp_sampling.furthest_point_sample(torch.unsqueeze(up_points[:, 0:3], dim=0).contiguous(), pc.shape[0] * 4)
up_points = up_points[torch.squeeze(fps_idx, dim=0).cpu().numpy(), :].detach().cpu().numpy()
np.savetxt("../results/{0}".format(f_name), up_points, fmt='%.6f', delimiter=" ", newline="\n")
|
[
"os.path.exists",
"argparse.ArgumentParser",
"torch.mean",
"torch.unsqueeze",
"model.PointCloudNet",
"code.utils.helper_function.get_best_epoch",
"torch.cuda.is_available",
"torch.sum",
"os.mkdir",
"torch.squeeze",
"numpy.loadtxt",
"torch.cat"
] |
[((190, 215), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (213, 215), False, 'import argparse\n'), ((1444, 1495), 'torch.mean', 'torch.mean', (['patches[:, :, 0:3]'], {'dim': '(1)', 'keepdim': '(True)'}), '(patches[:, :, 0:3], dim=1, keepdim=True)\n', (1454, 1495), False, 'import torch\n'), ((1884, 1935), 'code.utils.helper_function.get_best_epoch', 'helper_function.get_best_epoch', (['trained_model_state'], {}), '(trained_model_state)\n', (1914, 1935), False, 'from code.utils import fp_sampling, knn_patch, helper_function\n'), ((2425, 2466), 'torch.cat', 'torch.cat', (['[p for p in up_patches]'], {'dim': '(0)'}), '([p for p in up_patches], dim=0)\n', (2434, 2466), False, 'import torch\n'), ((684, 712), 'os.path.exists', 'os.path.exists', (['"""../results"""'], {}), "('../results')\n", (698, 712), False, 'import os\n'), ((718, 740), 'os.mkdir', 'os.mkdir', (['"""../results"""'], {}), "('../results')\n", (726, 740), False, 'import os\n'), ((1702, 1744), 'torch.unsqueeze', 'torch.unsqueeze', (['furthest_distance'], {'dim': '(-1)'}), '(furthest_distance, dim=-1)\n', (1717, 1744), False, 'import torch\n'), ((945, 970), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (968, 970), False, 'import torch\n'), ((2066, 2103), 'model.PointCloudNet', 'PointCloudNet', (['(3)', '(6)', '(True)', 'NUM_POINTS'], {}), '(3, 6, True, NUM_POINTS)\n', (2079, 2103), False, 'from model import PointCloudNet\n'), ((2359, 2401), 'torch.unsqueeze', 'torch.unsqueeze', (['furthest_distance'], {'dim': '(-1)'}), '(furthest_distance, dim=-1)\n', (2374, 2401), False, 'import torch\n'), ((1187, 1221), 'torch.unsqueeze', 'torch.unsqueeze', (['pc[:, 0:3]'], {'dim': '(0)'}), '(pc[:, 0:3], dim=0)\n', (1202, 1221), False, 'import torch\n'), ((1588, 1630), 'torch.sum', 'torch.sum', (['(patches[:, :, 0:3] ** 2)'], {'dim': '(-1)'}), '(patches[:, :, 0:3] ** 2, dim=-1)\n', (1597, 1630), False, 'import torch\n'), ((2511, 2552), 'torch.unsqueeze', 'torch.unsqueeze', (['up_points[:, 0:3]'], {'dim': '(0)'}), '(up_points[:, 0:3], dim=0)\n', (2526, 2552), False, 'import torch\n'), ((1039, 1060), 'numpy.loadtxt', 'np.loadtxt', (['TEST_FILE'], {}), '(TEST_FILE)\n', (1049, 1060), True, 'import numpy as np\n'), ((2606, 2635), 'torch.squeeze', 'torch.squeeze', (['fps_idx'], {'dim': '(0)'}), '(fps_idx, dim=0)\n', (2619, 2635), False, 'import torch\n'), ((1303, 1332), 'torch.squeeze', 'torch.squeeze', (['fps_idx'], {'dim': '(0)'}), '(fps_idx, dim=0)\n', (1316, 1332), False, 'import torch\n')]
|
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from PIL import Image
import pandas as pd
from sklearn.decomposition import PCA
from scipy.spatial.distance import pdist, squareform
from scipy.sparse.linalg import eigs
from numpy import linalg as LA
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.pyplot import figure
numImages = 60
# fig = plt.figure(figsize = (8,8))
X = np.zeros(shape = (numImages, 490*490))
for i in range(1, numImages + 1):
filename = str(i)+'.jpg'
img = mpimg.imread(filename)
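    # convert RGB to grayscale with the standard luma weights (0.299 R + 0.587 G + 0.114 B)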
img = img[:, :, 0]*0.299 + img[:, :, 1]*0.587 + img[:, :, 2]*0.114
X[i-1] = np.array(img.flatten()).reshape(1, img.shape[0]*img.shape[1])
numComponents = 60
pca = PCA(n_components=numComponents)
pca.fit(X)
Z = pca.transform(X)
fig1, ax = plt.subplots()
ax.scatter(Z[0:5, 0], Z[0:5, 1], s = 25, marker = 'x', c = 'r', label = '$NaCl\; 10mM,\; CaCl_2\; 3.0mM,\; MgCl_2\; 1.5mM$')
ax.scatter(Z[5:10, 0], Z[5:10, 1], s = 25, marker = 'o', facecolors = 'none', edgecolors='r',
label = '$NaCl\; 5.0mM,\; CaCl_2\; 3.0mM,\; MgCl_2\; 1.5mM$')
ax.scatter(Z[10:15, 0], Z[10:15, 1], s = 25, marker = 's', facecolors = 'none', edgecolors='r',
label = '$NaCl\; 2.5mM,\; CaCl_2\; 3.0mM,\; MgCl_2\; 1.5mM$')
ax.scatter(Z[15:20, 0], Z[15:20, 1], s = 25, marker = 'x', c = 'g', label = '$NaHCO_3\; 10mM,\; CaCl_2\; 0.5mM,\; MgCl_2\; 0.25mM$')
ax.scatter(Z[20:25, 0], Z[20:25, 1], s = 25, marker = 'o', facecolors = 'none', edgecolors='g',
label = '$NaHCO_3\; 5.0mM,\; CaCl_2\; 0.5mM,\; MgCl_2\; 0.25mM$')
ax.scatter(Z[25:30, 0], Z[25:30, 1], s = 25, marker = 's', facecolors = 'none', edgecolors='g',
label = '$NaHCO_3\; 2.5mM,\; CaCl_2\; 0.5mM,\; MgCl_2\; 0.25mM$')
ax.scatter(Z[30:35, 0], Z[30:35, 1], s = 25, marker = 'x', c = 'b', label = '$Na_2SO_4\; 10mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
ax.scatter(Z[35:40, 0], Z[35:40, 1], s = 25, marker = 'o', facecolors = 'none', edgecolors='b',
label = '$Na_2SO_4\; 5.0mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
ax.scatter(Z[40:45, 0], Z[40:45, 1], s = 25, marker = 's', facecolors = 'none', edgecolors='b',
label = '$Na_2SO_4\; 2.5mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
ax.scatter(Z[45:50, 0], Z[45:50, 1], s = 25, marker = 'x', c = 'y', label = '$NaHCO_3\; 10mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
ax.scatter(Z[50:55, 0], Z[50:55, 1], s = 25, marker = 'o', facecolors = 'none', edgecolors='y',
label = '$NaHCO_3\; 5.0mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
ax.scatter(Z[55:60, 0], Z[55:60, 1], s = 25, marker = 's', facecolors = 'none', edgecolors='y',
label = '$NaHCO_3\; 2.5mM,\; CaSO_4\; 0.5mM,\; MgSO_4\; 0.25mM$')
plt.xlabel('First component')
plt.ylabel('Second component')
ax.set_yticklabels([])
ax.set_xticklabels([])
# plt.title('PCA Image analysis for all samples')
ax.legend(loc='upper right', prop={'size': 7}, handletextpad = 0, labelspacing = 0)
plt.show()
fig1.savefig('PCA_all_images_2_components_1_plot.jpg', dpi = 1000)
# # use component 3 and 4
# fig2, ax = plt.subplots()
# ax.scatter(Z[0:5, 2], Z[0:5, 3], s = 100, marker = 'x', c = 'r', label = 'NaCl 10mM, CaCl2 3.0mM, MgCl2 1.5mM')
# ax.scatter(Z[5:10, 2], Z[5:10, 3], s = 100, marker = 'o', facecolors = 'none', edgecolors='r',
# label = 'NaCl 5.0mM, CaCl2 3.0mM, MgCl2 1.5mM')
# ax.scatter(Z[10:15, 2], Z[10:15, 3], s = 100, marker = 's', facecolors = 'none', edgecolors='r',
# label = 'NaCl 2.5mM, CaCl2 3.0mM, MgCl2 1.5mM')
#
# ax.scatter(Z[15:20, 2], Z[15:20, 3], s = 100, marker = 'x', c = 'g', label = 'NaHCO3 10mM, CaCl2 0.5mM, MgCl2 0.25mM')
# ax.scatter(Z[20:25, 2], Z[20:25, 3], s = 100, marker = 'o', facecolors = 'none', edgecolors='g',
# label = 'NaHCO3 5.0mM, CaCl2 0.5mM, MgCl2 0.25mM')
# ax.scatter(Z[25:30, 2], Z[25:30, 3], s = 100, marker = 's', facecolors = 'none', edgecolors='g',
# label = 'NaHCO3 2.5mM, CaCl2 0.5mM, MgCl2 0.25mM')
#
# ax.scatter(Z[30:35, 2], Z[30:35, 3], s = 100, marker = 'x', c = 'b', label = 'Na2SO4 5.0mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[35:40, 2], Z[35:40, 3], s = 100, marker = 'o', facecolors = 'none', edgecolors='b',
# label = 'Na2SO4 2.5mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[40:45, 2], Z[40:45, 3], s = 100, marker = 's', facecolors = 'none', edgecolors='b',
# label='Na2SO4 1.25mM, CaSO4 0.5mM, MgSO4 0.25mM')
#
# ax.scatter(Z[45:50, 2], Z[45:50, 3], s = 100, marker = 'x', c = 'y', label = 'NaHCO3 10mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[50:55, 2], Z[50:55, 3], s = 100, marker = 'o', facecolors = 'none', edgecolors='y',
# label = 'NaHCO3 5.0mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[55:60, 2], Z[55:60, 3], s = 100, marker = 's', facecolors = 'none', edgecolors='y',
# label = 'NaHCO3 2.5mM, CaSO4 0.5mM, MgSO4 0.25mM')
#
# plt.xlabel('Third component', fontsize = 20)
# plt.ylabel('Fourth component', fontsize = 20)
# plt.title('PCA Image analysis for all samples', fontsize = 20)
# ax.legend(loc = 'upper right', prop={'size': 7})
# plt.show()
#
# eigenvalues = pca.explained_variance_
# variance = []
# for i in range(len(eigenvalues)):
# if i == 0:
# variance.append(eigenvalues[0])
# else:
# variance.append(variance[i-1] + eigenvalues[i])
# variance = variance/variance[-1]
#
# fig3, ax = plt.subplots()
# plt.plot(variance, 'ro-', linewidth=1)
# plt.title('Scree Plot for all 60 images', fontsize=20)
# plt.xlabel('Principal Component', fontsize=20)
# plt.ylabel('Cumulative Eigenvalue', fontsize=20)
# fig3.savefig('Scree Plot for all 60 images.png')
# # 3d image
# # fig = plt.figure(num=None, figsize=(4, 3), dpi=80, facecolor='w', edgecolor='k')
# fig = plt.figure()
# # figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
# # fig, axs = plt.subplots(nrows=1, ncols=1, constrained_layout=True)
# ax = Axes3D(fig)
# ax.scatter(Z[0:5, 0], Z[0:5, 1], Z[0:5, 2], s = 100, marker = 'x', c = 'r', label = 'NaCl 10mM, CaCl2 3.0mM, MgCl2 1.5mM')
# ax.scatter(Z[5:10, 0], Z[5:10, 1], Z[5:10, 2], s = 100, marker = 's', c = 'r', label = 'NaCl 5.0mM, CaCl2 3.0mM, MgCl2 1.5mM')
# ax.scatter(Z[10:15, 0], Z[10:15, 1], Z[10:15, 2], s = 100, marker = 'o', c ='r', label = 'NaCl 2.5mM, CaCl2 3.0mM, MgCl2 1.5mM')
#
# ax.scatter(Z[15:20, 0], Z[15:20, 1], Z[15:20, 2], s = 100, marker = 'x', c = 'g', label = 'NaHCO3 10mM, CaCl2 0.5mM, MgCl2 0.25mM')
# ax.scatter(Z[20:25, 0], Z[20:25, 1], Z[20:25, 2], s = 100, marker = 's', c = 'g', label = 'NaHCO3 5.0mM, CaCl2 0.5mM, MgCl2 0.25mM')
# ax.scatter(Z[25:30, 0], Z[25:30, 1], Z[25:30, 2], s = 100, marker = 'o', c = 'g', label = 'NaHCO3 2.5mM, CaCl2 0.5mM, MgCl2 0.25mM')
#
# ax.scatter(Z[30:35, 0], Z[30:35, 1], Z[30:35, 2], s = 100, marker = 'x', c = 'b', label = 'Na2SO4 5.0mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[35:40, 0], Z[35:40, 1], Z[35:40, 2], s = 100, marker = 's', c = 'b', label = 'Na2SO4 2.5mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[40:45, 0], Z[40:45, 1], Z[40:45, 2], s = 100, marker = 'o', c = 'b', label='Na2SO4 1.25mM, CaSO4 0.5mM, MgSO4 0.25mM')
#
# ax.scatter(Z[45:50, 0], Z[45:50, 1], Z[45:50, 2], s = 100, marker = 'x', c = 'y', label = 'NaHCO3 10mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[50:55, 0], Z[50:55, 1], Z[50:55, 2], s = 100, marker = 's', c = 'y', label = 'NaHCO3 5.0mM, CaSO4 0.5mM, MgSO4 0.25mM')
# ax.scatter(Z[55:60, 0], Z[55:60, 1], Z[55:60, 2], s = 100, marker = 'o', c = 'y', label = 'NaHCO3 2.5mM, CaSO4 0.5mM, MgSO4 0.25mM')
#
# ax.set_xlabel('First component', fontsize = 15)
# ax.set_ylabel('Second component', fontsize = 15)
# ax.set_zlabel('Third component', fontsize = 15)
# ax.set_title('PCA image analysis for all samples \n with three components', fontsize = 20)
# ax.legend(loc = 'upper right', prop={'size': 7})
# plt.show()
# plt.close(fig)
|
[
"matplotlib.pyplot.ylabel",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.xlabel",
"matplotlib.image.imread",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((419, 457), 'numpy.zeros', 'np.zeros', ([], {'shape': '(numImages, 490 * 490)'}), '(shape=(numImages, 490 * 490))\n', (427, 457), True, 'import numpy as np\n'), ((727, 758), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'numComponents'}), '(n_components=numComponents)\n', (730, 758), False, 'from sklearn.decomposition import PCA\n'), ((803, 817), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (815, 817), True, 'import matplotlib.pyplot as plt\n'), ((2726, 2755), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""First component"""'], {}), "('First component')\n", (2736, 2755), True, 'import matplotlib.pyplot as plt\n'), ((2756, 2786), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Second component"""'], {}), "('Second component')\n", (2766, 2786), True, 'import matplotlib.pyplot as plt\n'), ((2968, 2978), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2976, 2978), True, 'import matplotlib.pyplot as plt\n'), ((532, 554), 'matplotlib.image.imread', 'mpimg.imread', (['filename'], {}), '(filename)\n', (544, 554), True, 'import matplotlib.image as mpimg\n')]
|
import torch
import torch.nn.functional as F
from torch import nn
from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
accuracy, get_world_size, interpolate,
is_dist_avail_and_initialized)
from .backbone import build_backbone
from .matcher import build_matcher_crowd
import numpy as np
import time
# the network framework of the regression branch
class RegressionModel(nn.Module):
def __init__(self, num_features_in, num_anchor_points=4, feature_size=256):
super(RegressionModel, self).__init__()
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchor_points * 2, kernel_size=3, padding=1)
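        # note: conv3/conv4 and act3/act4 are instantiated but not used in forward below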
# sub-branch forward
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.output(out)
out = out.permute(0, 2, 3, 1)
return out.contiguous().view(out.shape[0], -1, 2)
# the network framework of the classification branch
class ClassificationModel(nn.Module):
def __init__(self, num_features_in, num_anchor_points=4, num_classes=80, prior=0.01, feature_size=256):
super(ClassificationModel, self).__init__()
self.num_classes = num_classes
self.num_anchor_points = num_anchor_points
self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
self.act1 = nn.ReLU()
self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act2 = nn.ReLU()
self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act3 = nn.ReLU()
self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
self.act4 = nn.ReLU()
self.output = nn.Conv2d(feature_size, num_anchor_points * num_classes, kernel_size=3, padding=1)
self.output_act = nn.Sigmoid()
# sub-branch forward
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.output(out)
out1 = out.permute(0, 2, 3, 1)
batch_size, width, height, _ = out1.shape
out2 = out1.view(batch_size, width, height, self.num_anchor_points, self.num_classes)
return out2.contiguous().view(x.shape[0], -1, self.num_classes)
# generate the reference points in grid layout
def generate_anchor_points(stride=16, row=3, line=3):
row_step = stride / row
line_step = stride / line
shift_x = (np.arange(1, line + 1) - 0.5) * line_step - stride / 2
shift_y = (np.arange(1, row + 1) - 0.5) * row_step - stride / 2
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
anchor_points = np.vstack((
shift_x.ravel(), shift_y.ravel()
)).transpose()
return anchor_points
# shift the meta-anchors to get anchor points for every grid cell
def shift(shape, stride, anchor_points):
shift_x = (np.arange(0, shape[1]) + 0.5) * stride
shift_y = (np.arange(0, shape[0]) + 0.5) * stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((
shift_x.ravel(), shift_y.ravel()
)).transpose()
A = anchor_points.shape[0]
K = shifts.shape[0]
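    # broadcast the A per-cell offsets over the K grid-cell centres to get K * A anchor points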
all_anchor_points = (anchor_points.reshape((1, A, 2)) + shifts.reshape((1, K, 2)).transpose((1, 0, 2)))
all_anchor_points = all_anchor_points.reshape((K * A, 2))
return all_anchor_points
# this class generates all reference points on all pyramid levels
class AnchorPoints(nn.Module):
def __init__(self, pyramid_levels=None, strides=None, row=3, line=3):
super(AnchorPoints, self).__init__()
if pyramid_levels is None:
self.pyramid_levels = [3, 4, 5, 6, 7]
else:
self.pyramid_levels = pyramid_levels
        if strides is None:
            self.strides = [2 ** x for x in self.pyramid_levels]
        else:
            self.strides = strides
self.row = row
self.line = line
def forward(self, image):
image_shape = image.shape[2:]
image_shape = np.array(image_shape)
image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in self.pyramid_levels]
all_anchor_points = np.zeros((0, 2)).astype(np.float32)
# get reference points for each level
for idx, p in enumerate(self.pyramid_levels):
anchor_points = generate_anchor_points(2**p, row=self.row, line=self.line)
shifted_anchor_points = shift(image_shapes[idx], self.strides[idx], anchor_points)
all_anchor_points = np.append(all_anchor_points, shifted_anchor_points, axis=0)
all_anchor_points = np.expand_dims(all_anchor_points, axis=0)
# send reference points to device
if torch.cuda.is_available():
return torch.from_numpy(all_anchor_points.astype(np.float32)).cuda()
else:
return torch.from_numpy(all_anchor_points.astype(np.float32))
class Decoder(nn.Module):
def __init__(self, C3_size, C4_size, C5_size, feature_size=256):
super(Decoder, self).__init__()
# upsample C5 to get P5 from the FPN paper
self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# add P5 elementwise to C4
self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
# add P4 elementwise to C3
self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P3_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)
def forward(self, inputs):
C3, C4, C5 = inputs
P5_x = self.P5_1(C5)
P5_upsampled_x = self.P5_upsampled(P5_x)
P5_x = self.P5_2(P5_x)
P4_x = self.P4_1(C4)
P4_x = P5_upsampled_x + P4_x
P4_upsampled_x = self.P4_upsampled(P4_x)
P4_x = self.P4_2(P4_x)
P3_x = self.P3_1(C3)
P3_x = P3_x + P4_upsampled_x
P3_x = self.P3_2(P3_x)
return [P3_x, P4_x, P5_x]
# the definition of the P2PNet model
class P2PNet(nn.Module):
def __init__(self, backbone, row=2, line=2):
super().__init__()
self.backbone = backbone
self.num_classes = 2
# the number of all anchor points
num_anchor_points = row * line
self.regression = RegressionModel(num_features_in=256, num_anchor_points=num_anchor_points)
self.classification = ClassificationModel(num_features_in=256, \
num_classes=self.num_classes, \
num_anchor_points=num_anchor_points)
self.anchor_points = AnchorPoints(pyramid_levels=[3,], row=row, line=line)
self.fpn = Decoder(256, 512, 512)
def forward(self, samples: NestedTensor):
# get the backbone features
features = self.backbone(samples)
# forward the feature pyramid
features_fpn = self.fpn([features[1], features[2], features[3]])
batch_size = features[0].shape[0]
# run the regression and classification branch
regression = self.regression(features_fpn[1]) * 100 # 8x
classification = self.classification(features_fpn[1])
anchor_points = self.anchor_points(samples).repeat(batch_size, 1, 1)
# decode the points as prediction
output_coord = regression + anchor_points
output_class = classification
out = {'pred_logits': output_class, 'pred_points': output_coord}
return out
class SetCriterion_Crowd(nn.Module):
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
""" Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[0] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
def loss_labels(self, outputs, targets, indices, num_points):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
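        # every proposal defaults to class 0 (no-object); matched proposals get their ground-truth label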
target_classes = torch.full(src_logits.shape[:2], 0,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {'loss_ce': loss_ce}
return losses
def loss_points(self, outputs, targets, indices, num_points):
assert 'pred_points' in outputs
idx = self._get_src_permutation_idx(indices)
src_points = outputs['pred_points'][idx]
target_points = torch.cat([t['point'][i] for t, (_, i) in zip(targets, indices)], dim=0)
loss_bbox = F.mse_loss(src_points, target_points, reduction='none')
losses = {}
losses['loss_point'] = loss_bbox.sum() / num_points
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_points, **kwargs):
loss_map = {
'labels': self.loss_labels,
'points': self.loss_points,
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, targets, indices, num_points, **kwargs)
def forward(self, outputs, targets):
""" This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
"""
output1 = {'pred_logits': outputs['pred_logits'], 'pred_points': outputs['pred_points']}
indices1 = self.matcher(output1, targets)
num_points = sum(len(t["labels"]) for t in targets)
num_points = torch.as_tensor([num_points], dtype=torch.float, device=next(iter(output1.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_points)
num_boxes = torch.clamp(num_points / get_world_size(), min=1).item()
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, output1, targets, indices1, num_boxes))
return losses
# create the P2PNet model
def build(args, training):
# treats persons as a single class
num_classes = 1
backbone = build_backbone(args)
model = P2PNet(backbone, args.row, args.line)
if not training:
return model
weight_dict = {'loss_ce': 1, 'loss_points': args.point_loss_coef}
losses = ['labels', 'points']
matcher = build_matcher_crowd(args)
criterion = SetCriterion_Crowd(num_classes, \
matcher=matcher, weight_dict=weight_dict, \
eos_coef=args.eos_coef, losses=losses)
return model, criterion
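# Minimal usage sketch (the `args` namespace fields shown are only the ones used above;
# build_backbone expects further backbone-related fields, and `samples`/`targets` are placeholders):
#   model, criterion = build(args, training=True)
#   out = model(samples)              # {'pred_logits': ..., 'pred_points': ...}
#   losses = criterion(out, targets)  # {'loss_ce': ..., 'loss_point': ...}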
|
[
"torch.nn.ReLU",
"torch.full_like",
"numpy.array",
"torch.cuda.is_available",
"numpy.arange",
"torch.nn.Sigmoid",
"util.misc.get_world_size",
"numpy.meshgrid",
"torch.nn.functional.mse_loss",
"util.misc.is_dist_avail_and_initialized",
"torch.distributed.all_reduce",
"torch.nn.Upsample",
"torch.cat",
"torch.full",
"torch.nn.Conv2d",
"numpy.append",
"numpy.zeros",
"numpy.expand_dims",
"torch.ones"
] |
[((3171, 3200), 'numpy.meshgrid', 'np.meshgrid', (['shift_x', 'shift_y'], {}), '(shift_x, shift_y)\n', (3182, 3200), True, 'import numpy as np\n'), ((3541, 3570), 'numpy.meshgrid', 'np.meshgrid', (['shift_x', 'shift_y'], {}), '(shift_x, shift_y)\n', (3552, 3570), True, 'import numpy as np\n'), ((596, 662), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_features_in', 'feature_size'], {'kernel_size': '(3)', 'padding': '(1)'}), '(num_features_in, feature_size, kernel_size=3, padding=1)\n', (605, 662), False, 'from torch import nn\n'), ((683, 692), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (690, 692), False, 'from torch import nn\n'), ((715, 778), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, padding=1)\n', (724, 778), False, 'from torch import nn\n'), ((799, 808), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (806, 808), False, 'from torch import nn\n'), ((831, 894), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, padding=1)\n', (840, 894), False, 'from torch import nn\n'), ((915, 924), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (922, 924), False, 'from torch import nn\n'), ((947, 1010), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, padding=1)\n', (956, 1010), False, 'from torch import nn\n'), ((1031, 1040), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1038, 1040), False, 'from torch import nn\n'), ((1064, 1136), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', '(num_anchor_points * 2)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, num_anchor_points * 2, kernel_size=3, padding=1)\n', (1073, 1136), False, 'from torch import nn\n'), ((1799, 1865), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_features_in', 'feature_size'], {'kernel_size': '(3)', 'padding': '(1)'}), '(num_features_in, feature_size, kernel_size=3, padding=1)\n', (1808, 1865), False, 'from torch import nn\n'), ((1886, 1895), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1893, 1895), False, 'from torch import nn\n'), ((1918, 1981), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, padding=1)\n', (1927, 1981), False, 'from torch import nn\n'), ((2002, 2011), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2009, 2011), False, 'from torch import nn\n'), ((2034, 2097), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, padding=1)\n', (2043, 2097), False, 'from torch import nn\n'), ((2118, 2127), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2125, 2127), False, 'from torch import nn\n'), ((2150, 2213), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, padding=1)\n', (2159, 2213), False, 'from torch import nn\n'), ((2234, 2243), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2241, 2243), False, 'from torch import nn\n'), ((2267, 2353), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', '(num_anchor_points * num_classes)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(feature_size, num_anchor_points * num_classes, kernel_size=3,\n padding=1)\n', (2276, 2353), False, 'from torch import nn\n'), ((2376, 
2388), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2386, 2388), False, 'from torch import nn\n'), ((4512, 4533), 'numpy.array', 'np.array', (['image_shape'], {}), '(image_shape)\n', (4520, 4533), True, 'import numpy as np\n'), ((5095, 5136), 'numpy.expand_dims', 'np.expand_dims', (['all_anchor_points'], {'axis': '(0)'}), '(all_anchor_points, axis=0)\n', (5109, 5136), True, 'import numpy as np\n'), ((5190, 5215), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5213, 5215), False, 'import torch\n'), ((5594, 5662), 'torch.nn.Conv2d', 'nn.Conv2d', (['C5_size', 'feature_size'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(C5_size, feature_size, kernel_size=1, stride=1, padding=0)\n', (5603, 5662), False, 'from torch import nn\n'), ((5691, 5734), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (5702, 5734), False, 'from torch import nn\n'), ((5755, 5828), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n', (5764, 5828), False, 'from torch import nn\n'), ((5885, 5953), 'torch.nn.Conv2d', 'nn.Conv2d', (['C4_size', 'feature_size'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(C4_size, feature_size, kernel_size=1, stride=1, padding=0)\n', (5894, 5953), False, 'from torch import nn\n'), ((5982, 6025), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (5993, 6025), False, 'from torch import nn\n'), ((6046, 6119), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n', (6055, 6119), False, 'from torch import nn\n'), ((6176, 6244), 'torch.nn.Conv2d', 'nn.Conv2d', (['C3_size', 'feature_size'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(C3_size, feature_size, kernel_size=1, stride=1, padding=0)\n', (6185, 6244), False, 'from torch import nn\n'), ((6273, 6316), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (6284, 6316), False, 'from torch import nn\n'), ((6337, 6410), 'torch.nn.Conv2d', 'nn.Conv2d', (['feature_size', 'feature_size'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(feature_size, feature_size, kernel_size=3, stride=1, padding=1)\n', (6346, 6410), False, 'from torch import nn\n'), ((9249, 9281), 'torch.ones', 'torch.ones', (['(self.num_classes + 1)'], {}), '(self.num_classes + 1)\n', (9259, 9281), False, 'import torch\n'), ((9851, 9936), 'torch.full', 'torch.full', (['src_logits.shape[:2]', '(0)'], {'dtype': 'torch.int64', 'device': 'src_logits.device'}), '(src_logits.shape[:2], 0, dtype=torch.int64, device=src_logits.device\n )\n', (9861, 9936), False, 'import torch\n'), ((10502, 10557), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['src_points', 'target_points'], {'reduction': '"""none"""'}), "(src_points, target_points, reduction='none')\n", (10512, 10557), True, 'import torch.nn.functional as F\n'), ((10873, 10911), 'torch.cat', 'torch.cat', (['[src for src, _ in indices]'], {}), '([src for src, _ in indices])\n', (10882, 10911), False, 'import torch\n'), ((11155, 11193), 'torch.cat', 'torch.cat', (['[tgt for _, tgt in indices]'], {}), '([tgt for _, tgt in 
indices])\n', (11164, 11193), False, 'import torch\n'), ((12309, 12340), 'util.misc.is_dist_avail_and_initialized', 'is_dist_avail_and_initialized', ([], {}), '()\n', (12338, 12340), False, 'from util.misc import NestedTensor, nested_tensor_from_tensor_list, accuracy, get_world_size, interpolate, is_dist_avail_and_initialized\n'), ((3424, 3446), 'numpy.arange', 'np.arange', (['(0)', 'shape[1]'], {}), '(0, shape[1])\n', (3433, 3446), True, 'import numpy as np\n'), ((3478, 3500), 'numpy.arange', 'np.arange', (['(0)', 'shape[0]'], {}), '(0, shape[0])\n', (3487, 3500), True, 'import numpy as np\n'), ((5006, 5065), 'numpy.append', 'np.append', (['all_anchor_points', 'shifted_anchor_points'], {'axis': '(0)'}), '(all_anchor_points, shifted_anchor_points, axis=0)\n', (5015, 5065), True, 'import numpy as np\n'), ((12354, 12394), 'torch.distributed.all_reduce', 'torch.distributed.all_reduce', (['num_points'], {}), '(num_points)\n', (12382, 12394), False, 'import torch\n'), ((3024, 3046), 'numpy.arange', 'np.arange', (['(1)', '(line + 1)'], {}), '(1, line + 1)\n', (3033, 3046), True, 'import numpy as np\n'), ((3094, 3115), 'numpy.arange', 'np.arange', (['(1)', '(row + 1)'], {}), '(1, row + 1)\n', (3103, 3115), True, 'import numpy as np\n'), ((4656, 4672), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {}), '((0, 2))\n', (4664, 4672), True, 'import numpy as np\n'), ((10791, 10814), 'torch.full_like', 'torch.full_like', (['src', 'i'], {}), '(src, i)\n', (10806, 10814), False, 'import torch\n'), ((11073, 11096), 'torch.full_like', 'torch.full_like', (['tgt', 'i'], {}), '(tgt, i)\n', (11088, 11096), False, 'import torch\n'), ((12440, 12456), 'util.misc.get_world_size', 'get_world_size', ([], {}), '()\n', (12454, 12456), False, 'from util.misc import NestedTensor, nested_tensor_from_tensor_list, accuracy, get_world_size, interpolate, is_dist_avail_and_initialized\n')]
|
from keras.models import load_model
import numpy as np
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
from sklearn.cluster import KMeans
from time import time
# Takes a pandas dataframe containing the cluster assignment and ground truth for each data point
# and returns the purity of the cluster results
def clustering_purity(cluster_results, index_to_name):
clusters = cluster_results['cluster'].unique()
m = cluster_results.shape[0]
# Purity for each cluster
cluster_purities = []
cluster_sizes = []
most_common_classes = []
for j in clusters:
cluster_j = cluster_results[cluster_results['cluster'] == j]
m_j = cluster_j.shape[0]
cluster_sizes.append(m_j)
classes = cluster_j['class'].unique()
# Class probability distribution for this cluster
class_probabilities = []
for i in classes:
cluster_j_class_i = cluster_j[cluster_j['class'] == i]
m_ij = cluster_j_class_i.shape[0]
class_probabilities.append(m_ij / m_j)
# Calculate cluster purity
cluster_purity = np.max(np.array(class_probabilities))
cluster_purities.append(cluster_purity)
# Save most common class per cluster
        most_common_classes.append(index_to_name[classes[class_probabilities.index(cluster_purity)]])
total_purity = 0
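    # overall purity is the cluster-size-weighted average: sum_j (m_j / m) * purity_j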
for i, size in enumerate(cluster_sizes):
total_purity += (size / m) * cluster_purities[i]
# Pandas dataframe containing per cluster results
results_table = pd.DataFrame({'cluster': clusters,
'cluster_size': cluster_sizes,
'most_common_class': most_common_classes,
'purity': cluster_purities,
'total_purity': total_purity})
return total_purity, results_table
# Takes a pandas dataframe containing the cluster assignment and ground truth for each data point
# and returns the entropy of the cluster results
def clustering_entropy(cluster_results, index_to_name):
clusters = cluster_results['cluster'].unique()
m = cluster_results.shape[0]
# Entropy for each cluster
cluster_entropies = []
cluster_sizes = []
most_common_classes = []
for j in clusters:
cluster_j = cluster_results[cluster_results['cluster'] == j]
m_j = cluster_j.shape[0]
cluster_sizes.append(m_j)
classes = cluster_j['class'].unique()
# Class probability distribution for this cluster
class_probabilities = []
for i in classes:
cluster_j_class_i = cluster_j[cluster_j['class'] == i]
m_ij = cluster_j_class_i.shape[0]
class_probabilities.append(m_ij/m_j)
# Calculate cluster entropy
cluster_entropy = 0
for p in class_probabilities:
cluster_entropy -= p * np.log2(p)
cluster_entropies.append(cluster_entropy)
# Save most common class per cluster
        most_common_classes.append(index_to_name[classes[class_probabilities.index(np.max(np.array(class_probabilities)))]])
total_entropy = 0
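    # overall entropy is the cluster-size-weighted average: sum_j (m_j / m) * H_j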
for i, size in enumerate(cluster_sizes):
total_entropy += (size / m) * cluster_entropies[i]
# Pandas dataframe containing per cluster results
results_table = pd.DataFrame({'cluster': clusters,
'cluster_size': cluster_sizes,
'most_common_class': most_common_classes,
'entropy': cluster_entropies,
'total_entropy': total_entropy})
return total_entropy, results_table
def main():
model_name = 'encoder_caltech256.h5'
encoder = load_model(model_name)
encode_datagen = ImageDataGenerator(rescale=1. / 255)
predict_generator = encode_datagen.flow_from_directory(
'data/256_ObjectCategories',
target_size=(128, 128),
batch_size=1,
class_mode='input', shuffle=False)
n_images = 29780
# Encode all images
encoded_imgs = encoder.predict_generator(predict_generator, n_images, verbose=1)
# Flatten encoded images to create feature vector for clustering
encoded_imgs_feature_vecs = encoded_imgs.reshape(n_images, 8 * 8 * 600)
# Perform K-means clustering on flattened feature vector
print('Starting K-means..')
t0 = time()
kmeans = KMeans(n_clusters=256, n_init=2, n_jobs=-1)
clusters = kmeans.fit_predict(encoded_imgs_feature_vecs)
duration = time() - t0
print("done in %fs" % (duration))
print()
# Prepare data for evaluation functions
cluster_results = pd.DataFrame({'cluster': clusters, 'class': predict_generator.classes})
# Save cluster results
cluster_results.to_csv(model_name[:-3] + 'cluster_results.csv', index=False)
class_index_to_name = {v: k for k, v in predict_generator.class_indices.items()}
print('Evaluating entropy..')
t0 = time()
total_entropy, entropy_per_cluster = clustering_entropy(cluster_results, index_to_name=class_index_to_name)
duration = time() - t0
print("done in %fs" % (duration))
print()
    print('Evaluating purity..')
    t0 = time()
total_purity, purity_per_cluster = clustering_purity(cluster_results, index_to_name=class_index_to_name)
duration = time() - t0
print("done in %fs" % (duration))
print()
print('Entropy:')
print(str(total_entropy))
print(entropy_per_cluster.to_string())
print('\n\n\nPurity: ')
print(str(total_purity))
print(purity_per_cluster.to_string())
entropy_per_cluster.to_csv(model_name[:-3] + 'entropy_details.csv', index=False)
purity_per_cluster.to_csv(model_name[:-3] + 'purity_details.csv', index=False)
if __name__ == '__main__':
main()
|
[
"sklearn.cluster.KMeans",
"keras.models.load_model",
"keras.preprocessing.image.ImageDataGenerator",
"numpy.array",
"pandas.DataFrame",
"numpy.log2",
"time.time"
] |
[((1556, 1730), 'pandas.DataFrame', 'pd.DataFrame', (["{'cluster': clusters, 'cluster_size': cluster_sizes, 'most_common_class':\n most_common_classes, 'purity': cluster_purities, 'total_purity':\n total_purity}"], {}), "({'cluster': clusters, 'cluster_size': cluster_sizes,\n 'most_common_class': most_common_classes, 'purity': cluster_purities,\n 'total_purity': total_purity})\n", (1568, 1730), True, 'import pandas as pd\n'), ((3346, 3524), 'pandas.DataFrame', 'pd.DataFrame', (["{'cluster': clusters, 'cluster_size': cluster_sizes, 'most_common_class':\n most_common_classes, 'entropy': cluster_entropies, 'total_entropy':\n total_entropy}"], {}), "({'cluster': clusters, 'cluster_size': cluster_sizes,\n 'most_common_class': most_common_classes, 'entropy': cluster_entropies,\n 'total_entropy': total_entropy})\n", (3358, 3524), True, 'import pandas as pd\n'), ((3765, 3787), 'keras.models.load_model', 'load_model', (['model_name'], {}), '(model_name)\n', (3775, 3787), False, 'from keras.models import load_model\n'), ((3810, 3847), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (3828, 3847), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((4422, 4428), 'time.time', 'time', ([], {}), '()\n', (4426, 4428), False, 'from time import time\n'), ((4442, 4485), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(256)', 'n_init': '(2)', 'n_jobs': '(-1)'}), '(n_clusters=256, n_init=2, n_jobs=-1)\n', (4448, 4485), False, 'from sklearn.cluster import KMeans\n'), ((4691, 4762), 'pandas.DataFrame', 'pd.DataFrame', (["{'cluster': clusters, 'class': predict_generator.classes}"], {}), "({'cluster': clusters, 'class': predict_generator.classes})\n", (4703, 4762), True, 'import pandas as pd\n'), ((5002, 5008), 'time.time', 'time', ([], {}), '()\n', (5006, 5008), False, 'from time import time\n'), ((4562, 4568), 'time.time', 'time', ([], {}), '()\n', (4566, 4568), False, 'from time import time\n'), ((5136, 5142), 'time.time', 'time', ([], {}), '()\n', (5140, 5142), False, 'from time import time\n'), ((5356, 5362), 'time.time', 'time', ([], {}), '()\n', (5360, 5362), False, 'from time import time\n'), ((1139, 1168), 'numpy.array', 'np.array', (['class_probabilities'], {}), '(class_probabilities)\n', (1147, 1168), True, 'import numpy as np\n'), ((2921, 2931), 'numpy.log2', 'np.log2', (['p'], {}), '(p)\n', (2928, 2931), True, 'import numpy as np\n'), ((3110, 3139), 'numpy.array', 'np.array', (['class_probabilities'], {}), '(class_probabilities)\n', (3118, 3139), True, 'import numpy as np\n')]
|
# Numpy Exercises 27
# *******************
import numpy as np
Z=np.arange((10),dtype=int)
print(Z**Z)
print(Z)
print(2<<Z>>2)
print()
print(Z <- Z)
print()
print(1j*Z)
print()
print(Z/1/1)
print()
#print(Z<Z>Z)
|
[
"numpy.arange"
] |
[((66, 90), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'int'}), '(10, dtype=int)\n', (75, 90), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# @Time : 2019-08-02 18:31
# @Author : <NAME>
# @Email : <EMAIL>
import os
import cv2
import glob
import shutil
from multiprocessing import Pool
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from tqdm import tqdm
import numpy as np
import subprocess
def auto_unzip_fun(x, f):
return f(*x)
def make_video(output_mp4_path, img_path_list, save_frames_dir=None, fps=24):
"""
    output_mp4_path is the final mp4 file name.
    img_path_list contains the paths of the images to assemble into the video.
"""
first_img = cv2.imread(img_path_list[0])
h, w = first_img.shape[:2]
pool_size = 40
tmp_avi_video_path = '%s.avi' % output_mp4_path
fourcc = cv2.VideoWriter_fourcc(*'XVID')
videoWriter = cv2.VideoWriter(tmp_avi_video_path, fourcc, fps, (w, h))
args_list = [(img_path,) for img_path in img_path_list]
with Pool(pool_size) as p:
for img in tqdm(p.imap(partial(auto_unzip_fun, f=cv2.imread), args_list), total=len(args_list)):
videoWriter.write(img)
videoWriter.release()
if save_frames_dir:
for i, img_path in enumerate(img_path_list):
shutil.copy(img_path, '%s/%.8d.jpg' % (save_frames_dir, i))
os.system("ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1" % (tmp_avi_video_path, output_mp4_path))
os.system("rm %s" % tmp_avi_video_path)
def fuse_image(img_path_list, row_num, col_num):
assert len(img_path_list) == row_num * col_num
img_list = [cv2.imread(img_path) for img_path in img_path_list]
row_imgs = []
for i in range(row_num):
col_imgs = img_list[i * col_num: (i + 1) * col_num]
col_img = np.concatenate(col_imgs, axis=1)
row_imgs.append(col_img)
fused_img = np.concatenate(row_imgs, axis=0)
return fused_img
def fuse_video(video_frames_path_list, output_mp4_path, row_num, col_num, fps=24):
assert len(video_frames_path_list) == row_num * col_num
frame_num = len(video_frames_path_list[0])
first_img = cv2.imread(video_frames_path_list[0][0])
h, w = first_img.shape[:2]
fused_h, fused_w = h * row_num, w * col_num
args_list = []
for frame_idx in range(frame_num):
fused_frame_path_list = [video_frames[frame_idx] for video_frames in video_frames_path_list]
args_list.append((fused_frame_path_list, row_num, col_num))
pool_size = 40
tmp_avi_video_path = '%s.avi' % output_mp4_path
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# for args in args_list:
# fuse_image(*args)
# exit()
videoWriter = cv2.VideoWriter(tmp_avi_video_path, fourcc, fps, (fused_w, fused_h))
with Pool(pool_size) as p:
for img in tqdm(p.imap(partial(auto_unzip_fun, f=fuse_image), args_list), total=len(args_list)):
videoWriter.write(img)
videoWriter.release()
os.system("ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1" % (tmp_avi_video_path, output_mp4_path))
os.system("rm %s" % (tmp_avi_video_path))
def merge(src_img, ref_img_path, out_img_path, pad):
h, w = src_img.shape[:2]
image_size = h
ref_img = cv2.imread(ref_img_path)
out_img = cv2.imread(out_img_path)
if ref_img.shape[0] != image_size and ref_img.shape[1] != image_size:
ref_img = cv2.resize(ref_img, (image_size, image_size))
if out_img.shape[0] != image_size and out_img.shape[1] != image_size:
out_img = cv2.resize(out_img, (image_size, image_size))
# print(src_img.shape, ref_img.shape, out_img.shape)
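    # place source, reference and output frames side by side, separated by black padding strips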
merge_img = np.concatenate([src_img, pad, ref_img, pad, out_img], axis=1)
return merge_img
def load_image(image_path, image_size=512):
"""
Args:
image_path (str):
image_size (int):
Returns:
image (np.ndarray): (image_size, image_size, 3), BGR channel space, in the range of [0, 255], np.uint8.
"""
image = cv2.imread(image_path)
image = cv2.resize(image, (image_size, image_size))
return image
def fuse_one_image(img_paths, image_size):
return load_image(img_paths[0], image_size)
def fuse_two_images(img_paths, image_size):
"""
Args:
img_paths (list of str):
image_size (int):
Returns:
        fuse_img (np.ndarray): (image_size, image_size // 2, 3), BGR channel space, in the range of [0, 255], np.uint8.
"""
img_size = image_size // 2
img_1 = load_image(img_paths[0], img_size)
img_2 = load_image(img_paths[1], img_size)
fuse_img = np.concatenate([img_1, img_2], axis=0)
return fuse_img
def fuse_four_images(img_paths, image_size):
"""
Args:
img_paths (list of str):
image_size (int):
Returns:
fuse_img (np.ndarray): (image_size, image_size, 3), BGR channel space, in the range of [0, 255], np.uint8.
"""
fuse_img_1 = fuse_two_images(img_paths[0:2], image_size)
fuse_img_2 = fuse_two_images(img_paths[2:4], image_size)
fuse_img = np.concatenate([fuse_img_1, fuse_img_2], axis=1)
return fuse_img
def fuse_eight_images(img_paths, image_size):
"""
Args:
img_paths (list of str):
image_size (int):
Returns:
        fuse_img (np.ndarray): (image_size, image_size // 2, 3), BGR channel space, in the range of [0, 255], np.uint8.
"""
    fuse_img_1 = fuse_four_images(img_paths[0:4], image_size // 2)
    fuse_img_2 = fuse_four_images(img_paths[4:8], image_size // 2)
fuse_img = np.concatenate([fuse_img_1, fuse_img_2], axis=0)
return fuse_img
def fuse_source(all_src_img_paths, image_size=512):
"""
Args:
all_src_img_paths (list of str): the list of source image paths, currently it only supports, 1, 2, 4, 8 number
of source images.
image_size (int): the final image resolution, (image_size, image_size, 3)
Returns:
fuse_img (np.ndarray): (image_size, image_size, 3), BGR channel space, in the range of [0, 255], np.uint8.
"""
ns = len(all_src_img_paths)
# TODO, currently it only supports, 1, 2, 4, 8 number of source images.
    assert ns in [1, 2, 4, 8], "{} must be in [1, 2, 4, 8]; currently only 1, 2, 4 or 8 " \
                               "source images are supported.".format(ns)
if ns == 1:
fuse_img = load_image(all_src_img_paths[0], image_size)
elif ns == 2:
fuse_img = fuse_two_images(all_src_img_paths, image_size)
elif ns == 4:
fuse_img = fuse_four_images(all_src_img_paths, image_size)
elif ns == 8:
fuse_img = fuse_eight_images(all_src_img_paths, image_size)
else:
        raise ValueError("{} must be in [1, 2, 4, 8]; currently only 1, 2, 4 or 8 "
                         "source images are supported.".format(ns))
return fuse_img
def fuse_source_reference_output(output_mp4_path, src_img_paths, ref_img_paths, out_img_paths,
image_size=512, pad=10, fps=25):
total = len(ref_img_paths)
assert total == len(out_img_paths), "{} != {}".format(total, len(out_img_paths))
fused_src_img = fuse_source(src_img_paths, image_size)
pad_region = np.zeros((image_size, pad, 3), dtype=np.uint8)
pool_size = min(15, os.cpu_count())
tmp_avi_video_path = '%s.avi' % output_mp4_path
fourcc = cv2.VideoWriter_fourcc(*'XVID')
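    # output frame width: the fused source panel plus a padded reference panel and a padded output panel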
W = fused_src_img.shape[1] + (image_size + pad) * 2
videoWriter = cv2.VideoWriter(tmp_avi_video_path, fourcc, fps, (W, image_size))
with ProcessPoolExecutor(pool_size) as pool:
for img in tqdm(pool.map(merge, [fused_src_img] * total,
ref_img_paths, out_img_paths, [pad_region] * total)):
videoWriter.write(img)
videoWriter.release()
os.system("ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1" % (tmp_avi_video_path, output_mp4_path))
os.system("rm %s" % tmp_avi_video_path)
|
[
"cv2.VideoWriter",
"numpy.zeros",
"functools.partial",
"multiprocessing.Pool",
"numpy.concatenate",
"cv2.VideoWriter_fourcc",
"os.cpu_count",
"os.system",
"cv2.resize",
"concurrent.futures.ProcessPoolExecutor",
"cv2.imread",
"shutil.copy"
] |
[((574, 602), 'cv2.imread', 'cv2.imread', (['img_path_list[0]'], {}), '(img_path_list[0])\n', (584, 602), False, 'import cv2\n'), ((719, 750), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (741, 750), False, 'import cv2\n'), ((770, 826), 'cv2.VideoWriter', 'cv2.VideoWriter', (['tmp_avi_video_path', 'fourcc', 'fps', '(w, h)'], {}), '(tmp_avi_video_path, fourcc, fps, (w, h))\n', (785, 826), False, 'import cv2\n'), ((1239, 1345), 'os.system', 'os.system', (["('ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1' % (tmp_avi_video_path,\n output_mp4_path))"], {}), "('ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1' % (\n tmp_avi_video_path, output_mp4_path))\n", (1248, 1345), False, 'import os\n'), ((1345, 1384), 'os.system', 'os.system', (["('rm %s' % tmp_avi_video_path)"], {}), "('rm %s' % tmp_avi_video_path)\n", (1354, 1384), False, 'import os\n'), ((1765, 1797), 'numpy.concatenate', 'np.concatenate', (['row_imgs'], {'axis': '(0)'}), '(row_imgs, axis=0)\n', (1779, 1797), True, 'import numpy as np\n'), ((2028, 2068), 'cv2.imread', 'cv2.imread', (['video_frames_path_list[0][0]'], {}), '(video_frames_path_list[0][0])\n', (2038, 2068), False, 'import cv2\n'), ((2461, 2492), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (2483, 2492), False, 'import cv2\n'), ((2583, 2651), 'cv2.VideoWriter', 'cv2.VideoWriter', (['tmp_avi_video_path', 'fourcc', 'fps', '(fused_w, fused_h)'], {}), '(tmp_avi_video_path, fourcc, fps, (fused_w, fused_h))\n', (2598, 2651), False, 'import cv2\n'), ((2854, 2960), 'os.system', 'os.system', (["('ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1' % (tmp_avi_video_path,\n output_mp4_path))"], {}), "('ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1' % (\n tmp_avi_video_path, output_mp4_path))\n", (2863, 2960), False, 'import os\n'), ((2960, 2999), 'os.system', 'os.system', (["('rm %s' % tmp_avi_video_path)"], {}), "('rm %s' % tmp_avi_video_path)\n", (2969, 2999), False, 'import os\n'), ((3120, 3144), 'cv2.imread', 'cv2.imread', (['ref_img_path'], {}), '(ref_img_path)\n', (3130, 3144), False, 'import cv2\n'), ((3159, 3183), 'cv2.imread', 'cv2.imread', (['out_img_path'], {}), '(out_img_path)\n', (3169, 3183), False, 'import cv2\n'), ((3536, 3597), 'numpy.concatenate', 'np.concatenate', (['[src_img, pad, ref_img, pad, out_img]'], {'axis': '(1)'}), '([src_img, pad, ref_img, pad, out_img], axis=1)\n', (3550, 3597), True, 'import numpy as np\n'), ((3884, 3906), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (3894, 3906), False, 'import cv2\n'), ((3919, 3962), 'cv2.resize', 'cv2.resize', (['image', '(image_size, image_size)'], {}), '(image, (image_size, image_size))\n', (3929, 3962), False, 'import cv2\n'), ((4483, 4521), 'numpy.concatenate', 'np.concatenate', (['[img_1, img_2]'], {'axis': '(0)'}), '([img_1, img_2], axis=0)\n', (4497, 4521), True, 'import numpy as np\n'), ((4944, 4992), 'numpy.concatenate', 'np.concatenate', (['[fuse_img_1, fuse_img_2]'], {'axis': '(1)'}), '([fuse_img_1, fuse_img_2], axis=1)\n', (4958, 4992), True, 'import numpy as np\n'), ((5430, 5478), 'numpy.concatenate', 'np.concatenate', (['[fuse_img_1, fuse_img_2]'], {'axis': '(0)'}), '([fuse_img_1, fuse_img_2], axis=0)\n', (5444, 5478), True, 'import numpy as np\n'), ((7087, 7133), 'numpy.zeros', 'np.zeros', (['(image_size, pad, 3)'], {'dtype': 'np.uint8'}), '((image_size, pad, 3), dtype=np.uint8)\n', (7095, 7133), True, 'import numpy as np\n'), ((7240, 7271), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', 
(["*'XVID'"], {}), "(*'XVID')\n", (7262, 7271), False, 'import cv2\n'), ((7347, 7412), 'cv2.VideoWriter', 'cv2.VideoWriter', (['tmp_avi_video_path', 'fourcc', 'fps', '(W, image_size)'], {}), '(tmp_avi_video_path, fourcc, fps, (W, image_size))\n', (7362, 7412), False, 'import cv2\n'), ((7682, 7788), 'os.system', 'os.system', (["('ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1' % (tmp_avi_video_path,\n output_mp4_path))"], {}), "('ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1' % (\n tmp_avi_video_path, output_mp4_path))\n", (7691, 7788), False, 'import os\n'), ((7788, 7827), 'os.system', 'os.system', (["('rm %s' % tmp_avi_video_path)"], {}), "('rm %s' % tmp_avi_video_path)\n", (7797, 7827), False, 'import os\n'), ((896, 911), 'multiprocessing.Pool', 'Pool', (['pool_size'], {}), '(pool_size)\n', (900, 911), False, 'from multiprocessing import Pool\n'), ((1504, 1524), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1514, 1524), False, 'import cv2\n'), ((1682, 1714), 'numpy.concatenate', 'np.concatenate', (['col_imgs'], {'axis': '(1)'}), '(col_imgs, axis=1)\n', (1696, 1714), True, 'import numpy as np\n'), ((2661, 2676), 'multiprocessing.Pool', 'Pool', (['pool_size'], {}), '(pool_size)\n', (2665, 2676), False, 'from multiprocessing import Pool\n'), ((3277, 3322), 'cv2.resize', 'cv2.resize', (['ref_img', '(image_size, image_size)'], {}), '(ref_img, (image_size, image_size))\n', (3287, 3322), False, 'import cv2\n'), ((3416, 3461), 'cv2.resize', 'cv2.resize', (['out_img', '(image_size, image_size)'], {}), '(out_img, (image_size, image_size))\n', (3426, 3461), False, 'import cv2\n'), ((7159, 7173), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (7171, 7173), False, 'import os\n'), ((7423, 7453), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', (['pool_size'], {}), '(pool_size)\n', (7442, 7453), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((1174, 1233), 'shutil.copy', 'shutil.copy', (['img_path', "('%s/%.8d.jpg' % (save_frames_dir, i))"], {}), "(img_path, '%s/%.8d.jpg' % (save_frames_dir, i))\n", (1185, 1233), False, 'import shutil\n'), ((949, 986), 'functools.partial', 'partial', (['auto_unzip_fun'], {'f': 'cv2.imread'}), '(auto_unzip_fun, f=cv2.imread)\n', (956, 986), False, 'from functools import partial\n'), ((2714, 2751), 'functools.partial', 'partial', (['auto_unzip_fun'], {'f': 'fuse_image'}), '(auto_unzip_fun, f=fuse_image)\n', (2721, 2751), False, 'from functools import partial\n')]
|
#!/usr/bin/env python
# Standard imports
import pandas as pd
import numpy as np
# Pytorch
import torch
from torch import nn
# Using sklearn's LASSO implementation
from sklearn.linear_model import Lasso
# Local Files
from models.model_interface import CryptoModel
class LASSO(CryptoModel):
"""Wrapper around the sklearn LASSO class"""
def __init__(self, alpha=0.1, warm_start=True, verbose_training=False):
"""Create the LASSO model.
        :alpha: L1 regularization strength
        :warm_start: reuse the previous solution as initialization when refitting
        :verbose_training: print diagnostic output during training and prediction
"""
# Arguments
self.alpha = alpha
self.verbose_training = verbose_training
self.model = Lasso(alpha=alpha, fit_intercept=True, warm_start=warm_start)
# set the default plotting color
self.set_plotting_color()
def predict(self, sample):
"""Predict the next out of sample timestep
:sample: Vector or DataFrame of timesteps to use as input for the predictor(s).
        :returns: [batch_size, n_coins] array of predictions
"""
n_samp, _, n_features = sample.shape
yhat = self.model.predict(sample.reshape((n_samp, n_features)))
if self.verbose_training:
print(f'prediction: {yhat}')
return yhat
def train(self, training_set):
"""Train, or re-train, the LSTM and AE
:training_set: DataFrame of training samples
"""
X, Y = [], []
for data, target in training_set:
X.append(data.numpy())
Y.append(target.numpy())
X = np.vstack(X)
Y = np.vstack(Y)
n_samples, _, n_features = X.shape
X = X.reshape((n_samples, n_features))
Y = Y.reshape((n_samples, n_features))
self.model.fit(X, Y)
        # TODO: report the coefficients (or at least how many) that L1 regularization leaves non-zero
coef = self.model.coef_
all_zeros = np.isin([0,-0], coef)
if self.verbose_training:
print(f'All zeros? {all_zeros}')
print(f'Coefs? {coef.shape}')
if np.isin(False, all_zeros):
print(self.model.coef_)
print(type(self.model.coef_))
print(self.model.coef_.shape)
def get_fullname(self):
"""Get the full-grammar name for this model
:returns: English phrase as string
"""
return f"LASSO_alpha-{self.alpha}"
def get_filename(self):
"""Get the abbreviated (file)name for this model
:returns: Abbreviated string with underscores
"""
return f"LASSO_alpha-{self.alpha}"
def needs_retraining(self):
"""Does this model need regular retraining while forecasting?
:returns: bool
"""
return True
def set_plotting_color(self, color="#FCB97D"):
"""Set color used for plotting
:color: Hex value string
"""
self.color = color
def get_plotting_color(self):
"""return color for graphing distinction
:returns: str of color
"""
return self.color
|
[
"numpy.vstack",
"sklearn.linear_model.Lasso",
"numpy.isin"
] |
[((658, 719), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'alpha', 'fit_intercept': '(True)', 'warm_start': 'warm_start'}), '(alpha=alpha, fit_intercept=True, warm_start=warm_start)\n', (663, 719), False, 'from sklearn.linear_model import Lasso\n'), ((1555, 1567), 'numpy.vstack', 'np.vstack', (['X'], {}), '(X)\n', (1564, 1567), True, 'import numpy as np\n'), ((1580, 1592), 'numpy.vstack', 'np.vstack', (['Y'], {}), '(Y)\n', (1589, 1592), True, 'import numpy as np\n'), ((1909, 1931), 'numpy.isin', 'np.isin', (['[0, -0]', 'coef'], {}), '([0, -0], coef)\n', (1916, 1931), True, 'import numpy as np\n'), ((2067, 2092), 'numpy.isin', 'np.isin', (['(False)', 'all_zeros'], {}), '(False, all_zeros)\n', (2074, 2092), True, 'import numpy as np\n')]
|
# importing libraries
import numpy as np
import pandas as pd
import random
import torch
def set_seeds(seed=1234):
"""[Set seeds for reproducibility.]
Keyword Arguments:
seed {int} -- [The seed value] (default: {1234})
"""
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
print("[INFO] THE SEED IS ", seed)
def set_device(cuda=True):
"""[To set the type of machine CPU or GPU]
Keyword Arguments:
cuda {bool} -- [To use GPU or not] (default: {True})
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("[INFO] THE DEVICE IS ", device)
return device
paths = {
"train_path1": "data/atis/train/seq.in",
"train_path2": "data/atis/train/seq.out",
"train_path3": "data/atis/train/label",
"valid_path1": "data/atis/dev/seq.in",
"valid_path2": "data/atis/dev/seq.out",
"valid_path3": "data/atis/dev/label",
"test_path1": "data/atis/test/seq.in",
"test_path2": "data/atis/test/seq.out",
"test_path3":"data/atis/test/label"
}
|
[
"torch.manual_seed",
"random.seed",
"torch.cuda.is_available",
"numpy.random.seed",
"torch.cuda.manual_seed"
] |
[((247, 267), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (261, 267), True, 'import numpy as np\n'), ((272, 289), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (283, 289), False, 'import random\n'), ((294, 317), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (311, 317), False, 'import torch\n'), ((322, 350), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (344, 350), False, 'import torch\n'), ((594, 619), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (617, 619), False, 'import torch\n')]
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LinearRegression, LogisticRegression
from ... import tensor as mt
from ..wrappers import ParallelPostFit
def test_parallel_post_fit_basic(setup):
raw_x, raw_y = make_classification(n_samples=1000)
X, y = mt.tensor(raw_x, chunk_size=100), mt.tensor(raw_y, chunk_size=100)
clf = ParallelPostFit(GradientBoostingClassifier())
clf.fit(X, y)
assert isinstance(clf.predict(X), mt.Tensor)
assert isinstance(clf.predict_proba(X), mt.Tensor)
result = clf.score(X, y)
expected = clf.estimator.score(X, y)
assert result.fetch() == expected
clf = ParallelPostFit(LinearRegression())
clf.fit(X, y)
with pytest.raises(
AttributeError, match="The wrapped estimator (.|\n)* 'predict_proba' method."
):
clf.predict_proba(X)
def test_parallel_post_fit_predict(setup):
raw_x, raw_y = make_classification(n_samples=1000)
X, y = mt.tensor(raw_x, chunk_size=100), mt.tensor(raw_y, chunk_size=100)
base = LogisticRegression(random_state=0, n_jobs=1, solver="lbfgs")
wrap = ParallelPostFit(LogisticRegression(random_state=0, n_jobs=1, solver="lbfgs"))
base.fit(X, y)
wrap.fit(X, y)
result = wrap.predict(X)
expected = base.predict(X)
np.testing.assert_allclose(result, expected)
result = wrap.predict_proba(X)
expected = base.predict_proba(X)
np.testing.assert_allclose(result, expected)
result = wrap.predict_log_proba(X)
expected = base.predict_log_proba(X)
np.testing.assert_allclose(result, expected)
def test_parallel_post_fit_transform(setup):
raw_x, raw_y = make_classification(n_samples=1000)
X, y = mt.tensor(raw_x, chunk_size=100), mt.tensor(raw_y, chunk_size=100)
base = PCA(random_state=0)
wrap = ParallelPostFit(PCA(random_state=0))
base.fit(raw_x, raw_y)
wrap.fit(X, y)
result = base.transform(X)
expected = wrap.transform(X)
np.testing.assert_allclose(result, expected, atol=0.1)
def test_parallel_post_fit_multiclass(setup):
raw_x, raw_y = make_classification(n_samples=1000)
X, y = mt.tensor(raw_x, chunk_size=100), mt.tensor(raw_y, chunk_size=100)
raw_x, raw_y = make_classification(n_classes=3, n_informative=4)
X, y = mt.tensor(raw_x, chunk_size=50), mt.tensor(raw_y, chunk_size=50)
clf = ParallelPostFit(
LogisticRegression(random_state=0, n_jobs=1, solver="lbfgs", multi_class="auto")
)
clf.fit(X, y)
result = clf.predict(X)
expected = clf.estimator.predict(X)
np.testing.assert_allclose(result, expected)
result = clf.predict_proba(X)
expected = clf.estimator.predict_proba(X)
np.testing.assert_allclose(result, expected)
result = clf.predict_log_proba(X)
expected = clf.estimator.predict_log_proba(X)
np.testing.assert_allclose(result, expected)
|
[
"sklearn.decomposition.PCA",
"numpy.testing.assert_allclose",
"sklearn.linear_model.LogisticRegression",
"pytest.raises",
"sklearn.ensemble.GradientBoostingClassifier",
"sklearn.linear_model.LinearRegression",
"sklearn.datasets.make_classification"
] |
[((974, 1009), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(1000)'}), '(n_samples=1000)\n', (993, 1009), False, 'from sklearn.datasets import make_classification\n'), ((1651, 1686), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(1000)'}), '(n_samples=1000)\n', (1670, 1686), False, 'from sklearn.datasets import make_classification\n'), ((1776, 1836), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'n_jobs': '(1)', 'solver': '"""lbfgs"""'}), "(random_state=0, n_jobs=1, solver='lbfgs')\n", (1794, 1836), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((2030, 2074), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (2056, 2074), True, 'import numpy as np\n'), ((2152, 2196), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (2178, 2196), True, 'import numpy as np\n'), ((2282, 2326), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (2308, 2326), True, 'import numpy as np\n'), ((2393, 2428), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(1000)'}), '(n_samples=1000)\n', (2412, 2428), False, 'from sklearn.datasets import make_classification\n'), ((2518, 2537), 'sklearn.decomposition.PCA', 'PCA', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2521, 2537), False, 'from sklearn.decomposition import PCA\n'), ((2702, 2756), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {'atol': '(0.1)'}), '(result, expected, atol=0.1)\n', (2728, 2756), True, 'import numpy as np\n'), ((2824, 2859), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(1000)'}), '(n_samples=1000)\n', (2843, 2859), False, 'from sklearn.datasets import make_classification\n'), ((2957, 3006), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_classes': '(3)', 'n_informative': '(4)'}), '(n_classes=3, n_informative=4)\n', (2976, 3006), False, 'from sklearn.datasets import make_classification\n'), ((3298, 3342), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (3324, 3342), True, 'import numpy as np\n'), ((3429, 3473), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (3455, 3473), True, 'import numpy as np\n'), ((3568, 3612), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (3594, 3612), True, 'import numpy as np\n'), ((1114, 1142), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {}), '()\n', (1140, 1142), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((1403, 1421), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1419, 1421), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1450, 1550), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""The wrapped estimator (.|\n)* \'predict_proba\' method."""'}), '(AttributeError, match=\n """The wrapped estimator (.|\n)* \'predict_proba\' method.""")\n', (1463, 1550), False, 'import pytest\n'), ((1864, 1924), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], 
{'random_state': '(0)', 'n_jobs': '(1)', 'solver': '"""lbfgs"""'}), "(random_state=0, n_jobs=1, solver='lbfgs')\n", (1882, 1924), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((2565, 2584), 'sklearn.decomposition.PCA', 'PCA', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2568, 2584), False, 'from sklearn.decomposition import PCA\n'), ((3119, 3204), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'n_jobs': '(1)', 'solver': '"""lbfgs"""', 'multi_class': '"""auto"""'}), "(random_state=0, n_jobs=1, solver='lbfgs', multi_class='auto'\n )\n", (3137, 3204), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n')]
|
from typing import List, Union
import numpy as np
from .Figure import Figure
def create_position_pdf_plot(*, start_time_sec: np.float32, sampling_frequency: np.float32, pdf: np.ndarray, label: str):
# Nt = pdf.shape[0]
# Np = pdf.shape[1]
A = pdf
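    # normalize each time bin by its own maximum and store as uint8 percentages (0-100) to keep the payload small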
B = A / np.reshape(np.repeat(np.max(A, axis=1), A.shape[1]), A.shape)
B = (B * 100).astype(np.uint8)
data = {
'type': 'PositionPdfPlot',
'pdf': B,
'samplingFrequency': sampling_frequency,
'startTimeSec': start_time_sec
}
return Figure(
data=data,
label=label
)
def create_live_position_pdf_plot(*, start_time_sec: np.float32, end_time_sec: np.float32, sampling_frequency: np.float32, num_positions: int, pdf_object: dict, segment_size: int, multiscale_factor: int, label: str):
data = {
'type': 'LivePositionPdfPlot',
'pdfObject': pdf_object,
'startTimeSec': start_time_sec,
'endTimeSec': end_time_sec,
'numPositions': num_positions,
'samplingFrequency': sampling_frequency,
'segmentSize': segment_size,
'multiscaleFactor': multiscale_factor
}
return Figure(
data=data,
label=label
)
# def _get_subsample_inds(timestamps: np.array, sampling_frequency: float):
# dt = 1 / sampling_frequency
# ret = []
# last_t = timestamps[0] - dt * 2
# for i in range(len(timestamps)):
# delta = timestamps[i] - last_t
# if delta >= dt * 0.95:
# ret.append(i)
# last_t = timestamps[i]
# return ret
|
[
"numpy.max"
] |
[((294, 311), 'numpy.max', 'np.max', (['A'], {'axis': '(1)'}), '(A, axis=1)\n', (300, 311), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Summarise Sound Scattering Layers (SSLs)
@author: <NAME>
"""
## import packages
import matplotlib.pyplot as plt
import gzip
import pickle
import numpy as np
from pyechoplot.plotting import plot_pseudo_SSL, save_png_plot, plot_Sv
## import pyechometrics modules
from pyechometrics.metrics import stats, dims, nasc
## get Sv data and mask
def get_obj(filepath):
f = gzip.open(filepath,'rb')
obj = pickle.load(f,encoding = 'bytes')
f.close()
return obj
## noise_level
noise_level = -999
## read Sv
Sv18 = get_obj('./data/PS_Sv18.pklz')
## get SSL mask - see 'ident_SSLs' example in pyechomask
Sv18mask = get_obj('./data/SSL_flag_mask_18.pklz')
## plot
plt.figure(1)
plt.subplot(211)
plot_Sv(Sv18)
plt.subplot(212)
plot_Sv(Sv18,mask = Sv18mask)
plt.title('SSL identification - 18 kHz echosounder data')
plt.show()
## sample interval in meters for this echogram
sample_int = 0.2 ## in meters
## calculate NASC (include all SSLs)
NASC = nasc(Sv18, sample_int, mask = Sv18mask)
## plot NASC by ping
plt.plot(NASC)
plt.xlabel('ping')
plt.ylabel(r'NASC $m^2nmi^{-2}$')
plt.title('NASC values for SSLs')
plt.show()
## save plot
#save_png_plot('./','NASCexampleWiki')
## make binary mask for a single sound scattering layer (SSL) (Sv18mask == 2)
SSLmask = np.zeros(Sv18mask.shape)
SSLmask[Sv18mask == 2] = 1
## get SSL stats and dimensions
SSL_mean, SSL_median, SSL_std, n = stats(Sv18, mask = SSLmask)
mean_row, mean_height, mean_col, mean_length = dims(Sv18, mask = SSLmask)
## change row to depth
mean_depth = mean_row * sample_int
mean_height = mean_height * sample_int
## plot a pseudo SSL using metrics
## *assume single normal distribution
plot_pseudo_SSL(SSL_mean,SSL_std,mean_height,mean_depth)
plt.ylabel('depth (m)')
plt.xlabel('pings')
plt.title('pseudo DSL produced using summary metrics',fontsize = 16)
plt.show()
## save plot
#save_png_plot('./','exampleWiki')
|
[
"matplotlib.pyplot.ylabel",
"gzip.open",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"pyechometrics.metrics.stats",
"pickle.load",
"pyechometrics.metrics.nasc",
"pyechometrics.metrics.dims",
"pyechoplot.plotting.plot_Sv",
"matplotlib.pyplot.figure",
"numpy.zeros",
"pyechoplot.plotting.plot_pseudo_SSL",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] |
[((703, 716), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (713, 716), True, 'import matplotlib.pyplot as plt\n'), ((717, 733), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (728, 733), True, 'import matplotlib.pyplot as plt\n'), ((734, 747), 'pyechoplot.plotting.plot_Sv', 'plot_Sv', (['Sv18'], {}), '(Sv18)\n', (741, 747), False, 'from pyechoplot.plotting import plot_pseudo_SSL, save_png_plot, plot_Sv\n'), ((748, 764), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (759, 764), True, 'import matplotlib.pyplot as plt\n'), ((765, 793), 'pyechoplot.plotting.plot_Sv', 'plot_Sv', (['Sv18'], {'mask': 'Sv18mask'}), '(Sv18, mask=Sv18mask)\n', (772, 793), False, 'from pyechoplot.plotting import plot_pseudo_SSL, save_png_plot, plot_Sv\n'), ((795, 852), 'matplotlib.pyplot.title', 'plt.title', (['"""SSL identification - 18 kHz echosounder data"""'], {}), "('SSL identification - 18 kHz echosounder data')\n", (804, 852), True, 'import matplotlib.pyplot as plt\n'), ((853, 863), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (861, 863), True, 'import matplotlib.pyplot as plt\n'), ((988, 1025), 'pyechometrics.metrics.nasc', 'nasc', (['Sv18', 'sample_int'], {'mask': 'Sv18mask'}), '(Sv18, sample_int, mask=Sv18mask)\n', (992, 1025), False, 'from pyechometrics.metrics import stats, dims, nasc\n'), ((1050, 1064), 'matplotlib.pyplot.plot', 'plt.plot', (['NASC'], {}), '(NASC)\n', (1058, 1064), True, 'import matplotlib.pyplot as plt\n'), ((1065, 1083), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ping"""'], {}), "('ping')\n", (1075, 1083), True, 'import matplotlib.pyplot as plt\n'), ((1084, 1116), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""NASC $m^2nmi^{-2}$"""'], {}), "('NASC $m^2nmi^{-2}$')\n", (1094, 1116), True, 'import matplotlib.pyplot as plt\n'), ((1118, 1151), 'matplotlib.pyplot.title', 'plt.title', (['"""NASC values for SSLs"""'], {}), "('NASC values for SSLs')\n", (1127, 1151), True, 'import matplotlib.pyplot as plt\n'), ((1152, 1162), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1160, 1162), True, 'import matplotlib.pyplot as plt\n'), ((1323, 1347), 'numpy.zeros', 'np.zeros', (['Sv18mask.shape'], {}), '(Sv18mask.shape)\n', (1331, 1347), True, 'import numpy as np\n'), ((1455, 1480), 'pyechometrics.metrics.stats', 'stats', (['Sv18'], {'mask': 'SSLmask'}), '(Sv18, mask=SSLmask)\n', (1460, 1480), False, 'from pyechometrics.metrics import stats, dims, nasc\n'), ((1530, 1554), 'pyechometrics.metrics.dims', 'dims', (['Sv18'], {'mask': 'SSLmask'}), '(Sv18, mask=SSLmask)\n', (1534, 1554), False, 'from pyechometrics.metrics import stats, dims, nasc\n'), ((1730, 1789), 'pyechoplot.plotting.plot_pseudo_SSL', 'plot_pseudo_SSL', (['SSL_mean', 'SSL_std', 'mean_height', 'mean_depth'], {}), '(SSL_mean, SSL_std, mean_height, mean_depth)\n', (1745, 1789), False, 'from pyechoplot.plotting import plot_pseudo_SSL, save_png_plot, plot_Sv\n'), ((1787, 1810), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""depth (m)"""'], {}), "('depth (m)')\n", (1797, 1810), True, 'import matplotlib.pyplot as plt\n'), ((1813, 1832), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""pings"""'], {}), "('pings')\n", (1823, 1832), True, 'import matplotlib.pyplot as plt\n'), ((1833, 1900), 'matplotlib.pyplot.title', 'plt.title', (['"""pseudo DSL produced using summary metrics"""'], {'fontsize': '(16)'}), "('pseudo DSL produced using summary metrics', fontsize=16)\n", (1842, 1900), True, 'import matplotlib.pyplot as plt\n'), ((1902, 1912), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1910, 1912), True, 'import matplotlib.pyplot as plt\n'), ((402, 427), 'gzip.open', 'gzip.open', (['filepath', '"""rb"""'], {}), "(filepath, 'rb')\n", (411, 427), False, 'import gzip\n'), ((437, 469), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (448, 469), False, 'import pickle\n')]
|
import os
from tqdm import tqdm
from joblib import Parallel, delayed
try:
import seaborn as sns
except:
pass
import numpy as np
import cv2
from lost_ds.util import get_fs
from lost_ds.geometry.lost_geom import LOSTGeometries
from lost_ds.functional.api import remove_empty
def get_fontscale(fontscale, thickness, img_h, text_max_h_frac=0.04):
if isinstance(fontscale, (int, float)):
return fontscale
elif fontscale=='auto':
text_h = int(text_max_h_frac * img_h)
fontscale = cv2.getFontScaleFromHeight(cv2.FONT_HERSHEY_SIMPLEX,
max(text_h, 10),
thickness)
return fontscale
def get_thickness(line_thickness, img_h, thickness_max_h_frac=0.002):
if line_thickness == 'auto':
return int(thickness_max_h_frac * img_h)
else:
return line_thickness
def vis_sample(img, df, line_thickness=3, color=(0, 0, 255),
lbl_col='anno_lbl', lost_geometries:LOSTGeometries=None,
blow_up=None, radius=2, fontscale=2):
'''Visualize annos of an image
Args:
img (np.ndarray): image to draw on
        df (pandas.DataFrame): The DataFrame that contains the annotations to
            visualize. If df is None, a random image from the dataset will
            be sampled.
color (tuple, dict of tuple): colors (B,G,R) for all annos if tuple
or dict for labelwise mapping like {label: color}
line_thickness (int, dict of int): line thickness for annotations if int
or dict for anno-type wise mapping like {dtype: thickness}
lost_geometries (LOSTGeometries): LOSTGeometries instance to use, will
create a new one if None
blow_up (): TODO: implement
Returns:
np.array: Image painted with annotations.
'''
df = remove_empty(df, 'anno_data')
if len(df) > 0:
geom = lost_geometries
if lost_geometries is None:
geom = LOSTGeometries()
anno_data = list(df['anno_data'])
anno_conf = None
if hasattr(df, 'anno_confidence'):
anno_conf = list(df['anno_confidence'])
anno_lbl = list(df[lbl_col])
anno_dtype = list(df['anno_dtype'])
anno_style = list(df['anno_style'])
anno_format = list(df['anno_format'])
thickness = get_thickness(line_thickness, img.shape[0])
fontscale = get_fontscale(fontscale, thickness, img.shape[0])
thickness = max(1, thickness)
img = geom.draw(img, anno_data, anno_conf, anno_lbl, anno_dtype,
anno_style, anno_format, thickness, fontscale, color,
radius)
return img
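# --- Illustrative usage sketch (not part of the original module) ---
# Assumes a LOST-format annotation DataFrame stored as parquet and locally
# readable image paths; the file locations below are hypothetical.
def _demo_vis_sample():
    import pandas as pd
    df = pd.read_parquet('/tmp/lost_annos.parquet')   # hypothetical LOST dataset
    img_path = df['img_path'].iloc[0]
    img = cv2.imread(img_path)                         # BGR image, as expected by vis_sample
    return vis_sample(img, df[df['img_path'] == img_path],
                      line_thickness=2, color=(0, 255, 0))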
def vis_and_store(df, out_dir, lbl_col='anno_lbl', color=(0, 0, 255),
line_thickness=2, fontscale=2, filesystem=None,
radius=2):
'''Visualize annotations and store them to a folder
Args:
df (pd.DataFrame): Optional dataset in lost format to visualize
out_dir (str): Directory to store the visualized annotations
color (tuple, dict of tuple): colors (B,G,R) for all annos if tuple
or dict for labelwise mapping like {label: color}
line_thickness (int, dict of int): line thickness for annotations if int
or dict for anno-type wise mapping like {dtype: thickness}
lbl_col (str): column containing the labels
radius (int): radius to draw for points/circles
filesystem (fsspec.filesystem, FileMan): filesystem to use. Use local
if not initialized
'''
fs = get_fs(filesystem)
fs.makedirs(out_dir, exist_ok=True)
def vis_img(img_path, df_vis):
geom = LOSTGeometries()
out_path = os.path.join(out_dir, os.path.basename(img_path))
if df_vis['anno_data'].notnull().any():
img = fs.read_img(img_path)
img = vis_sample(img=img, df=df_vis, line_thickness=line_thickness,
color=color, lbl_col=lbl_col, lost_geometries=geom,
radius=radius, fontscale=fontscale)
fs.write_img(img, out_path)
else:
fs.copy(img_path, out_path)
Parallel(n_jobs=-1)(delayed(vis_img)(path, df_vis)
for path, df_vis in tqdm(df.groupby('img_path'),
desc='visualize'))
# for path, df_vis in tqdm(df.groupby('img_path'), desc='visualize'):
# vis_img(path, df_vis)
def vis_semantic_segmentation(df, out_dir, n_classes, palette='dark',
seg_path_col='seg_path', filesystem=None):
"""Visualize the stored semantic segmentations by coloring it
Args:
        df (pandas.DataFrame): The DataFrame that contains the annotations to
            visualize.
out_dir (str): path to store images
        n_classes (int): number of classes occurring in the pixel maps, i.e. the
            number of different colors needed for visualization
        palette (str): seaborn color palette, e.g. 'dark', 'bright', 'pastel', ...
            see https://seaborn.pydata.org/tutorial/color_palettes.html
filesystem (fsspec.filesystem, FileMan): filesystem to use. Use local
if not initialized
"""
fs = get_fs(filesystem)
fs.makedirs(out_dir, exist_ok=True)
palette = sns.color_palette(palette, n_classes)
palette = [(np.array(x)*255).astype(np.uint8) for x in palette]
segmentations = df[seg_path_col].unique()
def vis_seg(seg_path):
seg = fs.read_img(seg_path)
vis = np.zeros(seg.shape[:2] + (3,))
for i in range(n_classes):
vis = np.where(seg==i, palette[i], vis)
fs.write_img(vis, os.path.join(out_dir, seg_path.split('/')[-1]))
Parallel(n_jobs=-1)(delayed(vis_seg)(seg_path)
for seg_path in tqdm(segmentations, desc='vis sem. seg.'))
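# --- Illustrative usage sketch (not part of the original module) ---
# Paths are hypothetical; vis_and_store expects LOST-format annotations and
# vis_semantic_segmentation expects a 'seg_path' column with stored pixel maps.
def _demo_vis_and_store():
    import pandas as pd
    df = pd.read_parquet('/tmp/lost_annos.parquet')       # hypothetical LOST dataset
    vis_and_store(df, '/tmp/vis_out', line_thickness=2)
    vis_semantic_segmentation(df, '/tmp/seg_vis_out', n_classes=5, palette='bright')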
|
[
"seaborn.color_palette",
"numpy.where",
"tqdm.tqdm",
"lost_ds.util.get_fs",
"joblib.Parallel",
"numpy.zeros",
"numpy.array",
"lost_ds.geometry.lost_geom.LOSTGeometries",
"os.path.basename",
"joblib.delayed",
"lost_ds.functional.api.remove_empty"
] |
[((1871, 1900), 'lost_ds.functional.api.remove_empty', 'remove_empty', (['df', '"""anno_data"""'], {}), "(df, 'anno_data')\n", (1883, 1900), False, 'from lost_ds.functional.api import remove_empty\n'), ((3667, 3685), 'lost_ds.util.get_fs', 'get_fs', (['filesystem'], {}), '(filesystem)\n', (3673, 3685), False, 'from lost_ds.util import get_fs\n'), ((5382, 5400), 'lost_ds.util.get_fs', 'get_fs', (['filesystem'], {}), '(filesystem)\n', (5388, 5400), False, 'from lost_ds.util import get_fs\n'), ((5460, 5497), 'seaborn.color_palette', 'sns.color_palette', (['palette', 'n_classes'], {}), '(palette, n_classes)\n', (5477, 5497), True, 'import seaborn as sns\n'), ((3781, 3797), 'lost_ds.geometry.lost_geom.LOSTGeometries', 'LOSTGeometries', ([], {}), '()\n', (3795, 3797), False, 'from lost_ds.geometry.lost_geom import LOSTGeometries\n'), ((4294, 4313), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (4302, 4313), False, 'from joblib import Parallel, delayed\n'), ((5691, 5721), 'numpy.zeros', 'np.zeros', (['(seg.shape[:2] + (3,))'], {}), '(seg.shape[:2] + (3,))\n', (5699, 5721), True, 'import numpy as np\n'), ((5896, 5915), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (5904, 5915), False, 'from joblib import Parallel, delayed\n'), ((2007, 2023), 'lost_ds.geometry.lost_geom.LOSTGeometries', 'LOSTGeometries', ([], {}), '()\n', (2021, 2023), False, 'from lost_ds.geometry.lost_geom import LOSTGeometries\n'), ((3839, 3865), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (3855, 3865), False, 'import os\n'), ((5775, 5810), 'numpy.where', 'np.where', (['(seg == i)', 'palette[i]', 'vis'], {}), '(seg == i, palette[i], vis)\n', (5783, 5810), True, 'import numpy as np\n'), ((4314, 4330), 'joblib.delayed', 'delayed', (['vis_img'], {}), '(vis_img)\n', (4321, 4330), False, 'from joblib import Parallel, delayed\n'), ((5916, 5932), 'joblib.delayed', 'delayed', (['vis_seg'], {}), '(vis_seg)\n', (5923, 5932), False, 'from joblib import Parallel, delayed\n'), ((5972, 6013), 'tqdm.tqdm', 'tqdm', (['segmentations'], {'desc': '"""vis sem. seg."""'}), "(segmentations, desc='vis sem. seg.')\n", (5976, 6013), False, 'from tqdm import tqdm\n'), ((5514, 5525), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5522, 5525), True, 'import numpy as np\n')]
|
import glob
import numpy as np
import os
import pandas as pd
import yaml
from dask_image.imread import imread
from dlclabel import misc
from itertools import groupby
from napari.layers import Shapes
from napari.plugins._builtins import napari_write_shapes
from napari.types import LayerData
from skimage.io import imsave
from skimage.util import img_as_ubyte
from typing import Any, Dict, List, Optional, Sequence, Union
SUPPORTED_IMAGES = "jpg", "jpeg", "png"
def handle_path(path: Union[str, Sequence[str]]) -> Union[str, Sequence[str]]:
"""Dispatch files in folder to the relevant plugin readers."""
paths = [path] if isinstance(path, str) else path
paths = [os.fspath(path) for path in paths]
if not isinstance(paths, (tuple, list)):
raise ValueError("'path' argument must be a string, list, or tuple")
# Test first whether a 'labeled-data' folder was passed in
if len(paths) == 1:
path = paths[0]
if os.path.isdir(path):
files = os.listdir(path)
images = ""
for file in files:
if any(file.endswith(ext) for ext in SUPPORTED_IMAGES):
images = os.path.join(path, f"*{os.path.splitext(file)[1]}")
break
if not images:
raise IOError("No supported images were found.")
datafile = ""
for file in files:
if file.endswith(".h5"):
datafile = os.path.join(path, "*.h5")
break
if datafile:
return [images, datafile]
return [images]
return paths
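# --- Illustrative usage sketch (not part of the original module) ---
# For a hypothetical labeled-data folder containing PNG frames and one .h5
# annotation file, handle_path returns glob patterns for both readers:
def _demo_handle_path():
    patterns = handle_path('/data/project/labeled-data/video1')
    # -> ['/data/project/labeled-data/video1/*.png',
    #     '/data/project/labeled-data/video1/*.h5']
    return patterns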
def _populate_metadata(
header: misc.DLCHeader,
*,
labels: Optional[Sequence[str]] = None,
ids: Optional[Sequence[str]] = None,
likelihood: Optional[Sequence[float]] = None,
paths: Optional[List[str]] = None,
size: Optional[int] = 8,
pcutoff: Optional[float] = 0.6,
colormap: Optional[str] = "viridis",
) -> Dict:
if labels is None:
labels = header.bodyparts
if ids is None:
ids = header.individuals
if likelihood is None:
likelihood = np.ones(len(labels))
label_colors = misc.build_color_cycle(len(header.bodyparts), colormap)
id_colors = misc.build_color_cycle(len(header.individuals), colormap)
face_color_cycle_maps = {
"label": dict(zip(header.bodyparts, label_colors)),
"id": dict(zip(header.individuals, id_colors)),
}
return {
"name": "keypoints",
"text": "label",
"properties": {
"label": list(labels),
"id": list(ids),
"likelihood": likelihood,
"valid": likelihood > pcutoff,
},
"face_color_cycle": label_colors,
"edge_color": "valid",
"edge_color_cycle": ["black", "red"],
"size": size,
"metadata": {
"header": header,
"face_color_cycle_maps": face_color_cycle_maps,
"paths": paths or [],
},
}
def _load_config(config_path: str):
with open(config_path) as file:
return yaml.safe_load(file)
def read_config(configname: str) -> List[LayerData]:
config = _load_config(configname)
header = misc.DLCHeader.from_config(config)
metadata = _populate_metadata(
header,
size=config["dotsize"],
pcutoff=config["pcutoff"],
colormap=config["colormap"],
)
metadata["name"] = f"CollectedData_{config['scorer']}"
return [(None, metadata, "points")]
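# --- Illustrative usage sketch (hypothetical config path) ---
# read_config yields a single empty points layer whose metadata (dot size,
# p-cutoff, colormap, scorer name) is taken from the DeepLabCut project file.
def _demo_read_config():
    data, metadata, layer_type = read_config('/data/project/config.yaml')[0]
    assert data is None and layer_type == 'points'
    return metadata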
def read_images(path: Union[str, List[str]]) -> List[LayerData]:
if isinstance(path, list):
root, ext = os.path.splitext(path[0])
path = os.path.join(os.path.dirname(root), f"*{ext}")
# Retrieve filepaths exactly as parsed by pims
filepaths = []
for filepath in sorted(glob.glob(path)):
_, *relpath = filepath.rsplit(os.sep, 3)
filepaths.append(os.path.join(*relpath))
params = {
"name": "images",
"metadata": {
"paths": filepaths,
"root": os.path.split(path)[0]
}
}
return [(imread(path), params, "image")]
def read_hdf(filename: str) -> List[LayerData]:
layers = []
for filename in glob.glob(filename):
temp = pd.read_hdf(filename)
header = misc.DLCHeader(temp.columns)
temp = temp.droplevel("scorer", axis=1)
if "individuals" not in temp.columns.names:
# Append a fake level to the MultiIndex
# to make it look like a multi-animal DataFrame
old_idx = temp.columns.to_frame()
old_idx.insert(0, "individuals", "")
temp.columns = pd.MultiIndex.from_frame(old_idx)
df = temp.stack(["individuals", "bodyparts"]).reset_index()
nrows = df.shape[0]
data = np.empty((nrows, 3))
image_paths = df["level_0"]
if np.issubdtype(image_paths.dtype, np.number):
image_inds = image_paths.values
paths2inds = []
else:
image_inds, paths2inds = misc.encode_categories(image_paths, return_map=True)
data[:, 0] = image_inds
data[:, 1:] = df[["y", "x"]].to_numpy()
metadata = _populate_metadata(
header,
labels=df["bodyparts"],
ids=df["individuals"],
likelihood=df.get("likelihood"),
paths=list(paths2inds),
)
metadata["name"] = os.path.split(filename)[1].split(".")[0]
metadata["metadata"]["root"] = os.path.split(filename)[0]
layers.append((data, metadata, "points"))
return layers
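# --- Illustrative usage sketch (hypothetical path, may contain a glob) ---
# Every matching HDF file becomes one napari points layer with
# (frame index, y, x) coordinates and per-point label/id/likelihood properties.
def _demo_read_hdf():
    layers = read_hdf('/data/project/labeled-data/video1/CollectedData_*.h5')
    for data, metadata, layer_type in layers:
        print(metadata['name'], data.shape, layer_type)
    return layers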
def write_hdf(filename: str, data: Any, metadata: Dict) -> Optional[str]:
temp = pd.DataFrame(data[:, -1:0:-1], columns=["x", "y"])
properties = metadata["properties"]
meta = metadata["metadata"]
temp["bodyparts"] = properties["label"]
temp["individuals"] = properties["id"]
temp["inds"] = data[:, 0].astype(int)
temp["likelihood"] = properties["likelihood"]
temp["scorer"] = meta["header"].scorer
df = temp.set_index(["scorer", "individuals", "bodyparts", "inds"]).stack()
df.index = df.index.set_names("coords", -1)
df = df.unstack(["scorer", "individuals", "bodyparts", "coords"])
df.index.name = None
if not properties["id"][0]:
df = df.droplevel("individuals", axis=1)
df = df.reindex(meta["header"].columns, axis=1)
if meta["paths"]:
df.index = [meta["paths"][i] for i in df.index]
name = metadata["name"]
root = meta["root"]
if "machine" in name: # We are attempting to save refined model predictions
df.drop("likelihood", axis=1, level="coords", inplace=True)
header = misc.DLCHeader(df.columns)
gt_file = ""
for file in os.listdir(root):
if file.startswith("CollectedData") and file.endswith("h5"):
gt_file = file
break
if gt_file: # Refined predictions must be merged into the existing data
df_gt = pd.read_hdf(os.path.join(root, gt_file))
new_scorer = df_gt.columns.get_level_values("scorer")[0]
header.scorer = new_scorer
df.columns = header.columns
df = pd.concat((df, df_gt))
df = df[~df.index.duplicated(keep="first")]
name = os.path.splitext(gt_file)[0]
else:
# Let us fetch the config.yaml file to get the scorer name...
project_folder = root.rsplit(os.sep, 2)[0]
config = _load_config(os.path.join(project_folder, "config.yaml"))
new_scorer = config["scorer"]
header.scorer = new_scorer
df.columns = header.columns
name = f"CollectedData_{new_scorer}"
df.sort_index(inplace=True)
filename = name + ".h5"
df.to_hdf(os.path.join(root, filename), key="df_with_missing")
return filename
def write_masks(foldername: str, data: Any, metadata: Dict) -> Optional[str]:
folder, _ = os.path.splitext(foldername)
os.makedirs(folder, exist_ok=True)
filename = os.path.join(folder, "{}_obj_{}.png")
shapes = Shapes(data, shape_type="polygon")
meta = metadata["metadata"]
frame_inds = [int(array[0, 0]) for array in data]
shape_inds = []
for _, group in groupby(frame_inds):
shape_inds += range(sum(1 for _ in group))
masks = shapes.to_masks(mask_shape=meta["shape"][1:])
for n, mask in enumerate(masks):
image_name = os.path.basename(meta["paths"][frame_inds[n]])
output_path = filename.format(os.path.splitext(image_name)[0], shape_inds[n])
imsave(output_path, img_as_ubyte(mask).squeeze(), check_contrast=False)
napari_write_shapes(os.path.join(folder, "vertices.csv"), data, metadata)
return folder
|
[
"pandas.MultiIndex.from_frame",
"dlclabel.misc.DLCHeader",
"os.fspath",
"os.listdir",
"skimage.util.img_as_ubyte",
"os.path.split",
"numpy.issubdtype",
"napari.layers.Shapes",
"os.path.isdir",
"numpy.empty",
"pandas.DataFrame",
"pandas.read_hdf",
"glob.glob",
"os.path.splitext",
"os.path.dirname",
"dlclabel.misc.DLCHeader.from_config",
"dask_image.imread.imread",
"itertools.groupby",
"os.makedirs",
"os.path.join",
"yaml.safe_load",
"os.path.basename",
"dlclabel.misc.encode_categories",
"pandas.concat"
] |
[((3236, 3270), 'dlclabel.misc.DLCHeader.from_config', 'misc.DLCHeader.from_config', (['config'], {}), '(config)\n', (3262, 3270), False, 'from dlclabel import misc\n'), ((4235, 4254), 'glob.glob', 'glob.glob', (['filename'], {}), '(filename)\n', (4244, 4254), False, 'import glob\n'), ((5697, 5747), 'pandas.DataFrame', 'pd.DataFrame', (['data[:, -1:0:-1]'], {'columns': "['x', 'y']"}), "(data[:, -1:0:-1], columns=['x', 'y'])\n", (5709, 5747), True, 'import pandas as pd\n'), ((7975, 8003), 'os.path.splitext', 'os.path.splitext', (['foldername'], {}), '(foldername)\n', (7991, 8003), False, 'import os\n'), ((8008, 8042), 'os.makedirs', 'os.makedirs', (['folder'], {'exist_ok': '(True)'}), '(folder, exist_ok=True)\n', (8019, 8042), False, 'import os\n'), ((8058, 8095), 'os.path.join', 'os.path.join', (['folder', '"""{}_obj_{}.png"""'], {}), "(folder, '{}_obj_{}.png')\n", (8070, 8095), False, 'import os\n'), ((8109, 8143), 'napari.layers.Shapes', 'Shapes', (['data'], {'shape_type': '"""polygon"""'}), "(data, shape_type='polygon')\n", (8115, 8143), False, 'from napari.layers import Shapes\n'), ((8270, 8289), 'itertools.groupby', 'groupby', (['frame_inds'], {}), '(frame_inds)\n', (8277, 8289), False, 'from itertools import groupby\n'), ((678, 693), 'os.fspath', 'os.fspath', (['path'], {}), '(path)\n', (687, 693), False, 'import os\n'), ((958, 977), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (971, 977), False, 'import os\n'), ((3109, 3129), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (3123, 3129), False, 'import yaml\n'), ((3649, 3674), 'os.path.splitext', 'os.path.splitext', (['path[0]'], {}), '(path[0])\n', (3665, 3674), False, 'import os\n'), ((3834, 3849), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (3843, 3849), False, 'import glob\n'), ((4271, 4292), 'pandas.read_hdf', 'pd.read_hdf', (['filename'], {}), '(filename)\n', (4282, 4292), True, 'import pandas as pd\n'), ((4310, 4338), 'dlclabel.misc.DLCHeader', 'misc.DLCHeader', (['temp.columns'], {}), '(temp.columns)\n', (4324, 4338), False, 'from dlclabel import misc\n'), ((4818, 4838), 'numpy.empty', 'np.empty', (['(nrows, 3)'], {}), '((nrows, 3))\n', (4826, 4838), True, 'import numpy as np\n'), ((4886, 4929), 'numpy.issubdtype', 'np.issubdtype', (['image_paths.dtype', 'np.number'], {}), '(image_paths.dtype, np.number)\n', (4899, 4929), True, 'import numpy as np\n'), ((6694, 6720), 'dlclabel.misc.DLCHeader', 'misc.DLCHeader', (['df.columns'], {}), '(df.columns)\n', (6708, 6720), False, 'from dlclabel import misc\n'), ((6762, 6778), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (6772, 6778), False, 'import os\n'), ((7806, 7834), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (7818, 7834), False, 'import os\n'), ((8458, 8504), 'os.path.basename', 'os.path.basename', (["meta['paths'][frame_inds[n]]"], {}), "(meta['paths'][frame_inds[n]])\n", (8474, 8504), False, 'import os\n'), ((8695, 8731), 'os.path.join', 'os.path.join', (['folder', '"""vertices.csv"""'], {}), "(folder, 'vertices.csv')\n", (8707, 8731), False, 'import os\n'), ((999, 1015), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1009, 1015), False, 'import os\n'), ((3703, 3724), 'os.path.dirname', 'os.path.dirname', (['root'], {}), '(root)\n', (3718, 3724), False, 'import os\n'), ((3926, 3948), 'os.path.join', 'os.path.join', (['*relpath'], {}), '(*relpath)\n', (3938, 3948), False, 'import os\n'), ((4117, 4129), 'dask_image.imread.imread', 'imread', (['path'], {}), '(path)\n', (4123, 
4129), False, 'from dask_image.imread import imread\n'), ((4673, 4706), 'pandas.MultiIndex.from_frame', 'pd.MultiIndex.from_frame', (['old_idx'], {}), '(old_idx)\n', (4697, 4706), True, 'import pandas as pd\n'), ((5054, 5106), 'dlclabel.misc.encode_categories', 'misc.encode_categories', (['image_paths'], {'return_map': '(True)'}), '(image_paths, return_map=True)\n', (5076, 5106), False, 'from dlclabel import misc\n'), ((5515, 5538), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (5528, 5538), False, 'import os\n'), ((7213, 7235), 'pandas.concat', 'pd.concat', (['(df, df_gt)'], {}), '((df, df_gt))\n', (7222, 7235), True, 'import pandas as pd\n'), ((4065, 4084), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (4078, 4084), False, 'import os\n'), ((7019, 7046), 'os.path.join', 'os.path.join', (['root', 'gt_file'], {}), '(root, gt_file)\n', (7031, 7046), False, 'import os\n'), ((7311, 7336), 'os.path.splitext', 'os.path.splitext', (['gt_file'], {}), '(gt_file)\n', (7327, 7336), False, 'import os\n'), ((7517, 7560), 'os.path.join', 'os.path.join', (['project_folder', '"""config.yaml"""'], {}), "(project_folder, 'config.yaml')\n", (7529, 7560), False, 'import os\n'), ((8543, 8571), 'os.path.splitext', 'os.path.splitext', (['image_name'], {}), '(image_name)\n', (8559, 8571), False, 'import os\n'), ((1472, 1498), 'os.path.join', 'os.path.join', (['path', '"""*.h5"""'], {}), "(path, '*.h5')\n", (1484, 1498), False, 'import os\n'), ((8619, 8637), 'skimage.util.img_as_ubyte', 'img_as_ubyte', (['mask'], {}), '(mask)\n', (8631, 8637), False, 'from skimage.util import img_as_ubyte\n'), ((5435, 5458), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (5448, 5458), False, 'import os\n'), ((1195, 1217), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (1211, 1217), False, 'import os\n')]
|
#!/usr/bin/python3
import json
import pprint
import sys
import os
import numpy as np
import traceback
import random
import argparse
import json
import tensorflow
import keras
from keras import optimizers
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.layers.advanced_activations import LeakyReLU
from keras.preprocessing.image import img_to_array, load_img
from keras.callbacks import ModelCheckpoint, History
from PIL import Image
# start with PYTHONHASHSEED=89
np.random.seed(44)
random.seed(22)
tensorflow.set_random_seed(11)
# session_conf = tensorflow.ConfigProto(intra_op_parallelism_threads=1,
# inter_op_parallelism_threads=1)
# tf_sess = tensorflow.Session(graph=tensorflow.get_default_graph(), config=session_conf)
# keras.backend.set_session(tf_sess)
pp = pprint.PrettyPrinter()
modes = ['train', 'predict', 'validate']
aparser = argparse.ArgumentParser()
aparser.add_argument('-tSet', help='Choose source training set (from augmentation)')
aparser.add_argument('mode', help=str(modes))
aparser.add_argument('name', help='Name of this particular run')
aparser.add_argument('-augRunName', help='Name of the source augmentation')
aparser.add_argument('-ls', help='List all current models', action='store_true')
aparser.add_argument('-useTdata', help='Use training data for prediction/validation instead of validation data', action='store_true')
aparser.add_argument('-pFile', help='prediction mode: name of the image file to predict')
aparser.add_argument('-pathCap', help='Specify path to capture-output', nargs=1)
aparser.add_argument('-pathModel', help='Specify path to models', nargs=1)
aparser.add_argument('-pathAug', help='Specify path to augmentation-output', nargs=1)
aparser.add_argument('-save', help='Save config into cfg.json', action='store_true')
args = aparser.parse_args()
if os.path.exists('cfg.json'):
with open('cfg.json', 'r') as cfgfile:
cfg = json.load(cfgfile)
else:
cfg = {}
if args.pathCap or 'capturepath' not in cfg:
cfg['capturepath'] = args.pathCap
if args.pathModel or 'modelpath' not in cfg:
cfg['modelpath'] = args.pathModel
if args.pathAug or 'augpath' not in cfg:
cfg['augpath'] = args.pathAug
if args.tSet or 'tSet' not in cfg:
cfg['tSet'] = args.tSet
if args.name or 'nameOfRun' not in cfg:
cfg['nameOfRun'] = args.augRunName
if args.save:
with open('cfg.json', 'w') as cfgfile:
cfgfile.write(json.dumps(cfg, sort_keys=True, indent=2))
trainingSet = cfg['tSet']
mode = args.mode
nameofrun = args.name
predfile = args.pFile
srcT = args.useTdata
assert mode in modes
# paths
modelpath = cfg['modelpath']
if args.ls:
print('available runs: ' + str(os.listdir(os.path.join(modelpath, trainingSet))))
sys.exit()
outpath = os.path.join(modelpath, trainingSet, nameofrun)
modelPathBare = os.path.join(outpath, nameofrun)
cpmodelPathBare = os.path.join(outpath, 'chkp')
modelPath = modelPathBare + '.h5'
if not os.path.isdir(outpath):
os.makedirs(outpath)
if not os.path.isdir(cpmodelPathBare):
os.makedirs(cpmodelPathBare)
if len(os.listdir(cpmodelPathBare)):
cpmodelPath = os.path.join(cpmodelPathBare, sorted(os.listdir(cpmodelPathBare))[-1])
assert cpmodelPath.endswith('.h5')
else:
cpmodelPath = None
if not os.path.isfile(modelPath) and not cpmodelPath:
model = Sequential()
model.add(Conv2D(16, (3, 3), input_shape=(720, 1280, 3)))
model.add(LeakyReLU(alpha=.3))
model.add(MaxPooling2D(pool_size=(2, 3)))
model.add(Conv2D(32, (3, 3)))
model.add(LeakyReLU(alpha=.3))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(LeakyReLU(alpha=.3))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(LeakyReLU(alpha=.3))
model.add(MaxPooling2D(pool_size=(2, 2)))
# the model so far outputs 3D feature maps (height, width, features)
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64, kernel_initializer='random_uniform'))
model.add(LeakyReLU(alpha=.3))
#model.add(Dropout(0.5))
model.add(Dense(2))
model.add(Activation('sigmoid'))
adaD = optimizers.Adadelta()
model.compile(loss='mse', optimizer=adaD)
startEpoch = 0
else:
# load model
if os.path.isfile(modelPath):
model = load_model(modelPath)
# load training cfg
if os.path.isfile(modelPathBare + '.json'):
with open(modelPathBare + '.json', 'r') as jsonfile:
modelcfg = json.load(jsonfile)
startEpoch = modelcfg['epochsTrained']
else:
startEpoch = 0
else:
model = load_model(cpmodelPath)
startEpoch = int(os.path.basename(cpmodelPath).split('.')[0])
scaleX = 1920 * 2
scaleY = 1080
with open(os.path.join(cfg['capturepath'], trainingSet + '.json')) as jsonfile:
trainingdata = json.load(jsonfile)
dset = {}
for d in trainingdata:
dset[d['f'].split('.')[0]] = (float(d['x']) / scaleX,
float(d['y']) / scaleY)
tset = {}
tfiles = []
vset = {}
vfiles = []
trainDir = os.path.join(cfg['augpath'], cfg['nameOfRun'] + '-train', 'images')
valDir = os.path.join(cfg['augpath'], cfg['nameOfRun'] + '-validate', 'images')
for f in os.listdir(trainDir):
tset[f] = dset[f.split('.')[0].split('_')[0]]
tfiles.append(f)
for f in os.listdir(valDir):
vset[f] = dset[f.split('.')[0]]
vfiles.append(f)
batch_size = min(16, len(tfiles) // 16)
print('{} training samples, {} validation samples'.format(len(tfiles), len(vfiles)))
print(' -> Batch size chosen: {}'.format(batch_size))
class DataGen(keras.utils.Sequence):
def __init__(self, filenames, path, labels, batchSize, dim, nChannels, shuffle=True):
self.dim = dim
self.batchSize = batchSize
self.labels = labels
self.filenames = filenames
self.path = path
self.nChannels = nChannels
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
return int(np.floor(len(self.filenames) / self.batchSize))
def __getitem__(self, index):
indexes = self.indexes[index * self.batchSize : (index + 1) * self.batchSize]
fNamesTmp = [self.filenames[k] for k in indexes]
X, y = self.__data_generation(fNamesTmp)
return X, y
def on_epoch_end(self):
self.indexes = np.arange(len(self.filenames))
if self.shuffle:
np.random.shuffle(self.indexes)
def __data_generation(self, fNamesTmp):
X = np.empty((self.batchSize, *self.dim, self.nChannels))
Y = np.empty((self.batchSize, 2))
for idx, fname in enumerate(fNamesTmp):
img = load_img(os.path.join(self.path, fname))
x = img_to_array(img)
x.reshape((720, 1280, 3))
x *= 1.0/256.0
X[idx,] = x
Y[idx,] = np.asarray(self.labels[fname])
return X, Y
if mode == 'train':
training_generator = DataGen(tfiles, trainDir, tset, batch_size, (720, 1280), 3, shuffle=True)
validation_generator = DataGen(vfiles, valDir, vset, batch_size, (720, 1280), 3, shuffle=False)
checkpointer = ModelCheckpoint(filepath=os.path.join(cpmodelPathBare, '{epoch:03d}.h5'), verbose=1, save_best_only=True)
hist = History()
try:
model.fit_generator(training_generator,
steps_per_epoch=len(tfiles) // batch_size,
epochs=50,
validation_data=validation_generator,
validation_steps=len(vfiles) // batch_size,
max_queue_size=4,
workers=4,
initial_epoch=startEpoch,
callbacks=[checkpointer, hist])
except:
print()
traceback.print_exc()
finally:
print('hist: loss - validation loss')
if 'loss' in hist.history:
epochsTrained = len(hist.history['loss'])
for l, vl in zip(hist.history['loss'], hist.history['val_loss']):
print('{:.5f} - {:.5f}'.format(l, vl))
else:
print('N/A')
epochsTrained = 0
    # always save the weights, even if training was interrupted
model.save(modelPath)
print('Saved model as "{}"'.format(modelPath))
with open(modelPathBare + '.json', 'w') as jsonfile:
jsonfile.write(json.dumps({'epochsTrained': epochsTrained + startEpoch}, sort_keys = True, indent = 2))
elif mode == 'predict':
# print(model.summary())
# pp.pprint(model.get_weights())
X = np.empty((1, 720, 1280, 3))
img = load_img(os.path.join(trainDir if srcT else valDir, sys.argv[4]))
x = img_to_array(img)
x.reshape((720, 1280, 3))
x = x / 256.0
X[0,] = x
output = model.predict(X, None, verbose=1)[0]
print('output: ({:.5f}, {:.5f}) - unscaled: ({:5.2f}, {:5.2f})'.format(output[0], output[1], output[0] * scaleX, output[1] * scaleY))
exp = np.asarray(tset[predfile] if srcT else vset[predfile])
print('expected: ({:.5f}, {:.5f}) - unscaled: ({:5.2f}, {:5.2f})'.format(exp[0], exp[1], exp[0] * scaleX, exp[1] * scaleY))
elif mode == 'validate':
if srcT:
files = tfiles
validation_generator = DataGen(files, trainDir, tset, batch_size, (720, 1280), 3, shuffle=False)
else:
files = vfiles
validation_generator = DataGen(files, valDir, vset, batch_size, (720, 1280), 3, shuffle=False)
predictions = model.predict_generator(validation_generator, verbose=1)
MSE = 0
for f, pred in zip(files, predictions):
exp = np.asarray(tset[f] if srcT else vset[f])
mse = ((exp[0] - pred[0])**2 + (exp[1] - pred[1])**2) / 2
print('{}: ({:.3f}, {:.3f}) -> ({:.3f}, {:.3f}) [mse: {:.3f}]'.format(f, exp[0], exp[1], pred[0], pred[1], mse))
MSE += mse
print('/MSE: {:.3f}'.format(MSE / len(files)))
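# --- Illustrative invocations (script and path names are hypothetical) ---
# Train a new run on augmentation set "aug1" and persist the paths in cfg.json:
#   PYTHONHASHSEED=89 python3 train.py train run01 -tSet aug1 -augRunName aug1 \
#       -pathCap /data/capture -pathModel /data/models -pathAug /data/aug -save
# Predict the (x, y) target for one validation image of that run:
#   PYTHONHASHSEED=89 python3 train.py predict run01 -pFile frame0001.png
# Report the per-sample and mean squared error over the validation set:
#   PYTHONHASHSEED=89 python3 train.py validate run01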
|
[
"keras.preprocessing.image.img_to_array",
"keras.layers.Conv2D",
"keras.callbacks.History",
"keras.layers.Activation",
"sys.exit",
"keras.layers.Dense",
"tensorflow.set_random_seed",
"keras.optimizers.Adadelta",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"json.dumps",
"numpy.asarray",
"os.path.isdir",
"numpy.empty",
"pprint.PrettyPrinter",
"numpy.random.seed",
"keras.layers.advanced_activations.LeakyReLU",
"traceback.print_exc",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"keras.models.Sequential",
"os.path.isfile",
"keras.models.load_model",
"os.makedirs",
"os.path.join",
"random.seed",
"os.path.basename",
"json.load",
"numpy.random.shuffle"
] |
[((609, 627), 'numpy.random.seed', 'np.random.seed', (['(44)'], {}), '(44)\n', (623, 627), True, 'import numpy as np\n'), ((628, 643), 'random.seed', 'random.seed', (['(22)'], {}), '(22)\n', (639, 643), False, 'import random\n'), ((644, 674), 'tensorflow.set_random_seed', 'tensorflow.set_random_seed', (['(11)'], {}), '(11)\n', (670, 674), False, 'import tensorflow\n'), ((953, 975), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {}), '()\n', (973, 975), False, 'import pprint\n'), ((1029, 1054), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1052, 1054), False, 'import argparse\n'), ((1991, 2017), 'os.path.exists', 'os.path.exists', (['"""cfg.json"""'], {}), "('cfg.json')\n", (2005, 2017), False, 'import os\n'), ((2913, 2960), 'os.path.join', 'os.path.join', (['modelpath', 'trainingSet', 'nameofrun'], {}), '(modelpath, trainingSet, nameofrun)\n', (2925, 2960), False, 'import os\n'), ((2977, 3009), 'os.path.join', 'os.path.join', (['outpath', 'nameofrun'], {}), '(outpath, nameofrun)\n', (2989, 3009), False, 'import os\n'), ((3028, 3057), 'os.path.join', 'os.path.join', (['outpath', '"""chkp"""'], {}), "(outpath, 'chkp')\n", (3040, 3057), False, 'import os\n'), ((5292, 5359), 'os.path.join', 'os.path.join', (["cfg['augpath']", "(cfg['nameOfRun'] + '-train')", '"""images"""'], {}), "(cfg['augpath'], cfg['nameOfRun'] + '-train', 'images')\n", (5304, 5359), False, 'import os\n'), ((5369, 5439), 'os.path.join', 'os.path.join', (["cfg['augpath']", "(cfg['nameOfRun'] + '-validate')", '"""images"""'], {}), "(cfg['augpath'], cfg['nameOfRun'] + '-validate', 'images')\n", (5381, 5439), False, 'import os\n'), ((5449, 5469), 'os.listdir', 'os.listdir', (['trainDir'], {}), '(trainDir)\n', (5459, 5469), False, 'import os\n'), ((5552, 5570), 'os.listdir', 'os.listdir', (['valDir'], {}), '(valDir)\n', (5562, 5570), False, 'import os\n'), ((2891, 2901), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2899, 2901), False, 'import sys\n'), ((3100, 3122), 'os.path.isdir', 'os.path.isdir', (['outpath'], {}), '(outpath)\n', (3113, 3122), False, 'import os\n'), ((3128, 3148), 'os.makedirs', 'os.makedirs', (['outpath'], {}), '(outpath)\n', (3139, 3148), False, 'import os\n'), ((3156, 3186), 'os.path.isdir', 'os.path.isdir', (['cpmodelPathBare'], {}), '(cpmodelPathBare)\n', (3169, 3186), False, 'import os\n'), ((3192, 3220), 'os.makedirs', 'os.makedirs', (['cpmodelPathBare'], {}), '(cpmodelPathBare)\n', (3203, 3220), False, 'import os\n'), ((3229, 3256), 'os.listdir', 'os.listdir', (['cpmodelPathBare'], {}), '(cpmodelPathBare)\n', (3239, 3256), False, 'import os\n'), ((3484, 3496), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3494, 3496), False, 'from keras.models import Sequential\n'), ((4346, 4367), 'keras.optimizers.Adadelta', 'optimizers.Adadelta', ([], {}), '()\n', (4365, 4367), False, 'from keras import optimizers\n'), ((4465, 4490), 'os.path.isfile', 'os.path.isfile', (['modelPath'], {}), '(modelPath)\n', (4479, 4490), False, 'import os\n'), ((5067, 5086), 'json.load', 'json.load', (['jsonfile'], {}), '(jsonfile)\n', (5076, 5086), False, 'import json\n'), ((7484, 7493), 'keras.callbacks.History', 'History', ([], {}), '()\n', (7491, 7493), False, 'from keras.callbacks import ModelCheckpoint, History\n'), ((2076, 2094), 'json.load', 'json.load', (['cfgfile'], {}), '(cfgfile)\n', (2085, 2094), False, 'import json\n'), ((3425, 3450), 'os.path.isfile', 'os.path.isfile', (['modelPath'], {}), '(modelPath)\n', (3439, 3450), False, 'import os\n'), ((3512, 3558), 
'keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'input_shape': '(720, 1280, 3)'}), '(16, (3, 3), input_shape=(720, 1280, 3))\n', (3518, 3558), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3574, 3594), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (3583, 3594), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3609, 3639), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 3)'}), '(pool_size=(2, 3))\n', (3621, 3639), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3656, 3674), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (3662, 3674), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3690, 3710), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (3699, 3710), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3725, 3755), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3737, 3755), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3772, 3790), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (3778, 3790), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3806, 3826), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (3815, 3826), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3841, 3871), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3853, 3871), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3888, 3906), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (3894, 3906), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3922, 3942), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (3931, 3942), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3957, 3987), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3969, 3987), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((4077, 4086), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4084, 4086), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((4161, 4207), 'keras.layers.Dense', 'Dense', (['(64)'], {'kernel_initializer': '"""random_uniform"""'}), "(64, kernel_initializer='random_uniform')\n", (4166, 4207), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((4223, 4243), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (4232, 4243), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((4287, 4295), 'keras.layers.Dense', 'Dense', (['(2)'], {}), '(2)\n', (4292, 4295), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((4311, 4332), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (4321, 4332), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((4508, 4529), 'keras.models.load_model', 'load_model', (['modelPath'], {}), '(modelPath)\n', (4518, 4529), False, 'from keras.models import load_model\n'), ((4569, 4608), 'os.path.isfile', 'os.path.isfile', (["(modelPathBare + '.json')"], {}), "(modelPathBare + '.json')\n", (4583, 4608), False, 'import os\n'), ((4840, 4863), 
'keras.models.load_model', 'load_model', (['cpmodelPath'], {}), '(cpmodelPath)\n', (4850, 4863), False, 'from keras.models import load_model\n'), ((4978, 5033), 'os.path.join', 'os.path.join', (["cfg['capturepath']", "(trainingSet + '.json')"], {}), "(cfg['capturepath'], trainingSet + '.json')\n", (4990, 5033), False, 'import os\n'), ((6726, 6779), 'numpy.empty', 'np.empty', (['(self.batchSize, *self.dim, self.nChannels)'], {}), '((self.batchSize, *self.dim, self.nChannels))\n', (6734, 6779), True, 'import numpy as np\n'), ((6792, 6821), 'numpy.empty', 'np.empty', (['(self.batchSize, 2)'], {}), '((self.batchSize, 2))\n', (6800, 6821), True, 'import numpy as np\n'), ((8838, 8865), 'numpy.empty', 'np.empty', (['(1, 720, 1280, 3)'], {}), '((1, 720, 1280, 3))\n', (8846, 8865), True, 'import numpy as np\n'), ((8950, 8967), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (8962, 8967), False, 'from keras.preprocessing.image import img_to_array, load_img\n'), ((9228, 9282), 'numpy.asarray', 'np.asarray', (['(tset[predfile] if srcT else vset[predfile])'], {}), '(tset[predfile] if srcT else vset[predfile])\n', (9238, 9282), True, 'import numpy as np\n'), ((2576, 2617), 'json.dumps', 'json.dumps', (['cfg'], {'sort_keys': '(True)', 'indent': '(2)'}), '(cfg, sort_keys=True, indent=2)\n', (2586, 2617), False, 'import json\n'), ((6637, 6668), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (6654, 6668), True, 'import numpy as np\n'), ((6945, 6962), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (6957, 6962), False, 'from keras.preprocessing.image import img_to_array, load_img\n'), ((7075, 7105), 'numpy.asarray', 'np.asarray', (['self.labels[fname]'], {}), '(self.labels[fname])\n', (7085, 7105), True, 'import numpy as np\n'), ((7392, 7439), 'os.path.join', 'os.path.join', (['cpmodelPathBare', '"""{epoch:03d}.h5"""'], {}), "(cpmodelPathBare, '{epoch:03d}.h5')\n", (7404, 7439), False, 'import os\n'), ((8035, 8056), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8054, 8056), False, 'import traceback\n'), ((8885, 8940), 'os.path.join', 'os.path.join', (['(trainDir if srcT else valDir)', 'sys.argv[4]'], {}), '(trainDir if srcT else valDir, sys.argv[4])\n', (8897, 8940), False, 'import os\n'), ((3314, 3341), 'os.listdir', 'os.listdir', (['cpmodelPathBare'], {}), '(cpmodelPathBare)\n', (3324, 3341), False, 'import os\n'), ((4702, 4721), 'json.load', 'json.load', (['jsonfile'], {}), '(jsonfile)\n', (4711, 4721), False, 'import json\n'), ((6897, 6927), 'os.path.join', 'os.path.join', (['self.path', 'fname'], {}), '(self.path, fname)\n', (6909, 6927), False, 'import os\n'), ((8650, 8737), 'json.dumps', 'json.dumps', (["{'epochsTrained': epochsTrained + startEpoch}"], {'sort_keys': '(True)', 'indent': '(2)'}), "({'epochsTrained': epochsTrained + startEpoch}, sort_keys=True,\n indent=2)\n", (8660, 8737), False, 'import json\n'), ((9860, 9900), 'numpy.asarray', 'np.asarray', (['(tset[f] if srcT else vset[f])'], {}), '(tset[f] if srcT else vset[f])\n', (9870, 9900), True, 'import numpy as np\n'), ((2847, 2883), 'os.path.join', 'os.path.join', (['modelpath', 'trainingSet'], {}), '(modelpath, trainingSet)\n', (2859, 2883), False, 'import os\n'), ((4889, 4918), 'os.path.basename', 'os.path.basename', (['cpmodelPath'], {}), '(cpmodelPath)\n', (4905, 4918), False, 'import os\n')]
|
from __future__ import print_function
try:
import h5py
WITH_H5PY = True
except ImportError:
WITH_H5PY = False
try:
import zarr
WITH_ZARR = True
from .io import IoZarr
except ImportError:
WITH_ZARR = False
try:
import z5py
WITH_Z5PY = True
from .io import IoN5
except ImportError:
WITH_Z5PY = False
import os
import json
from random import shuffle
import numpy as np
import re
import fnmatch
from .inference import load_input_crop
import dask
import toolz as tz
import logging
def _offset_list(shape, output_shape):
in_list = []
for z in np.arange(0, shape[0], output_shape[0]):
for y in np.arange(0, shape[1], output_shape[1]):
for x in np.arange(0, shape[2], output_shape[2]):
in_list.append([float(z), float(y), float(x)])
return in_list
# NOTE this will not cover the whole volume
def _offset_list_with_shift(shape, output_shape, shift):
in_list = []
for z in np.arange(0, shape[0], output_shape[0]):
for y in np.arange(0, shape[1], output_shape[1]):
for x in np.arange(0, shape[2], output_shape[2]):
in_list.append([min(float(z) + shift[0], shape[0]),
min(float(y) + shift[1], shape[1]),
min(float(x) + shift[2], shape[2])])
return in_list
# this returns the offsets for the given output blocks.
# blocks are padded on the fly during inference if necessary
def get_offset_lists(shape,
gpu_list,
save_folder,
output_shape,
randomize=False,
shift=None):
in_list = _offset_list(shape, output_shape) if shift is None else\
_offset_list_with_shift(shape, output_shape, shift)
if randomize:
shuffle(in_list)
n_splits = len(gpu_list)
out_list = [in_list[i::n_splits] for i in range(n_splits)]
if not os.path.exists(save_folder):
os.mkdir(save_folder)
for ii, olist in enumerate(out_list):
list_name = os.path.join(save_folder, 'list_gpu_%i.json' % gpu_list[ii])
with open(list_name, 'w') as f:
json.dump(olist, f)
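# Illustrative usage sketch (hypothetical shapes and folder): for a
# (200, 400, 400) volume and (100, 200, 200) output blocks this produces
# 2*2*2 = 8 offsets, split round-robin into list_gpu_0.json / list_gpu_1.json.
def _demo_get_offset_lists():
    get_offset_lists((200, 400, 400), gpu_list=[0, 1],
                     save_folder='/tmp/offset_lists',
                     output_shape=(100, 200, 200))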
# this returns the offsets for the given output blocks and bounding box.
# blocks are padded on the fly during inference if necessary
def get_offset_lists_with_bb(shape,
gpu_list,
save_folder,
output_shape,
bb_start,
bb_stop,
randomize=False):
    # snap the bounding box to the grid defined by the output blocks
bb_start_c = [(bbs // outs) * outs for bbs, outs in zip(bb_start, output_shape)]
bb_stop_c = [(bbs // outs + 1) * outs for bbs, outs in zip(bb_stop, output_shape)]
in_list = []
for z in range(bb_start_c[0], bb_stop_c[0], output_shape[0]):
for y in range(bb_start_c[1], bb_stop_c[1], output_shape[1]):
for x in range(bb_start_c[2], bb_stop_c[2], output_shape[2]):
in_list.append([z, y, x])
if randomize:
shuffle(in_list)
n_splits = len(gpu_list)
out_list = [in_list[i::n_splits] for i in range(n_splits)]
if not os.path.exists(save_folder):
os.mkdir(save_folder)
for ii, olist in enumerate(out_list):
list_name = os.path.join(save_folder, 'list_gpu_%i.json' % gpu_list[ii])
with open(list_name, 'w') as f:
json.dump(olist, f)
# redistributing offset lists from failed jobs
def redistribute_offset_lists(gpu_list, save_folder):
p_full = re.compile("list_gpu_\d+.json")
p_proc = re.compile("list_gpu_\d+_\S*_processed.txt")
full_list_jsons = []
processed_list_files = []
for f in os.listdir(save_folder):
mo_full = p_full.match(f)
mo_proc = p_proc.match(f)
if mo_full is not None:
full_list_jsons.append(f)
if mo_proc is not None:
processed_list_files.append(f)
full_block_list = set()
for fl in full_list_jsons:
with open(os.path.join(save_folder, fl), 'r') as f:
bl = json.load(f)
full_block_list.update({tuple(coo) for coo in bl})
processed_block_list = set()
bls = []
for pl in processed_list_files:
with open(os.path.join(save_folder, pl), 'r') as f:
bl_txt = f.read()
bl_txt = '[' + bl_txt[:bl_txt.rfind(']') + 1] + ']'
bls.append(json.loads(bl_txt))
processed_block_list.update({tuple(coo) for coo in bls[-1]})
to_be_processed_block_list = list(full_block_list - processed_block_list)
previous_tries = []
p_tries = re.compile("list_gpu_\d+_try\d+.json")
for f in os.listdir(save_folder):
mo_tries = p_tries.match(f)
if mo_tries is not None:
previous_tries.append(f)
if len(previous_tries) == 0:
tryno = 0
else:
trynos = []
for tr in previous_tries:
trynos.append(int(tr.split('try')[1].split('.json')[0]))
tryno = max(trynos)+1
print('Backing up last try ({0:})'.format(tryno))
for f in full_list_jsons:
os.rename(os.path.join(save_folder,f), os.path.join(save_folder, f[:-5] + '_try{0:}.json'.format(tryno)))
for f in processed_list_files:
os.rename(os.path.join(save_folder,f), os.path.join(save_folder, f[:-4] + '_try{0:}.txt'.format(tryno)))
n_splits = len(gpu_list)
out_list = [to_be_processed_block_list[i::n_splits] for i in range(n_splits)]
for ii, olist in enumerate(out_list):
if len(olist) > 0:
list_name = os.path.join(save_folder, 'list_gpu_%i.json' % gpu_list[ii])
with open(list_name, 'w') as f:
json.dump(olist, f)
def load_ds(path, key):
ext = os.path.splitext(path)[-1]
    if ext.lower() in ('.h5', '.hdf', '.hdf5'):
        assert WITH_H5PY
        # keep the file handle open: closing it here would invalidate the
        # returned dataset before the caller can read its shape and attributes
        f = h5py.File(path, 'r')
        ds = f[key]
elif ext.lower() in ('.zr', '.zarr', '.n5'):
assert WITH_Z5PY or WITH_ZARR
if WITH_ZARR:
f = zarr.open(path)
ds = f[key]
elif WITH_Z5PY:
with z5py.File(path) as f:
ds = f[key]
return ds
def generate_list_for_mask(offset_file_json, output_shape_wc, path, mask_ds, n_cpus, mask_voxel_size=None):
mask = load_ds(path, mask_ds)
if mask_voxel_size is None:
if "pixelResolution" in mask.attrs:
mask_voxel_size = mask.attrs["pixelResolution"]["dimensions"]
elif "resolution" in mask.attrs:
mask_voxel_size = mask.attrs["resolution"]
else:
mask_voxel_size = (1,) * len(output_shape_wc)
logging.warning("Did not find resolution information in attributes, defaulting to {0:}".format(mask_voxel_size))
shape_wc = tuple(np.array(mask.shape) * np.array(mask_voxel_size))
complete_offset_list = _offset_list(shape_wc, output_shape_wc)
if WITH_Z5PY:
io = IoN5(path, mask_ds, voxel_size=mask_voxel_size, channel_order=None)
else:
io = IoZarr(path, mask_ds, voxel_size=mask_voxel_size, channel_order=None)
@dask.delayed()
def load_offset(offset_wc):
return load_input_crop(io, offset_wc, (0,) * len(output_shape_wc), output_shape_wc, padding_mode="constant")[0]
@dask.delayed()
def evaluate_mask(mask_block):
if np.sum(mask_block) > 0:
return True
else:
return False
offsets_mask_eval = []
for offset_wc in complete_offset_list:
keep_offset = tz.pipe(offset_wc, load_offset, evaluate_mask)
offsets_mask_eval.append((offset_wc, keep_offset))
offsets_mask_eval = dask.compute(*offsets_mask_eval, scheduler="threads", num_workers=n_cpus)
offsets_in_mask = []
for o, m in offsets_mask_eval:
if m:
offsets_in_mask.append(o)
logging.info("{0:}/{1:} blocks contained in mask, saving offsets in {2:}".format(len(offsets_in_mask),
len(complete_offset_list),
offset_file_json))
with open(offset_file_json, 'w') as f:
json.dump(offsets_in_mask, f)
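# Illustrative usage sketch (hypothetical container path and dataset key):
# keeps only the output blocks whose mask crop contains a non-zero voxel and
# writes their world-coordinate offsets to the given JSON file.
def _demo_generate_list_for_mask():
    generate_list_for_mask('/tmp/offsets_in_mask.json',
                           output_shape_wc=(400, 400, 400),
                           path='/data/volume.n5',
                           mask_ds='masks/foreground',
                           n_cpus=8)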
def generate_full_list(offset_file_json, output_shape_wc, path, raw_ds, raw_voxel_size=None):
raw = load_ds(path, raw_ds)
if raw_voxel_size is None:
if "pixelResolution" in raw.attrs:
raw_voxel_size = raw.attrs["pixelResolution"]["dimensions"]
elif "resolution" in raw.attrs:
raw_voxel_size = raw.attrs["resolution"]
else:
raw_voxel_size = (1,) * len(output_shape_wc)
logging.warning("Did not find resolution information in attributes, defaulting to {0:}".format(raw_voxel_size))
shape_wc = tuple(np.array(raw.shape) * np.array(raw_voxel_size))
complete_offset_list = _offset_list(shape_wc, output_shape_wc)
with open(offset_file_json, "w") as f:
json.dump(complete_offset_list, f)
# this returns the offsets for the given output blocks.
# blocks are padded on the fly in the inference if necessary
def offset_list_from_precomputed(input_list,
gpu_list,
save_folder,
list_name_extension='',
randomize=False):
if isinstance(input_list, str):
with open(input_list, 'r') as f:
input_list = json.load(f)
else:
assert isinstance(input_list, list)
if randomize:
shuffle(input_list)
n_splits = len(gpu_list)
out_list = [input_list[i::n_splits] for i in range(n_splits)]
if not os.path.exists(save_folder):
os.mkdir(save_folder)
print("Original len", len(input_list))
for ii, olist in enumerate(out_list):
list_name = os.path.join(save_folder, 'list_gpu_{0:}{1:}.json'.format(gpu_list[ii], list_name_extension))
print("Dumping list number", ii, "of len", len(olist))
with open(list_name, 'w') as f:
json.dump(olist, f)
def stitch_prediction_blocks(save_path,
block_folder,
shape,
key='data',
end_channel=None,
n_workers=8,
chunks=(1, 64, 64, 64)):
from concurrent import futures
if end_channel is None:
chan_slice = (slice(None),)
else:
assert end_channel <= shape[0]
chan_slice = (slice(0, end_channel),)
def stitch_block(ds, block_id, block_file, n_blocks):
print("Stitching block %i / %i" % (block_id, n_blocks))
offsets = [int(off) for off in block_file[:-3].split('_')[1:]]
with h5py.File(os.path.join(block_folder, block_file), 'r') as g:
block_data = g['data'][:]
block_shape = block_data.shape[1:]
# Need to add slice for channel dimension
bb = chan_slice + tuple(slice(off, off + block_shape[ii])
for ii, off in enumerate(offsets))
ds[bb] = block_data
with h5py.File(save_path, 'w') as f:
ds = f.create_dataset(key,
shape=shape,
dtype='float32',
compression='gzip',
chunks=chunks)
files = os.listdir(block_folder)
# filter out invalid filenames
files = [ff for ff in files if ff.startswith('block')]
# make sure all blocks are h5 files
assert all(ff[-3:] == '.h5' for ff in files)
n_blocks = len(files)
with futures.ThreadPoolExecutor(max_workers=n_workers) as tp:
tasks = [tp.submit(stitch_block, ds, block_id, block_file, n_blocks)
for block_id, block_file in enumerate(files)]
[t.result() for t in tasks]
def extract_nn_affinities(save_prefix,
block_folder,
shape,
invert_affs=False):
from concurrent import futures
save_path_xy = save_prefix + '_xy.h5'
save_path_z = save_prefix + '_z.h5'
with h5py.File(save_path_xy, 'w') as f_xy, h5py.File(save_path_z, 'w') as f_z:
ds_xy = f_xy.create_dataset('data',
shape=shape,
dtype='float32',
compression='gzip',
chunks=(56, 56, 56))
ds_z = f_z.create_dataset('data',
shape=shape,
dtype='float32',
compression='gzip',
chunks=(56, 56, 56))
files = os.listdir(block_folder)
def extract_block(i, ff):
print("Stitching block %i / %i" % (i, len(files)))
offsets = [int(off) for off in ff[:-3].split('_')[1:]]
with h5py.File(os.path.join(block_folder, ff), 'r') as g:
block_data = g['data'][:3]
if invert_affs:
block_data = 1. - block_data
block_shape = block_data.shape[1:]
# Need to add slice for channel dimension
bb = tuple(slice(off, off + block_shape[ii]) for ii, off in enumerate(offsets))
ds_xy[bb] = (block_data[1] + block_data[2]) / 2.
ds_z[bb] = block_data[0]
with futures.ThreadPoolExecutor(max_workers=20) as tp:
tasks = []
for i, ff in enumerate(files):
if not ff.startswith('block'):
continue
assert ff[-3:] == '.h5'
tasks.append(tp.submit(extract_block, i, ff))
[t.result() for t in tasks]
def reject_empty_batch(data):
return np.sum(data) == 0
|
[
"toolz.pipe",
"re.compile",
"numpy.array",
"numpy.arange",
"os.path.exists",
"os.listdir",
"z5py.File",
"os.mkdir",
"json.loads",
"dask.delayed",
"random.shuffle",
"dask.compute",
"os.path.splitext",
"h5py.File",
"zarr.open",
"concurrent.futures.ThreadPoolExecutor",
"os.path.join",
"numpy.sum",
"json.load",
"json.dump"
] |
[((592, 631), 'numpy.arange', 'np.arange', (['(0)', 'shape[0]', 'output_shape[0]'], {}), '(0, shape[0], output_shape[0])\n', (601, 631), True, 'import numpy as np\n'), ((968, 1007), 'numpy.arange', 'np.arange', (['(0)', 'shape[0]', 'output_shape[0]'], {}), '(0, shape[0], output_shape[0])\n', (977, 1007), True, 'import numpy as np\n'), ((3650, 3682), 're.compile', 're.compile', (['"""list_gpu_\\\\d+.json"""'], {}), "('list_gpu_\\\\d+.json')\n", (3660, 3682), False, 'import re\n'), ((3695, 3741), 're.compile', 're.compile', (['"""list_gpu_\\\\d+_\\\\S*_processed.txt"""'], {}), "('list_gpu_\\\\d+_\\\\S*_processed.txt')\n", (3705, 3741), False, 'import re\n'), ((3808, 3831), 'os.listdir', 'os.listdir', (['save_folder'], {}), '(save_folder)\n', (3818, 3831), False, 'import os\n'), ((4713, 4753), 're.compile', 're.compile', (['"""list_gpu_\\\\d+_try\\\\d+.json"""'], {}), "('list_gpu_\\\\d+_try\\\\d+.json')\n", (4723, 4753), False, 'import re\n'), ((4765, 4788), 'os.listdir', 'os.listdir', (['save_folder'], {}), '(save_folder)\n', (4775, 4788), False, 'import os\n'), ((7194, 7208), 'dask.delayed', 'dask.delayed', ([], {}), '()\n', (7206, 7208), False, 'import dask\n'), ((7367, 7381), 'dask.delayed', 'dask.delayed', ([], {}), '()\n', (7379, 7381), False, 'import dask\n'), ((7739, 7812), 'dask.compute', 'dask.compute', (['*offsets_mask_eval'], {'scheduler': '"""threads"""', 'num_workers': 'n_cpus'}), "(*offsets_mask_eval, scheduler='threads', num_workers=n_cpus)\n", (7751, 7812), False, 'import dask\n'), ((650, 689), 'numpy.arange', 'np.arange', (['(0)', 'shape[1]', 'output_shape[1]'], {}), '(0, shape[1], output_shape[1])\n', (659, 689), True, 'import numpy as np\n'), ((1026, 1065), 'numpy.arange', 'np.arange', (['(0)', 'shape[1]', 'output_shape[1]'], {}), '(0, shape[1], output_shape[1])\n', (1035, 1065), True, 'import numpy as np\n'), ((1833, 1849), 'random.shuffle', 'shuffle', (['in_list'], {}), '(in_list)\n', (1840, 1849), False, 'from random import shuffle\n'), ((1955, 1982), 'os.path.exists', 'os.path.exists', (['save_folder'], {}), '(save_folder)\n', (1969, 1982), False, 'import os\n'), ((1992, 2013), 'os.mkdir', 'os.mkdir', (['save_folder'], {}), '(save_folder)\n', (2000, 2013), False, 'import os\n'), ((2077, 2137), 'os.path.join', 'os.path.join', (['save_folder', "('list_gpu_%i.json' % gpu_list[ii])"], {}), "(save_folder, 'list_gpu_%i.json' % gpu_list[ii])\n", (2089, 2137), False, 'import os\n'), ((3157, 3173), 'random.shuffle', 'shuffle', (['in_list'], {}), '(in_list)\n', (3164, 3173), False, 'from random import shuffle\n'), ((3279, 3306), 'os.path.exists', 'os.path.exists', (['save_folder'], {}), '(save_folder)\n', (3293, 3306), False, 'import os\n'), ((3316, 3337), 'os.mkdir', 'os.mkdir', (['save_folder'], {}), '(save_folder)\n', (3324, 3337), False, 'import os\n'), ((3401, 3461), 'os.path.join', 'os.path.join', (['save_folder', "('list_gpu_%i.json' % gpu_list[ii])"], {}), "(save_folder, 'list_gpu_%i.json' % gpu_list[ii])\n", (3413, 3461), False, 'import os\n'), ((5838, 5860), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (5854, 5860), False, 'import os\n'), ((7608, 7654), 'toolz.pipe', 'tz.pipe', (['offset_wc', 'load_offset', 'evaluate_mask'], {}), '(offset_wc, load_offset, evaluate_mask)\n', (7615, 7654), True, 'import toolz as tz\n'), ((8301, 8330), 'json.dump', 'json.dump', (['offsets_in_mask', 'f'], {}), '(offsets_in_mask, f)\n', (8310, 8330), False, 'import json\n'), ((9079, 9113), 'json.dump', 'json.dump', (['complete_offset_list', 'f'], {}), 
'(complete_offset_list, f)\n', (9088, 9113), False, 'import json\n'), ((9671, 9690), 'random.shuffle', 'shuffle', (['input_list'], {}), '(input_list)\n', (9678, 9690), False, 'from random import shuffle\n'), ((9799, 9826), 'os.path.exists', 'os.path.exists', (['save_folder'], {}), '(save_folder)\n', (9813, 9826), False, 'import os\n'), ((9836, 9857), 'os.mkdir', 'os.mkdir', (['save_folder'], {}), '(save_folder)\n', (9844, 9857), False, 'import os\n'), ((11262, 11287), 'h5py.File', 'h5py.File', (['save_path', '"""w"""'], {}), "(save_path, 'w')\n", (11271, 11287), False, 'import h5py\n'), ((11530, 11554), 'os.listdir', 'os.listdir', (['block_folder'], {}), '(block_folder)\n', (11540, 11554), False, 'import os\n'), ((12328, 12356), 'h5py.File', 'h5py.File', (['save_path_xy', '"""w"""'], {}), "(save_path_xy, 'w')\n", (12337, 12356), False, 'import h5py\n'), ((12366, 12393), 'h5py.File', 'h5py.File', (['save_path_z', '"""w"""'], {}), "(save_path_z, 'w')\n", (12375, 12393), False, 'import h5py\n'), ((12926, 12950), 'os.listdir', 'os.listdir', (['block_folder'], {}), '(block_folder)\n', (12936, 12950), False, 'import os\n'), ((13987, 13999), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (13993, 13999), True, 'import numpy as np\n'), ((712, 751), 'numpy.arange', 'np.arange', (['(0)', 'shape[2]', 'output_shape[2]'], {}), '(0, shape[2], output_shape[2])\n', (721, 751), True, 'import numpy as np\n'), ((1088, 1127), 'numpy.arange', 'np.arange', (['(0)', 'shape[2]', 'output_shape[2]'], {}), '(0, shape[2], output_shape[2])\n', (1097, 1127), True, 'import numpy as np\n'), ((2190, 2209), 'json.dump', 'json.dump', (['olist', 'f'], {}), '(olist, f)\n', (2199, 2209), False, 'import json\n'), ((3514, 3533), 'json.dump', 'json.dump', (['olist', 'f'], {}), '(olist, f)\n', (3523, 3533), False, 'import json\n'), ((4180, 4192), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4189, 4192), False, 'import json\n'), ((4507, 4525), 'json.loads', 'json.loads', (['bl_txt'], {}), '(bl_txt)\n', (4517, 4525), False, 'import json\n'), ((5213, 5241), 'os.path.join', 'os.path.join', (['save_folder', 'f'], {}), '(save_folder, f)\n', (5225, 5241), False, 'import os\n'), ((5362, 5390), 'os.path.join', 'os.path.join', (['save_folder', 'f'], {}), '(save_folder, f)\n', (5374, 5390), False, 'import os\n'), ((5661, 5721), 'os.path.join', 'os.path.join', (['save_folder', "('list_gpu_%i.json' % gpu_list[ii])"], {}), "(save_folder, 'list_gpu_%i.json' % gpu_list[ii])\n", (5673, 5721), False, 'import os\n'), ((5950, 5970), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (5959, 5970), False, 'import h5py\n'), ((6879, 6899), 'numpy.array', 'np.array', (['mask.shape'], {}), '(mask.shape)\n', (6887, 6899), True, 'import numpy as np\n'), ((6902, 6927), 'numpy.array', 'np.array', (['mask_voxel_size'], {}), '(mask_voxel_size)\n', (6910, 6927), True, 'import numpy as np\n'), ((7428, 7446), 'numpy.sum', 'np.sum', (['mask_block'], {}), '(mask_block)\n', (7434, 7446), True, 'import numpy as np\n'), ((8913, 8932), 'numpy.array', 'np.array', (['raw.shape'], {}), '(raw.shape)\n', (8921, 8932), True, 'import numpy as np\n'), ((8935, 8959), 'numpy.array', 'np.array', (['raw_voxel_size'], {}), '(raw_voxel_size)\n', (8943, 8959), True, 'import numpy as np\n'), ((9577, 9589), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9586, 9589), False, 'import json\n'), ((10173, 10192), 'json.dump', 'json.dump', (['olist', 'f'], {}), '(olist, f)\n', (10182, 10192), False, 'import json\n'), ((11797, 11846), 
'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': 'n_workers'}), '(max_workers=n_workers)\n', (11823, 11846), False, 'from concurrent import futures\n'), ((13610, 13652), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': '(20)'}), '(max_workers=20)\n', (13636, 13652), False, 'from concurrent import futures\n'), ((4121, 4150), 'os.path.join', 'os.path.join', (['save_folder', 'fl'], {}), '(save_folder, fl)\n', (4133, 4150), False, 'import os\n'), ((4356, 4385), 'os.path.join', 'os.path.join', (['save_folder', 'pl'], {}), '(save_folder, pl)\n', (4368, 4385), False, 'import os\n'), ((5782, 5801), 'json.dump', 'json.dump', (['olist', 'f'], {}), '(olist, f)\n', (5791, 5801), False, 'import json\n'), ((6126, 6141), 'zarr.open', 'zarr.open', (['path'], {}), '(path)\n', (6135, 6141), False, 'import zarr\n'), ((10909, 10947), 'os.path.join', 'os.path.join', (['block_folder', 'block_file'], {}), '(block_folder, block_file)\n', (10921, 10947), False, 'import os\n'), ((13144, 13174), 'os.path.join', 'os.path.join', (['block_folder', 'ff'], {}), '(block_folder, ff)\n', (13156, 13174), False, 'import os\n'), ((6207, 6222), 'z5py.File', 'z5py.File', (['path'], {}), '(path)\n', (6216, 6222), False, 'import z5py\n')]
|
import random
import os
import logging
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
# import faiss
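# NOTE: get_faiss_module() and get_init_centroids() below call faiss directly;
# re-enable this import (faiss / faiss-gpu) before using the clustering helpers.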
################################################################################
# General-purpose #
################################################################################
def str_list(l):
return '_'.join([str(x) for x in l])
def set_logger(log_path):
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Logging to a file
file_handler = logging.FileHandler(log_path)
file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logger.addHandler(file_handler)
# Logging to console
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(stream_handler)
return logger
class Logger(object):
""" Class to update every epoch to keep trace of the results
Methods:
- log() log and save
"""
def __init__(self, path):
self.path = path
self.data = []
def log(self, train_point):
self.data.append(train_point)
with open(os.path.join(self.path), 'wb') as fp:
pickle.dump(self.data, fp, -1)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def get_datetime(time_delta):
days_delta = time_delta // (24*3600)
time_delta = time_delta % (24*3600)
hour_delta = time_delta // 3600
time_delta = time_delta % 3600
mins_delta = time_delta // 60
time_delta = time_delta % 60
secs_delta = time_delta
return '{}:{}:{}:{}'.format(days_delta, hour_delta, mins_delta, secs_delta)
################################################################################
# Metric-related ops #
################################################################################
def _fast_hist(label_true, label_pred, n_class):
mask = (label_true >= 0) & (label_true < n_class) # Exclude unlabelled data.
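    # Encode each (true, pred) pair as a single index true * n_class + pred so that
    # one bincount call builds the flattened n_class x n_class confusion matrix.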
hist = np.bincount(n_class * label_true[mask] + label_pred[mask],\
minlength=n_class ** 2).reshape(n_class, n_class)
return hist
def scores(label_trues, label_preds, n_class):
hist = np.zeros((n_class, n_class))
for lt, lp in zip(label_trues, label_preds):
hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
return hist
def get_result_metrics(histogram):
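    # Rows of the confusion matrix are ground-truth classes and columns are
    # predictions: the diagonal gives true positives, column sums minus the diagonal
    # give false positives, and row sums minus the diagonal give false negatives.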
tp = np.diag(histogram)
fp = np.sum(histogram, 0) - tp
fn = np.sum(histogram, 1) - tp
iou = tp / (tp + fp + fn)
prc = tp / (tp + fn)
opc = np.sum(tp) / np.sum(histogram)
result = {"iou": iou,
"mean_iou": np.nanmean(iou),
"precision_per_class (per class accuracy)": prc,
"mean_precision (class-avg accuracy)": np.nanmean(prc),
"overall_precision (pixel accuracy)": opc}
result = {k: 100*v for k, v in result.items()}
return result
def compute_negative_euclidean(featmap, centroids, metric_function):
centroids = centroids.unsqueeze(-1).unsqueeze(-1)
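    # -||f - c||^2 = -(||f||^2 - 2 f.c + ||c||^2): the constant 1 presumably stands in
    # for ||f||^2 (i.e. features are assumed L2-normalized), metric_function (a 1x1
    # conv with centroid weights) supplies the f.c term, and the last term is ||c||^2.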
return - (1 - 2*metric_function(featmap)\
+ (centroids*centroids).sum(dim=1).unsqueeze(0)) # negative l2 squared
def get_metric_as_conv(centroids):
N, C = centroids.size()
centroids_weight = centroids.unsqueeze(-1).unsqueeze(-1)
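    # A 1x1 convolution whose N filters are the centroids computes, in a single pass,
    # the dot product between every pixel's C-dim feature vector and each centroid.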
metric_function = nn.Conv2d(C, N, 1, padding=0, stride=1, bias=False)
metric_function.weight.data = centroids_weight
metric_function = nn.DataParallel(metric_function)
metric_function = metric_function.cuda()
return metric_function
################################################################################
# General torch ops #
################################################################################
def freeze_all(model):
for param in model.module.parameters():
param.requires_grad = False
def initialize_classifier(args):
classifier = get_linear(args.in_dim, args.K_train)
classifier = nn.DataParallel(classifier)
classifier = classifier.cuda()
return classifier
def get_linear(indim, outdim):
classifier = nn.Conv2d(indim, outdim, kernel_size=1, stride=1, padding=0, bias=True)
classifier.weight.data.normal_(0, 0.01)
classifier.bias.data.zero_()
return classifier
def feature_flatten(feats):
if len(feats.size()) == 2:
# feature already flattened.
return feats
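    # Reshape (N, C, H, W) feature maps into an (N*H*W, C) matrix of per-pixel feature vectors.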
feats = feats.view(feats.size(0), feats.size(1), -1).transpose(2, 1)\
.contiguous().view(-1, feats.size(1))
return feats
################################################################################
# Faiss related #
################################################################################
def get_faiss_module(args):
res = faiss.StandardGpuResources()
cfg = faiss.GpuIndexFlatConfig()
cfg.useFloat16 = False
cfg.device = 0 #NOTE: Single GPU only.
idx = faiss.GpuIndexFlatL2(res, args.in_dim, cfg)
return idx
def get_init_centroids(args, K, featlist, index):
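    # k-means via faiss: train on the (n_samples, in_dim) feature array and return
    # the resulting (K, in_dim) centroid matrix.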
clus = faiss.Clustering(args.in_dim, K)
clus.seed = np.random.randint(args.seed)
clus.niter = args.kmeans_n_iter
clus.max_points_per_centroid = 10000000
clus.train(featlist, index)
return faiss.vector_float_to_array(clus.centroids).reshape(K, args.in_dim)
def module_update_centroids(index, centroids):
index.reset()
index.add(centroids)
return index
def fix_seed_for_reproducability(seed):
"""
    Unfortunately, backward() of the [interpolate] functional never seems to be deterministic.
Below are related threads:
https://github.com/pytorch/pytorch/issues/7068
https://discuss.pytorch.org/t/non-deterministic-behavior-of-pytorch-upsample-interpolate/42842?u=sbelharbi
"""
# Use random seed.
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
cudnn.deterministic = True
cudnn.benchmark = False
def worker_init_fn(seed):
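    # Give each DataLoader worker a distinct but reproducible numpy seed
    # (the worker index is passed in as x).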
return lambda x: np.random.seed(seed + x)
################################################################################
# Training Pipelines #
################################################################################
def postprocess_label(args, K, idx, idx_img, scores, n_dual):
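    # Hard-assign each pixel to its highest-scoring cluster (argmax over the K score maps).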
out = scores[idx].topk(1, dim=0)[1].flatten().detach().cpu().numpy()
# Save labels.
if not os.path.exists(os.path.join(args.save_model_path, 'label_' + str(n_dual))):
os.makedirs(os.path.join(args.save_model_path, 'label_' + str(n_dual)))
torch.save(out, os.path.join(args.save_model_path, 'label_' + str(n_dual), '{}.pkl'.format(idx_img)))
# Count for re-weighting.
counts = torch.tensor(np.bincount(out, minlength=K)).float()
return counts
def eqv_transform_if_needed(args, dataloader, indice, input):
if args.equiv:
input = dataloader.dataset.transform_eqv(indice, input)
return input
def get_transform_params(args):
inv_list = []
eqv_list = []
if args.augment:
if args.blur:
inv_list.append('blur')
if args.grey:
inv_list.append('grey')
if args.jitter:
inv_list.extend(['brightness', 'contrast', 'saturation', 'hue'])
if args.equiv:
if args.h_flip:
eqv_list.append('h_flip')
if args.v_flip:
eqv_list.append('v_flip')
if args.random_crop:
eqv_list.append('random_crop')
return inv_list, eqv_list
def collate_train(batch):
if batch[0][-1] is not None:
indice = [b[0] for b in batch]
image1 = torch.stack([b[1] for b in batch])
image2 = torch.stack([b[2] for b in batch])
label1 = torch.stack([b[3] for b in batch])
label2 = torch.stack([b[4] for b in batch])
return indice, image1, image2, label1, label2
indice = [b[0] for b in batch]
image1 = torch.stack([b[1] for b in batch])
return indice, image1
def collate_eval(batch):
indice = [b[0] for b in batch]
image = torch.stack([b[1] for b in batch])
label = torch.stack([b[2] for b in batch])
return indice, image, label
def collate_train_baseline(batch):
if batch[0][-1] is not None:
return collate_eval(batch)
indice = [b[0] for b in batch]
image = torch.stack([b[1] for b in batch])
return indice, image
|
[
"logging.getLogger",
"logging.StreamHandler",
"numpy.nanmean",
"logging.FileHandler",
"numpy.random.seed",
"numpy.bincount",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"pickle.dump",
"logging.Formatter",
"torch.stack",
"os.path.join",
"torch.nn.DataParallel",
"random.seed",
"numpy.diag",
"torch.nn.Conv2d",
"numpy.sum",
"numpy.zeros",
"numpy.random.randint",
"torch.cuda.manual_seed"
] |
[((539, 558), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (556, 558), False, 'import logging\n'), ((637, 666), 'logging.FileHandler', 'logging.FileHandler', (['log_path'], {}), '(log_path)\n', (656, 666), False, 'import logging\n'), ((841, 864), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (862, 864), False, 'import logging\n'), ((2734, 2762), 'numpy.zeros', 'np.zeros', (['(n_class, n_class)'], {}), '((n_class, n_class))\n', (2742, 2762), True, 'import numpy as np\n'), ((2938, 2956), 'numpy.diag', 'np.diag', (['histogram'], {}), '(histogram)\n', (2945, 2956), True, 'import numpy as np\n'), ((3861, 3912), 'torch.nn.Conv2d', 'nn.Conv2d', (['C', 'N', '(1)'], {'padding': '(0)', 'stride': '(1)', 'bias': '(False)'}), '(C, N, 1, padding=0, stride=1, bias=False)\n', (3870, 3912), True, 'import torch.nn as nn\n'), ((3986, 4018), 'torch.nn.DataParallel', 'nn.DataParallel', (['metric_function'], {}), '(metric_function)\n', (4001, 4018), True, 'import torch.nn as nn\n'), ((4552, 4579), 'torch.nn.DataParallel', 'nn.DataParallel', (['classifier'], {}), '(classifier)\n', (4567, 4579), True, 'import torch.nn as nn\n'), ((4687, 4758), 'torch.nn.Conv2d', 'nn.Conv2d', (['indim', 'outdim'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(indim, outdim, kernel_size=1, stride=1, padding=0, bias=True)\n', (4696, 4758), True, 'import torch.nn as nn\n'), ((5738, 5766), 'numpy.random.randint', 'np.random.randint', (['args.seed'], {}), '(args.seed)\n', (5755, 5766), True, 'import numpy as np\n'), ((6441, 6458), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (6452, 6458), False, 'import random\n'), ((6508, 6528), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6522, 6528), True, 'import numpy as np\n'), ((6533, 6556), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (6550, 6556), False, 'import torch\n'), ((6561, 6589), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (6583, 6589), False, 'import torch\n'), ((6594, 6626), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (6620, 6626), False, 'import torch\n'), ((8722, 8756), 'torch.stack', 'torch.stack', (['[b[1] for b in batch]'], {}), '([b[1] for b in batch])\n', (8733, 8756), False, 'import torch\n'), ((8857, 8891), 'torch.stack', 'torch.stack', (['[b[1] for b in batch]'], {}), '([b[1] for b in batch])\n', (8868, 8891), False, 'import torch\n'), ((8904, 8938), 'torch.stack', 'torch.stack', (['[b[2] for b in batch]'], {}), '([b[2] for b in batch])\n', (8915, 8938), False, 'import torch\n'), ((9130, 9164), 'torch.stack', 'torch.stack', (['[b[1] for b in batch]'], {}), '([b[1] for b in batch])\n', (9141, 9164), False, 'import torch\n'), ((697, 756), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s:%(levelname)s: %(message)s"""'], {}), "('%(asctime)s:%(levelname)s: %(message)s')\n", (714, 756), False, 'import logging\n'), ((897, 929), 'logging.Formatter', 'logging.Formatter', (['"""%(message)s"""'], {}), "('%(message)s')\n", (914, 929), False, 'import logging\n'), ((2966, 2986), 'numpy.sum', 'np.sum', (['histogram', '(0)'], {}), '(histogram, 0)\n', (2972, 2986), True, 'import numpy as np\n'), ((3001, 3021), 'numpy.sum', 'np.sum', (['histogram', '(1)'], {}), '(histogram, 1)\n', (3007, 3021), True, 'import numpy as np\n'), ((3095, 3105), 'numpy.sum', 'np.sum', (['tp'], {}), '(tp)\n', (3101, 3105), True, 'import numpy as np\n'), ((3108, 3125), 'numpy.sum', 'np.sum', 
(['histogram'], {}), '(histogram)\n', (3114, 3125), True, 'import numpy as np\n'), ((3178, 3193), 'numpy.nanmean', 'np.nanmean', (['iou'], {}), '(iou)\n', (3188, 3193), True, 'import numpy as np\n'), ((3309, 3324), 'numpy.nanmean', 'np.nanmean', (['prc'], {}), '(prc)\n', (3319, 3324), True, 'import numpy as np\n'), ((6734, 6758), 'numpy.random.seed', 'np.random.seed', (['(seed + x)'], {}), '(seed + x)\n', (6748, 6758), True, 'import numpy as np\n'), ((8423, 8457), 'torch.stack', 'torch.stack', (['[b[1] for b in batch]'], {}), '([b[1] for b in batch])\n', (8434, 8457), False, 'import torch\n'), ((8475, 8509), 'torch.stack', 'torch.stack', (['[b[2] for b in batch]'], {}), '([b[2] for b in batch])\n', (8486, 8509), False, 'import torch\n'), ((8527, 8561), 'torch.stack', 'torch.stack', (['[b[3] for b in batch]'], {}), '([b[3] for b in batch])\n', (8538, 8561), False, 'import torch\n'), ((8579, 8613), 'torch.stack', 'torch.stack', (['[b[4] for b in batch]'], {}), '([b[4] for b in batch])\n', (8590, 8613), False, 'import torch\n'), ((1344, 1374), 'pickle.dump', 'pickle.dump', (['self.data', 'fp', '(-1)'], {}), '(self.data, fp, -1)\n', (1355, 1374), False, 'import pickle\n'), ((2520, 2607), 'numpy.bincount', 'np.bincount', (['(n_class * label_true[mask] + label_pred[mask])'], {'minlength': '(n_class ** 2)'}), '(n_class * label_true[mask] + label_pred[mask], minlength=\n n_class ** 2)\n', (2531, 2607), True, 'import numpy as np\n'), ((1294, 1317), 'os.path.join', 'os.path.join', (['self.path'], {}), '(self.path)\n', (1306, 1317), False, 'import os\n'), ((7495, 7524), 'numpy.bincount', 'np.bincount', (['out'], {'minlength': 'K'}), '(out, minlength=K)\n', (7506, 7524), True, 'import numpy as np\n')]
|
# Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by <NAME>
# Python translation by <NAME>, with <NAME> and <NAME>
#
# Revised 5 March 2018 - <NAME> - Fixed call to prop_cubic_conv by
# getting rid of the flattening of the coordinate arrays.
import os
import proper
import numpy as np
from math import sin, cos
# from . import lib_dir
lib_dir = os.path.dirname(proper.__file__)
import scipy.signal as ss
if not proper.use_cubic_conv:
from scipy.ndimage.interpolation import map_coordinates
def prop_dm(wf, dm_z0, dm_xc, dm_yc, spacing = 0., **kwargs):
"""Simulate a deformable mirror of specified actuator spacing, including the
effects of the DM influence function.
Parameters
----------
wf : obj
WaveFront class object
dm_z0 : str or numpy ndarray
Either a 2D numpy array containing the surface piston of each DM
actuator in meters or the name of a 2D FITS image file containing the
above
dm_xc, dm_yc : list or numpy ndarray
The location of the optical axis (center of the wavefront) on the DM in
        actuator units (0 to num_actuator-1). The center of the first actuator
is (0.0, 0.0)
spacing : float
Defines the spacing in meters between actuators; must not be used when
n_act_across_pupil is specified.
Returns
-------
dmap : numpy ndarray
Returns DM surface (not wavefront) map in meters
Other Parameters
----------------
FIT : bool
Switch that tells routine that the values in "dm_z" are the desired
surface heights rather than commanded actuator heights, and so the
routine should fit this map, accounting for actuator influence functions,
to determine the necessary actuator heights. An iterative error-minimizing
loop is used for the fit.
NO_APPLY : bool
If set, the DM pattern is not added to the wavefront. Useful if the DM
surface map is needed but should not be applied to the wavefront
N_ACT_ACROSS_PUPIL : int
Specifies the number of actuators that span the X-axis beam diameter. If
it is a whole number, the left edge of the left pixel is aligned with
the left edge of the beam, and the right edge of the right pixel with
the right edge of the beam. This determines the spacing and size of the
actuators. Should not be used when "spacing" value is specified.
XTILT, YTILT, ZTILT : float
Specify the rotation of the DM surface with respect to the wavefront plane
in degrees about the X, Y, Z axes, respectively, with the origin at the
center of the wavefront. The DM surface is interpolated and orthographically
projected onto the wavefront grid. The coordinate system assumes that
the wavefront and initial DM surface are in the X,Y plane with a lower
left origin with Z towards the observer. The rotations are left handed.
The default rotation order is X, Y, then Z unless the /ZYX switch is set.
XYZ or ZYX : bool
Specifies the rotation order if two or more of XTILT, YTILT, or ZTILT
are specified. The default is /XYZ for X, Y, then Z rotations.
Raises
------
ValueError:
User cannot specify both actuator spacing and N_ACT_ACROSS_PUPIL
ValueError:
User must specify either actuator spacing or N_ACT_ACROSS_PUPIL
"""
if "ZYX" in kwargs and "XYZ" in kwargs:
raise ValueError('PROP_DM: Error: Cannot specify both XYZ and ZYX rotation orders. Stopping')
elif not "ZYX" in kwargs and not 'XYZ' in kwargs:
XYZ = 1 # default is rotation around X, then Y, then Z
ZYX = 0
elif "ZYX" in kwargs:
ZYX = 1
XYZ = 0
elif "XYZ" in kwargs:
XYZ = 1
ZYX = 0
if "XTILT" in kwargs:
xtilt = kwargs["XTILT"]
else:
xtilt = 0.
if "YTILT" in kwargs:
ytilt = kwargs["YTILT"]
else:
ytilt = 0.
if "ZTILT" in kwargs:
ztilt = kwargs["ZTILT"]
else:
ztilt = 0.
if type(dm_z0) == str:
dm_z = proper.prop_fits_read(dm_z0) # Read DM setting from FITS file
else:
dm_z = dm_z0
n = proper.prop_get_gridsize(wf)
dx_surf = proper.prop_get_sampling(wf) # sampling of current surface in meters
beamradius = proper.prop_get_beamradius(wf)
# influence function sampling is 0.1 mm, peak at (x,y)=(45,45)
# Influence function has shape = 1x91x91. Saving it as a 2D array
# before continuing with processing
inf = proper.prop_fits_read(os.path.join(lib_dir, "influence_dm5v2.fits"))
inf = inf[0,:,:]
s = inf.shape
nx_inf = s[1]
ny_inf = s[0]
xc_inf = int(nx_inf/2)
yc_inf = int(ny_inf/2)
dx_inf = 0.1e-3 # influence function spacing in meters
dx_dm_inf = 1.e-3 # spacing between DM actuators in meters assumed by influence function
inf_mag = 10
if spacing != 0 and "N_ACT_ACROSS_PUPIL" in kwargs:
raise ValueError("PROP_DM: User cannot specify both actuator spacing and N_ACT_ACROSS_PUPIL. Stopping.")
if spacing == 0 and not "N_ACT_ACROSS_PUPIL" in kwargs:
raise ValueError("PROP_DM: User must specify either actuator spacing or N_ACT_ACROSS_PUPIL. Stopping.")
if "N_ACT_ACROSS_PUPIL" in kwargs:
dx_dm = 2. * beamradius / int(kwargs["N_ACT_ACROSS_PUPIL"])
else:
dx_dm = spacing
dx_inf = dx_inf * dx_dm / dx_dm_inf # Influence function sampling scaled
# to specified DM actuator spacing
if "FIT" in kwargs:
x = (np.arange(5, dtype = np.float64) - 2) * dx_dm
if proper.use_cubic_conv:
inf_kernel = proper.prop_cubic_conv(inf.T, x/dx_inf+xc_inf, x/dx_inf+yc_inf, GRID=True)
else:
xygrid = np.meshgrid(x/dx_inf+xc_inf, x/dx_inf+yc_inf)
inf_kernel = map_coordinates(inf.T, xygrid, order = 3, mode = "nearest")
(dm_z_commanded, dms) = proper.prop_fit_dm(dm_z, inf_kernel)
else:
dm_z_commanded = dm_z
s = dm_z.shape
nx_dm = s[1]
ny_dm = s[0]
# Create subsampled DM grid
margin = 9 * inf_mag
nx_grid = nx_dm * inf_mag + 2 * margin
ny_grid = ny_dm * inf_mag + 2 * margin
xoff_grid = margin + inf_mag/2 # pixel location of 1st actuator center in subsampled grid
yoff_grid = xoff_grid
dm_grid = np.zeros([ny_grid, nx_grid], dtype = np.float64)
x = np.arange(nx_dm, dtype = np.int16) * int(inf_mag) + int(xoff_grid)
y = np.arange(ny_dm, dtype = np.int16) * int(inf_mag) + int(yoff_grid)
dm_grid[np.tile(np.vstack(y), (nx_dm,)), np.tile(x, (ny_dm,1))] = dm_z_commanded
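    # Convolve the sparse grid of actuator pistons with the influence function to
    # obtain the continuous DM surface on the subsampled grid.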
dm_grid = ss.fftconvolve(dm_grid, inf, mode = 'same')
# 3D rotate DM grid and project orthogonally onto wavefront
xdim = int(np.round(np.sqrt(2) * nx_grid * dx_inf / dx_surf)) # grid dimensions (pix) projected onto wavefront
ydim = int(np.round(np.sqrt(2) * ny_grid * dx_inf / dx_surf))
if xdim > n: xdim = n
if ydim > n: ydim = n
x = np.ones((ydim,1), dtype = np.int) * ((np.arange(xdim) - int(xdim/2)) * dx_surf)
y = (np.ones((xdim,1), dtype = np.int) * ((np.arange(ydim) - int(ydim/2)) * dx_surf)).T
a = xtilt * np.pi / 180
b = ytilt * np.pi / 180
g = ztilt * np.pi /180
if XYZ:
m = np.array([ [cos(b)*cos(g), -cos(b)*sin(g), sin(b), 0],
[cos(a)*sin(g) + sin(a)*sin(b)*cos(g), cos(a)*cos(g)-sin(a)*sin(b)*sin(g), -sin(a)*cos(b), 0],
[sin(a)*sin(g)-cos(a)*sin(b)*cos(g), sin(a)*cos(g)+cos(a)*sin(b)*sin(g), cos(a)*cos(b), 0],
[0, 0, 0, 1] ])
else:
m = np.array([ [cos(b)*cos(g), cos(g)*sin(a)*sin(b)-cos(a)*sin(g), cos(a)*cos(g)*sin(b)+sin(a)*sin(g), 0],
[cos(b)*sin(g), cos(a)*cos(g)+sin(a)*sin(b)*sin(g), -cos(g)*sin(a)+cos(a)*sin(b)*sin(g), 0],
[-sin(b), cos(b)*sin(a), cos(a)*cos(b), 0],
[0, 0, 0, 1] ])
# Forward project a square
edge = np.array([[-1.0,-1.0,0.0,0.0], [1.0,-1.0,0.0,0.0], [1.0,1.0,0.0,0.0], [-1.0,1.0,0.0,0.0]])
new_xyz = np.dot(edge, m)
    # determine backward projection for screen-raster-to-DM-surface computation
dx_dxs = (new_xyz[0,0] - new_xyz[1,0]) / (edge[0,0] - edge[1,0])
dx_dys = (new_xyz[1,0] - new_xyz[2,0]) / (edge[1,1] - edge[2,1])
dy_dxs = (new_xyz[0,1] - new_xyz[1,1]) / (edge[0,0] - edge[1,0])
dy_dys = (new_xyz[1,1] - new_xyz[2,1]) / (edge[1,1] - edge[2,1])
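    # Invert the 2x2 linear map (xs, ys) -> (x, y) defined by the partial derivatives
    # above, so that each wavefront-grid point can be traced back to its location on
    # the (unrotated) DM surface grid.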
xs = ( x/dx_dxs - y*dx_dys/(dx_dxs*dy_dys) ) / ( 1 - dy_dxs*dx_dys/(dx_dxs*dy_dys) )
ys = ( y/dy_dys - x*dy_dxs/(dx_dxs*dy_dys) ) / ( 1 - dx_dys*dy_dxs/(dx_dxs*dy_dys) )
xdm = (xs + dm_xc * dx_dm) / dx_inf + xoff_grid
ydm = (ys + dm_yc * dx_dm) / dx_inf + yoff_grid
if proper.use_cubic_conv:
grid = proper.prop_cubic_conv(dm_grid.T, xdm, ydm, GRID = False)
grid = grid.reshape([xdm.shape[1], xdm.shape[0]])
else:
grid = map_coordinates(dm_grid.T, [xdm, ydm], order = 3, mode = "nearest", prefilter = True)
dmap = np.zeros([n,n], dtype = np.float64)
nx_grid, ny_grid = grid.shape
xmin, xmax = int(n/2 - xdim/2), int(n/2 - xdim/2 + nx_grid)
ymin, ymax = int(n/2 - ydim/2), int(n/2 - ydim/2 + ny_grid)
dmap[ymin:ymax, xmin:xmax] = grid
# Random dots sometimes appear in the phase map. This is a little temporary hack to deal with that bug!
import scipy.ndimage
sigma = [1, 1]
dmap = scipy.ndimage.filters.gaussian_filter(dmap, sigma, mode='constant')
if not "NO_APPLY" in kwargs:
proper.prop_add_phase(wf, 2 * dmap) # x2 to convert surface to wavefront error
return dmap
|
[
"numpy.sqrt",
"math.cos",
"numpy.array",
"proper.prop_cubic_conv",
"proper.prop_get_sampling",
"numpy.arange",
"proper.prop_get_beamradius",
"proper.prop_get_gridsize",
"scipy.signal.fftconvolve",
"numpy.dot",
"numpy.vstack",
"numpy.meshgrid",
"numpy.tile",
"numpy.ones",
"proper.prop_fits_read",
"os.path.dirname",
"proper.prop_add_phase",
"proper.prop_fit_dm",
"scipy.ndimage.interpolation.map_coordinates",
"os.path.join",
"numpy.zeros",
"math.sin"
] |
[((592, 624), 'os.path.dirname', 'os.path.dirname', (['proper.__file__'], {}), '(proper.__file__)\n', (607, 624), False, 'import os\n'), ((4461, 4489), 'proper.prop_get_gridsize', 'proper.prop_get_gridsize', (['wf'], {}), '(wf)\n', (4485, 4489), False, 'import proper\n'), ((4504, 4532), 'proper.prop_get_sampling', 'proper.prop_get_sampling', (['wf'], {}), '(wf)\n', (4528, 4532), False, 'import proper\n'), ((4591, 4621), 'proper.prop_get_beamradius', 'proper.prop_get_beamradius', (['wf'], {}), '(wf)\n', (4617, 4621), False, 'import proper\n'), ((6679, 6725), 'numpy.zeros', 'np.zeros', (['[ny_grid, nx_grid]'], {'dtype': 'np.float64'}), '([ny_grid, nx_grid], dtype=np.float64)\n', (6687, 6725), True, 'import numpy as np\n'), ((6978, 7019), 'scipy.signal.fftconvolve', 'ss.fftconvolve', (['dm_grid', 'inf'], {'mode': '"""same"""'}), "(dm_grid, inf, mode='same')\n", (6992, 7019), True, 'import scipy.signal as ss\n'), ((8223, 8330), 'numpy.array', 'np.array', (['[[-1.0, -1.0, 0.0, 0.0], [1.0, -1.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [-1.0,\n 1.0, 0.0, 0.0]]'], {}), '([[-1.0, -1.0, 0.0, 0.0], [1.0, -1.0, 0.0, 0.0], [1.0, 1.0, 0.0, \n 0.0], [-1.0, 1.0, 0.0, 0.0]])\n', (8231, 8330), True, 'import numpy as np\n'), ((8328, 8343), 'numpy.dot', 'np.dot', (['edge', 'm'], {}), '(edge, m)\n', (8334, 8343), True, 'import numpy as np\n'), ((9268, 9302), 'numpy.zeros', 'np.zeros', (['[n, n]'], {'dtype': 'np.float64'}), '([n, n], dtype=np.float64)\n', (9276, 9302), True, 'import numpy as np\n'), ((4359, 4387), 'proper.prop_fits_read', 'proper.prop_fits_read', (['dm_z0'], {}), '(dm_z0)\n', (4380, 4387), False, 'import proper\n'), ((4832, 4877), 'os.path.join', 'os.path.join', (['lib_dir', '"""influence_dm5v2.fits"""'], {}), "(lib_dir, 'influence_dm5v2.fits')\n", (4844, 4877), False, 'import os\n'), ((6260, 6296), 'proper.prop_fit_dm', 'proper.prop_fit_dm', (['dm_z', 'inf_kernel'], {}), '(dm_z, inf_kernel)\n', (6278, 6296), False, 'import proper\n'), ((7331, 7363), 'numpy.ones', 'np.ones', (['(ydim, 1)'], {'dtype': 'np.int'}), '((ydim, 1), dtype=np.int)\n', (7338, 7363), True, 'import numpy as np\n'), ((9029, 9084), 'proper.prop_cubic_conv', 'proper.prop_cubic_conv', (['dm_grid.T', 'xdm', 'ydm'], {'GRID': '(False)'}), '(dm_grid.T, xdm, ydm, GRID=False)\n', (9051, 9084), False, 'import proper\n'), ((9170, 9249), 'scipy.ndimage.interpolation.map_coordinates', 'map_coordinates', (['dm_grid.T', '[xdm, ydm]'], {'order': '(3)', 'mode': '"""nearest"""', 'prefilter': '(True)'}), "(dm_grid.T, [xdm, ydm], order=3, mode='nearest', prefilter=True)\n", (9185, 9249), False, 'from scipy.ndimage.interpolation import map_coordinates\n'), ((9779, 9814), 'proper.prop_add_phase', 'proper.prop_add_phase', (['wf', '(2 * dmap)'], {}), '(wf, 2 * dmap)\n', (9800, 9814), False, 'import proper\n'), ((5986, 6072), 'proper.prop_cubic_conv', 'proper.prop_cubic_conv', (['inf.T', '(x / dx_inf + xc_inf)', '(x / dx_inf + yc_inf)'], {'GRID': '(True)'}), '(inf.T, x / dx_inf + xc_inf, x / dx_inf + yc_inf,\n GRID=True)\n', (6008, 6072), False, 'import proper\n'), ((6096, 6149), 'numpy.meshgrid', 'np.meshgrid', (['(x / dx_inf + xc_inf)', '(x / dx_inf + yc_inf)'], {}), '(x / dx_inf + xc_inf, x / dx_inf + yc_inf)\n', (6107, 6149), True, 'import numpy as np\n'), ((6167, 6222), 'scipy.ndimage.interpolation.map_coordinates', 'map_coordinates', (['inf.T', 'xygrid'], {'order': '(3)', 'mode': '"""nearest"""'}), "(inf.T, xygrid, order=3, mode='nearest')\n", (6182, 6222), False, 'from scipy.ndimage.interpolation import map_coordinates\n'), ((6737, 6769), 
'numpy.arange', 'np.arange', (['nx_dm'], {'dtype': 'np.int16'}), '(nx_dm, dtype=np.int16)\n', (6746, 6769), True, 'import numpy as np\n'), ((6812, 6844), 'numpy.arange', 'np.arange', (['ny_dm'], {'dtype': 'np.int16'}), '(ny_dm, dtype=np.int16)\n', (6821, 6844), True, 'import numpy as np\n'), ((6924, 6946), 'numpy.tile', 'np.tile', (['x', '(ny_dm, 1)'], {}), '(x, (ny_dm, 1))\n', (6931, 6946), True, 'import numpy as np\n'), ((7420, 7452), 'numpy.ones', 'np.ones', (['(xdim, 1)'], {'dtype': 'np.int'}), '((xdim, 1), dtype=np.int)\n', (7427, 7452), True, 'import numpy as np\n'), ((5880, 5910), 'numpy.arange', 'np.arange', (['(5)'], {'dtype': 'np.float64'}), '(5, dtype=np.float64)\n', (5889, 5910), True, 'import numpy as np\n'), ((6899, 6911), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (6908, 6911), True, 'import numpy as np\n'), ((7369, 7384), 'numpy.arange', 'np.arange', (['xdim'], {}), '(xdim)\n', (7378, 7384), True, 'import numpy as np\n'), ((7458, 7473), 'numpy.arange', 'np.arange', (['ydim'], {}), '(ydim)\n', (7467, 7473), True, 'import numpy as np\n'), ((7656, 7662), 'math.sin', 'sin', (['b'], {}), '(b)\n', (7659, 7662), False, 'from math import sin, cos\n'), ((7111, 7121), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7118, 7121), True, 'import numpy as np\n'), ((7226, 7236), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7233, 7236), True, 'import numpy as np\n'), ((7625, 7631), 'math.cos', 'cos', (['b'], {}), '(b)\n', (7628, 7631), False, 'from math import sin, cos\n'), ((7632, 7638), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7635, 7638), False, 'from math import sin, cos\n'), ((7648, 7654), 'math.sin', 'sin', (['g'], {}), '(g)\n', (7651, 7654), False, 'from math import sin, cos\n'), ((7760, 7766), 'math.cos', 'cos', (['b'], {}), '(b)\n', (7763, 7766), False, 'from math import sin, cos\n'), ((7853, 7859), 'math.cos', 'cos', (['a'], {}), '(a)\n', (7856, 7859), False, 'from math import sin, cos\n'), ((7860, 7866), 'math.cos', 'cos', (['b'], {}), '(b)\n', (7863, 7866), False, 'from math import sin, cos\n'), ((7930, 7936), 'math.cos', 'cos', (['b'], {}), '(b)\n', (7933, 7936), False, 'from math import sin, cos\n'), ((7937, 7943), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7940, 7943), False, 'from math import sin, cos\n'), ((8024, 8030), 'math.cos', 'cos', (['b'], {}), '(b)\n', (8027, 8030), False, 'from math import sin, cos\n'), ((8031, 8037), 'math.sin', 'sin', (['g'], {}), '(g)\n', (8034, 8037), False, 'from math import sin, cos\n'), ((8120, 8126), 'math.sin', 'sin', (['b'], {}), '(b)\n', (8123, 8126), False, 'from math import sin, cos\n'), ((8128, 8134), 'math.cos', 'cos', (['b'], {}), '(b)\n', (8131, 8134), False, 'from math import sin, cos\n'), ((8135, 8141), 'math.sin', 'sin', (['a'], {}), '(a)\n', (8138, 8141), False, 'from math import sin, cos\n'), ((8143, 8149), 'math.cos', 'cos', (['a'], {}), '(a)\n', (8146, 8149), False, 'from math import sin, cos\n'), ((8150, 8156), 'math.cos', 'cos', (['b'], {}), '(b)\n', (8153, 8156), False, 'from math import sin, cos\n'), ((7641, 7647), 'math.cos', 'cos', (['b'], {}), '(b)\n', (7644, 7647), False, 'from math import sin, cos\n'), ((7678, 7684), 'math.cos', 'cos', (['a'], {}), '(a)\n', (7681, 7684), False, 'from math import sin, cos\n'), ((7685, 7691), 'math.sin', 'sin', (['g'], {}), '(g)\n', (7688, 7691), False, 'from math import sin, cos\n'), ((7708, 7714), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7711, 7714), False, 'from math import sin, cos\n'), ((7716, 7722), 'math.cos', 'cos', (['a'], {}), '(a)\n', (7719, 7722), False, 
'from math import sin, cos\n'), ((7723, 7729), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7726, 7729), False, 'from math import sin, cos\n'), ((7744, 7750), 'math.sin', 'sin', (['g'], {}), '(g)\n', (7747, 7750), False, 'from math import sin, cos\n'), ((7753, 7759), 'math.sin', 'sin', (['a'], {}), '(a)\n', (7756, 7759), False, 'from math import sin, cos\n'), ((7781, 7787), 'math.sin', 'sin', (['a'], {}), '(a)\n', (7784, 7787), False, 'from math import sin, cos\n'), ((7788, 7794), 'math.sin', 'sin', (['g'], {}), '(g)\n', (7791, 7794), False, 'from math import sin, cos\n'), ((7809, 7815), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7812, 7815), False, 'from math import sin, cos\n'), ((7817, 7823), 'math.sin', 'sin', (['a'], {}), '(a)\n', (7820, 7823), False, 'from math import sin, cos\n'), ((7824, 7830), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7827, 7830), False, 'from math import sin, cos\n'), ((7845, 7851), 'math.sin', 'sin', (['g'], {}), '(g)\n', (7848, 7851), False, 'from math import sin, cos\n'), ((7959, 7965), 'math.sin', 'sin', (['b'], {}), '(b)\n', (7962, 7965), False, 'from math import sin, cos\n'), ((7966, 7972), 'math.cos', 'cos', (['a'], {}), '(a)\n', (7969, 7972), False, 'from math import sin, cos\n'), ((7973, 7979), 'math.sin', 'sin', (['g'], {}), '(g)\n', (7976, 7979), False, 'from math import sin, cos\n'), ((7995, 8001), 'math.sin', 'sin', (['b'], {}), '(b)\n', (7998, 8001), False, 'from math import sin, cos\n'), ((8002, 8008), 'math.sin', 'sin', (['a'], {}), '(a)\n', (8005, 8008), False, 'from math import sin, cos\n'), ((8009, 8015), 'math.sin', 'sin', (['g'], {}), '(g)\n', (8012, 8015), False, 'from math import sin, cos\n'), ((8039, 8045), 'math.cos', 'cos', (['a'], {}), '(a)\n', (8042, 8045), False, 'from math import sin, cos\n'), ((8046, 8052), 'math.cos', 'cos', (['g'], {}), '(g)\n', (8049, 8052), False, 'from math import sin, cos\n'), ((8067, 8073), 'math.sin', 'sin', (['g'], {}), '(g)\n', (8070, 8073), False, 'from math import sin, cos\n'), ((8083, 8089), 'math.sin', 'sin', (['a'], {}), '(a)\n', (8086, 8089), False, 'from math import sin, cos\n'), ((8104, 8110), 'math.sin', 'sin', (['g'], {}), '(g)\n', (8107, 8110), False, 'from math import sin, cos\n'), ((7694, 7700), 'math.sin', 'sin', (['a'], {}), '(a)\n', (7697, 7700), False, 'from math import sin, cos\n'), ((7701, 7707), 'math.sin', 'sin', (['b'], {}), '(b)\n', (7704, 7707), False, 'from math import sin, cos\n'), ((7730, 7736), 'math.sin', 'sin', (['a'], {}), '(a)\n', (7733, 7736), False, 'from math import sin, cos\n'), ((7737, 7743), 'math.sin', 'sin', (['b'], {}), '(b)\n', (7740, 7743), False, 'from math import sin, cos\n'), ((7795, 7801), 'math.cos', 'cos', (['a'], {}), '(a)\n', (7798, 7801), False, 'from math import sin, cos\n'), ((7802, 7808), 'math.sin', 'sin', (['b'], {}), '(b)\n', (7805, 7808), False, 'from math import sin, cos\n'), ((7831, 7837), 'math.cos', 'cos', (['a'], {}), '(a)\n', (7834, 7837), False, 'from math import sin, cos\n'), ((7838, 7844), 'math.sin', 'sin', (['b'], {}), '(b)\n', (7841, 7844), False, 'from math import sin, cos\n'), ((7945, 7951), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7948, 7951), False, 'from math import sin, cos\n'), ((7952, 7958), 'math.sin', 'sin', (['a'], {}), '(a)\n', (7955, 7958), False, 'from math import sin, cos\n'), ((7981, 7987), 'math.cos', 'cos', (['a'], {}), '(a)\n', (7984, 7987), False, 'from math import sin, cos\n'), ((7988, 7994), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7991, 7994), False, 'from math import sin, cos\n'), ((8053, 8059), 'math.sin', 'sin', 
(['a'], {}), '(a)\n', (8056, 8059), False, 'from math import sin, cos\n'), ((8060, 8066), 'math.sin', 'sin', (['b'], {}), '(b)\n', (8063, 8066), False, 'from math import sin, cos\n'), ((8076, 8082), 'math.cos', 'cos', (['g'], {}), '(g)\n', (8079, 8082), False, 'from math import sin, cos\n'), ((8090, 8096), 'math.cos', 'cos', (['a'], {}), '(a)\n', (8093, 8096), False, 'from math import sin, cos\n'), ((8097, 8103), 'math.sin', 'sin', (['b'], {}), '(b)\n', (8100, 8103), False, 'from math import sin, cos\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""dataset
Custom dataset.
"""
import numpy as np
from mindspore import dataset as ds
def get_data(num, img_size=(1, 32, 32), num_classes=10, is_onehot=True):
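    # Yield `num` random samples: a float32 image of shape img_size plus either a
    # one-hot (num_classes,) vector or a length-1 class index, both float32.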
for _ in range(num):
img = np.random.randn(*img_size)
target = np.random.randint(0, num_classes)
target_ret = np.array([target]).astype(np.float32)
if is_onehot:
target_onehot = np.zeros(shape=(num_classes,))
target_onehot[target] = 1
target_ret = target_onehot.astype(np.float32)
yield img.astype(np.float32), target_ret
def create_train_dataset(num_data=32768, batch_size=32, repeat_size=1):
input_data = ds.GeneratorDataset(list(get_data(num_data)), column_names=['data', 'label'])
input_data = input_data.batch(batch_size, drop_remainder=True)
input_data = input_data.repeat(repeat_size)
return input_data
def create_eval_dataset(num_data=2048, batch_size=2048, repeat_size=1):
input_data = ds.GeneratorDataset(list(get_data(num_data)), column_names=['data', 'label'])
input_data = input_data.batch(batch_size)
input_data = input_data.repeat(repeat_size)
return input_data
|
[
"numpy.array",
"numpy.random.randint",
"numpy.random.randn",
"numpy.zeros"
] |
[((866, 892), 'numpy.random.randn', 'np.random.randn', (['*img_size'], {}), '(*img_size)\n', (881, 892), True, 'import numpy as np\n'), ((910, 943), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_classes'], {}), '(0, num_classes)\n', (927, 943), True, 'import numpy as np\n'), ((1053, 1083), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_classes,)'}), '(shape=(num_classes,))\n', (1061, 1083), True, 'import numpy as np\n'), ((965, 983), 'numpy.array', 'np.array', (['[target]'], {}), '([target])\n', (973, 983), True, 'import numpy as np\n')]
|
# Copyright (c) 2017, IGLU consortium
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import time
import multiprocessing
import logging
import numpy as np
import unittest
import matplotlib.pyplot as plt
from home_platform.env import BasicEnvironment
from panda3d.core import LVector3f
TEST_DATA_DIR = os.path.join(os.path.dirname(
os.path.realpath(__file__)), "..", "data")
TEST_SUNCG_DATA_DIR = os.path.join(os.path.dirname(
os.path.realpath(__file__)), "..", "data", "suncg")
class TestBasicEnvironment(unittest.TestCase):
def testRender(self):
env = BasicEnvironment("0004d52d1aeeb8ae6de39d6bd993e992",
suncgDatasetRoot=TEST_SUNCG_DATA_DIR, depth=True)
env.agent.setPos(LVector3f(42, -39, 1))
env.agent.setHpr(LVector3f(60.0, 0.0, 0.0))
env.step()
image = env.renderWorld.getRgbImages()['agent-0']
depth = env.renderWorld.getDepthImages(mode='distance')['agent-0']
fig = plt.figure(figsize=(16, 8))
plt.axis("off")
ax = plt.subplot(121)
ax.imshow(image)
ax = plt.subplot(122)
ax.imshow(depth / np.max(depth), cmap='binary')
plt.show(block=False)
time.sleep(1.0)
plt.close(fig)
env.destroy()
def testGenerateSpawnPositions(self):
env = BasicEnvironment("0004d52d1aeeb8ae6de39d6bd993e992",
suncgDatasetRoot=TEST_SUNCG_DATA_DIR, depth=False)
occupancyMap, occupancyMapCoord, positions = env.generateSpawnPositions(
n=10)
xmin, ymin = np.min(occupancyMapCoord, axis=(0, 1))
xmax, ymax = np.max(occupancyMapCoord, axis=(0, 1))
fig = plt.figure()
plt.axis("on")
ax = plt.subplot(111)
ax.imshow(occupancyMap, cmap='gray', extent=[xmin, xmax, ymin, ymax])
ax.scatter(positions[:, 0], positions[:, 1], s=40, c=[1.0, 0.0, 0.0])
plt.show(block=False)
time.sleep(1.0)
plt.close(fig)
env.destroy()
def testMultiprocessing(self):
        # Spawn new processes with independent simulations using the
# multiprocessing module
# Not supported in OSX for now
if sys.platform == 'darwin':
return
nbProcesses = 2
nbSteps = 100
def worker():
env = BasicEnvironment("0004d52d1aeeb8ae6de39d6bd993e992", suncgDatasetRoot=TEST_SUNCG_DATA_DIR,
depth=False, debug=True)
env.agent.setPos(LVector3f(45, -42, 1))
env.agent.setHpr(LVector3f(45.0, 0.0, 0.0))
# Simulation loop
for _ in range(nbSteps):
env.step()
_ = env.getObservation()
env.destroy()
processes = []
for _ in range(nbProcesses):
p = multiprocessing.Process(target=worker)
processes.append(p)
p.start()
for p in processes:
p.join()
if __name__ == '__main__':
logging.basicConfig(level=logging.WARN)
np.seterr(all='raise')
unittest.main()
|
[
"logging.basicConfig",
"multiprocessing.Process",
"panda3d.core.LVector3f",
"numpy.min",
"time.sleep",
"numpy.max",
"os.path.realpath",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.close",
"home_platform.env.BasicEnvironment",
"unittest.main",
"matplotlib.pyplot.axis",
"numpy.seterr",
"matplotlib.pyplot.show"
] |
[((4506, 4545), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARN'}), '(level=logging.WARN)\n', (4525, 4545), False, 'import logging\n'), ((4550, 4572), 'numpy.seterr', 'np.seterr', ([], {'all': '"""raise"""'}), "(all='raise')\n", (4559, 4572), True, 'import numpy as np\n'), ((4577, 4592), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4590, 4592), False, 'import unittest\n'), ((1820, 1846), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1836, 1846), False, 'import os\n'), ((1919, 1945), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1935, 1945), False, 'import os\n'), ((2061, 2168), 'home_platform.env.BasicEnvironment', 'BasicEnvironment', (['"""0004d52d1aeeb8ae6de39d6bd993e992"""'], {'suncgDatasetRoot': 'TEST_SUNCG_DATA_DIR', 'depth': '(True)'}), "('0004d52d1aeeb8ae6de39d6bd993e992', suncgDatasetRoot=\n TEST_SUNCG_DATA_DIR, depth=True)\n", (2077, 2168), False, 'from home_platform.env import BasicEnvironment\n'), ((2465, 2492), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 8)'}), '(figsize=(16, 8))\n', (2475, 2492), True, 'import matplotlib.pyplot as plt\n'), ((2501, 2516), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2509, 2516), True, 'import matplotlib.pyplot as plt\n'), ((2530, 2546), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (2541, 2546), True, 'import matplotlib.pyplot as plt\n'), ((2585, 2601), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (2596, 2601), True, 'import matplotlib.pyplot as plt\n'), ((2666, 2687), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (2674, 2687), True, 'import matplotlib.pyplot as plt\n'), ((2696, 2711), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (2706, 2711), False, 'import time\n'), ((2720, 2734), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2729, 2734), True, 'import matplotlib.pyplot as plt\n'), ((2816, 2924), 'home_platform.env.BasicEnvironment', 'BasicEnvironment', (['"""0004d52d1aeeb8ae6de39d6bd993e992"""'], {'suncgDatasetRoot': 'TEST_SUNCG_DATA_DIR', 'depth': '(False)'}), "('0004d52d1aeeb8ae6de39d6bd993e992', suncgDatasetRoot=\n TEST_SUNCG_DATA_DIR, depth=False)\n", (2832, 2924), False, 'from home_platform.env import BasicEnvironment\n'), ((3073, 3111), 'numpy.min', 'np.min', (['occupancyMapCoord'], {'axis': '(0, 1)'}), '(occupancyMapCoord, axis=(0, 1))\n', (3079, 3111), True, 'import numpy as np\n'), ((3133, 3171), 'numpy.max', 'np.max', (['occupancyMapCoord'], {'axis': '(0, 1)'}), '(occupancyMapCoord, axis=(0, 1))\n', (3139, 3171), True, 'import numpy as np\n'), ((3187, 3199), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3197, 3199), True, 'import matplotlib.pyplot as plt\n'), ((3208, 3222), 'matplotlib.pyplot.axis', 'plt.axis', (['"""on"""'], {}), "('on')\n", (3216, 3222), True, 'import matplotlib.pyplot as plt\n'), ((3236, 3252), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (3247, 3252), True, 'import matplotlib.pyplot as plt\n'), ((3417, 3438), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (3425, 3438), True, 'import matplotlib.pyplot as plt\n'), ((3447, 3462), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (3457, 3462), False, 'import time\n'), ((3471, 3485), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3480, 3485), True, 'import matplotlib.pyplot as plt\n'), ((2221, 
2242), 'panda3d.core.LVector3f', 'LVector3f', (['(42)', '(-39)', '(1)'], {}), '(42, -39, 1)\n', (2230, 2242), False, 'from panda3d.core import LVector3f\n'), ((2269, 2294), 'panda3d.core.LVector3f', 'LVector3f', (['(60.0)', '(0.0)', '(0.0)'], {}), '(60.0, 0.0, 0.0)\n', (2278, 2294), False, 'from panda3d.core import LVector3f\n'), ((3830, 3950), 'home_platform.env.BasicEnvironment', 'BasicEnvironment', (['"""0004d52d1aeeb8ae6de39d6bd993e992"""'], {'suncgDatasetRoot': 'TEST_SUNCG_DATA_DIR', 'depth': '(False)', 'debug': '(True)'}), "('0004d52d1aeeb8ae6de39d6bd993e992', suncgDatasetRoot=\n TEST_SUNCG_DATA_DIR, depth=False, debug=True)\n", (3846, 3950), False, 'from home_platform.env import BasicEnvironment\n'), ((4330, 4368), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'worker'}), '(target=worker)\n', (4353, 4368), False, 'import multiprocessing\n'), ((2628, 2641), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (2634, 2641), True, 'import numpy as np\n'), ((4011, 4032), 'panda3d.core.LVector3f', 'LVector3f', (['(45)', '(-42)', '(1)'], {}), '(45, -42, 1)\n', (4020, 4032), False, 'from panda3d.core import LVector3f\n'), ((4063, 4088), 'panda3d.core.LVector3f', 'LVector3f', (['(45.0)', '(0.0)', '(0.0)'], {}), '(45.0, 0.0, 0.0)\n', (4072, 4088), False, 'from panda3d.core import LVector3f\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Generate dummy data for tests/examples
"""
import numpy as np
def dummy_gauss_image(x=None, y=None,
xhalfrng=1.5, yhalfrng=None, xcen=0.5, ycen=0.9,
xnpts=1024, ynpts=None, xsigma=0.55, ysigma=0.25,
noise=0.3):
"""Create a dummy 2D Gaussian image with noise
Parameters
----------
x, y : 1D arrays (optional)
arrays where to generate the image [None -> generated]
xhalfrng : float (optional)
half range of the X axis [1.5]
yhalfrng : float or None (optional)
half range of the Y axis [None -> xhalfrng]
xcen : float (optional)
X center [0.5]
ycen : float (optional)
Y center [0.9]
xnpts : int (optional)
number of points X [1024]
ynpts : int or None (optional)
number of points Y [None -> xnpts]
xsigma : float (optional)
sigma X [0.55]
ysigma : float (optional)
sigma Y [0.25]
noise : float (optional)
random noise level between 0 and 1 [0.3]
Returns
-------
x, y : 1D arrays
signal : 2D array
"""
if yhalfrng is None:
yhalfrng = xhalfrng
if ycen is None:
ycen = xcen
if ynpts is None:
ynpts = xnpts
if x is None:
x = np.linspace(xcen-xhalfrng, xcen+xhalfrng, xnpts)
if y is None:
y = np.linspace(ycen-yhalfrng, ycen+yhalfrng, ynpts)
xx, yy = np.meshgrid(x, y)
signal = np.exp(-((xx-xcen)**2 / (2*xsigma**2) +
((yy-ycen)**2 / (2*ysigma**2))))
# add noise
signal += noise * np.random.random(size=signal.shape)
return x, y, signal
def dummy_gauss_curve(xhalfrng=15, xcen=5, xnpts=512, xsigma=0.65, noise=0.3):
"""Create a dummy 1D Gaussian curve with noise
Parameters
----------
    xhalfrng : float (optional)
        half range of the X axis [15]
    xcen : float (optional)
        X center [5]
    xnpts : int (optional)
        number of points X [512]
    xsigma : float (optional)
        sigma X [0.65]
noise : float (optional)
random noise level between 0 and 1 [0.3]
Returns
-------
x, signal : 1D arrays
"""
x = np.linspace(xcen-xhalfrng, xcen+xhalfrng, xnpts)
signal = np.exp(-((x-xcen)**2 / (2*xsigma**2)))
# add noise
signal += noise * np.random.random(size=signal.shape)
return x, signal
def main():
"""Show two plot windows with dummy data"""
from silx import sx
sx.enable_gui()
from sloth.gui.plot.plot1D import Plot1D
from sloth.gui.plot.plot2D import Plot2D
p1 = Plot1D()
p2 = Plot2D()
x, y = dummy_gauss_curve()
p1.addCurve(x, y, legend="test dummy Gaussian with noise")
p1.show()
x, y, signal = dummy_gauss_image()
p2.addImage(signal, x=x, y=y, legend="test dummy image")
p2.show()
input("Press enter to close windows")
if __name__ == '__main__':
main()
|
[
"numpy.random.random",
"numpy.exp",
"silx.sx.enable_gui",
"numpy.linspace",
"numpy.meshgrid",
"sloth.gui.plot.plot1D.Plot1D",
"sloth.gui.plot.plot2D.Plot2D"
] |
[((1478, 1495), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1489, 1495), True, 'import numpy as np\n'), ((1509, 1600), 'numpy.exp', 'np.exp', (['(-((xx - xcen) ** 2 / (2 * xsigma ** 2) + (yy - ycen) ** 2 / (2 * ysigma ** 2))\n )'], {}), '(-((xx - xcen) ** 2 / (2 * xsigma ** 2) + (yy - ycen) ** 2 / (2 * \n ysigma ** 2)))\n', (1515, 1600), True, 'import numpy as np\n'), ((2246, 2298), 'numpy.linspace', 'np.linspace', (['(xcen - xhalfrng)', '(xcen + xhalfrng)', 'xnpts'], {}), '(xcen - xhalfrng, xcen + xhalfrng, xnpts)\n', (2257, 2298), True, 'import numpy as np\n'), ((2308, 2354), 'numpy.exp', 'np.exp', (['(-((x - xcen) ** 2 / (2 * xsigma ** 2)))'], {}), '(-((x - xcen) ** 2 / (2 * xsigma ** 2)))\n', (2314, 2354), True, 'import numpy as np\n'), ((2532, 2547), 'silx.sx.enable_gui', 'sx.enable_gui', ([], {}), '()\n', (2545, 2547), False, 'from silx import sx\n'), ((2647, 2655), 'sloth.gui.plot.plot1D.Plot1D', 'Plot1D', ([], {}), '()\n', (2653, 2655), False, 'from sloth.gui.plot.plot1D import Plot1D\n'), ((2665, 2673), 'sloth.gui.plot.plot2D.Plot2D', 'Plot2D', ([], {}), '()\n', (2671, 2673), False, 'from sloth.gui.plot.plot2D import Plot2D\n'), ((1337, 1389), 'numpy.linspace', 'np.linspace', (['(xcen - xhalfrng)', '(xcen + xhalfrng)', 'xnpts'], {}), '(xcen - xhalfrng, xcen + xhalfrng, xnpts)\n', (1348, 1389), True, 'import numpy as np\n'), ((1416, 1468), 'numpy.linspace', 'np.linspace', (['(ycen - yhalfrng)', '(ycen + yhalfrng)', 'ynpts'], {}), '(ycen - yhalfrng, ycen + yhalfrng, ynpts)\n', (1427, 1468), True, 'import numpy as np\n'), ((1642, 1677), 'numpy.random.random', 'np.random.random', ([], {'size': 'signal.shape'}), '(size=signal.shape)\n', (1658, 1677), True, 'import numpy as np\n'), ((2385, 2420), 'numpy.random.random', 'np.random.random', ([], {'size': 'signal.shape'}), '(size=signal.shape)\n', (2401, 2420), True, 'import numpy as np\n')]
|
import numpy as np
from numpy.random import RandomState
from numpy.testing import assert_allclose
from nnlib.l_layer.backward import linear_backward, linear_backward_activation, model_backward
from nnlib.utils.derivative import sigmoid_backward, relu_backward
from nnlib.utils.activation import sigmoid, relu
def test_linear_backward():
rand = RandomState(1)
dZ = rand.randn(1, 2)
A = rand.randn(3, 2)
W = rand.randn(1, 3)
dA_prev, dW, db = linear_backward(dZ, (A, 1, W), alpha=0, keep_prob=1)
assert_allclose(dA_prev, [
[0.51822968, -0.19517421],
[-0.40506361, 0.15255393],
[2.37496825, -0.89445391]])
assert_allclose(dW, [[-0.10076895, 1.40685096, 1.64992505]])
assert_allclose(db, [[0.50629448]])
def test_linear_backward_activation_sigmoid():
rand = RandomState(2)
dA = rand.randn(1, 2)
A = rand.randn(3, 2)
W = rand.randn(1, 3)
_ = rand.randn(1, 1) # noqa: F841
Z = rand.randn(1, 2)
dA_prev, dW, db = linear_backward_activation(dA, ((A, 1, W), (Z, sigmoid(Z))), sigmoid_backward, alpha=0, keep_prob=1)
assert_allclose(dA_prev, np.array([
[0.11017994, 0.01105339],
[0.09466817, 0.00949723],
[-0.05743092, -0.00576154]]), rtol=1e-05)
assert_allclose(dW, np.array([[0.10266786, 0.09778551, -0.01968084]]), rtol=1e-05)
assert_allclose(db, np.array([[-0.05729622]]), rtol=1e-05)
def test_linear_backward_activation_relu():
rand = RandomState(2)
dA = rand.randn(1, 2)
A = rand.randn(3, 2)
W = rand.randn(1, 3)
_ = rand.randn(1, 1) # noqa: F841
Z = rand.randn(1, 2)
dA_prev, dW, db = linear_backward_activation(dA, ((A, 1, W), (Z, relu(Z))), relu_backward, alpha=0, keep_prob=1)
assert_allclose(dA_prev, np.array([
[0.44090989, 0.],
[0.37883606, 0.],
[-0.2298228, 0.]]), rtol=1e-05)
assert_allclose(dW, np.array([[0.44513824, 0.37371418, -0.10478989]]), rtol=1e-05)
assert_allclose(db, np.array([[-0.20837892]]), rtol=1e-05)
def test_model_backward():
rand = RandomState(3)
AL = rand.randn(1, 2)
Y = np.array([[1, 0]])
X = rand.randn(4, 2)
W1 = rand.randn(3, 4)
b1 = rand.randn(3, 1)
Z1 = rand.randn(3, 2)
A1 = rand.randn(3, 2)
W2 = rand.randn(1, 3)
b2 = rand.randn(1, 1)
Z2 = rand.randn(1, 2)
parameters = dict(
W={1: W1, 2: W2},
b={1: b1, 2: b2}
)
caches = dict(
Z={1: Z1, 2: Z2},
A={0: X, 1: A1, 2: sigmoid(Z2)},
D={0: 1, 1: 1}
)
grads = model_backward(AL, Y, parameters, caches, alpha=0, keep_prob=1)
assert_allclose(
grads["dW"][1],
np.array([
[0.41010002, 0.07807203, 0.13798444, 0.10502167],
[0., 0., 0., 0.],
[0.05283652, 0.01005865, 0.01777766, 0.0135308]]),
rtol=1e-05
)
assert_allclose(
grads["db"][1],
np.array([
[-0.22007063],
[0.],
[-0.02835349]])
)
assert_allclose(
grads["dA"][1],
np.array([
[0.12913162, -0.44014127],
[-0.14175655, 0.48317296],
[0.01663708, -0.05670698]]),
rtol=1e-05
)
def test_model_backward_l2_regularization():
random_state = RandomState(1)
X = random_state.randn(3, 5)
Y = np.array([[1, 1, 0, 1, 0]])
cache = (
np.array([[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115],
[-1.98043538, 4.1600994, 0.79051021, 1.46493512, -0.45506242]]),
np.array([[0., 3.32524635, 2.13994541, 2.60700654, 0.],
[0., 4.1600994, 0.79051021, 1.46493512, 0.]]),
np.array([[-1.09989127, -0.17242821, -0.87785842],
[0.04221375, 0.58281521, -1.10061918]]),
np.array([[1.14472371],
[0.90159072]]),
np.array([[0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547],
[-0.69166075, -3.47645987, -2.25194702, -2.65416996, -0.69166075],
[-0.39675353, -4.62285846, -2.61101729, -3.22874921, -0.39675353]]),
np.array([[0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]),
np.array([[0.50249434, 0.90085595],
[-0.68372786, -0.12289023],
[-0.93576943, -0.26788808]]),
np.array([[0.53035547],
[-0.69166075],
[-0.39675353]]),
np.array(
[[-0.3771104, -4.10060224, -1.60539468, -2.18416951, -0.3771104]]),
np.array(
[[0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]]),
np.array([[-0.6871727, -0.84520564, -0.67124613]]),
np.array([[-0.0126646]])
)
Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, _, W3, b3 = cache
parameters = dict(
W={1: W1, 2: W2, 3: W3},
b={1: b1, 2: b2, 3: b3}
)
caches = dict(
Z={1: Z1, 2: Z2, 3: Z3},
A={0: X, 1: A1, 2: A2, 3: sigmoid(Z3)},
D={0: 1, 1: 1, 2: 1}
)
AL = caches["A"][3]
grads = model_backward(AL, Y, parameters, caches, alpha=0.7, keep_prob=1)
dW1 = np.array([[-0.25604646, 0.12298827, - 0.28297129],
[-0.17706303, 0.34536094, - 0.4410571]])
dW2 = np.array([[0.79276486, 0.85133918],
[-0.0957219, - 0.01720463],
[-0.13100772, - 0.03750433]])
dW3 = np.array([[-1.77691347, - 0.11832879, - 0.09397446]])
assert_allclose(grads['dW'][1], dW1)
assert_allclose(grads['dW'][2], dW2, rtol=1e-05)
assert_allclose(grads['dW'][3], dW3)
def test_model_backward_dropout():
random_state = RandomState(1)
X = random_state.randn(3, 5)
Y = np.array([[1, 1, 0, 1, 0]])
cache = (
np.array([[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115],
[-1.98043538, 4.1600994, 0.79051021, 1.46493512, -0.45506242]]),
np.array([[True, False, True, True, True],
[True, True, True, True, False]],
dtype=bool),
np.array([[0., 0., 4.27989081, 5.21401307, 0.],
[0., 8.32019881, 1.58102041, 2.92987024, 0.]]),
np.array([[-1.09989127, -0.17242821, -0.87785842],
[0.04221375, 0.58281521, -1.10061918]]),
np.array([[1.14472371],
[0.90159072]]),
np.array([[0.53035547, 8.02565606, 4.10524802, 5.78975856, 0.53035547],
[-0.69166075, -1.71413186, -3.81223329, -4.61667916, -0.69166075],
[-0.39675353, -2.62563561, -4.82528105, -6.0607449, -0.39675353]]),
np.array([[True, False, True, False, True],
[False, True, False, True, True],
[False, False, True, False, False]],
dtype=bool),
np.array([[1.06071093, 0., 8.21049603, 0., 1.06071093],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]),
np.array([[0.50249434, 0.90085595],
[-0.68372786, -0.12289023],
[-0.93576943, -0.26788808]]),
np.array([[0.53035547],
[-0.69166075],
[-0.39675353]]),
np.array([[-0.7415562, -0.0126646, -5.65469333, -0.0126646, -0.7415562]]),
np.array([[0.32266394, 0.49683389, 0.00348883, 0.49683389, 0.32266394]]),
np.array([[-0.6871727, -0.84520564, -0.67124613]]),
np.array([[-0.0126646]])
)
Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3 = cache
parameters = dict(
W={1: W1, 2: W2, 3: W3},
b={1: b1, 2: b2, 3: b3}
)
caches = dict(
Z={1: Z1, 2: Z2, 3: Z3},
A={0: X, 1: A1, 2: A2, 3: sigmoid(Z3)},
D={0: 1, 1: D1, 2: D2}
)
grads = model_backward(A3, Y, parameters, caches, alpha=0, keep_prob=0.8)
dA1 = np.array([[0.36544439, 0., -0.00188233, 0., -0.17408748],
[0.65515713, 0., -0.00337459, 0., -0.]])
dA2 = np.array([[0.58180856, 0., -0.00299679, 0., -0.27715731],
[0., 0.53159854, -0., 0.53159854, -0.34089673],
[0., 0., -0.00292733, 0., -0., ]])
assert_allclose(grads['dA'][1], dA1, rtol=1e-05)
assert_allclose(grads['dA'][2], dA2, rtol=1e-05)
|
[
"nnlib.utils.activation.sigmoid",
"nnlib.l_layer.backward.linear_backward",
"nnlib.utils.activation.relu",
"numpy.testing.assert_allclose",
"numpy.array",
"nnlib.l_layer.backward.model_backward",
"numpy.random.RandomState"
] |
[((351, 365), 'numpy.random.RandomState', 'RandomState', (['(1)'], {}), '(1)\n', (362, 365), False, 'from numpy.random import RandomState\n'), ((465, 517), 'nnlib.l_layer.backward.linear_backward', 'linear_backward', (['dZ', '(A, 1, W)'], {'alpha': '(0)', 'keep_prob': '(1)'}), '(dZ, (A, 1, W), alpha=0, keep_prob=1)\n', (480, 517), False, 'from nnlib.l_layer.backward import linear_backward, linear_backward_activation, model_backward\n'), ((523, 635), 'numpy.testing.assert_allclose', 'assert_allclose', (['dA_prev', '[[0.51822968, -0.19517421], [-0.40506361, 0.15255393], [2.37496825, -\n 0.89445391]]'], {}), '(dA_prev, [[0.51822968, -0.19517421], [-0.40506361, \n 0.15255393], [2.37496825, -0.89445391]])\n', (538, 635), False, 'from numpy.testing import assert_allclose\n'), ((660, 720), 'numpy.testing.assert_allclose', 'assert_allclose', (['dW', '[[-0.10076895, 1.40685096, 1.64992505]]'], {}), '(dW, [[-0.10076895, 1.40685096, 1.64992505]])\n', (675, 720), False, 'from numpy.testing import assert_allclose\n'), ((725, 760), 'numpy.testing.assert_allclose', 'assert_allclose', (['db', '[[0.50629448]]'], {}), '(db, [[0.50629448]])\n', (740, 760), False, 'from numpy.testing import assert_allclose\n'), ((821, 835), 'numpy.random.RandomState', 'RandomState', (['(2)'], {}), '(2)\n', (832, 835), False, 'from numpy.random import RandomState\n'), ((1464, 1478), 'numpy.random.RandomState', 'RandomState', (['(2)'], {}), '(2)\n', (1475, 1478), False, 'from numpy.random import RandomState\n'), ((2058, 2072), 'numpy.random.RandomState', 'RandomState', (['(3)'], {}), '(3)\n', (2069, 2072), False, 'from numpy.random import RandomState\n'), ((2107, 2125), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (2115, 2125), True, 'import numpy as np\n'), ((2580, 2643), 'nnlib.l_layer.backward.model_backward', 'model_backward', (['AL', 'Y', 'parameters', 'caches'], {'alpha': '(0)', 'keep_prob': '(1)'}), '(AL, Y, parameters, caches, alpha=0, keep_prob=1)\n', (2594, 2643), False, 'from nnlib.l_layer.backward import linear_backward, linear_backward_activation, model_backward\n'), ((3398, 3412), 'numpy.random.RandomState', 'RandomState', (['(1)'], {}), '(1)\n', (3409, 3412), False, 'from numpy.random import RandomState\n'), ((3454, 3481), 'numpy.array', 'np.array', (['[[1, 1, 0, 1, 0]]'], {}), '([[1, 1, 0, 1, 0]])\n', (3462, 3481), True, 'import numpy as np\n'), ((5316, 5381), 'nnlib.l_layer.backward.model_backward', 'model_backward', (['AL', 'Y', 'parameters', 'caches'], {'alpha': '(0.7)', 'keep_prob': '(1)'}), '(AL, Y, parameters, caches, alpha=0.7, keep_prob=1)\n', (5330, 5381), False, 'from nnlib.l_layer.backward import linear_backward, linear_backward_activation, model_backward\n'), ((5393, 5486), 'numpy.array', 'np.array', (['[[-0.25604646, 0.12298827, -0.28297129], [-0.17706303, 0.34536094, -0.4410571]]'], {}), '([[-0.25604646, 0.12298827, -0.28297129], [-0.17706303, 0.34536094,\n -0.4410571]])\n', (5401, 5486), True, 'import numpy as np\n'), ((5517, 5612), 'numpy.array', 'np.array', (['[[0.79276486, 0.85133918], [-0.0957219, -0.01720463], [-0.13100772, -\n 0.03750433]]'], {}), '([[0.79276486, 0.85133918], [-0.0957219, -0.01720463], [-0.13100772,\n -0.03750433]])\n', (5525, 5612), True, 'import numpy as np\n'), ((5662, 5713), 'numpy.array', 'np.array', (['[[-1.77691347, -0.11832879, -0.09397446]]'], {}), '([[-1.77691347, -0.11832879, -0.09397446]])\n', (5670, 5713), True, 'import numpy as np\n'), ((5721, 5757), 'numpy.testing.assert_allclose', 'assert_allclose', (["grads['dW'][1]", 'dW1'], {}), 
"(grads['dW'][1], dW1)\n", (5736, 5757), False, 'from numpy.testing import assert_allclose\n'), ((5762, 5810), 'numpy.testing.assert_allclose', 'assert_allclose', (["grads['dW'][2]", 'dW2'], {'rtol': '(1e-05)'}), "(grads['dW'][2], dW2, rtol=1e-05)\n", (5777, 5810), False, 'from numpy.testing import assert_allclose\n'), ((5815, 5851), 'numpy.testing.assert_allclose', 'assert_allclose', (["grads['dW'][3]", 'dW3'], {}), "(grads['dW'][3], dW3)\n", (5830, 5851), False, 'from numpy.testing import assert_allclose\n'), ((5908, 5922), 'numpy.random.RandomState', 'RandomState', (['(1)'], {}), '(1)\n', (5919, 5922), False, 'from numpy.random import RandomState\n'), ((5964, 5991), 'numpy.array', 'np.array', (['[[1, 1, 0, 1, 0]]'], {}), '([[1, 1, 0, 1, 0]])\n', (5972, 5991), True, 'import numpy as np\n'), ((8048, 8113), 'nnlib.l_layer.backward.model_backward', 'model_backward', (['A3', 'Y', 'parameters', 'caches'], {'alpha': '(0)', 'keep_prob': '(0.8)'}), '(A3, Y, parameters, caches, alpha=0, keep_prob=0.8)\n', (8062, 8113), False, 'from nnlib.l_layer.backward import linear_backward, linear_backward_activation, model_backward\n'), ((8125, 8233), 'numpy.array', 'np.array', (['[[0.36544439, 0.0, -0.00188233, 0.0, -0.17408748], [0.65515713, 0.0, -\n 0.00337459, 0.0, -0.0]]'], {}), '([[0.36544439, 0.0, -0.00188233, 0.0, -0.17408748], [0.65515713, \n 0.0, -0.00337459, 0.0, -0.0]])\n', (8133, 8233), True, 'import numpy as np\n'), ((8254, 8409), 'numpy.array', 'np.array', (['[[0.58180856, 0.0, -0.00299679, 0.0, -0.27715731], [0.0, 0.53159854, -0.0, \n 0.53159854, -0.34089673], [0.0, 0.0, -0.00292733, 0.0, -0.0]]'], {}), '([[0.58180856, 0.0, -0.00299679, 0.0, -0.27715731], [0.0, \n 0.53159854, -0.0, 0.53159854, -0.34089673], [0.0, 0.0, -0.00292733, 0.0,\n -0.0]])\n', (8262, 8409), True, 'import numpy as np\n'), ((8442, 8490), 'numpy.testing.assert_allclose', 'assert_allclose', (["grads['dA'][1]", 'dA1'], {'rtol': '(1e-05)'}), "(grads['dA'][1], dA1, rtol=1e-05)\n", (8457, 8490), False, 'from numpy.testing import assert_allclose\n'), ((8495, 8543), 'numpy.testing.assert_allclose', 'assert_allclose', (["grads['dA'][2]", 'dA2'], {'rtol': '(1e-05)'}), "(grads['dA'][2], dA2, rtol=1e-05)\n", (8510, 8543), False, 'from numpy.testing import assert_allclose\n'), ((1128, 1222), 'numpy.array', 'np.array', (['[[0.11017994, 0.01105339], [0.09466817, 0.00949723], [-0.05743092, -0.00576154]\n ]'], {}), '([[0.11017994, 0.01105339], [0.09466817, 0.00949723], [-0.05743092,\n -0.00576154]])\n', (1136, 1222), True, 'import numpy as np\n'), ((1281, 1330), 'numpy.array', 'np.array', (['[[0.10266786, 0.09778551, -0.01968084]]'], {}), '([[0.10266786, 0.09778551, -0.01968084]])\n', (1289, 1330), True, 'import numpy as np\n'), ((1368, 1393), 'numpy.array', 'np.array', (['[[-0.05729622]]'], {}), '([[-0.05729622]])\n', (1376, 1393), True, 'import numpy as np\n'), ((1765, 1832), 'numpy.array', 'np.array', (['[[0.44090989, 0.0], [0.37883606, 0.0], [-0.2298228, 0.0]]'], {}), '([[0.44090989, 0.0], [0.37883606, 0.0], [-0.2298228, 0.0]])\n', (1773, 1832), True, 'import numpy as np\n'), ((1892, 1941), 'numpy.array', 'np.array', (['[[0.44513824, 0.37371418, -0.10478989]]'], {}), '([[0.44513824, 0.37371418, -0.10478989]])\n', (1900, 1941), True, 'import numpy as np\n'), ((1979, 2004), 'numpy.array', 'np.array', (['[[-0.20837892]]'], {}), '([[-0.20837892]])\n', (1987, 2004), True, 'import numpy as np\n'), ((2706, 2841), 'numpy.array', 'np.array', (['[[0.41010002, 0.07807203, 0.13798444, 0.10502167], [0.0, 0.0, 0.0, 0.0], [\n 0.05283652, 0.01005865, 
0.01777766, 0.0135308]]'], {}), '([[0.41010002, 0.07807203, 0.13798444, 0.10502167], [0.0, 0.0, 0.0,\n 0.0], [0.05283652, 0.01005865, 0.01777766, 0.0135308]])\n', (2714, 2841), True, 'import numpy as np\n'), ((2982, 3029), 'numpy.array', 'np.array', (['[[-0.22007063], [0.0], [-0.02835349]]'], {}), '([[-0.22007063], [0.0], [-0.02835349]])\n', (2990, 3029), True, 'import numpy as np\n'), ((3153, 3248), 'numpy.array', 'np.array', (['[[0.12913162, -0.44014127], [-0.14175655, 0.48317296], [0.01663708, -\n 0.05670698]]'], {}), '([[0.12913162, -0.44014127], [-0.14175655, 0.48317296], [0.01663708,\n -0.05670698]])\n', (3161, 3248), True, 'import numpy as np\n'), ((3504, 3646), 'numpy.array', 'np.array', (['[[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115], [-\n 1.98043538, 4.1600994, 0.79051021, 1.46493512, -0.45506242]]'], {}), '([[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115], [\n -1.98043538, 4.1600994, 0.79051021, 1.46493512, -0.45506242]])\n', (3512, 3646), True, 'import numpy as np\n'), ((3675, 3785), 'numpy.array', 'np.array', (['[[0.0, 3.32524635, 2.13994541, 2.60700654, 0.0], [0.0, 4.1600994, \n 0.79051021, 1.46493512, 0.0]]'], {}), '([[0.0, 3.32524635, 2.13994541, 2.60700654, 0.0], [0.0, 4.1600994, \n 0.79051021, 1.46493512, 0.0]])\n', (3683, 3785), True, 'import numpy as np\n'), ((3812, 3906), 'numpy.array', 'np.array', (['[[-1.09989127, -0.17242821, -0.87785842], [0.04221375, 0.58281521, -1.10061918]\n ]'], {}), '([[-1.09989127, -0.17242821, -0.87785842], [0.04221375, 0.58281521,\n -1.10061918]])\n', (3820, 3906), True, 'import numpy as np\n'), ((3931, 3969), 'numpy.array', 'np.array', (['[[1.14472371], [0.90159072]]'], {}), '([[1.14472371], [0.90159072]])\n', (3939, 3969), True, 'import numpy as np\n'), ((3997, 4213), 'numpy.array', 'np.array', (['[[0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547], [-0.69166075,\n -3.47645987, -2.25194702, -2.65416996, -0.69166075], [-0.39675353, -\n 4.62285846, -2.61101729, -3.22874921, -0.39675353]]'], {}), '([[0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547], [-\n 0.69166075, -3.47645987, -2.25194702, -2.65416996, -0.69166075], [-\n 0.39675353, -4.62285846, -2.61101729, -3.22874921, -0.39675353]])\n', (4005, 4213), True, 'import numpy as np\n'), ((4253, 4384), 'numpy.array', 'np.array', (['[[0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547], [0.0, 0.0, \n 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547], [\n 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]])\n', (4261, 4384), True, 'import numpy as np\n'), ((4427, 4524), 'numpy.array', 'np.array', (['[[0.50249434, 0.90085595], [-0.68372786, -0.12289023], [-0.93576943, -\n 0.26788808]]'], {}), '([[0.50249434, 0.90085595], [-0.68372786, -0.12289023], [-\n 0.93576943, -0.26788808]])\n', (4435, 4524), True, 'import numpy as np\n'), ((4566, 4620), 'numpy.array', 'np.array', (['[[0.53035547], [-0.69166075], [-0.39675353]]'], {}), '([[0.53035547], [-0.69166075], [-0.39675353]])\n', (4574, 4620), True, 'import numpy as np\n'), ((4666, 4741), 'numpy.array', 'np.array', (['[[-0.3771104, -4.10060224, -1.60539468, -2.18416951, -0.3771104]]'], {}), '([[-0.3771104, -4.10060224, -1.60539468, -2.18416951, -0.3771104]])\n', (4674, 4741), True, 'import numpy as np\n'), ((4764, 4836), 'numpy.array', 'np.array', (['[[0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]]'], {}), '([[0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]])\n', (4772, 4836), True, 'import numpy as 
np\n'), ((4863, 4913), 'numpy.array', 'np.array', (['[[-0.6871727, -0.84520564, -0.67124613]]'], {}), '([[-0.6871727, -0.84520564, -0.67124613]])\n', (4871, 4913), True, 'import numpy as np\n'), ((4923, 4947), 'numpy.array', 'np.array', (['[[-0.0126646]]'], {}), '([[-0.0126646]])\n', (4931, 4947), True, 'import numpy as np\n'), ((6014, 6156), 'numpy.array', 'np.array', (['[[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115], [-\n 1.98043538, 4.1600994, 0.79051021, 1.46493512, -0.45506242]]'], {}), '([[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115], [\n -1.98043538, 4.1600994, 0.79051021, 1.46493512, -0.45506242]])\n', (6022, 6156), True, 'import numpy as np\n'), ((6179, 6271), 'numpy.array', 'np.array', (['[[True, False, True, True, True], [True, True, True, True, False]]'], {'dtype': 'bool'}), '([[True, False, True, True, True], [True, True, True, True, False]],\n dtype=bool)\n', (6187, 6271), True, 'import numpy as np\n'), ((6312, 6416), 'numpy.array', 'np.array', (['[[0.0, 0.0, 4.27989081, 5.21401307, 0.0], [0.0, 8.32019881, 1.58102041, \n 2.92987024, 0.0]]'], {}), '([[0.0, 0.0, 4.27989081, 5.21401307, 0.0], [0.0, 8.32019881, \n 1.58102041, 2.92987024, 0.0]])\n', (6320, 6416), True, 'import numpy as np\n'), ((6434, 6528), 'numpy.array', 'np.array', (['[[-1.09989127, -0.17242821, -0.87785842], [0.04221375, 0.58281521, -1.10061918]\n ]'], {}), '([[-1.09989127, -0.17242821, -0.87785842], [0.04221375, 0.58281521,\n -1.10061918]])\n', (6442, 6528), True, 'import numpy as np\n'), ((6553, 6591), 'numpy.array', 'np.array', (['[[1.14472371], [0.90159072]]'], {}), '([[1.14472371], [0.90159072]])\n', (6561, 6591), True, 'import numpy as np\n'), ((6619, 6834), 'numpy.array', 'np.array', (['[[0.53035547, 8.02565606, 4.10524802, 5.78975856, 0.53035547], [-0.69166075,\n -1.71413186, -3.81223329, -4.61667916, -0.69166075], [-0.39675353, -\n 2.62563561, -4.82528105, -6.0607449, -0.39675353]]'], {}), '([[0.53035547, 8.02565606, 4.10524802, 5.78975856, 0.53035547], [-\n 0.69166075, -1.71413186, -3.81223329, -4.61667916, -0.69166075], [-\n 0.39675353, -2.62563561, -4.82528105, -6.0607449, -0.39675353]])\n', (6627, 6834), True, 'import numpy as np\n'), ((6870, 7001), 'numpy.array', 'np.array', (['[[True, False, True, False, True], [False, True, False, True, True], [False,\n False, True, False, False]]'], {'dtype': 'bool'}), '([[True, False, True, False, True], [False, True, False, True, True\n ], [False, False, True, False, False]], dtype=bool)\n', (6878, 7001), True, 'import numpy as np\n'), ((7059, 7176), 'numpy.array', 'np.array', (['[[1.06071093, 0.0, 8.21049603, 0.0, 1.06071093], [0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[1.06071093, 0.0, 8.21049603, 0.0, 1.06071093], [0.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]])\n', (7067, 7176), True, 'import numpy as np\n'), ((7205, 7302), 'numpy.array', 'np.array', (['[[0.50249434, 0.90085595], [-0.68372786, -0.12289023], [-0.93576943, -\n 0.26788808]]'], {}), '([[0.50249434, 0.90085595], [-0.68372786, -0.12289023], [-\n 0.93576943, -0.26788808]])\n', (7213, 7302), True, 'import numpy as np\n'), ((7343, 7397), 'numpy.array', 'np.array', (['[[0.53035547], [-0.69166075], [-0.39675353]]'], {}), '([[0.53035547], [-0.69166075], [-0.39675353]])\n', (7351, 7397), True, 'import numpy as np\n'), ((7443, 7516), 'numpy.array', 'np.array', (['[[-0.7415562, -0.0126646, -5.65469333, -0.0126646, -0.7415562]]'], {}), '([[-0.7415562, -0.0126646, -5.65469333, -0.0126646, -0.7415562]])\n', (7451, 7516), True, 'import 
numpy as np\n'), ((7526, 7598), 'numpy.array', 'np.array', (['[[0.32266394, 0.49683389, 0.00348883, 0.49683389, 0.32266394]]'], {}), '([[0.32266394, 0.49683389, 0.00348883, 0.49683389, 0.32266394]])\n', (7534, 7598), True, 'import numpy as np\n'), ((7608, 7658), 'numpy.array', 'np.array', (['[[-0.6871727, -0.84520564, -0.67124613]]'], {}), '([[-0.6871727, -0.84520564, -0.67124613]])\n', (7616, 7658), True, 'import numpy as np\n'), ((7668, 7692), 'numpy.array', 'np.array', (['[[-0.0126646]]'], {}), '([[-0.0126646]])\n', (7676, 7692), True, 'import numpy as np\n'), ((1045, 1055), 'nnlib.utils.activation.sigmoid', 'sigmoid', (['Z'], {}), '(Z)\n', (1052, 1055), False, 'from nnlib.utils.activation import sigmoid, relu\n'), ((1688, 1695), 'nnlib.utils.activation.relu', 'relu', (['Z'], {}), '(Z)\n', (1692, 1695), False, 'from nnlib.utils.activation import sigmoid, relu\n'), ((2512, 2523), 'nnlib.utils.activation.sigmoid', 'sigmoid', (['Z2'], {}), '(Z2)\n', (2519, 2523), False, 'from nnlib.utils.activation import sigmoid, relu\n'), ((5218, 5229), 'nnlib.utils.activation.sigmoid', 'sigmoid', (['Z3'], {}), '(Z3)\n', (5225, 5229), False, 'from nnlib.utils.activation import sigmoid, relu\n'), ((7972, 7983), 'nnlib.utils.activation.sigmoid', 'sigmoid', (['Z3'], {}), '(Z3)\n', (7979, 7983), False, 'from nnlib.utils.activation import sigmoid, relu\n')]
|
from typing import Mapping, Any, Sequence
import numpy as np
import heapq
import math
from tqdm import tqdm
import scipy.optimize
import cvxpy as cvx
def n_bias(x_count: np.ndarray, bias: float):
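    # Total count remaining after subtracting the bias from every bin and clipping negatives to zero.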
# return np.sum(x_count[x_count >= bias])
clipped = np.clip(x_count - bias, a_min=0, a_max=None)
return np.sum(clipped)
def cost(bs, ns):
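    # Objective value: (sum of biases)^2 plus one quarter of the sum of squared adjusted counts.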
return np.sum(bs) ** 2 + (1 / 4) * (np.sum(ns ** 2))
def opt_cvx(
x_counts: Sequence[np.ndarray],
sizes: Sequence[int],
n_iter: int=10
) -> np.ndarray:
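    # Convex program solved with cvxpy/ECOS: minimize 4*(sum_i B_i)^2 + sum_i (sum(pos(x_i - B_i)) / size_i)^2
    # subject to B >= 0, then return the rounded bias values.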
n = len(sizes)
Bs = cvx.Variable(n)
constraints = [
Bs >= 0
]
term2 = 0
for i in range(n):
x_count = x_counts[i]
size = sizes[i]
term2 += cvx.square(cvx.sum(cvx.pos(x_count - Bs[i])) / size)
o = cvx.Minimize(
4 * cvx.square(cvx.sum(Bs)) + term2
)
prob = cvx.Problem(o, constraints)
sol = prob.solve(solver=cvx.ECOS)
b_values = Bs.value
n_adj = np.zeros(n)
for i in range(n):
n_adj[i] = n_bias(x_counts[i], b_values[i]) / sizes[i]
print("Cost: {}".format(cost(b_values, n_adj)))
return np.round(b_values)
def n_deriv(x_count, bias, nraw=1, s=1):
return nraw/s**2 * np.sum(x_count >= bias)
base = 2.0
def convert_to_bs(b_pows):
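    # Map exponents to biases as base**b_pows, treating anything below 1 as a zero bias.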
bs = base**b_pows
if isinstance(bs, np.ndarray):
bs[bs < 1] = 0
else:
if bs < 1:
bs = 0
# bs = np.floor(2.0 ** b_pows)
return bs
def opt_sequence_2(
x_counts: Sequence[np.ndarray],
sizes: Sequence[int],
n_iter: int=10
) -> np.ndarray:
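    # Greedy coordinate update: pop the series with the steepest cost derivative from a heap
    # (priorities stored negated), re-optimize its bias with a 1-D scalar minimization,
    # then push it back with an updated priority.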
n = len(x_counts)
bs = np.zeros(n)
n_adj = np.zeros(n)
for i in range(n):
n_adj[i] = n_bias(x_counts[i], bs[i]) / sizes[i]
pq = []
for s_idx in range(len(x_counts)):
n_raw = n_adj[s_idx] * sizes[s_idx]
heapq.heappush(
pq,
(-n_deriv(x_counts[s_idx], bs[s_idx], nraw=n_raw, s=sizes[s_idx]), s_idx)
)
print("Optimizing Bias")
for cur_iter in tqdm(range(n_iter)):
_, opt_idx = heapq.heappop(pq)
# opt_idx = cur_iter % 3
# print("bs:{}".format(bs))
# print("ns:{}".format(n_adj))
# print("cost: {}".format(old_cost))
old_cost = cost(bs, n_adj)
def cost_b_fun(b):
new_bs = bs.copy()
new_adj = n_adj.copy()
new_bs[opt_idx] = b
new_adj[opt_idx] = n_bias(x_counts[opt_idx], b) / sizes[opt_idx]
return cost(new_bs, new_adj)
max_b = np.sum(x_counts[opt_idx])/sizes[opt_idx]
bracket = None
if bs[opt_idx] > 0:
bracket = (0, bs[opt_idx], max_b)
res = scipy.optimize.minimize_scalar(
cost_b_fun,
bracket=bracket,
bounds=(0, max_b),
tol=0.1
)
best_b = res.x
print("best b: {}".format(best_b))
new_cost = res.fun
print("Old Cost: {}".format(old_cost))
print("New Cost: {}".format(new_cost))
# if (new_cost > old_cost*.98):
# break
bs[opt_idx] = best_b
n_adj[opt_idx] = n_bias(x_counts[opt_idx], bs[opt_idx]) / sizes[opt_idx]
n_raw = n_adj[opt_idx] * sizes[opt_idx]
heapq.heappush(
pq,
(-n_deriv(x_counts[opt_idx], bs[opt_idx], nraw=n_raw, s=sizes[opt_idx]), opt_idx)
)
print("Heap: {}".format(pq))
return bs
def opt_sequence(
x_counts: Sequence[np.ndarray],
sizes: Sequence[int],
n_iter: int=10
) -> np.ndarray:
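    # Variant of opt_sequence_2: each bias is a power of base (via convert_to_bs) and its exponent
    # moves by -1, 0, or +1 per iteration; the loop stops early when keeping the exponent unchanged is best.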
n = len(x_counts)
b_pows = np.zeros(n) - 1
bs = convert_to_bs(b_pows)
n_adj = np.zeros(n)
for i in range(n):
n_adj[i] = n_bias(x_counts[i], bs[i]) / sizes[i]
pq = []
for s_idx in range(len(x_counts)):
heapq.heappush(
pq,
(-n_deriv(x_counts[s_idx], bs[s_idx]), s_idx)
)
shifts = np.array([-1, 0, 1])
print("Optimizing Bias")
for cur_iter in tqdm(range(n_iter)):
_, opt_idx = heapq.heappop(pq)
# print("bs:{}".format(bs))
# print("ns:{}".format(n_adj))
# print("cost: {}".format(old_cost))
new_costs = np.zeros(3)
for shift_idx, cur_shift in enumerate(shifts):
cur_b_pow = b_pows[opt_idx] + cur_shift
bs[opt_idx] = convert_to_bs(cur_b_pow)
# bs[opt_idx] = math.floor(2.0 ** cur_b_pow)
n_adj[opt_idx] = n_bias(x_counts[opt_idx], bs[opt_idx]) / sizes[opt_idx]
new_costs[shift_idx] = cost(bs, n_adj)
# print("i:{},b:{},deltas:{}".format(opt_idx, cur_b_pow, new_costs - old_cost))
best_shift_idx = np.argmin(new_costs)
print("New Cost: {}".format(new_costs[best_shift_idx]))
b_pows[opt_idx] += shifts[best_shift_idx]
# bs[opt_idx] = math.floor(2.0 ** b_pows[opt_idx])
bs[opt_idx] = convert_to_bs(b_pows[opt_idx])
n_adj[opt_idx] = n_bias(x_counts[opt_idx], bs[opt_idx]) / sizes[opt_idx]
if shifts[best_shift_idx] == 0:
break
heapq.heappush(
pq,
(-n_deriv(x_counts[opt_idx], bs[opt_idx]), opt_idx)
)
return bs
|
[
"numpy.clip",
"cvxpy.Variable",
"cvxpy.Problem",
"cvxpy.pos",
"cvxpy.sum",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"heapq.heappop",
"numpy.argmin",
"numpy.round"
] |
[((263, 307), 'numpy.clip', 'np.clip', (['(x_count - bias)'], {'a_min': '(0)', 'a_max': 'None'}), '(x_count - bias, a_min=0, a_max=None)\n', (270, 307), True, 'import numpy as np\n'), ((319, 334), 'numpy.sum', 'np.sum', (['clipped'], {}), '(clipped)\n', (325, 334), True, 'import numpy as np\n'), ((565, 580), 'cvxpy.Variable', 'cvx.Variable', (['n'], {}), '(n)\n', (577, 580), True, 'import cvxpy as cvx\n'), ((867, 894), 'cvxpy.Problem', 'cvx.Problem', (['o', 'constraints'], {}), '(o, constraints)\n', (878, 894), True, 'import cvxpy as cvx\n'), ((969, 980), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (977, 980), True, 'import numpy as np\n'), ((1130, 1148), 'numpy.round', 'np.round', (['b_values'], {}), '(b_values)\n', (1138, 1148), True, 'import numpy as np\n'), ((1620, 1631), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1628, 1631), True, 'import numpy as np\n'), ((1644, 1655), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1652, 1655), True, 'import numpy as np\n'), ((3657, 3668), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3665, 3668), True, 'import numpy as np\n'), ((3923, 3943), 'numpy.array', 'np.array', (['[-1, 0, 1]'], {}), '([-1, 0, 1])\n', (3931, 3943), True, 'import numpy as np\n'), ((1216, 1239), 'numpy.sum', 'np.sum', (['(x_count >= bias)'], {}), '(x_count >= bias)\n', (1222, 1239), True, 'import numpy as np\n'), ((2060, 2077), 'heapq.heappop', 'heapq.heappop', (['pq'], {}), '(pq)\n', (2073, 2077), False, 'import heapq\n'), ((3598, 3609), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3606, 3609), True, 'import numpy as np\n'), ((4035, 4052), 'heapq.heappop', 'heapq.heappop', (['pq'], {}), '(pq)\n', (4048, 4052), False, 'import heapq\n'), ((4202, 4213), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4210, 4213), True, 'import numpy as np\n'), ((4684, 4704), 'numpy.argmin', 'np.argmin', (['new_costs'], {}), '(new_costs)\n', (4693, 4704), True, 'import numpy as np\n'), ((366, 376), 'numpy.sum', 'np.sum', (['bs'], {}), '(bs)\n', (372, 376), True, 'import numpy as np\n'), ((395, 410), 'numpy.sum', 'np.sum', (['(ns ** 2)'], {}), '(ns ** 2)\n', (401, 410), True, 'import numpy as np\n'), ((2534, 2559), 'numpy.sum', 'np.sum', (['x_counts[opt_idx]'], {}), '(x_counts[opt_idx])\n', (2540, 2559), True, 'import numpy as np\n'), ((750, 774), 'cvxpy.pos', 'cvx.pos', (['(x_count - Bs[i])'], {}), '(x_count - Bs[i])\n', (757, 774), True, 'import cvxpy as cvx\n'), ((829, 840), 'cvxpy.sum', 'cvx.sum', (['Bs'], {}), '(Bs)\n', (836, 840), True, 'import cvxpy as cvx\n')]
|
import glob
import matplotlib.pyplot as plt
import numpy as np
import sys
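# Plot curves of 1 minus the logged metric (the error rate, if the logs store accuracy) from
# mnist_net_* train/valid log files in the directory given as sys.argv[1].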
plt.ion()
data_files = list(glob.glob(sys.argv[1]+'/mnist_net_*_train.log'))
valid_data_files = list(glob.glob(sys.argv[1]+'/mnist_net_*_valid.log'))
for fname in data_files:
data = np.loadtxt(fname).reshape(-1, 3)
name = fname.split('/')[-1]
plt.plot(data[:, 0], 1-data[:, 2], label=name)
for fname in valid_data_files:
data = np.loadtxt(fname).reshape(-1, 2)
name = fname.split('/')[-1]
plt.plot(data[:, 0], 1-data[:, 1], label=name)
plt.legend(loc=1)
raw_input('Press Enter.')
|
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ion",
"numpy.loadtxt",
"glob.glob"
] |
[((75, 84), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (82, 84), True, 'import matplotlib.pyplot as plt\n'), ((527, 544), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (537, 544), True, 'import matplotlib.pyplot as plt\n'), ((104, 153), 'glob.glob', 'glob.glob', (["(sys.argv[1] + '/mnist_net_*_train.log')"], {}), "(sys.argv[1] + '/mnist_net_*_train.log')\n", (113, 153), False, 'import glob\n'), ((177, 226), 'glob.glob', 'glob.glob', (["(sys.argv[1] + '/mnist_net_*_valid.log')"], {}), "(sys.argv[1] + '/mnist_net_*_valid.log')\n", (186, 226), False, 'import glob\n'), ((326, 374), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:, 0]', '(1 - data[:, 2])'], {'label': 'name'}), '(data[:, 0], 1 - data[:, 2], label=name)\n', (334, 374), True, 'import matplotlib.pyplot as plt\n'), ((479, 527), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:, 0]', '(1 - data[:, 1])'], {'label': 'name'}), '(data[:, 0], 1 - data[:, 1], label=name)\n', (487, 527), True, 'import matplotlib.pyplot as plt\n'), ((261, 278), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {}), '(fname)\n', (271, 278), True, 'import numpy as np\n'), ((414, 431), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {}), '(fname)\n', (424, 431), True, 'import numpy as np\n')]
|
import argparse
import glob
import os
import random
import re
from dataclasses import dataclass
from functools import partial
from math import ceil
from typing import List, Optional
import numpy as np
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from tqdm import tqdm
import util
tqdm.monitor_interval = 0
tqdm = partial(tqdm, bar_format="{l_bar}{r_bar}")
TRAIN = "train"
DEV = "dev"
TEST = "test"
class Optimizer(util.NamedEnum):
sgd = "sgd"
adadelta = "adadelta"
adam = "adam"
amsgrad = "amsgrad"
class Scheduler(util.NamedEnum):
reducewhenstuck = "reducewhenstuck"
warmupinvsqr = "warmupinvsqr"
def setup_seed(seed):
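    # Seed the Python, NumPy, and PyTorch (including CUDA) RNGs for reproducibility.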
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
@dataclass
class Evaluation:
filepath: str
devloss: float
evaluation_result: Optional[List[util.Eval]]
class BaseTrainer(object):
"""docstring for Trainer."""
def __init__(self):
super().__init__()
self.parser = argparse.ArgumentParser()
self.set_args()
self.params = self.get_params()
util.maybe_mkdir(self.params.model)
self.logger = util.get_logger(
self.params.model + ".log", log_level=self.params.loglevel
)
for key, value in vars(self.params).items():
self.logger.info("command line argument: %s - %r", key, value)
setup_seed(self.params.seed)
self.data = None
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = None
self.optimizer = None
self.min_lr = 0
self.scheduler = None
self.evaluator = None
self.global_steps = 0
self.last_devloss = float("inf")
self.models: List[Evaluation] = list()
def set_args(self):
"""
get_args
"""
# fmt: off
parser = self.parser
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--train', required=True, type=str, nargs='+')
parser.add_argument('--dev', required=True, type=str, nargs='+')
parser.add_argument('--test', default=None, type=str, nargs='+')
parser.add_argument('--model', required=True, help='dump model filename')
parser.add_argument('--load', default='', help='load model and continue training; with `smart`, recover training automatically')
parser.add_argument('--bs', default=20, type=int, help='training batch size')
parser.add_argument('--epochs', default=20, type=int, help='maximum training epochs')
parser.add_argument('--max_steps', default=0, type=int, help='maximum training steps')
parser.add_argument('--warmup_steps', default=4000, type=int, help='number of warm up steps')
parser.add_argument('--total_eval', default=-1, type=int, help='total number of evaluation')
parser.add_argument('--optimizer', default=Optimizer.adam, type=Optimizer, choices=list(Optimizer))
parser.add_argument('--scheduler', default=Scheduler.reducewhenstuck, type=Scheduler, choices=list(Scheduler))
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--min_lr', default=1e-5, type=float, help='minimum learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum of SGD')
parser.add_argument('--beta1', default=0.9, type=float, help='beta1 of Adam')
parser.add_argument('--beta2', default=0.999, type=float, help='beta2 of Adam')
parser.add_argument('--estop', default=1e-8, type=float, help='early stopping criterion')
parser.add_argument('--cooldown', default=0, type=int, help='cooldown of `ReduceLROnPlateau`')
parser.add_argument('--patience', default=0, type=int, help='patience of `ReduceLROnPlateau`')
parser.add_argument('--discount_factor', default=0.5, type=float, help='discount factor of `ReduceLROnPlateau`')
parser.add_argument('--max_norm', default=0, type=float, help='gradient clipping max norm')
parser.add_argument('--gpuid', default=[], nargs='+', type=int, help='choose which GPU to use')
parser.add_argument('--loglevel', default='info', choices=['info', 'debug'])
parser.add_argument('--saveall', default=False, action='store_true', help='keep all models')
parser.add_argument('--shuffle', default=False, action='store_true', help='shuffle the data')
parser.add_argument('--cleanup_anyway', default=False, action='store_true', help='cleanup anyway')
# fmt: on
def get_params(self):
return self.parser.parse_args()
def checklist_before_run(self):
assert self.data is not None, "call load_data before run"
assert self.model is not None, "call build_model before run"
assert self.optimizer is not None, "call setup_training before run"
assert self.scheduler is not None, "call setup_scheduler before run"
assert self.evaluator is not None, "call setup_evalutator before run"
def load_data(self, dataset, train, dev, test):
raise NotImplementedError
def build_model(self):
raise NotImplementedError
def load_model(self, model):
assert self.model is None
self.logger.info("load model in %s", model)
self.model = torch.load(model, map_location=self.device)
self.model = self.model.to(self.device)
epoch = int(model.split("_")[-1])
return epoch
def smart_load_model(self, model_prefix):
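        # Resume from the newest checkpoint by parsing the nll/eval/epoch tags embedded in saved model filenames.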
assert self.model is None
models = []
for model in glob.glob(f"{model_prefix}.nll*"):
res = re.findall(r"\w*_\d+\.?\d*", model[len(model_prefix) :])
loss_ = res[0].split("_")
evals_ = res[1:-1]
epoch_ = res[-1].split("_")
assert loss_[0] == "nll" and epoch_[0] == "epoch"
loss, epoch = float(loss_[1]), int(epoch_[1])
evals = []
for ev in evals_:
ev = ev.split("_")
evals.append(util.Eval(ev[0], ev[0], float(ev[1])))
models.append((epoch, Evaluation(model, loss, evals)))
self.models = [x[1] for x in sorted(models)]
return self.load_model(self.models[-1].filepath)
def setup_training(self):
assert self.model is not None
params = self.params
if params.optimizer == Optimizer.sgd:
self.optimizer = torch.optim.SGD(
self.model.parameters(), params.lr, momentum=params.momentum
)
elif params.optimizer == Optimizer.adadelta:
self.optimizer = torch.optim.Adadelta(self.model.parameters(), params.lr)
elif params.optimizer == Optimizer.adam:
self.optimizer = torch.optim.Adam(
self.model.parameters(), params.lr, betas=(params.beta1, params.beta2)
)
elif params.optimizer == Optimizer.amsgrad:
self.optimizer = torch.optim.Adam(
self.model.parameters(),
params.lr,
betas=(params.beta1, params.beta2),
amsgrad=True,
)
else:
raise ValueError
self.min_lr = params.min_lr
if params.scheduler == Scheduler.reducewhenstuck:
self.scheduler = ReduceLROnPlateau(
self.optimizer,
"min",
patience=params.patience,
cooldown=params.cooldown,
factor=params.discount_factor,
min_lr=params.min_lr,
)
elif params.scheduler == Scheduler.warmupinvsqr:
self.scheduler = util.WarmupInverseSquareRootSchedule(
self.optimizer, params.warmup_steps
)
else:
raise ValueError
def save_training(self, model_fp):
save_objs = (self.optimizer.state_dict(), self.scheduler.state_dict())
torch.save(save_objs, f"{model_fp}.progress")
def load_training(self, model_fp):
assert self.model is not None
if os.path.isfile(f"{model_fp}.progress"):
optimizer_state, scheduler_state = torch.load(f"{model_fp}.progress")
self.optimizer.load_state_dict(optimizer_state)
self.scheduler.load_state_dict(scheduler_state)
else:
self.logger.warning("cannot find optimizer & scheduler file")
def setup_evalutator(self):
raise NotImplementedError
def get_lr(self):
if isinstance(self.scheduler, ReduceLROnPlateau):
return self.optimizer.param_groups[0]["lr"]
try:
return self.scheduler.get_last_lr()[0]
except AttributeError:
return self.scheduler.get_lr()[0]
def train(self, epoch_idx, batch_size, max_norm):
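        # One epoch of minibatch training with optional gradient clipping; non-plateau schedulers step once per batch.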
logger, model = self.logger, self.model
logger.info("At %d-th epoch with lr %f.", epoch_idx, self.get_lr())
model.train()
sampler, nb_batch = self.iterate_batch(TRAIN, batch_size)
losses, cnt = 0, 0
for batch in tqdm(sampler(batch_size), total=nb_batch):
loss = model.get_loss(batch)
self.optimizer.zero_grad()
loss.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
logger.debug(
"loss %f with total grad norm %f",
loss,
util.grad_norm(model.parameters()),
)
self.optimizer.step()
if not isinstance(self.scheduler, ReduceLROnPlateau):
self.scheduler.step()
self.global_steps += 1
losses += loss.item()
cnt += 1
loss = losses / cnt
self.logger.info(f"Running average train loss is {loss} at epoch {epoch_idx}")
return loss
def iterate_batch(self, mode, batch_size):
if mode == TRAIN:
return (self.data.train_batch_sample, ceil(self.data.nb_train / batch_size))
elif mode == DEV:
return (self.data.dev_batch_sample, ceil(self.data.nb_dev / batch_size))
elif mode == TEST:
return (self.data.test_batch_sample, ceil(self.data.nb_test / batch_size))
else:
raise ValueError(f"wrong mode: {mode}")
def calc_loss(self, mode, batch_size, epoch_idx) -> float:
self.model.eval()
sampler, nb_batch = self.iterate_batch(mode, batch_size)
loss, cnt = 0.0, 0
for batch in tqdm(sampler(batch_size), total=nb_batch):
loss += self.model.get_loss(batch).item()
cnt += 1
loss = loss / cnt
self.logger.info(f"Average {mode} loss is {loss} at epoch {epoch_idx}")
return loss
def iterate_instance(self, mode):
if mode == TRAIN:
return self.data.train_sample, self.data.nb_train
elif mode == DEV:
return self.data.dev_sample, self.data.nb_dev
elif mode == TEST:
return self.data.test_sample, self.data.nb_test
else:
raise ValueError(f"wrong mode: {mode}")
def evaluate(self, mode, epoch_idx, decode_fn) -> List[util.Eval]:
raise NotImplementedError
def decode(self, mode, write_fp, decode_fn):
raise NotImplementedError
def update_lr_and_stop_early(self, epoch_idx, devloss, estop):
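        # With ReduceLROnPlateau, step on the dev loss and request early stopping once the improvement
        # falls below estop while the learning rate is already pinned at min_lr.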
stop_early = True
if isinstance(self.scheduler, ReduceLROnPlateau):
prev_lr = self.get_lr()
self.scheduler.step(devloss)
curr_lr = self.get_lr()
if (
self.last_devloss - devloss
) < estop and prev_lr == curr_lr == self.min_lr:
self.logger.info(
"Early stopping triggered with epoch %d (previous dev loss: %f, current: %f)",
epoch_idx,
self.last_devloss,
devloss,
)
stop_status = stop_early
else:
stop_status = not stop_early
self.last_devloss = devloss
else:
stop_status = not stop_early
return stop_status
def save_model(
self, epoch_idx, devloss: float, eval_res: List[util.Eval], model_fp
):
eval_tag = "".join(["{}_{}.".format(e.desc, e.res) for e in eval_res])
fp = f"{model_fp}.nll_{devloss:.4f}.{eval_tag}epoch_{epoch_idx}"
torch.save(self.model, fp)
self.models.append(Evaluation(fp, devloss, eval_res))
def select_model(self):
raise NotImplementedError
def reload_and_test(self, model_fp, best_fp, bs, decode_fn):
self.model = None
self.logger.info(f"loading {best_fp} for testing")
self.load_model(best_fp)
self.calc_loss(DEV, bs, -1)
self.logger.info("decoding dev set")
self.decode(DEV, f"{model_fp}.decode", decode_fn)
results = self.evaluate(DEV, -1, decode_fn)
if results:
results = " ".join([f"{r.desc} {r.res}" for r in results])
self.logger.info(f'DEV {model_fp.split("/")[-1]} {results}')
if self.data.test_file is not None:
self.calc_loss(TEST, bs, -1)
self.logger.info("decoding test set")
self.decode(TEST, f"{model_fp}.decode", decode_fn)
results = self.evaluate(TEST, -1, decode_fn)
if results:
results = " ".join([f"{r.desc} {r.res}" for r in results])
self.logger.info(f'TEST {model_fp.split("/")[-1]} {results}')
def cleanup(self, saveall, save_fps, model_fp):
if not saveall:
for model in self.models:
if model.filepath in save_fps:
continue
os.remove(model.filepath)
os.remove(f"{model_fp}.progress")
def run(self, start_epoch, decode_fn=None):
"""
helper for training
"""
self.checklist_before_run()
finish = False
params = self.params
steps_per_epoch = ceil(self.data.nb_train / params.bs)
if params.max_steps > 0:
max_epochs = ceil(params.max_steps / steps_per_epoch)
else:
max_epochs = params.epochs
params.max_steps = max_epochs * steps_per_epoch
self.logger.info(
f"maximum training {params.max_steps} steps ({max_epochs} epochs)"
)
if params.total_eval > 0:
eval_every = max(max_epochs // params.total_eval, 1)
else:
eval_every = 1
self.logger.info(f"evaluate every {eval_every} epochs")
for epoch_idx in range(start_epoch, max_epochs):
self.train(epoch_idx, params.bs, params.max_norm)
if not (
epoch_idx
and (epoch_idx % eval_every == 0 or epoch_idx + 1 == max_epochs)
):
continue
with torch.no_grad():
devloss = self.calc_loss(DEV, params.bs, epoch_idx)
eval_res = self.evaluate(DEV, epoch_idx, decode_fn)
if self.update_lr_and_stop_early(epoch_idx, devloss, params.estop):
finish = True
break
self.save_model(epoch_idx, devloss, eval_res, params.model)
self.save_training(params.model)
if finish or params.cleanup_anyway:
best_fp, save_fps = self.select_model()
with torch.no_grad():
self.reload_and_test(params.model, best_fp, params.bs, decode_fn)
self.cleanup(params.saveall, save_fps, params.model)
|
[
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"util.maybe_mkdir",
"math.ceil",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"argparse.ArgumentParser",
"util.get_logger",
"torch.load",
"random.seed",
"os.path.isfile",
"torch.cuda.is_available",
"functools.partial",
"numpy.random.seed",
"torch.save",
"util.WarmupInverseSquareRootSchedule",
"torch.no_grad",
"glob.glob",
"os.remove"
] |
[((340, 382), 'functools.partial', 'partial', (['tqdm'], {'bar_format': '"""{l_bar}{r_bar}"""'}), "(tqdm, bar_format='{l_bar}{r_bar}')\n", (347, 382), False, 'from functools import partial\n'), ((682, 699), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (693, 699), False, 'import random\n'), ((704, 724), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (718, 724), True, 'import numpy as np\n'), ((729, 752), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (746, 752), False, 'import torch\n'), ((760, 785), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (783, 785), False, 'import torch\n'), ((795, 827), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (821, 827), False, 'import torch\n'), ((1081, 1106), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1104, 1106), False, 'import argparse\n'), ((1180, 1215), 'util.maybe_mkdir', 'util.maybe_mkdir', (['self.params.model'], {}), '(self.params.model)\n', (1196, 1215), False, 'import util\n'), ((1238, 1313), 'util.get_logger', 'util.get_logger', (["(self.params.model + '.log')"], {'log_level': 'self.params.loglevel'}), "(self.params.model + '.log', log_level=self.params.loglevel)\n", (1253, 1313), False, 'import util\n'), ((5433, 5476), 'torch.load', 'torch.load', (['model'], {'map_location': 'self.device'}), '(model, map_location=self.device)\n', (5443, 5476), False, 'import torch\n'), ((5710, 5743), 'glob.glob', 'glob.glob', (['f"""{model_prefix}.nll*"""'], {}), "(f'{model_prefix}.nll*')\n", (5719, 5743), False, 'import glob\n'), ((8046, 8091), 'torch.save', 'torch.save', (['save_objs', 'f"""{model_fp}.progress"""'], {}), "(save_objs, f'{model_fp}.progress')\n", (8056, 8091), False, 'import torch\n'), ((8181, 8219), 'os.path.isfile', 'os.path.isfile', (['f"""{model_fp}.progress"""'], {}), "(f'{model_fp}.progress')\n", (8195, 8219), False, 'import os\n'), ((12541, 12567), 'torch.save', 'torch.save', (['self.model', 'fp'], {}), '(self.model, fp)\n', (12551, 12567), False, 'import torch\n'), ((13906, 13939), 'os.remove', 'os.remove', (['f"""{model_fp}.progress"""'], {}), "(f'{model_fp}.progress')\n", (13915, 13939), False, 'import os\n'), ((14155, 14191), 'math.ceil', 'ceil', (['(self.data.nb_train / params.bs)'], {}), '(self.data.nb_train / params.bs)\n', (14159, 14191), False, 'from math import ceil\n'), ((7429, 7579), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['self.optimizer', '"""min"""'], {'patience': 'params.patience', 'cooldown': 'params.cooldown', 'factor': 'params.discount_factor', 'min_lr': 'params.min_lr'}), "(self.optimizer, 'min', patience=params.patience, cooldown\n =params.cooldown, factor=params.discount_factor, min_lr=params.min_lr)\n", (7446, 7579), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n'), ((8268, 8302), 'torch.load', 'torch.load', (['f"""{model_fp}.progress"""'], {}), "(f'{model_fp}.progress')\n", (8278, 8302), False, 'import torch\n'), ((14250, 14290), 'math.ceil', 'ceil', (['(params.max_steps / steps_per_epoch)'], {}), '(params.max_steps / steps_per_epoch)\n', (14254, 14290), False, 'from math import ceil\n'), ((1572, 1597), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1595, 1597), False, 'import torch\n'), ((7772, 7845), 'util.WarmupInverseSquareRootSchedule', 'util.WarmupInverseSquareRootSchedule', (['self.optimizer', 'params.warmup_steps'], {}), '(self.optimizer, params.warmup_steps)\n', (7808, 7845), False, 
'import util\n'), ((10080, 10117), 'math.ceil', 'ceil', (['(self.data.nb_train / batch_size)'], {}), '(self.data.nb_train / batch_size)\n', (10084, 10117), False, 'from math import ceil\n'), ((13872, 13897), 'os.remove', 'os.remove', (['model.filepath'], {}), '(model.filepath)\n', (13881, 13897), False, 'import os\n'), ((15023, 15038), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15036, 15038), False, 'import torch\n'), ((15538, 15553), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15551, 15553), False, 'import torch\n'), ((10193, 10228), 'math.ceil', 'ceil', (['(self.data.nb_dev / batch_size)'], {}), '(self.data.nb_dev / batch_size)\n', (10197, 10228), False, 'from math import ceil\n'), ((10306, 10342), 'math.ceil', 'ceil', (['(self.data.nb_test / batch_size)'], {}), '(self.data.nb_test / batch_size)\n', (10310, 10342), False, 'from math import ceil\n')]
|
import cv2
import argparse
import numpy as np
def gray2bgr565(input_file, output_file):
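    # Read a raw 640x480 16-bit grayscale frame, rescale it to 8 bits, and save it as a 3-channel image.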
img = np.fromfile(input_file, dtype=np.uint16)
img = img.reshape(480, 640)
# img = cv2.imread(input_file, cv2.IMREAD_ANYDEPTH)
ratio = np.amax(img) / 256
img8 = (img / ratio).astype('uint8')
img8 = cv2.cvtColor(img8, cv2.COLOR_GRAY2RGB)
cv2.imwrite(output_file, img8)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Command Usages of ImageHelper')
parser.add_argument("-i", "--input", type=str, help="input image dir")
parser.add_argument("-o", "--output", type=str, help="output image dir")
args = parser.parse_args()
if args.input:
gray2bgr565(args.input, args.output)
else:
parser.print_help()
|
[
"cv2.imwrite",
"numpy.fromfile",
"argparse.ArgumentParser",
"cv2.cvtColor",
"numpy.amax"
] |
[((99, 139), 'numpy.fromfile', 'np.fromfile', (['input_file'], {'dtype': 'np.uint16'}), '(input_file, dtype=np.uint16)\n', (110, 139), True, 'import numpy as np\n'), ((311, 349), 'cv2.cvtColor', 'cv2.cvtColor', (['img8', 'cv2.COLOR_GRAY2RGB'], {}), '(img8, cv2.COLOR_GRAY2RGB)\n', (323, 349), False, 'import cv2\n'), ((354, 384), 'cv2.imwrite', 'cv2.imwrite', (['output_file', 'img8'], {}), '(output_file, img8)\n', (365, 384), False, 'import cv2\n'), ((426, 494), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Command Usages of ImageHelper"""'}), "(description='Command Usages of ImageHelper')\n", (449, 494), False, 'import argparse\n'), ((240, 252), 'numpy.amax', 'np.amax', (['img'], {}), '(img)\n', (247, 252), True, 'import numpy as np\n')]
|
import math
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import pdb
import sys
from ilqr.vehicle_model import Model
from ilqr.local_planner import LocalPlanner
from ilqr.constraints import Constraints
class iLQR():
def __init__(self, args, obstacle_bb, verbose=False):
self.args = args
self.Ts = args.timestep
self.N = args.horizon
self.tol = args.tol
self.obstacle_bb = obstacle_bb
self.verbose = verbose
self.global_plan = None
self.local_planner = LocalPlanner(args)
self.vehicle_model = Model(args)
self.constraints = Constraints(args, obstacle_bb)
# initial nominal trajectory
self.control_seq = np.zeros((self.args.num_ctrls, self.args.horizon))
self.control_seq[0, :] = np.ones((self.args.horizon)) * 0.5
self.debug_flag = 0
self.lamb_factor = 10
self.max_lamb = 1000
# self.fig, (self.ax1, self.ax2, self.ax3) = plt.subplots(1,3, num=0, figsize=(20, 5))
def set_global_plan(self, global_plan):
self.global_plan = global_plan
self.local_planner.set_global_planner(self.global_plan)
def get_nominal_trajectory(self, X_0, U):
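        # Roll the vehicle model forward from X_0 under the nominal control sequence U.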
X = np.zeros((self.args.num_states, self.args.horizon+1))
X[:, 0] = X_0
for i in range(self.args.horizon):
X[:, i+1] = self.vehicle_model.forward_simulate(X[:, i], U[:, i])
return X
def forward_pass(self, X, U, k, K):
X_new = np.zeros((self.args.num_states, self.args.horizon+1))
X_new[:, 0] = X[:, 0]
U_new = np.zeros((self.args.num_ctrls, self.args.horizon))
# Do a forward rollout and get states at all control points
for i in range(self.args.horizon):
U_new[:, i] = U[:, i] + k[:, i] + K[:, :, i] @ (X_new[:, i] - X[:, i])
X_new[:, i+1] = self.vehicle_model.forward_simulate(X_new[:, i], U_new[:, i])
return X_new, U_new
def backward_pass(self, X, U, poly_coeff, x_local_plan, npc_traj, lamb):
# Find control sequence that minimizes Q-value function
# Get derivatives of Q-function wrt to state and control
l_x, l_xx, l_u, l_uu, l_ux = self.constraints.get_cost_derivatives(X[:, 1:], U, poly_coeff, x_local_plan, npc_traj)
df_dx = self.vehicle_model.get_A_matrix(X[2, 1:], X[3, 1:], U[0,:])
df_du = self.vehicle_model.get_B_matrix(X[3, 1:])
# Value function at final timestep is known
V_x = l_x[:,-1]
V_xx = l_xx[:,:,-1]
# Allocate space for feedforward and feeback term
k = np.zeros((self.args.num_ctrls, self.args.horizon))
K = np.zeros((self.args.num_ctrls, self.args.num_states, self.args.horizon))
# Run a backwards pass from N-1 control step
for i in range(self.args.horizon-1,-1,-1):
Q_x = l_x[:,i] + df_dx[:,:,i].T @ V_x
Q_u = l_u[:,i] + df_du[:,:,i].T @ V_x
Q_xx = l_xx[:,:,i] + df_dx[:,:,i].T @ V_xx @ df_dx[:,:,i]
Q_ux = l_ux[:,:,i] + df_du[:,:,i].T @ V_xx @ df_dx[:,:,i]
Q_uu = l_uu[:,:,i] + df_du[:,:,i].T @ V_xx @ df_du[:,:,i]
# Q_uu_inv = np.linalg.pinv(Q_uu)
Q_uu_evals, Q_uu_evecs = np.linalg.eig(Q_uu)
Q_uu_evals[Q_uu_evals < 0] = 0.0
Q_uu_evals += lamb
Q_uu_inv = np.dot(Q_uu_evecs,np.dot(np.diag(1.0/Q_uu_evals), Q_uu_evecs.T))
# Calculate feedforward and feedback terms
k[:,i] = -Q_uu_inv @ Q_u
K[:,:,i] = -Q_uu_inv @ Q_ux
# Update value function for next time step
V_x = Q_x - K[:,:,i].T @ Q_uu @ k[:,i]
V_xx = Q_xx - K[:,:,i].T @ Q_uu @ K[:,:,i]
return k, K
def run_step(self, ego_state, npc_traj):
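        # Plan from the current ego state: fetch a local reference plan, optimize the control sequence,
        # and return a downsampled (x, y) trajectory.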
assert self.global_plan is not None, "Set a global plan in iLQR before starting run_step"
self.local_planner.set_ego_state(ego_state)
ref_traj, poly_coeff = self.local_planner.get_local_plan()
X_0 = np.array([ego_state[0][0], ego_state[0][1], ego_state[1][0], ego_state[2][2]])
# self.control_seq[:, :-1] = self.control_seq[:, 1:]
# self.control_seq[:, -1] = np.zeros((self.args.num_ctrls))
X, U = self.get_optimal_control_seq(X_0, self.control_seq, poly_coeff, ref_traj[:, 0], npc_traj)
traj = X[:2, ::int(self.args.horizon/10)].T
self.control_seq = U
# self.plot(U, X, ref_traj)
return traj, ref_traj, U #self.filter_control(U, X[2,:])
def get_optimal_control_seq(self, X_0, U, poly_coeff, x_local_plan, npc_traj):
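        # Alternate backward and forward passes; accept a rollout only when the total cost decreases,
        # shrinking the regularization term lamb on success and growing it otherwise.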
X = self.get_nominal_trajectory(X_0, U)
J_old = sys.float_info.max
lamb = 1 # Regularization parameter
# Run iLQR for max iterations
for itr in range(self.args.max_iters):
k, K = self.backward_pass(X, U, poly_coeff, x_local_plan, npc_traj, lamb)
# Get control values at control points and new states again by a forward rollout
X_new, U_new = self.forward_pass(X, U, k, K)
J_new = self.constraints.get_total_cost(X, U, poly_coeff, x_local_plan, npc_traj)
if J_new < J_old:
X = X_new
U = U_new
lamb /= self.lamb_factor
if (abs(J_old - J_new) < self.args.tol):
print("Tolerance reached")
break
else:
lamb *= self.lamb_factor
if lamb > self.max_lamb:
break
J_old = J_new
# print(J_new)
return X, U
def filter_control(self, U, velocity):
U[1] = np.arctan2(self.args.wheelbase*U[1],velocity[:-1])
return U
def plot(self, control, X, ref_traj):
self.ax1.clear()
self.ax1.plot(np.arange(len(control[0])), control[0,:], color='g', label='Acc')
self.ax1.plot(np.arange(len(control[0])), control[1,:], color='b', label='Yaw Rate')
self.ax1.set_ylabel('Values')
self.ax1.set_xlabel('Time')
self.ax1.set_title('Controls',fontsize=18)
# self.ax1.xlim(0, len(control[0]))
# self.ax1.ylim(-6, 6)
# self.ax1.axis('equal')
self.ax1.legend()
self.ax1.grid()
self.ax2.clear()
self.ax2.plot(ref_traj[:, 0], ref_traj[:, 1], color='r', label='Ref Traj')
self.ax2.plot(X[0, :], X[1, :], color='g', label='Real Traj')
self.ax2.set_ylabel('y')
self.ax2.set_xlabel('x')
self.ax2.set_title('Position Trajectory',fontsize=18)
self.ax2.legend()
self.ax2.grid()
# plt.legend()
self.ax3.clear()
self.ax3.plot(np.arange(len(X[0])), X[2, :], color='r', label='Velocity')
self.ax3.plot(np.arange(len(X[0])), X[3, :], color='g', label='Yaw')
self.ax3.set_ylabel('Values')
self.ax3.set_xlabel('Time')
self.ax3.set_title('Traj',fontsize=18)
self.ax3.grid()
self.ax3.legend()
plt.pause(0.001)
|
[
"ilqr.constraints.Constraints",
"numpy.ones",
"numpy.linalg.eig",
"ilqr.local_planner.LocalPlanner",
"ilqr.vehicle_model.Model",
"numpy.diag",
"numpy.array",
"numpy.zeros",
"numpy.arctan2",
"matplotlib.pyplot.pause"
] |
[((574, 592), 'ilqr.local_planner.LocalPlanner', 'LocalPlanner', (['args'], {}), '(args)\n', (586, 592), False, 'from ilqr.local_planner import LocalPlanner\n'), ((622, 633), 'ilqr.vehicle_model.Model', 'Model', (['args'], {}), '(args)\n', (627, 633), False, 'from ilqr.vehicle_model import Model\n'), ((661, 691), 'ilqr.constraints.Constraints', 'Constraints', (['args', 'obstacle_bb'], {}), '(args, obstacle_bb)\n', (672, 691), False, 'from ilqr.constraints import Constraints\n'), ((765, 815), 'numpy.zeros', 'np.zeros', (['(self.args.num_ctrls, self.args.horizon)'], {}), '((self.args.num_ctrls, self.args.horizon))\n', (773, 815), True, 'import numpy as np\n'), ((1280, 1335), 'numpy.zeros', 'np.zeros', (['(self.args.num_states, self.args.horizon + 1)'], {}), '((self.args.num_states, self.args.horizon + 1))\n', (1288, 1335), True, 'import numpy as np\n'), ((1551, 1606), 'numpy.zeros', 'np.zeros', (['(self.args.num_states, self.args.horizon + 1)'], {}), '((self.args.num_states, self.args.horizon + 1))\n', (1559, 1606), True, 'import numpy as np\n'), ((1651, 1701), 'numpy.zeros', 'np.zeros', (['(self.args.num_ctrls, self.args.horizon)'], {}), '((self.args.num_ctrls, self.args.horizon))\n', (1659, 1701), True, 'import numpy as np\n'), ((2655, 2705), 'numpy.zeros', 'np.zeros', (['(self.args.num_ctrls, self.args.horizon)'], {}), '((self.args.num_ctrls, self.args.horizon))\n', (2663, 2705), True, 'import numpy as np\n'), ((2718, 2790), 'numpy.zeros', 'np.zeros', (['(self.args.num_ctrls, self.args.num_states, self.args.horizon)'], {}), '((self.args.num_ctrls, self.args.num_states, self.args.horizon))\n', (2726, 2790), True, 'import numpy as np\n'), ((4081, 4159), 'numpy.array', 'np.array', (['[ego_state[0][0], ego_state[0][1], ego_state[1][0], ego_state[2][2]]'], {}), '([ego_state[0][0], ego_state[0][1], ego_state[1][0], ego_state[2][2]])\n', (4089, 4159), True, 'import numpy as np\n'), ((5739, 5792), 'numpy.arctan2', 'np.arctan2', (['(self.args.wheelbase * U[1])', 'velocity[:-1]'], {}), '(self.args.wheelbase * U[1], velocity[:-1])\n', (5749, 5792), True, 'import numpy as np\n'), ((7091, 7107), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (7100, 7107), True, 'import matplotlib.pyplot as plt\n'), ((849, 875), 'numpy.ones', 'np.ones', (['self.args.horizon'], {}), '(self.args.horizon)\n', (856, 875), True, 'import numpy as np\n'), ((3289, 3308), 'numpy.linalg.eig', 'np.linalg.eig', (['Q_uu'], {}), '(Q_uu)\n', (3302, 3308), True, 'import numpy as np\n'), ((3433, 3458), 'numpy.diag', 'np.diag', (['(1.0 / Q_uu_evals)'], {}), '(1.0 / Q_uu_evals)\n', (3440, 3458), True, 'import numpy as np\n')]
|
import numpy as np
from spacy.pipeline.sentencizer import Sentencizer
from glob import glob
from spacy.lang.en import English
def metrics(a, b):
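    # Return (accuracy, recall, precision, F1) over boolean sentence-start labels.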
from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score
return (accuracy_score(a, b),
recall_score(a, b),
precision_score(a, b),
f1_score(a, b))
def performance(colgate=None):
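    # Score a sentence-boundary detector against marked-*.txt references, where '#' flags
    # sentence-ending punctuation and the next real token is treated as a sentence start.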
colgate = colgate if colgate is not None else Sentencizer()
nlp = English()
output = []
for test in glob("marked-*.txt"):
input = test.replace("marked-", "")
txt = open(input).read()
tokens = nlp(open(test).read())
hy_tokens = colgate(nlp(txt))
assert len(tokens) == len(hy_tokens)
y = [False] * len(tokens)
seen_period = False
for i, tok in enumerate(tokens):
is_in_punct_chars = tok.text in Sentencizer.default_punct_chars
if seen_period and not tok.is_punct and not is_in_punct_chars and not tok.is_space:
y[i] = True
seen_period = False
elif tok.is_punct and tok.text == "#":
seen_period = True
y = np.array(y, dtype=bool)
y[0] = True
hy = np.array([x.is_sent_start for x in hy_tokens])
_ = metrics(y, hy)
output.append((test, _, y.sum()))
return output
if __name__ == "__main__":
from hw2 import ColgateSBD
from glob import glob
from spacy.lang.en import English
output = performance(ColgateSBD())
for input, perf, n_sent in output:
print("Input:", input, perf, "Number of sentences:", n_sent)
print("*" * 5, "Sentencizer", "*" * 5)
output = performance()
for input, perf, n_sent in output:
print("Input:", input, perf, "Number of sentences:", n_sent)
|
[
"hw2.ColgateSBD",
"sklearn.metrics.f1_score",
"spacy.lang.en.English",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.array",
"spacy.pipeline.sentencizer.Sentencizer",
"sklearn.metrics.accuracy_score",
"glob.glob"
] |
[((471, 480), 'spacy.lang.en.English', 'English', ([], {}), '()\n', (478, 480), False, 'from spacy.lang.en import English\n'), ((514, 534), 'glob.glob', 'glob', (['"""marked-*.txt"""'], {}), "('marked-*.txt')\n", (518, 534), False, 'from glob import glob\n'), ((247, 267), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['a', 'b'], {}), '(a, b)\n', (261, 267), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score\n'), ((281, 299), 'sklearn.metrics.recall_score', 'recall_score', (['a', 'b'], {}), '(a, b)\n', (293, 299), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score\n'), ((313, 334), 'sklearn.metrics.precision_score', 'precision_score', (['a', 'b'], {}), '(a, b)\n', (328, 334), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score\n'), ((348, 362), 'sklearn.metrics.f1_score', 'f1_score', (['a', 'b'], {}), '(a, b)\n', (356, 362), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score\n'), ((447, 460), 'spacy.pipeline.sentencizer.Sentencizer', 'Sentencizer', ([], {}), '()\n', (458, 460), False, 'from spacy.pipeline.sentencizer import Sentencizer\n'), ((1174, 1197), 'numpy.array', 'np.array', (['y'], {'dtype': 'bool'}), '(y, dtype=bool)\n', (1182, 1197), True, 'import numpy as np\n'), ((1231, 1277), 'numpy.array', 'np.array', (['[x.is_sent_start for x in hy_tokens]'], {}), '([x.is_sent_start for x in hy_tokens])\n', (1239, 1277), True, 'import numpy as np\n'), ((1515, 1527), 'hw2.ColgateSBD', 'ColgateSBD', ([], {}), '()\n', (1525, 1527), False, 'from hw2 import ColgateSBD\n')]
|
from modules.mpulib import computeheading, attitudefromCompassGravity, RP_calculate, MadgwickQuaternionUpdate, Euler2Quat, quaternion_to_euler_angle, MPU9250_computeEuler
import socket, traceback
import csv
import struct
import sys, time, string, pygame
import pygame
import pygame.draw
import pygame.time
import numpy as np
from math import sin, cos, acos
from modules.euclid import Vector3, Quaternion
from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen
import math
# from pygame.locals import *
# from ponycube import *
from modules.madgwickahrs import *
import modules.quaternion
from modules.quaternion import QuaternionClass
from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler
from math import atan2, atan
from numpy.linalg import inv
from numpy import linalg as LA
# import euclid
import serial
ser = serial.Serial('/dev/tty.usbmodem14411')
ser.baudrate = 115200
ser.timeout = 3
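# Serial link to the IMU (MPU9250-based, judging by the imports above); each line streamed is one comma-separated sample.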
prev_time = 0
filename = open('/Users/eunsunlee/Documents/NESL/UnderwaterSensorTag/IMU_algorithms/optitrack/imu_movement.txt','w')
# offset_mx = 77.345
# offset_my = -13.725
# offset_mz = -71.64
# scale_mx = 1.1
# scale_my = 1.13
# scale_mz = 0.827
# LAB
offset_mx = 71.12
offset_my = -30.385
offset_mz = -66.24
scale_mx = 1.210645853980839
scale_my = 1.1778152745972439
scale_mz = 0.7547368963031613
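# Magnetometer hard-iron offsets and soft-iron scale factors, applied to the raw mx/my/mz readings in the main loop.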
dt = 1/10
visualIMU = False
if visualIMU:
pygame.init()
screen = Screen(1600,400,scale=1.5)
cube1 = Cube(40,30,60)
cube2 = Cube(40,30,60)
cube3 = Cube(40,30,60)
cube4 = Cube(40,30,60)
cube5 = Cube(40,30,60)
q1 = Quaternion(1,0,0,0)
q2 = Quaternion(1,0,0,0)
q3 = Quaternion(1,0,0,0)
q4 = Quaternion(1,0,0,0)
q5 = Quaternion(1,0,0,0)
p1 = Vector3(-400,0,0)
p2 = Vector3(-200,0,0)
p3 = Vector3(0,0,0)
p4 = Vector3(200,0,0)
p5 = Vector3(400,0,0)
incr = Quaternion(0.96,0.01,0.01,0).normalized()
cube1.erase(screen)
cube1.draw(screen,q1,p1)
cube2.erase(screen)
cube2.draw(screen,q2,p2)
cube3.erase(screen)
cube3.draw(screen,q3,p3)
cube4.erase(screen)
cube4.draw(screen,q4,p4)
cube5.erase(screen)
cube5.draw(screen,q5,p5)
# Madgwick
Imupredict = MadgwickAHRS()
Imupredict2 = MadgwickAHRS()
# A3
omega0 = [0,0,0]
similaritywindowA3 = 0
Sc = []
Sg = []
C = []
G = []
Eg = 0
quatA3 = QuaternionClass(1, 0, 0, 0)
quatMuseAlg = QuaternionClass(1, 0, 0, 0)
similaritywindowMUSE = 0
initial = 0
update = 0
Ax = []
Ay = []
Az = []
beta = 0.80
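# Complementary-filter weight used in the MUSE update: gyro-integrated orientation weighted by beta, accel/mag orientation by (1 - beta).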
quat = QuaternionClass(1,0,0,0)
# 1 Hz - 1000
# 10 Hz - 100
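# Main loop: each serial line carries time, accel (g), gyro (deg/s), mag (uT), the DMP quaternion, Euler angles and heading; parse it, convert units, and run the selected orientation algorithms.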
while True:
reading = ser.readline()
print(reading, file = filename)
# print(reading)
sp = str(reading).split(',')
# print(sp)
	time = float(sp[0][2:].strip())  # sp[0] starts with "b'" because str(reading) is a bytes repr; skip those two characters
# reads in g so multiply by 9.8
ax = float(sp[1].strip())
ay = float(sp[2].strip())
az = float(sp[3].strip())
ax = ax*9.8
ay = ay*9.8
az = az*9.8
gx = float(sp[4].strip())*math.pi/180 #rad/s
gy = float(sp[5].strip())*math.pi/180 #rad/s
gz = float(sp[6].strip())*math.pi/180 #rad/s
#uT
mx = float(sp[7].strip())
my = float(sp[8].strip())
mz = float(sp[9].strip())
mx = mx - offset_mx
my = my - offset_my
mz = mz - offset_mz
mx = mx*scale_mx
my = my*scale_my
mz = mz*scale_mz
qw = float(sp[10].strip())
qx = float(sp[11].strip())
qy = float(sp[12].strip())
qz = float(sp[13].strip())
pitch = float(sp[14].strip())
roll = float(sp[15].strip())
yaw = float(sp[16].strip())
dq = QuaternionClass(0,0,-1,0)
# print("yaw, pitch, roll: ", yaw, pitch, roll)
heading = float(sp[17].split('\\r')[0].strip())
# print("heading: ", heading)
# print(computeheading(mx,my))
# print(yaw, pitch, roll)
accel = [ax, ay, az]
gyro = [gx, gy, gz]
mag = [mx, my, mz]
# print(accel)
a333 = 0
# yawAM, pitchAM, rollAM, quatAM = AccMagOrientation(accel, mag)
# print("ypr: ", yaw, pitch, roll)
# print("ypr: ", yawAM, pitchAM, rollAM)
# print("heading: ", heading)
# print(headingM)
# time_diff = 60
if visualIMU: #quaternion from imu
# yellow area facing straight if imu hold with usbside facing me
# print("yaw: ", yaw)
# q1w = float(sp[10].strip())
# q1x = float(sp[11].strip())
# q1z = -float(sp[12].strip())
# q1y = float(sp[13].split('\\r')[0].strip())
# quatMDP = QuaternionClass(q1w, q1x, q1y, q1z)
# rollMDP, pitchMDP, yawMDP = QuatToEuler(quatMDP)
# print("yawMDP: ", yawMDP)
# quat = QuaternionClass(qw, qx, qy, -qz) *dq
q1.w = quat[0]
q1.x = quat[1]
q1.z = quat[3]
q1.y = quat[2]
q1 = q1.normalized()
cube1.erase(screen)
cube1.draw(screen,q1,p1)
# print("yaw: ", yaw )
if visualIMU: # Madgwick Algorithm
Imupredict.samplePeriod = 0.025#0.1
Imupredict.update(gyro,accel,mag)
quatMad = Imupredict.quaternion
quatMad = qnormalized(quatMad)
Imupredict.quaternion = quatMad
#quatMad = quatNormalized(quatMad)
yawMad, pitchMad, rollMad = QuatToEuler(quatMad)
# print("yawMad: ", yawMad*180/math.pi)
quat = QuaternionClass(quatMad[0], quatMad[1], quatMad[3], quatMad[2])
q2.w = quat[0]
q2.x = quat[1]
q2.z = quat[3]
q2.y = quat[2]
q2 = q2.normalized()
cube2.erase(screen)
cube2.draw(screen,q2,p2)
if False:
# quat = MadgwickQuaternionUpdate(ax, ay, az, gx, gy, gz, mx, my, mz, quat)
# q5.w = quat[0]
# q5.x = quat[1]
# q5.z = -quat[2]
# q5.y = quat[3]
# q5 = q5.normalized()
# cube5.erase(screen)
# cube5.draw(screen,q5,p5)
yawT, pitchT, rollT, quatT = androidAccMag2Euler(accel, mag)
if yawT > 0:
yawT = 360 - yawT*180/math.pi
else:
yawT = -yawT*180/math.pi
# print("yaw: ",yawT)
q5.w = quatT[0]
q5.x = quatT[1]
q5.z = -quatT[2]
q5.y = quatT[3]
q5 = q5.normalized()
cube5.erase(screen)
cube5.draw(screen,q5,p5)
# Imupredict2.samplePeriod = 0.1
# Imupredict2.update_imu(gyro,accel)
# quatMad2 = Imupredict2.quaternion
# quatMad2 = qnormalized(quatMad)
# Imupredict2.quaternion = quatMad2
# q5.w = quatMad2[0]
# q5.x = quatMad2[1]
# q5.z = -quatMad2[2]
# q5.y = quatMad2[3]
# q5 = q5.normalized()
# cube5.erase(screen)
# cube5.draw(screen,q5,p5)
# https://stackoverflow.com/questions/32372847/android-algorithms-for-sensormanager-getrotationmatrix-and-sensormanager-getori/35390001#35390001
if visualIMU: #a3
q_a3 = 0
omega1 = [gx, gy, gz]
quatG = IntegrationRK4(omega0, omega1, quatA3, dt)
yawG, pitchG, rollG = QuatToEuler(quatG)
if yawG < 0:
yawG = -yawG*180/math.pi
else:
yawG = 360 - yawG*180/math.pi
# # print(yawG, pitchG, rollG)
omega0 = omega1
# # # A3 Algorithm - accelerometer, magnetometer calibration
# yawAM, pitchAM, rollAM, quatAM = AccMag2Euler(accel, mag)
yawAM, pitchAM, rollAM, quatAM = androidAccMag2Euler(accel, mag)
# # print(yawAM, pitchAM, rollAM)
# # # TODO: Update quaternion if w < 240 degree, a < 2g
w = max(abs(np.array(gyro)))*180/math.pi
a = max(abs(np.array(accel)))
# # # if w < 240 and a < 2*9.8:
# # # print("stable")
# # # else:
# # # print("moving")
# # headingM = headingfromMag(mag)
headingM = computeheading(mx, my)
# print("headingM:" , headingM)
# print("heading: ", headingM)
# print("yawG: ", yawG*180/math.pi)
# # print(headingM)
if similaritywindowA3 > 1:
# print("similaritywindow")
# calculate pc and pg
pc = 1/(2**np.var(np.subtract(Sc,C)))
pg = 1/(2**np.var(np.subtract(Sg,G)))
# print(pc)
# print(pg)
if pc > 0.2 and pg > 0.2:
print("change?")
# TODO: if Ec < Eg, then update quaternion
E1 = -32.14*pc + 19.93
E2 = -12.86*pg + 11.57
Ec = max(E1, E2)
Eg = (Eg + 0.0003*w*dt + 0.001*a*dt)*1000
#print(Ec)
#print(Eg)
if Ec < Eg*1000:
# print(a333)
a333 = a333 + 1
print("A3 reset ")
q_a3 = 1
#quatA3 = quatAM
# # quat = quatAM
# reset values
similaritywindowA3 = 0
C = []
Sc = []
Sg = []
G = []
Eg = 0
else:
# #TODO: update Eg
Eg = Eg + 0.0003*w*dt + 0.001*a*dt
C.append(yawAM)
Sc.append(yawG)
Sg.append(rollG)
G.append(rollAM)
similaritywindowA3 = similaritywindowA3 + dt
if q_a3:
quatA3 = quatAM #QuaternionClass(quatAM[0], quatAM[1], quatAM[2], quatAM[3])
# print("quatAM", quatAM)
else:
quatA3 = quatG
# print("quatG", quatG[0], quatG[1], quatG[2], quatG[3])
# print("quatA3", quatA3[0], quatA3[1], quatA3)
yawA3, pitchA3, rollA3 = QuatToEuler(quatA3)
# print("yawA3: ", yawA3*180/math.pi)
quatA3_temp = QuaternionClass(quatA3[0], quatA3[1], quatA3[3], -quatA3[2])
# quatA3 = quatA3_temp
q3.w = quatA3_temp[0]
q3.x = quatA3_temp[1]
q3.y = quatA3_temp[2]
q3.z = quatA3_temp[3]
q3 = q3.normalized()
cube3.erase(screen)
cube3.draw(screen,q3,p3)
if visualIMU: # MUSE
# # # Initial yaw, pitch, roll from Accelerometer and Magnetometer
#yawAM, pitchAM, rollAM, quatAM = AccMag2Euler(accel, mag)
yawAM, pitchAM, rollAM, quatAM = androidAccMag2Euler(accel, mag)
omega1 = [gx, gy, gz]
quatG = IntegrationRK4(omega0, omega1, quatMuseAlg, dt)
yawG, pitchG, rollG = QuatToEuler(quatG)
omega0 = omega1
headingM = computeheading(mx, my)
# headingM = headingfromMag(mag)
if initial < 30:
quatMuseAlg = quatAM
print("initial")
# O: orientation rotMat from quat
# O-1 : inverse of the rot Mat
# Calculate Ng = O*NL- Equation (1)
N_L = np.mat([[mx],[my],[mz]])
# print("N_L")
# print(N_L)
O = QuatToRotMat(quatAM)
N_G = O*N_L
# print("N_G")
# print(N_G)
initial = initial + 1
else:
quatMuseAlg = quatAM
# print("similaritywindow: ", similaritywindowMUSE)
if similaritywindowMUSE > 1:
# print("Ax: ", Ax)
# print("Ay: ", Ay)
# print("Az: ", Az)
aAx = abs(np.array(Ax))
aAy = abs(np.array(Ay))
aAz = abs(np.array(Az))
# print("Ax: ", aAx)
# print("Ay: ", aAy)
# print("Az: ", aAz)
agAx = aAx - 9.8
agAy = aAy - 9.8
agAz = aAz - 9.8
# print("agAx: ", agAx)
# print("agAy: ", agAy)
# print("agAz: ", agAz)
aagAx = abs(agAx)
aagAy = abs(agAy)
aagAz = abs(agAz)
# print("aagAx: ", aagAx)
# print("aagAy: ", aagAy)
# print("aagAz: ", aagAz)
x_max = max(aagAx)
y_max = max(aagAy)
z_max = max(aagAz)
# Ax = abs(abs(np.array(Ax))-9.8)
# Ay = abs(abs(np.array(Ax))-9.8)
# Az = abs(abs(np.array(Az))-9.8)
# # print(Az)
# # x_max = max([abs(max(Ax)), abs(min(Ax))])
# # y_max = max([abs(max(Ay)), abs(min(Ay))])
# # z_max = max([abs(max(Az)), abs(min(Az))])
# x_max = max(Ax)
# y_max = max(Ay)
# z_max = max(Az)
# print("x: ", x_max)
# print("y: ", y_max)
# print("z: ", z_max)
xyz_min = min([x_max, y_max, z_max])
# print(xyz_min)
# acceleration roughly measures 9.8m/s2
if xyz_min < 1:
print("yes, update quat with AM")
Oa = QuatToRotMat(quatAM)
Og = QuatToRotMat(quatG)
Ocomp = np.mat(Oa)*(1-beta) + np.mat(Og)*beta
# print("Oa")
# print(Oa)
# print("Og")
# print(Og)
# print("Ocomp")
# print(Ocomp)
quatComp = RotMatToQuat(np.array(np.mat(Ocomp)))
quatMuseAlg = quatComp
update = 1
# Update 3D magnetic vector estimation
N_L = np.mat([[mx],[my],[mz]])
# print("N_L")
# print(N_L)
O = QuatToRotMat(quatAM)
N_G = O*N_L
# reset values
similaritywindowMUSE = 0
Ax = []
Ay = []
Az = []
else:
Ax.append(ax)
Ay.append(ay)
Az.append(az)
similaritywindowMUSE = similaritywindowMUSE + dt
if update == 0:
O_hat = QuatToRotMat(quatG)
Oinv_hat = inv(O_hat)
N_L_hat = Oinv_hat * N_G
# print("N_L_hat")
# print(N_L_hat)
N_L = np.mat([[mx],[my],[mz]])
# print("N_L")
# print(N_L)
N_L_hat = np.array([np.array(N_L_hat)[0][0], np.array(N_L_hat)[1][0], np.array(N_L_hat)[2][0]])
N_L = np.array([mx, my, mz])
RotAxis = np.cross(N_L_hat, N_L)
RotAxis = RotAxis/LA.norm(RotAxis)
# print("RotAxis")
# print(RotAxis/LA.norm(RotAxis))
alpha = 0.01
RotAngle = angle_between(N_L_hat, N_L)
alphaRotAngle = alpha* RotAngle
deltaRotMat = AxisAngleToRotMat(RotAxis, alphaRotAngle)
Onew_hat = np.array(np.mat(inv(deltaRotMat))*np.mat(O_hat))
quatMUSE = RotMatToQuat(Onew_hat)
quatMUSE = quatNormalized(quatMUSE)
quatMuseAlg = QuaternionClass(quatMUSE[0], quatMUSE[1], quatMUSE[2], quatMUSE[3])
#print("update quat with MUSE")
update = 0
yawMUSE, pitchMUSE, rollMUSE = QuatToEuler(quatMuseAlg)
# print("yawMUSE: ", yawMUSE*180/math.pi)
q4.w = quatMuseAlg[0]
q4.x = quatMuseAlg[1]
q4.y = quatMuseAlg[3]
q4.z = -quatMuseAlg[2]
q4 = q4.normalized()
cube4.erase(screen)
cube4.draw(screen,q4,p4)
if visualIMU:
# quatDMP = QuaternionClass(qw, qx, qy, qz)
# yawDMP, pitchDMP, rollDMP = MPU9250_computeEuler(qw, qx, qy, qz)
# print("yprDMP: ", yawDMP, pitchDMP, rollDMP)
# # print("ypr: ", yaw, pitch, roll)
# quatDMP1 = Euler2Quat(yawDMP, pitchDMP, rollDMP)
# quatDMP = qnormalized(quatDMP)
# print("quatDMP: " , quatDMP[0], quatDMP[1], quatDMP[2], quatDMP[3])
# yawDMP, pitchDMP, rollDMP = quaternion_to_euler_angle(quatDMP[0], quatDMP[1], quatDMP[2], quatDMP[3])
# quatDMP1 = Euler2Quat(yawDMP, pitchDMP, rollDMP)
# quatDMP1 = qnormalized(quatDMP1)
# print("quatDMP1: ", quatDMP1[0], quatDMP1[1], quatDMP1[2], quatDMP1[3])
# print("ypr: ", yawDMP*180/math.pi)
# if yaw - 180 > 0 :
# yaw -= 360
# yaw *= math.pi/180
# if roll - 180 > 0 :
# roll -= 360
# roll *= math.pi/180
# if pitch - 180 > 0 :
# pitch -= 360
# pitch *= math.pi/180
# quatDMP = Euler2Quat(yaw, pitch, roll)
# quatDMP = qnormalized(quatDMP)
# q5.w = quatDMP1[0]
# q5.x = quatDMP1[1]
# q5.y = quatDMP1[3]
# q5.z = -quatDMP1[2]
# yawES = math.atan2(mx,my)
# rollES, pitchES = RP_calculate(accel)
# rollES = rollES
# yawES *= 180/math.pi
# if yawES < 0 :
# yawES += 360.0
# rollES *= 180/math.pi
# if rollES < 0 :
# rollES += 360.0
# pitchES *= 180/math.pi
# if pitchES < 0 :
# pitchES += 360.0
# print("yaw, yawES: ", yaw, yawES)
# print("roll, rollES: ", roll, rollES)
# print("pitch, pitchES: ", pitch, pitchES)
# rollES = rollES * 180/math.pi
# if rollES < 0:
# rollES = 360 + rollES
# rollES = (360 - rollES*180/math.pi)
# rollES = rollES * math.pi/180
# yawES = yawES*math.pi/180
# rollES = rollES*math.pi/180
# print("yawES: ", yawES)
#
# quatES = Euler2Quat(yaw*math.pi/180, pitch*math.pi/180, roll*math.pi/180)
# # quatES = Euler2Quat(yawES*math.pi/180, 0, 0)
# quatES = qnormalized(quatES)
# # print("quatES: ", quatES[0], quatES[1], quatES[2], quatES[3]) # 3 - yaw
# q5.w = quatES[0]
# q5.x = quatES[1]
# q5.z = -quatES[2]
# q5.y = quatES[3]
q5 = q5.normalized()
cube5.erase(screen)
cube5.draw(screen,q5,p5)
if visualIMU:
pygame.display.flip()
pygame.time.delay(0)
event = pygame.event.poll()
if event.type == pygame.QUIT \
or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
break
# print(time)
# print(time-prev_time)
# print(ax)
# print(ay)
# print(az)
# print(gx)
# print(gy)
# print(gz)
# print(mx)
# print(my)
# print(mz)
# sp = reading.split()
# print(float(sp[0][:-1]))
# print(sp[1].split(','))
# # print(float(sp[1][:-1]))
|
[
"pygame.event.poll",
"pygame.init",
"numpy.array",
"numpy.linalg.norm",
"modules.euclid.Quaternion",
"modules.quaternion.QuaternionClass",
"numpy.cross",
"pygame.time.delay",
"modules.a3muse.quatNormalized",
"pygame.display.flip",
"numpy.subtract",
"modules.a3muse.RotMatToQuat",
"numpy.mat",
"modules.a3muse.qnormalized",
"modules.a3muse.QuatToEuler",
"modules.EuclidObjects.Screen",
"modules.EuclidObjects.Cube",
"modules.mpulib.computeheading",
"modules.a3muse.androidAccMag2Euler",
"modules.a3muse.IntegrationRK4",
"modules.a3muse.AxisAngleToRotMat",
"modules.a3muse.angle_between",
"numpy.linalg.inv",
"serial.Serial",
"modules.euclid.Vector3",
"modules.a3muse.QuatToRotMat"
] |
[((1012, 1051), 'serial.Serial', 'serial.Serial', (['"""/dev/tty.usbmodem14411"""'], {}), "('/dev/tty.usbmodem14411')\n", (1025, 1051), False, 'import serial\n'), ((1556, 1569), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1567, 1569), False, 'import pygame\n'), ((1580, 1608), 'modules.EuclidObjects.Screen', 'Screen', (['(1600)', '(400)'], {'scale': '(1.5)'}), '(1600, 400, scale=1.5)\n', (1586, 1608), False, 'from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen\n'), ((1616, 1632), 'modules.EuclidObjects.Cube', 'Cube', (['(40)', '(30)', '(60)'], {}), '(40, 30, 60)\n', (1620, 1632), False, 'from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen\n'), ((1640, 1656), 'modules.EuclidObjects.Cube', 'Cube', (['(40)', '(30)', '(60)'], {}), '(40, 30, 60)\n', (1644, 1656), False, 'from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen\n'), ((1664, 1680), 'modules.EuclidObjects.Cube', 'Cube', (['(40)', '(30)', '(60)'], {}), '(40, 30, 60)\n', (1668, 1680), False, 'from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen\n'), ((1688, 1704), 'modules.EuclidObjects.Cube', 'Cube', (['(40)', '(30)', '(60)'], {}), '(40, 30, 60)\n', (1692, 1704), False, 'from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen\n'), ((1712, 1728), 'modules.EuclidObjects.Cube', 'Cube', (['(40)', '(30)', '(60)'], {}), '(40, 30, 60)\n', (1716, 1728), False, 'from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen\n'), ((1734, 1756), 'modules.euclid.Quaternion', 'Quaternion', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (1744, 1756), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1760, 1782), 'modules.euclid.Quaternion', 'Quaternion', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (1770, 1782), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1786, 1808), 'modules.euclid.Quaternion', 'Quaternion', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (1796, 1808), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1812, 1834), 'modules.euclid.Quaternion', 'Quaternion', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (1822, 1834), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1838, 1860), 'modules.euclid.Quaternion', 'Quaternion', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (1848, 1860), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1865, 1884), 'modules.euclid.Vector3', 'Vector3', (['(-400)', '(0)', '(0)'], {}), '(-400, 0, 0)\n', (1872, 1884), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1889, 1908), 'modules.euclid.Vector3', 'Vector3', (['(-200)', '(0)', '(0)'], {}), '(-200, 0, 0)\n', (1896, 1908), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1913, 1929), 'modules.euclid.Vector3', 'Vector3', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1920, 1929), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1934, 1952), 'modules.euclid.Vector3', 'Vector3', (['(200)', '(0)', '(0)'], {}), '(200, 0, 0)\n', (1941, 1952), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1957, 1975), 'modules.euclid.Vector3', 'Vector3', (['(400)', '(0)', '(0)'], {}), '(400, 0, 0)\n', (1964, 1975), False, 'from modules.euclid import Vector3, Quaternion\n'), ((2442, 2469), 'modules.quaternion.QuaternionClass', 'QuaternionClass', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (2457, 2469), False, 'from modules.quaternion import QuaternionClass\n'), ((2485, 2512), 
'modules.quaternion.QuaternionClass', 'QuaternionClass', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (2500, 2512), False, 'from modules.quaternion import QuaternionClass\n'), ((2617, 2644), 'modules.quaternion.QuaternionClass', 'QuaternionClass', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (2632, 2644), False, 'from modules.quaternion import QuaternionClass\n'), ((3556, 3584), 'modules.quaternion.QuaternionClass', 'QuaternionClass', (['(0)', '(0)', '(-1)', '(0)'], {}), '(0, 0, -1, 0)\n', (3571, 3584), False, 'from modules.quaternion import QuaternionClass\n'), ((4869, 4889), 'modules.a3muse.qnormalized', 'qnormalized', (['quatMad'], {}), '(quatMad)\n', (4880, 4889), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((4992, 5012), 'modules.a3muse.QuatToEuler', 'QuatToEuler', (['quatMad'], {}), '(quatMad)\n', (5003, 5012), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((5065, 5128), 'modules.quaternion.QuaternionClass', 'QuaternionClass', (['quatMad[0]', 'quatMad[1]', 'quatMad[3]', 'quatMad[2]'], {}), '(quatMad[0], quatMad[1], quatMad[3], quatMad[2])\n', (5080, 5128), False, 'from modules.quaternion import QuaternionClass\n'), ((5565, 5596), 'modules.a3muse.androidAccMag2Euler', 'androidAccMag2Euler', (['accel', 'mag'], {}), '(accel, mag)\n', (5584, 5596), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((6436, 6478), 'modules.a3muse.IntegrationRK4', 'IntegrationRK4', (['omega0', 'omega1', 'quatA3', 'dt'], {}), '(omega0, omega1, quatA3, dt)\n', (6450, 6478), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((6503, 6521), 'modules.a3muse.QuatToEuler', 'QuatToEuler', (['quatG'], {}), '(quatG)\n', (6514, 6521), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((6822, 6853), 'modules.a3muse.androidAccMag2Euler', 'androidAccMag2Euler', (['accel', 'mag'], {}), '(accel, mag)\n', (6841, 6853), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((7176, 7198), 'modules.mpulib.computeheading', 'computeheading', (['mx', 'my'], {}), '(mx, my)\n', (7190, 7198), False, 'from modules.mpulib import computeheading, attitudefromCompassGravity, RP_calculate, MadgwickQuaternionUpdate, Euler2Quat, quaternion_to_euler_angle, MPU9250_computeEuler\n'), ((8513, 8532), 'modules.a3muse.QuatToEuler', 'QuatToEuler', (['quatA3'], {}), '(quatA3)\n', (8524, 8532), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, 
EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((8591, 8651), 'modules.quaternion.QuaternionClass', 'QuaternionClass', (['quatA3[0]', 'quatA3[1]', 'quatA3[3]', '(-quatA3[2])'], {}), '(quatA3[0], quatA3[1], quatA3[3], -quatA3[2])\n', (8606, 8651), False, 'from modules.quaternion import QuaternionClass\n'), ((9038, 9069), 'modules.a3muse.androidAccMag2Euler', 'androidAccMag2Euler', (['accel', 'mag'], {}), '(accel, mag)\n', (9057, 9069), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((9105, 9152), 'modules.a3muse.IntegrationRK4', 'IntegrationRK4', (['omega0', 'omega1', 'quatMuseAlg', 'dt'], {}), '(omega0, omega1, quatMuseAlg, dt)\n', (9119, 9152), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((9177, 9195), 'modules.a3muse.QuatToEuler', 'QuatToEuler', (['quatG'], {}), '(quatG)\n', (9188, 9195), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((9228, 9250), 'modules.mpulib.computeheading', 'computeheading', (['mx', 'my'], {}), '(mx, my)\n', (9242, 9250), False, 'from modules.mpulib import computeheading, attitudefromCompassGravity, RP_calculate, MadgwickQuaternionUpdate, Euler2Quat, quaternion_to_euler_angle, MPU9250_computeEuler\n'), ((15138, 15159), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (15157, 15159), False, 'import pygame\n'), ((15162, 15182), 'pygame.time.delay', 'pygame.time.delay', (['(0)'], {}), '(0)\n', (15179, 15182), False, 'import pygame\n'), ((15193, 15212), 'pygame.event.poll', 'pygame.event.poll', ([], {}), '()\n', (15210, 15212), False, 'import pygame\n'), ((1983, 2014), 'modules.euclid.Quaternion', 'Quaternion', (['(0.96)', '(0.01)', '(0.01)', '(0)'], {}), '(0.96, 0.01, 0.01, 0)\n', (1993, 2014), False, 'from modules.euclid import Vector3, Quaternion\n'), ((9474, 9500), 'numpy.mat', 'np.mat', (['[[mx], [my], [mz]]'], {}), '([[mx], [my], [mz]])\n', (9480, 9500), True, 'import numpy as np\n'), ((9541, 9561), 'modules.a3muse.QuatToRotMat', 'QuatToRotMat', (['quatAM'], {}), '(quatAM)\n', (9553, 9561), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((12684, 12708), 'modules.a3muse.QuatToEuler', 'QuatToEuler', (['quatMuseAlg'], {}), '(quatMuseAlg)\n', (12695, 12708), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((7008, 7023), 'numpy.array', 'np.array', (['accel'], {}), '(accel)\n', (7016, 7023), True, 'import numpy as np\n'), ((11362, 11388), 'numpy.mat', 'np.mat', (['[[mx], [my], [mz]]'], {}), '([[mx], [my], [mz]])\n', (11368, 11388), True, 'import numpy as np\n'), ((11433, 11453), 
'modules.a3muse.QuatToRotMat', 'QuatToRotMat', (['quatAM'], {}), '(quatAM)\n', (11445, 11453), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((11719, 11738), 'modules.a3muse.QuatToRotMat', 'QuatToRotMat', (['quatG'], {}), '(quatG)\n', (11731, 11738), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((11756, 11766), 'numpy.linalg.inv', 'inv', (['O_hat'], {}), '(O_hat)\n', (11759, 11766), False, 'from numpy.linalg import inv\n'), ((11858, 11884), 'numpy.mat', 'np.mat', (['[[mx], [my], [mz]]'], {}), '([[mx], [my], [mz]])\n', (11864, 11884), True, 'import numpy as np\n'), ((12030, 12052), 'numpy.array', 'np.array', (['[mx, my, mz]'], {}), '([mx, my, mz])\n', (12038, 12052), True, 'import numpy as np\n'), ((12067, 12089), 'numpy.cross', 'np.cross', (['N_L_hat', 'N_L'], {}), '(N_L_hat, N_L)\n', (12075, 12089), True, 'import numpy as np\n'), ((12229, 12256), 'modules.a3muse.angle_between', 'angle_between', (['N_L_hat', 'N_L'], {}), '(N_L_hat, N_L)\n', (12242, 12256), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((12311, 12352), 'modules.a3muse.AxisAngleToRotMat', 'AxisAngleToRotMat', (['RotAxis', 'alphaRotAngle'], {}), '(RotAxis, alphaRotAngle)\n', (12328, 12352), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((12432, 12454), 'modules.a3muse.RotMatToQuat', 'RotMatToQuat', (['Onew_hat'], {}), '(Onew_hat)\n', (12444, 12454), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((12470, 12494), 'modules.a3muse.quatNormalized', 'quatNormalized', (['quatMUSE'], {}), '(quatMUSE)\n', (12484, 12494), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((12513, 12580), 'modules.quaternion.QuaternionClass', 'QuaternionClass', (['quatMUSE[0]', 'quatMUSE[1]', 'quatMUSE[2]', 'quatMUSE[3]'], {}), '(quatMUSE[0], quatMUSE[1], quatMUSE[2], quatMUSE[3])\n', (12528, 12580), False, 'from modules.quaternion import QuaternionClass\n'), ((9848, 9860), 'numpy.array', 'np.array', (['Ax'], {}), '(Ax)\n', (9856, 9860), True, 'import numpy as np\n'), ((9876, 9888), 'numpy.array', 'np.array', (['Ay'], {}), '(Ay)\n', (9884, 9888), True, 'import numpy as np\n'), ((9904, 9916), 'numpy.array', 'np.array', (['Az'], {}), '(Az)\n', (9912, 9916), True, 'import numpy as np\n'), ((10993, 11013), 'modules.a3muse.QuatToRotMat', 'QuatToRotMat', (['quatAM'], {}), '(quatAM)\n', (11005, 11013), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, 
EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((11024, 11043), 'modules.a3muse.QuatToRotMat', 'QuatToRotMat', (['quatG'], {}), '(quatG)\n', (11036, 11043), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((12112, 12128), 'numpy.linalg.norm', 'LA.norm', (['RotAxis'], {}), '(RotAxis)\n', (12119, 12128), True, 'from numpy import linalg as LA\n'), ((6965, 6979), 'numpy.array', 'np.array', (['gyro'], {}), '(gyro)\n', (6973, 6979), True, 'import numpy as np\n'), ((7435, 7453), 'numpy.subtract', 'np.subtract', (['Sc', 'C'], {}), '(Sc, C)\n', (7446, 7453), True, 'import numpy as np\n'), ((7476, 7494), 'numpy.subtract', 'np.subtract', (['Sg', 'G'], {}), '(Sg, G)\n', (7487, 7494), True, 'import numpy as np\n'), ((12402, 12415), 'numpy.mat', 'np.mat', (['O_hat'], {}), '(O_hat)\n', (12408, 12415), True, 'import numpy as np\n'), ((11058, 11068), 'numpy.mat', 'np.mat', (['Oa'], {}), '(Oa)\n', (11064, 11068), True, 'import numpy as np\n'), ((11080, 11090), 'numpy.mat', 'np.mat', (['Og'], {}), '(Og)\n', (11086, 11090), True, 'import numpy as np\n'), ((11248, 11261), 'numpy.mat', 'np.mat', (['Ocomp'], {}), '(Ocomp)\n', (11254, 11261), True, 'import numpy as np\n'), ((12384, 12400), 'numpy.linalg.inv', 'inv', (['deltaRotMat'], {}), '(deltaRotMat)\n', (12387, 12400), False, 'from numpy.linalg import inv\n'), ((11944, 11961), 'numpy.array', 'np.array', (['N_L_hat'], {}), '(N_L_hat)\n', (11952, 11961), True, 'import numpy as np\n'), ((11969, 11986), 'numpy.array', 'np.array', (['N_L_hat'], {}), '(N_L_hat)\n', (11977, 11986), True, 'import numpy as np\n'), ((11994, 12011), 'numpy.array', 'np.array', (['N_L_hat'], {}), '(N_L_hat)\n', (12002, 12011), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
from gpar.regression import GPARRegressor
from wbml.experiment import WorkingDirectory
import wbml.plot
if __name__ == "__main__":
wd = WorkingDirectory("_experiments", "synthetic", seed=1)
# Create toy data set.
n = 200
x = np.linspace(0, 1, n)
noise = 0.1
# Draw functions depending on each other in complicated ways.
f1 = -np.sin(10 * np.pi * (x + 1)) / (2 * x + 1) - x ** 4
f2 = np.cos(f1) ** 2 + np.sin(3 * x)
f3 = f2 * f1 ** 2 + 3 * x
f = np.stack((f1, f2, f3), axis=0).T
# Add noise and subsample.
y = f + noise * np.random.randn(n, 3)
x_obs, y_obs = x[::8], y[::8]
# Fit and predict GPAR.
model = GPARRegressor(
scale=0.1,
linear=True,
linear_scale=10.0,
nonlinear=True,
nonlinear_scale=0.1,
noise=0.1,
impute=True,
replace=False,
normalise_y=False,
)
model.fit(x_obs, y_obs)
means, lowers, uppers = model.predict(
x, num_samples=200, credible_bounds=True, latent=True
)
# Fit and predict independent GPs: set `markov=0` in GPAR.
igp = GPARRegressor(
scale=0.1,
linear=True,
linear_scale=10.0,
nonlinear=True,
nonlinear_scale=0.1,
noise=0.1,
markov=0,
normalise_y=False,
)
igp.fit(x_obs, y_obs)
igp_means, igp_lowers, igp_uppers = igp.predict(
x, num_samples=200, credible_bounds=True, latent=True
)
# Plot the result.
plt.figure(figsize=(15, 3))
for i in range(3):
plt.subplot(1, 3, i + 1)
# Plot observations.
plt.scatter(x_obs, y_obs[:, i], label="Observations", style="train")
plt.plot(x, f[:, i], label="Truth", style="test")
# Plot GPAR.
plt.plot(x, means[:, i], label="GPAR", style="pred")
plt.fill_between(x, lowers[:, i], uppers[:, i], style="pred")
# Plot independent GPs.
plt.plot(x, igp_means[:, i], label="IGP", style="pred2")
plt.fill_between(x, igp_lowers[:, i], igp_uppers[:, i], style="pred2")
plt.xlabel("$t$")
plt.ylabel(f"$y_{i + 1}$")
wbml.plot.tweak(legend=i == 2)
plt.tight_layout()
plt.savefig(wd.file("synthetic.pdf"))
|
[
"gpar.regression.GPARRegressor",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.fill_between",
"wbml.experiment.WorkingDirectory",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.stack",
"numpy.cos",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.scatter",
"numpy.sin",
"numpy.random.randn",
"matplotlib.pyplot.subplot"
] |
[((192, 245), 'wbml.experiment.WorkingDirectory', 'WorkingDirectory', (['"""_experiments"""', '"""synthetic"""'], {'seed': '(1)'}), "('_experiments', 'synthetic', seed=1)\n", (208, 245), False, 'from wbml.experiment import WorkingDirectory\n'), ((294, 314), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (305, 314), True, 'import numpy as np\n'), ((721, 881), 'gpar.regression.GPARRegressor', 'GPARRegressor', ([], {'scale': '(0.1)', 'linear': '(True)', 'linear_scale': '(10.0)', 'nonlinear': '(True)', 'nonlinear_scale': '(0.1)', 'noise': '(0.1)', 'impute': '(True)', 'replace': '(False)', 'normalise_y': '(False)'}), '(scale=0.1, linear=True, linear_scale=10.0, nonlinear=True,\n nonlinear_scale=0.1, noise=0.1, impute=True, replace=False, normalise_y\n =False)\n', (734, 881), False, 'from gpar.regression import GPARRegressor\n'), ((1165, 1302), 'gpar.regression.GPARRegressor', 'GPARRegressor', ([], {'scale': '(0.1)', 'linear': '(True)', 'linear_scale': '(10.0)', 'nonlinear': '(True)', 'nonlinear_scale': '(0.1)', 'noise': '(0.1)', 'markov': '(0)', 'normalise_y': '(False)'}), '(scale=0.1, linear=True, linear_scale=10.0, nonlinear=True,\n nonlinear_scale=0.1, noise=0.1, markov=0, normalise_y=False)\n', (1178, 1302), False, 'from gpar.regression import GPARRegressor\n'), ((1545, 1572), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 3)'}), '(figsize=(15, 3))\n', (1555, 1572), True, 'import matplotlib.pyplot as plt\n'), ((2231, 2249), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2247, 2249), True, 'import matplotlib.pyplot as plt\n'), ((487, 500), 'numpy.sin', 'np.sin', (['(3 * x)'], {}), '(3 * x)\n', (493, 500), True, 'import numpy as np\n'), ((539, 569), 'numpy.stack', 'np.stack', (['(f1, f2, f3)'], {'axis': '(0)'}), '((f1, f2, f3), axis=0)\n', (547, 569), True, 'import numpy as np\n'), ((1605, 1629), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(i + 1)'], {}), '(1, 3, i + 1)\n', (1616, 1629), True, 'import matplotlib.pyplot as plt\n'), ((1668, 1736), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_obs', 'y_obs[:, i]'], {'label': '"""Observations"""', 'style': '"""train"""'}), "(x_obs, y_obs[:, i], label='Observations', style='train')\n", (1679, 1736), True, 'import matplotlib.pyplot as plt\n'), ((1745, 1794), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'f[:, i]'], {'label': '"""Truth"""', 'style': '"""test"""'}), "(x, f[:, i], label='Truth', style='test')\n", (1753, 1794), True, 'import matplotlib.pyplot as plt\n'), ((1825, 1877), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'means[:, i]'], {'label': '"""GPAR"""', 'style': '"""pred"""'}), "(x, means[:, i], label='GPAR', style='pred')\n", (1833, 1877), True, 'import matplotlib.pyplot as plt\n'), ((1886, 1947), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', 'lowers[:, i]', 'uppers[:, i]'], {'style': '"""pred"""'}), "(x, lowers[:, i], uppers[:, i], style='pred')\n", (1902, 1947), True, 'import matplotlib.pyplot as plt\n'), ((1989, 2045), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'igp_means[:, i]'], {'label': '"""IGP"""', 'style': '"""pred2"""'}), "(x, igp_means[:, i], label='IGP', style='pred2')\n", (1997, 2045), True, 'import matplotlib.pyplot as plt\n'), ((2054, 2124), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', 'igp_lowers[:, i]', 'igp_uppers[:, i]'], {'style': '"""pred2"""'}), "(x, igp_lowers[:, i], igp_uppers[:, i], style='pred2')\n", (2070, 2124), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2151), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t$"""'], {}), "('$t$')\n", (2144, 2151), True, 'import matplotlib.pyplot as plt\n'), ((2160, 2186), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""$y_{i + 1}$"""'], {}), "(f'$y_{i + 1}$')\n", (2170, 2186), True, 'import matplotlib.pyplot as plt\n'), ((469, 479), 'numpy.cos', 'np.cos', (['f1'], {}), '(f1)\n', (475, 479), True, 'import numpy as np\n'), ((624, 645), 'numpy.random.randn', 'np.random.randn', (['n', '(3)'], {}), '(n, 3)\n', (639, 645), True, 'import numpy as np\n'), ((408, 436), 'numpy.sin', 'np.sin', (['(10 * np.pi * (x + 1))'], {}), '(10 * np.pi * (x + 1))\n', (414, 436), True, 'import numpy as np\n')]
|
#%%
import numpy as np
import pandas as pd
import bokeh.plotting
import bokeh.io
import bokeh.models
import growth.model
import growth.viz
const = growth.model.load_constants()
colors, palette = growth.viz.bokeh_style()
mapper = growth.viz.load_markercolors()
bokeh.io.output_file('../../figures/interactive/interactive_ecoli_data.html')
# Define constants
gamma_max = const['gamma_max']
phi_O = const['phi_O']
Kd_cpc = const['Kd_cpc']
nu_max= np.arange(0.001, 50, 0.001)
const_phiRb = 0.25
# Load the mass_frac
mass_frac = pd.read_csv('../../data/main_figure_data/Fig4_ecoli_ribosomal_mass_fractions.csv')
elong = pd.read_csv('../../data/main_figure_data/Fig4_ecoli_peptide_elongation_rates.csv')
# Add markers and colors to maintain consistency.
markers = [mapper[g]['m_bokeh'] for g in mass_frac['source'].values]
_colors = [mapper[g]['c'] for g in mass_frac['source'].values]
mass_frac['marker'] = markers
mass_frac['color'] = _colors
markers = [mapper[g]['m_bokeh'] for g in elong['source'].values]
_colors = [mapper[g]['c'] for g in elong['source'].values]
elong['marker'] = markers
elong['color'] = _colors
mass_frac = bokeh.models.ColumnDataSource(mass_frac)
elong = bokeh.models.ColumnDataSource(elong)
# Set up the initial scenarios
opt_phiRb = growth.model.phiRb_optimal_allocation(gamma_max, nu_max, Kd_cpc, phi_O)
opt_gamma = growth.model.steady_state_gamma(gamma_max, opt_phiRb, nu_max, Kd_cpc, phi_O) * 7459 / 3600
opt_lam = growth.model.steady_state_growth_rate(gamma_max, opt_phiRb, nu_max, Kd_cpc, phi_O)
const_phiRb = const_phiRb * np.ones_like(nu_max)
const_gamma = growth.model.steady_state_gamma(gamma_max, const_phiRb, nu_max, Kd_cpc, phi_O) * 7459 / 3600
const_lam = growth.model.steady_state_growth_rate(gamma_max, const_phiRb, nu_max, Kd_cpc, phi_O)
trans_phiRb = growth.model.phiRb_constant_translation(gamma_max, nu_max, 10, Kd_cpc, phi_O)
trans_gamma = growth.model.steady_state_gamma(gamma_max, trans_phiRb, nu_max, Kd_cpc, phi_O) * 7459 / 3600
trans_lam = growth.model.steady_state_growth_rate(gamma_max, trans_phiRb, nu_max, Kd_cpc, phi_O)
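# Bundle the three allocation scenarios (constant allocation, constant translation rate, optimal allocation) into one ColumnDataSource shared by the glyphs and the JS callback.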
source = bokeh.models.ColumnDataSource({'phiRb': [const_phiRb, trans_phiRb, opt_phiRb],
'gamma': [const_gamma, trans_gamma, opt_gamma],
'lam': [const_lam, trans_lam, opt_lam],
'color': [colors['primary_black'],
colors['primary_green'],
colors['primary_blue']],
'label': ['scenario I: constant allocation',
'scenario II: constant translation rate',
'scenario III: optimal allocation'],
'filler_xs': [[], [], []],
'filler_ys': [[], [], []]})
# ##############################################################################
# WIDGET DEFINITIONS
# ##############################################################################
phiO_slider = bokeh.models.Slider(start=0, end=0.95, step=0.001, value=phi_O,
title='allocation to other proteins')
gamma_slider = bokeh.models.Slider(start=1, end=25, step=0.001, value=gamma_max * 7459 / 3600,
title='maximum translation speed [AA / s]')
Kd_cpc_slider = bokeh.models.Slider(start=-4, end=-0.0001, step=0.001, value=np.log10(Kd_cpc),
title='log\u2081\u2080 precursor Michaelis-Menten constant')
phiRb_slider = bokeh.models.Slider(start=0.001, end=0.45, step=0.001,
value = 0.25,
title='scenario I: constant ribosomal allocation parameter',
bar_color=colors['primary_black'],
default_size=350)
sc2_cpc_slider = bokeh.models.Slider(start=0, end=0.999, step=0.01,
value = 0.9,
title='scenario II: target translation speed (relative to max)',
bar_color=colors['primary_green'],
default_size=350)
# ##############################################################################
# CANVAS DEFINITION
# ##############################################################################
mass_frac_tooltips = [('source', '@source'),
('ribosomal allocation', '@mass_fraction{0.2f}'),
('growth rate\n[inv. hr.]', '@growth_rate_hr{0.2f}'),
('method', '@method')]
elong_tooltips = [('source', '@source'),
('translation rate [AA/s]', '@elongation_rate_aa_s{0.2f}'),
('growth rate\n[inv. hr.]', '@growth_rate_hr{0.2f}')]
mass_hover = bokeh.models.HoverTool(names=['data'], tooltips=mass_frac_tooltips)
elong_hover = bokeh.models.HoverTool(names=['data'], tooltips=elong_tooltips)
allocation_axis = bokeh.plotting.figure(width=450, height=400,
x_axis_label='growth rate λ [inv. hr]',
y_axis_label = 'ribosomal allocation',
y_range=[0, 0.35],
x_range=[0, 2],
tools = [mass_hover, 'pan',
'wheel_zoom', 'box_zoom']
)
elongation_axis = bokeh.plotting.figure(width=450, height=400,
y_axis_label='translation speed [AA / s]',
x_axis_label = 'growth rate λ [inv. hr]',
y_range=[5, 20],
x_range = [0, 2],
tools = [elong_hover, 'pan',
'wheel_zoom', 'box_zoom']
)
legend_axis = bokeh.plotting.figure(width=370, height=120, tools=[])
legend_axis.axis.axis_label = None
legend_axis.axis.visible = False
legend_axis.grid.grid_line_color = None
legend_axis.background_fill_color = None
legend_axis.outline_line_color = None
# ##############################################################################
# GLYPH DEFINITION
# ##############################################################################
allocation_axis.scatter(x='growth_rate_hr', y='mass_fraction', marker='marker',
color='color', source=mass_frac, size=10, line_color='black',
alpha=0.75, name='data')
elongation_axis.scatter(x='growth_rate_hr', y='elongation_rate_aa_s', marker='marker',
color='color', source=elong, size=10, line_color='black',
alpha=0.75, name='data')
allocation_axis.multi_line(xs='lam', ys='phiRb', color='color', line_width=2,
source=source)
elongation_axis.multi_line(xs='lam', ys='gamma', color='color', line_width=2,
source=source)
legend_axis.multi_line(xs='filler_xs', ys='filler_ys', line_width=2.5,
line_color='color', legend_field='label' ,
source=source)
# ##############################################################################
# CALLBACK DEFINITION
# ##############################################################################
args = {'gamma_slider': gamma_slider,
'Kd_cpc_slider': Kd_cpc_slider,
'phiO_slider': phiO_slider,
'phiRb_slider': phiRb_slider,
'source': source,
'nu_max': nu_max,
'sc2_cpc_slider': sc2_cpc_slider}
callback = growth.viz.load_js(['./interactive_ecoli_data.js', './functions.js'],
args=args)
for s in [gamma_slider, Kd_cpc_slider, phiO_slider, phiRb_slider, sc2_cpc_slider]:
s.js_on_change('value', callback)
# ##############################################################################
# LAYOUT
# ##############################################################################
col1 = bokeh.layouts.Column(gamma_slider, phiO_slider)
col2 = bokeh.layouts.Column(Kd_cpc_slider, phiRb_slider, sc2_cpc_slider)
sliders = bokeh.layouts.Row(col1, col2, legend_axis)
row1 = bokeh.layouts.Row(allocation_axis, elongation_axis)
layout = bokeh.layouts.Column(sliders, row1)
bokeh.io.save(layout)
|
[
"numpy.ones_like",
"numpy.log10",
"numpy.arange",
"pandas.read_csv"
] |
[((450, 477), 'numpy.arange', 'np.arange', (['(0.001)', '(50)', '(0.001)'], {}), '(0.001, 50, 0.001)\n', (459, 477), True, 'import numpy as np\n'), ((532, 619), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/main_figure_data/Fig4_ecoli_ribosomal_mass_fractions.csv"""'], {}), "(\n '../../data/main_figure_data/Fig4_ecoli_ribosomal_mass_fractions.csv')\n", (543, 619), True, 'import pandas as pd\n'), ((623, 710), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/main_figure_data/Fig4_ecoli_peptide_elongation_rates.csv"""'], {}), "(\n '../../data/main_figure_data/Fig4_ecoli_peptide_elongation_rates.csv')\n", (634, 710), True, 'import pandas as pd\n'), ((1563, 1583), 'numpy.ones_like', 'np.ones_like', (['nu_max'], {}), '(nu_max)\n', (1575, 1583), True, 'import numpy as np\n'), ((3525, 3541), 'numpy.log10', 'np.log10', (['Kd_cpc'], {}), '(Kd_cpc)\n', (3533, 3541), True, 'import numpy as np\n')]
|
from helpers import poseRt
from frame import Frame
import time
import numpy as np
import g2o
import json
LOCAL_WINDOW = 20
#LOCAL_WINDOW = None
class Point(object):
# A Point is a 3-D point in the world
# Each Point is observed in multiple Frames
def __init__(self, mapp, loc, color, tid=None):
self.pt = np.array(loc)
self.frames = []
self.idxs = []
self.color = np.copy(color)
self.id = tid if tid is not None else mapp.add_point(self)
def homogeneous(self):
return np.array([self.pt[0], self.pt[1], self.pt[2], 1.0])
def orb(self):
return [f.des[idx] for f,idx in zip(self.frames, self.idxs)]
def delete(self):
for f,idx in zip(self.frames, self.idxs):
f.pts[idx] = None
    del self  # removes only the local name; the Point is garbage-collected once callers drop their references
def add_observation(self, frame, idx):
frame.pts[idx] = self
self.frames.append(frame)
self.idxs.append(idx)
class Map(object):
def __init__(self):
self.frames = []
self.points = []
self.max_frame = 0
self.max_point = 0
def serialize(self):
ret = {}
ret['points'] = [{'id': p.id, 'pt': p.pt.tolist(), 'color': p.color.tolist()} for p in self.points]
ret['frames'] = []
for f in self.frames:
ret['frames'].append({
'id': f.id, 'K': f.K.tolist(), 'pose': f.pose.tolist(), 'h': f.h, 'w': f.w,
'kpus': f.kpus.tolist(), 'des': f.des.tolist(),
'pts': [p.id if p is not None else -1 for p in f.pts]})
ret['max_frame'] = self.max_frame
ret['max_point'] = self.max_point
return json.dumps(ret)
def deserialize(self, s):
ret = json.loads(s)
self.max_frame = ret['max_frame']
self.max_point = ret['max_point']
self.points = []
self.frames = []
pids = {}
for p in ret['points']:
pp = Point(self, p['pt'], p['color'], p['id'])
self.points.append(pp)
pids[p['id']] = pp
for f in ret['frames']:
ff = Frame(self, None, f['K'], f['pose'], f['id'])
ff.w, ff.h = f['w'], f['h']
ff.kpus = np.array(f['kpus'])
ff.des = np.array(f['des'])
ff.pts = [None] * len(ff.kpus)
for i,p in enumerate(f['pts']):
if p != -1:
ff.pts[i] = pids[p]
self.frames.append(ff)
def add_point(self, point):
ret = self.max_point
self.max_point += 1
self.points.append(point)
return ret
def add_frame(self, frame):
ret = self.max_frame
self.max_frame += 1
self.frames.append(frame)
return ret
# *** optimizer ***
def optimize(self, local_window=LOCAL_WINDOW, fix_points=False, verbose=False):
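    # Bundle adjustment with g2o: optimize camera poses in the local window (and the 3-D points unless fix_points), then cull stale points and points with large reprojection error.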
# create g2o optimizer
opt = g2o.SparseOptimizer()
solver = g2o.BlockSolverSE3(g2o.LinearSolverCholmodSE3())
solver = g2o.OptimizationAlgorithmLevenberg(solver)
opt.set_algorithm(solver)
robust_kernel = g2o.RobustKernelHuber(np.sqrt(5.991))
if local_window is None:
local_frames = self.frames
else:
local_frames = self.frames[-local_window:]
# add frames to graph
for f in self.frames:
pose = np.linalg.inv(f.pose)
sbacam = g2o.SBACam(g2o.SE3Quat(pose[0:3, 0:3], pose[0:3, 3]))
sbacam.set_cam(f.K[0][0], f.K[1][1], f.K[0][2], f.K[1][2], 1.0)
v_se3 = g2o.VertexCam()
v_se3.set_id(f.id)
v_se3.set_estimate(sbacam)
v_se3.set_fixed(f.id <= 1 or f not in local_frames)
opt.add_vertex(v_se3)
# add points to frames
    PT_ID_OFFSET = 0x10000  # offset point vertex ids so they never collide with frame vertex ids
for p in self.points:
if not any([f in local_frames for f in p.frames]):
continue
pt = g2o.VertexSBAPointXYZ()
pt.set_id(p.id + PT_ID_OFFSET)
pt.set_estimate(p.pt[0:3])
pt.set_marginalized(True)
pt.set_fixed(fix_points)
opt.add_vertex(pt)
for f,idx in zip(p.frames, p.idxs):
edge = g2o.EdgeProjectP2MC()
edge.set_vertex(0, pt)
edge.set_vertex(1, opt.vertex(f.id))
uv = f.kpus[idx]
edge.set_measurement(uv)
edge.set_information(np.eye(2))
edge.set_robust_kernel(robust_kernel)
opt.add_edge(edge)
if verbose:
opt.set_verbose(True)
opt.initialize_optimization()
opt.optimize(20)
# put frames back
for f in self.frames:
est = opt.vertex(f.id).estimate()
R = est.rotation().matrix()
t = est.translation()
f.pose = np.linalg.inv(poseRt(R, t))
# put points back (and cull)
if not fix_points:
new_points = []
for p in self.points:
vert = opt.vertex(p.id + PT_ID_OFFSET)
if vert is None:
new_points.append(p)
continue
est = vert.estimate()
        # a point observed in <= 3 frames whose latest observation is outside the local window is stale
old_point = len(p.frames) <= 3 and p.frames[-1] not in local_frames
# compute reprojection error
errs = []
for f,idx in zip(p.frames, p.idxs):
uv = f.kpus[idx]
proj = np.dot(np.dot(f.K, f.pose[:3]),
np.array([est[0], est[1], est[2], 1.0]))
proj = proj[0:2] / proj[2]
errs.append(np.linalg.norm(proj-uv))
# cull
if old_point or np.mean(errs) > 5:
p.delete()
continue
p.pt = np.array(est)
new_points.append(p)
print("Culled: %d points" % (len(self.points) - len(new_points)))
self.points = new_points
return opt.active_chi2()
|
[
"numpy.sqrt",
"numpy.array",
"g2o.VertexCam",
"numpy.linalg.norm",
"numpy.mean",
"g2o.SparseOptimizer",
"json.dumps",
"numpy.dot",
"g2o.LinearSolverCholmodSE3",
"frame.Frame",
"g2o.OptimizationAlgorithmLevenberg",
"g2o.EdgeProjectP2MC",
"helpers.poseRt",
"json.loads",
"numpy.eye",
"g2o.VertexSBAPointXYZ",
"numpy.copy",
"numpy.linalg.inv",
"g2o.SE3Quat"
] |
[((318, 331), 'numpy.array', 'np.array', (['loc'], {}), '(loc)\n', (326, 331), True, 'import numpy as np\n'), ((389, 403), 'numpy.copy', 'np.copy', (['color'], {}), '(color)\n', (396, 403), True, 'import numpy as np\n'), ((504, 555), 'numpy.array', 'np.array', (['[self.pt[0], self.pt[1], self.pt[2], 1.0]'], {}), '([self.pt[0], self.pt[1], self.pt[2], 1.0])\n', (512, 555), True, 'import numpy as np\n'), ((1510, 1525), 'json.dumps', 'json.dumps', (['ret'], {}), '(ret)\n', (1520, 1525), False, 'import json\n'), ((1565, 1578), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (1575, 1578), False, 'import json\n'), ((2584, 2605), 'g2o.SparseOptimizer', 'g2o.SparseOptimizer', ([], {}), '()\n', (2603, 2605), False, 'import g2o\n'), ((2681, 2723), 'g2o.OptimizationAlgorithmLevenberg', 'g2o.OptimizationAlgorithmLevenberg', (['solver'], {}), '(solver)\n', (2715, 2723), False, 'import g2o\n'), ((1887, 1932), 'frame.Frame', 'Frame', (['self', 'None', "f['K']", "f['pose']", "f['id']"], {}), "(self, None, f['K'], f['pose'], f['id'])\n", (1892, 1932), False, 'from frame import Frame\n'), ((1983, 2002), 'numpy.array', 'np.array', (["f['kpus']"], {}), "(f['kpus'])\n", (1991, 2002), True, 'import numpy as np\n'), ((2018, 2036), 'numpy.array', 'np.array', (["f['des']"], {}), "(f['des'])\n", (2026, 2036), True, 'import numpy as np\n'), ((2638, 2666), 'g2o.LinearSolverCholmodSE3', 'g2o.LinearSolverCholmodSE3', ([], {}), '()\n', (2664, 2666), False, 'import g2o\n'), ((2797, 2811), 'numpy.sqrt', 'np.sqrt', (['(5.991)'], {}), '(5.991)\n', (2804, 2811), True, 'import numpy as np\n'), ((3001, 3022), 'numpy.linalg.inv', 'np.linalg.inv', (['f.pose'], {}), '(f.pose)\n', (3014, 3022), True, 'import numpy as np\n'), ((3177, 3192), 'g2o.VertexCam', 'g2o.VertexCam', ([], {}), '()\n', (3190, 3192), False, 'import g2o\n'), ((3504, 3527), 'g2o.VertexSBAPointXYZ', 'g2o.VertexSBAPointXYZ', ([], {}), '()\n', (3525, 3527), False, 'import g2o\n'), ((3049, 3090), 'g2o.SE3Quat', 'g2o.SE3Quat', (['pose[0:3, 0:3]', 'pose[0:3, 3]'], {}), '(pose[0:3, 0:3], pose[0:3, 3])\n', (3060, 3090), False, 'import g2o\n'), ((3744, 3765), 'g2o.EdgeProjectP2MC', 'g2o.EdgeProjectP2MC', ([], {}), '()\n', (3763, 3765), False, 'import g2o\n'), ((4301, 4313), 'helpers.poseRt', 'poseRt', (['R', 't'], {}), '(R, t)\n', (4307, 4313), False, 'from helpers import poseRt\n'), ((5129, 5142), 'numpy.array', 'np.array', (['est'], {}), '(est)\n', (5137, 5142), True, 'import numpy as np\n'), ((3929, 3938), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (3935, 3938), True, 'import numpy as np\n'), ((4840, 4863), 'numpy.dot', 'np.dot', (['f.K', 'f.pose[:3]'], {}), '(f.K, f.pose[:3])\n', (4846, 4863), True, 'import numpy as np\n'), ((4889, 4928), 'numpy.array', 'np.array', (['[est[0], est[1], est[2], 1.0]'], {}), '([est[0], est[1], est[2], 1.0])\n', (4897, 4928), True, 'import numpy as np\n'), ((4989, 5014), 'numpy.linalg.norm', 'np.linalg.norm', (['(proj - uv)'], {}), '(proj - uv)\n', (5003, 5014), True, 'import numpy as np\n'), ((5054, 5067), 'numpy.mean', 'np.mean', (['errs'], {}), '(errs)\n', (5061, 5067), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import nltk
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
#from nltk import pos_tag
from nltk.tag import StanfordNERTagger
from nltk.tokenize import word_tokenize
style.use('fivethirtyeight')
# Process text
raw_text = open("news_article.txt").read()
token_text = word_tokenize(raw_text)
def stanford_tagger(token_text):
st = StanfordNERTagger('english.all.3class.distsim.crf.ser.gz',
'stanford-ner.jar')
ne_tagged = st.tag(token_text)
return ne_tagged
def nltk_tagger(token_text):
tagged_words = nltk.pos_tag(token_text)
ne_tagged = nltk.ne_chunk(tagged_words)
return ne_tagged
def stanford_main():
print (stanford_tagger(token_text))
def nltk_main():
print (nltk_tagger(token_text))
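# Plot a bar chart comparing the wall-clock time of the Stanford and NLTK taggers.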
def time_plot(stanford_total_time, nltk_total_time):
N = 1
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, stanford_total_time, width, color='r')
rects2 = ax.bar(ind+width, nltk_total_time, width, color='y')
# Add text for labels, title and axes ticks
ax.set_xlabel('Classifier')
ax.set_ylabel('Time (in seconds)')
ax.set_title('Speed by NER Classifier')
ax.set_xticks(ind+width)
ax.set_xticklabels( ('') )
ax.legend( (rects1[0], rects2[0]), ('Stanford', 'NLTK'), bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. )
def autolabel(rects):
#attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.02*height, '%10.2f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
plt.show()
if __name__ == '__main__':
	stanford_t0 = os.times()[4]  # os.times()[4] is elapsed wall-clock time in seconds
stanford_main()
stanford_t1 = os.times()[4]
stanford_total_time = stanford_t1 - stanford_t0
nltk_t0 = os.times()[4]
nltk_main()
nltk_t1 = os.times()[4]
nltk_total_time = nltk_t1 - nltk_t0
time_plot(stanford_total_time, nltk_total_time)
|
[
"nltk.pos_tag",
"os.times",
"nltk.ne_chunk",
"nltk.tokenize.word_tokenize",
"matplotlib.style.use",
"nltk.tag.StanfordNERTagger",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((233, 261), 'matplotlib.style.use', 'style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (242, 261), False, 'from matplotlib import style\n'), ((336, 359), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['raw_text'], {}), '(raw_text)\n', (349, 359), False, 'from nltk.tokenize import word_tokenize\n'), ((403, 481), 'nltk.tag.StanfordNERTagger', 'StanfordNERTagger', (['"""english.all.3class.distsim.crf.ser.gz"""', '"""stanford-ner.jar"""'], {}), "('english.all.3class.distsim.crf.ser.gz', 'stanford-ner.jar')\n", (420, 481), False, 'from nltk.tag import StanfordNERTagger\n'), ((611, 635), 'nltk.pos_tag', 'nltk.pos_tag', (['token_text'], {}), '(token_text)\n', (623, 635), False, 'import nltk\n'), ((649, 676), 'nltk.ne_chunk', 'nltk.ne_chunk', (['tagged_words'], {}), '(tagged_words)\n', (662, 676), False, 'import nltk\n'), ((886, 898), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (895, 898), True, 'import numpy as np\n'), ((1081, 1095), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1093, 1095), True, 'import matplotlib.pyplot as plt\n'), ((1892, 1902), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1900, 1902), True, 'import matplotlib.pyplot as plt\n'), ((1950, 1960), 'os.times', 'os.times', ([], {}), '()\n', (1958, 1960), False, 'import os\n'), ((1996, 2006), 'os.times', 'os.times', ([], {}), '()\n', (2004, 2006), False, 'import os\n'), ((2072, 2082), 'os.times', 'os.times', ([], {}), '()\n', (2080, 2082), False, 'import os\n'), ((2110, 2120), 'os.times', 'os.times', ([], {}), '()\n', (2118, 2120), False, 'import os\n')]
|
import sys
sys.path.append('../')
sys.path.append('../..')
import cmbnncs.utils as utils
import cmbnncs.spherical as spherical
import cmbnncs.simulator as simulator
import numpy as np
import time
start_time = time.time()
def sim_Dust(dust_seed, frequ, amplitude_randn, spectralIndex_randn, temp_randn):
    # ComDust = simulator.DustComponents(nside, 3)  # alternative model, not used
    ComDust = simulator.DustComponents(nside, 1)  # use this model
    # ComDust.ReadParameter('paramsML.ini')  # not used
    # Do not call ParametersSampling() when using model 3 in DustComponents(nside, 2)
    ComDust.ParametersSampling()
print (ComDust.paramsample, '\n')
ComDust.RealizationSampling( seed = int(dust_seed), amplitude_randn=amplitude_randn,
spectralIndex_randn=spectralIndex_randn, temp_randn=temp_randn)
ComDust.WriteMap(frequencies = frequ)
out_put = ComDust.out_put
return out_put
#%% generate the Dust full map - training (test) data
nside = 512
# temp_randn = '0'
temp_randn = '0.05Multi'
# amplitude_randn = '0'; spectralIndex_randn = '0' #training set: 1000 #
amplitude_randn = '0'; spectralIndex_randn = '0.1One' #training set: 1000 ##
# amplitude_randn = '0'; spectralIndex_randn = '0.1Multi' #training set: 1000 #
# amplitude_randn = '0.1One'; spectralIndex_randn = '0' #training set: 1000 #
# amplitude_randn = '0.1One'; spectralIndex_randn = '0.1One' #training set: 1000 #
# amplitude_randn = '0.1One'; spectralIndex_randn = '0.1Multi' #training set: 1000 #
# amplitude_randn = '0.1Multi'; spectralIndex_randn = '0' #training set: 1000 #
# amplitude_randn = '0.1Multi'; spectralIndex_randn = '0.1One' #training set: 1000 #
# amplitude_randn = '0.1Multi'; spectralIndex_randn = '0.1Multi' #training set: 1000 #
part_n = 0 #0,1,...
part_size = 1000
frequencies = [100, 143, 217, 353] #for Planck
# frequencies = [85, 95, 145, 155, 220, 270] #for CMB-S4
print ('dust_freqs: %s'%frequencies, 'part_n: %s'%part_n, 'part_size: %s'%part_size, 'start_n: %s'%(part_n*part_size))
np.random.seed(2)#note!!!
Dustseed = np.random.choice(1000000, 50000, replace=False)
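# Fixing the global seed makes this pool of per-map dust seeds reproducible across
# runs, and replace=False guarantees every simulated map gets a distinct seed.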
for i in range(part_size):
for freq in frequencies:
map_I, map_Q, map_U = sim_Dust(Dustseed[i+part_n*part_size], [freq], amplitude_randn,
spectralIndex_randn, temp_randn=temp_randn)
map_I_piece = spherical.sphere2piecePlane(map_I, nside=nside)
map_Q_piece = spherical.sphere2piecePlane(map_Q, nside=nside)
map_U_piece = spherical.sphere2piecePlane(map_U, nside=nside)
utils.savenpy('samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_I'%(nside,amplitude_randn,spectralIndex_randn,temp_randn,freq),
'Dust_%s'%(i+part_n*part_size), map_I_piece, dtype=np.float32)
utils.savenpy('samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_Q'%(nside,amplitude_randn,spectralIndex_randn,temp_randn,freq),
'Dust_%s'%(i+part_n*part_size), map_Q_piece, dtype=np.float32)
utils.savenpy('samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_U'%(nside,amplitude_randn,spectralIndex_randn,temp_randn,freq),
'Dust_%s'%(i+part_n*part_size), map_U_piece, dtype=np.float32)
#%%
print ('\n', "Time elapsed: %.3f" %((time.time()-start_time)/60), "mins")
|
[
"numpy.random.choice",
"cmbnncs.simulator.DustComponents",
"cmbnncs.utils.savenpy",
"numpy.random.seed",
"cmbnncs.spherical.sphere2piecePlane",
"time.time",
"sys.path.append"
] |
[((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((34, 58), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (49, 58), False, 'import sys\n'), ((209, 220), 'time.time', 'time.time', ([], {}), '()\n', (218, 220), False, 'import time\n'), ((2007, 2024), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (2021, 2024), True, 'import numpy as np\n'), ((2044, 2091), 'numpy.random.choice', 'np.random.choice', (['(1000000)', '(50000)'], {'replace': '(False)'}), '(1000000, 50000, replace=False)\n', (2060, 2091), True, 'import numpy as np\n'), ((371, 405), 'cmbnncs.simulator.DustComponents', 'simulator.DustComponents', (['nside', '(1)'], {}), '(nside, 1)\n', (395, 405), True, 'import cmbnncs.simulator as simulator\n'), ((2357, 2404), 'cmbnncs.spherical.sphere2piecePlane', 'spherical.sphere2piecePlane', (['map_I'], {'nside': 'nside'}), '(map_I, nside=nside)\n', (2384, 2404), True, 'import cmbnncs.spherical as spherical\n'), ((2427, 2474), 'cmbnncs.spherical.sphere2piecePlane', 'spherical.sphere2piecePlane', (['map_Q'], {'nside': 'nside'}), '(map_Q, nside=nside)\n', (2454, 2474), True, 'import cmbnncs.spherical as spherical\n'), ((2497, 2544), 'cmbnncs.spherical.sphere2piecePlane', 'spherical.sphere2piecePlane', (['map_U'], {'nside': 'nside'}), '(map_U, nside=nside)\n', (2524, 2544), True, 'import cmbnncs.spherical as spherical\n'), ((2562, 2807), 'cmbnncs.utils.savenpy', 'utils.savenpy', (["('samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_I'\n % (nside, amplitude_randn, spectralIndex_randn, temp_randn, freq))", "('Dust_%s' % (i + part_n * part_size))", 'map_I_piece'], {'dtype': 'np.float32'}), "(\n 'samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_I'\n % (nside, amplitude_randn, spectralIndex_randn, temp_randn, freq), \n 'Dust_%s' % (i + part_n * part_size), map_I_piece, dtype=np.float32)\n", (2575, 2807), True, 'import cmbnncs.utils as utils\n'), ((2811, 3056), 'cmbnncs.utils.savenpy', 'utils.savenpy', (["('samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_Q'\n % (nside, amplitude_randn, spectralIndex_randn, temp_randn, freq))", "('Dust_%s' % (i + part_n * part_size))", 'map_Q_piece'], {'dtype': 'np.float32'}), "(\n 'samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_Q'\n % (nside, amplitude_randn, spectralIndex_randn, temp_randn, freq), \n 'Dust_%s' % (i + part_n * part_size), map_Q_piece, dtype=np.float32)\n", (2824, 3056), True, 'import cmbnncs.utils as utils\n'), ((3060, 3305), 'cmbnncs.utils.savenpy', 'utils.savenpy', (["('samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_U'\n % (nside, amplitude_randn, spectralIndex_randn, temp_randn, freq))", "('Dust_%s' % (i + part_n * part_size))", 'map_U_piece'], {'dtype': 'np.float32'}), "(\n 'samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_U'\n % (nside, amplitude_randn, spectralIndex_randn, temp_randn, freq), \n 'Dust_%s' % (i + part_n * part_size), map_U_piece, dtype=np.float32)\n", (3073, 3305), True, 'import cmbnncs.utils as utils\n'), ((3344, 3355), 'time.time', 'time.time', ([], {}), '()\n', (3353, 3355), False, 'import time\n')]
|
from .datafetcher import fetch_measure_levels
from .stationdata import build_station_list, update_water_levels
from .flood import stations_highest_rel_level
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from floodsystem.station import inconsistent_typical_range_stations
import datetime
stations = build_station_list()
def run():
stations = build_station_list()
update_water_levels(stations)
station = stations_highest_rel_level(stations, 6)
stations_high_risk_level = []
for n in station:
stations_high_risk_level.append(n[0])
return stations_high_risk_level
stations_at_risk = run()
stations_at_risk.pop(0)
y = inconsistent_typical_range_stations(stations)
print(y)
update_water_levels(stations)
def plot_water_levels(station, dates, levels):
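    # Note: 'typical_range' is not a parameter here; this function relies on the
    # module-level variable assigned in the loop over stations further down.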
typical_range_high = []
typical_range_low = []
for i in range(len(dates)):
typical_range_high.append(typical_range[0])
typical_range_low.append(typical_range[1])
plt.plot(dates , levels , label="water level")
plt.xlabel("data")
plt.ylabel("water level (m)")
    plt.xticks(rotation=45)
plt.title(station)
plt.tight_layout()
plt.plot(dates , typical_range_high , "-y" , label="typical high")
plt.plot(dates , typical_range_low , "-o" , label="typical low")
plt.legend()
plt.show()
counter = 0
for i in stations:
if i.name in stations_at_risk:
dt = 10
dates, levels = fetch_measure_levels(i.measure_id , dt = datetime.timedelta(days=dt))
typical_range = i.typical_range
plot_water_levels(i.name , dates , levels)
counter = counter + 1
if counter > 5:
            raise RuntimeError("All 5 of the at-risk stations have been plotted")
def plot_water_level_with_fit(station, dates, levels, p):
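    # Note: np.polyfit needs numeric x values; if 'dates' are datetime objects they
    # must be converted first (e.g. with matplotlib.dates.date2num) before fitting.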
x = dates
y = levels
p_coeff = np.polyfit(x , y , p)
poly = np.poly1d(p_coeff)
plt.plot(x , y , '.')
plt.xlabel("time")
plt.ylabel("water level")
    plt.xticks(rotation=45)
plt.title(station)
x1 = np.linspace(x[0] , x[-1] , 30)
plt.plot(x1 , poly(x1))
plt.show()
return poly
|
[
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"datetime.timedelta",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"numpy.poly1d",
"matplotlib.pyplot.title",
"floodsystem.station.inconsistent_typical_range_stations",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((714, 759), 'floodsystem.station.inconsistent_typical_range_stations', 'inconsistent_typical_range_stations', (['stations'], {}), '(stations)\n', (749, 759), False, 'from floodsystem.station import inconsistent_typical_range_stations\n'), ((1054, 1098), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'levels'], {'label': '"""water level"""'}), "(dates, levels, label='water level')\n", (1062, 1098), True, 'import matplotlib.pyplot as plt\n'), ((1106, 1124), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data"""'], {}), "('data')\n", (1116, 1124), True, 'import matplotlib.pyplot as plt\n'), ((1129, 1158), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""water level (m)"""'], {}), "('water level (m)')\n", (1139, 1158), True, 'import matplotlib.pyplot as plt\n'), ((1163, 1186), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (1173, 1186), True, 'import matplotlib.pyplot as plt\n'), ((1192, 1210), 'matplotlib.pyplot.title', 'plt.title', (['station'], {}), '(station)\n', (1201, 1210), True, 'import matplotlib.pyplot as plt\n'), ((1215, 1233), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1231, 1233), True, 'import matplotlib.pyplot as plt\n'), ((1243, 1306), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'typical_range_high', '"""-y"""'], {'label': '"""typical high"""'}), "(dates, typical_range_high, '-y', label='typical high')\n", (1251, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1314, 1375), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'typical_range_low', '"""-o"""'], {'label': '"""typical low"""'}), "(dates, typical_range_low, '-o', label='typical low')\n", (1322, 1375), True, 'import matplotlib.pyplot as plt\n'), ((1388, 1400), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1398, 1400), True, 'import matplotlib.pyplot as plt\n'), ((1405, 1415), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1413, 1415), True, 'import matplotlib.pyplot as plt\n'), ((1925, 1944), 'numpy.polyfit', 'np.polyfit', (['x', 'y', 'p'], {}), '(x, y, p)\n', (1935, 1944), True, 'import numpy as np\n'), ((1958, 1976), 'numpy.poly1d', 'np.poly1d', (['p_coeff'], {}), '(p_coeff)\n', (1967, 1976), True, 'import numpy as np\n'), ((1981, 2000), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""'], {}), "(x, y, '.')\n", (1989, 2000), True, 'import matplotlib.pyplot as plt\n'), ((2007, 2025), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (2017, 2025), True, 'import matplotlib.pyplot as plt\n'), ((2030, 2055), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""water level"""'], {}), "('water level')\n", (2040, 2055), True, 'import matplotlib.pyplot as plt\n'), ((2060, 2083), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (2070, 2083), True, 'import matplotlib.pyplot as plt\n'), ((2089, 2107), 'matplotlib.pyplot.title', 'plt.title', (['station'], {}), '(station)\n', (2098, 2107), True, 'import matplotlib.pyplot as plt\n'), ((2117, 2145), 'numpy.linspace', 'np.linspace', (['x[0]', 'x[-1]', '(30)'], {}), '(x[0], x[-1], 30)\n', (2128, 2145), True, 'import numpy as np\n'), ((2180, 2190), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2188, 2190), True, 'import matplotlib.pyplot as plt\n'), ((1564, 1591), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'dt'}), '(days=dt)\n', (1582, 1591), False, 'import datetime\n')]
|
import os
import numpy as np
import pytest
from pennylane import qchem
from openfermion.hamiltonians import MolecularData
ref_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_ref_files")
table_1 = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.68238953],
[0.0, 1.0, 1.0, 0.0, 0.68238953],
[1.0, 0.0, 0.0, 1.0, 0.68238953],
[1.0, 1.0, 1.0, 1.0, 0.68238953],
[0.0, 0.0, 2.0, 2.0, 0.17900058],
[0.0, 1.0, 3.0, 2.0, 0.17900058],
[1.0, 0.0, 2.0, 3.0, 0.17900058],
[1.0, 1.0, 3.0, 3.0, 0.17900058],
[0.0, 2.0, 0.0, 2.0, 0.17900058],
[0.0, 3.0, 1.0, 2.0, 0.17900058],
[1.0, 2.0, 0.0, 3.0, 0.17900058],
[1.0, 3.0, 1.0, 3.0, 0.17900058],
[0.0, 2.0, 2.0, 0.0, 0.67073278],
[0.0, 3.0, 3.0, 0.0, 0.67073278],
[1.0, 2.0, 2.0, 1.0, 0.67073278],
[1.0, 3.0, 3.0, 1.0, 0.67073278],
[2.0, 0.0, 0.0, 2.0, 0.67073278],
[2.0, 1.0, 1.0, 2.0, 0.67073278],
[3.0, 0.0, 0.0, 3.0, 0.67073278],
[3.0, 1.0, 1.0, 3.0, 0.67073278],
[2.0, 0.0, 2.0, 0.0, 0.17900058],
[2.0, 1.0, 3.0, 0.0, 0.17900058],
[3.0, 0.0, 2.0, 1.0, 0.17900058],
[3.0, 1.0, 3.0, 1.0, 0.17900058],
[2.0, 2.0, 0.0, 0.0, 0.17900058],
[2.0, 3.0, 1.0, 0.0, 0.17900058],
[3.0, 2.0, 0.0, 1.0, 0.17900058],
[3.0, 3.0, 1.0, 1.0, 0.17900058],
[2.0, 2.0, 2.0, 2.0, 0.70510563],
[2.0, 3.0, 3.0, 2.0, 0.70510563],
[3.0, 2.0, 2.0, 3.0, 0.70510563],
[3.0, 3.0, 3.0, 3.0, 0.70510563],
]
)
table_2 = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.70510563],
[0.0, 1.0, 1.0, 0.0, 0.70510563],
[1.0, 0.0, 0.0, 1.0, 0.70510563],
[1.0, 1.0, 1.0, 1.0, 0.70510563],
]
)
table_3 = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.48731097],
[0.0, 1.0, 1.0, 0.0, 0.48731097],
[1.0, 0.0, 0.0, 1.0, 0.48731097],
[1.0, 1.0, 1.0, 1.0, 0.48731097],
[0.0, 0.0, 0.0, 2.0, -0.04857958],
[0.0, 1.0, 1.0, 2.0, -0.04857958],
[1.0, 0.0, 0.0, 3.0, -0.04857958],
[1.0, 1.0, 1.0, 3.0, -0.04857958],
[0.0, 0.0, 2.0, 0.0, -0.04857958],
[0.0, 1.0, 3.0, 0.0, -0.04857958],
[1.0, 0.0, 2.0, 1.0, -0.04857958],
[1.0, 1.0, 3.0, 1.0, -0.04857958],
[0.0, 0.0, 2.0, 2.0, 0.01306398],
[0.0, 1.0, 3.0, 2.0, 0.01306398],
[1.0, 0.0, 2.0, 3.0, 0.01306398],
[1.0, 1.0, 3.0, 3.0, 0.01306398],
[0.0, 2.0, 0.0, 0.0, -0.04857958],
[0.0, 3.0, 1.0, 0.0, -0.04857958],
[1.0, 2.0, 0.0, 1.0, -0.04857958],
[1.0, 3.0, 1.0, 1.0, -0.04857958],
[0.0, 2.0, 0.0, 2.0, 0.01306398],
[0.0, 3.0, 1.0, 2.0, 0.01306398],
[1.0, 2.0, 0.0, 3.0, 0.01306398],
[1.0, 3.0, 1.0, 3.0, 0.01306398],
[0.0, 2.0, 2.0, 0.0, 0.22361004],
[0.0, 3.0, 3.0, 0.0, 0.22361004],
[1.0, 2.0, 2.0, 1.0, 0.22361004],
[1.0, 3.0, 3.0, 1.0, 0.22361004],
[0.0, 2.0, 2.0, 2.0, 0.00748417],
[0.0, 3.0, 3.0, 2.0, 0.00748417],
[1.0, 2.0, 2.0, 3.0, 0.00748417],
[1.0, 3.0, 3.0, 3.0, 0.00748417],
[2.0, 0.0, 0.0, 0.0, -0.04857958],
[2.0, 1.0, 1.0, 0.0, -0.04857958],
[3.0, 0.0, 0.0, 1.0, -0.04857958],
[3.0, 1.0, 1.0, 1.0, -0.04857958],
[2.0, 0.0, 0.0, 2.0, 0.22361004],
[2.0, 1.0, 1.0, 2.0, 0.22361004],
[3.0, 0.0, 0.0, 3.0, 0.22361004],
[3.0, 1.0, 1.0, 3.0, 0.22361004],
[2.0, 0.0, 2.0, 0.0, 0.01306398],
[2.0, 1.0, 3.0, 0.0, 0.01306398],
[3.0, 0.0, 2.0, 1.0, 0.01306398],
[3.0, 1.0, 3.0, 1.0, 0.01306398],
[2.0, 0.0, 2.0, 2.0, 0.00748417],
[2.0, 1.0, 3.0, 2.0, 0.00748417],
[3.0, 0.0, 2.0, 3.0, 0.00748417],
[3.0, 1.0, 3.0, 3.0, 0.00748417],
[2.0, 2.0, 0.0, 0.0, 0.01306398],
[2.0, 3.0, 1.0, 0.0, 0.01306398],
[3.0, 2.0, 0.0, 1.0, 0.01306398],
[3.0, 3.0, 1.0, 1.0, 0.01306398],
[2.0, 2.0, 0.0, 2.0, 0.00748417],
[2.0, 3.0, 1.0, 2.0, 0.00748417],
[3.0, 2.0, 0.0, 3.0, 0.00748417],
[3.0, 3.0, 1.0, 3.0, 0.00748417],
[2.0, 2.0, 2.0, 0.0, 0.00748417],
[2.0, 3.0, 3.0, 0.0, 0.00748417],
[3.0, 2.0, 2.0, 1.0, 0.00748417],
[3.0, 3.0, 3.0, 1.0, 0.00748417],
[2.0, 2.0, 2.0, 2.0, 0.33788228],
[2.0, 3.0, 3.0, 2.0, 0.33788228],
[3.0, 2.0, 2.0, 3.0, 0.33788228],
[3.0, 3.0, 3.0, 3.0, 0.33788228],
]
)
@pytest.mark.parametrize(
("name", "core", "active", "table_exp", "v_core_exp"),
[
("h2_pyscf", None, None, table_1, 0),
("h2_pyscf", [0], None, table_2, 0.6823895331520422),
("h2_pyscf", None, [0, 1], table_1, 0),
("h2_pyscf", [0], [1], table_2, 0.6823895331520422),
("lih", [0], [1, 2], table_3, 1.6585666870874103),
],
)
def test_table_two_particle(name, core, active, table_exp, v_core_exp, tol):
r"""Test the table of two-particle matrix elements and the contribution of core orbitals
as implemented in the `two_particle` function of the `obs` module"""
hf_data = MolecularData(filename=os.path.join(ref_dir, name))
table, v_core = qchem.two_particle(hf_data.two_body_integrals, core=core, active=active)
assert np.allclose(table, table_exp, **tol)
assert np.allclose(v_core, v_core_exp, **tol)
v_me_1D = np.array([1, 2, 3, 4])
v_me_4D = np.full((2, 2, 2, 2), 0.5)
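# Deliberately malformed inputs for the exception tests below: a 1D array instead of
# the required 4D matrix-element tensor, and core/active index lists that fall
# outside the valid orbital range for a (2, 2, 2, 2) tensor.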
@pytest.mark.parametrize(
("v_me", "core", "active", "msg_match"),
[
(v_me_1D, [0], None, "'matrix_elements' must be a 4D array"),
(v_me_4D, [-1, 0, 1, 2], None, "Indices of core orbitals must be between 0 and"),
(v_me_4D, [0, 1, 2, 3], None, "Indices of core orbitals must be between 0 and"),
(v_me_4D, None, [-1, 0], "Indices of active orbitals must be between 0 and"),
(v_me_4D, None, [2, 6], "Indices of active orbitals must be between 0 and"),
],
)
def test_exceptions_two_particle(v_me, core, active, msg_match):
"""Test that the function `'two_particle'` throws an exception
if the dimension of the matrix elements array is not a 4D array or
if the indices of core and/or active orbitals are out of range."""
with pytest.raises(ValueError, match=msg_match):
qchem.two_particle(v_me, core=core, active=active)
|
[
"numpy.allclose",
"pennylane.qchem.two_particle",
"os.path.join",
"os.path.realpath",
"numpy.array",
"pytest.mark.parametrize",
"pytest.raises",
"numpy.full"
] |
[((222, 1390), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.68238953], [0.0, 1.0, 1.0, 0.0, 0.68238953], [1.0, \n 0.0, 0.0, 1.0, 0.68238953], [1.0, 1.0, 1.0, 1.0, 0.68238953], [0.0, 0.0,\n 2.0, 2.0, 0.17900058], [0.0, 1.0, 3.0, 2.0, 0.17900058], [1.0, 0.0, 2.0,\n 3.0, 0.17900058], [1.0, 1.0, 3.0, 3.0, 0.17900058], [0.0, 2.0, 0.0, 2.0,\n 0.17900058], [0.0, 3.0, 1.0, 2.0, 0.17900058], [1.0, 2.0, 0.0, 3.0, \n 0.17900058], [1.0, 3.0, 1.0, 3.0, 0.17900058], [0.0, 2.0, 2.0, 0.0, \n 0.67073278], [0.0, 3.0, 3.0, 0.0, 0.67073278], [1.0, 2.0, 2.0, 1.0, \n 0.67073278], [1.0, 3.0, 3.0, 1.0, 0.67073278], [2.0, 0.0, 0.0, 2.0, \n 0.67073278], [2.0, 1.0, 1.0, 2.0, 0.67073278], [3.0, 0.0, 0.0, 3.0, \n 0.67073278], [3.0, 1.0, 1.0, 3.0, 0.67073278], [2.0, 0.0, 2.0, 0.0, \n 0.17900058], [2.0, 1.0, 3.0, 0.0, 0.17900058], [3.0, 0.0, 2.0, 1.0, \n 0.17900058], [3.0, 1.0, 3.0, 1.0, 0.17900058], [2.0, 2.0, 0.0, 0.0, \n 0.17900058], [2.0, 3.0, 1.0, 0.0, 0.17900058], [3.0, 2.0, 0.0, 1.0, \n 0.17900058], [3.0, 3.0, 1.0, 1.0, 0.17900058], [2.0, 2.0, 2.0, 2.0, \n 0.70510563], [2.0, 3.0, 3.0, 2.0, 0.70510563], [3.0, 2.0, 2.0, 3.0, \n 0.70510563], [3.0, 3.0, 3.0, 3.0, 0.70510563]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.68238953], [0.0, 1.0, 1.0, 0.0, 0.68238953\n ], [1.0, 0.0, 0.0, 1.0, 0.68238953], [1.0, 1.0, 1.0, 1.0, 0.68238953],\n [0.0, 0.0, 2.0, 2.0, 0.17900058], [0.0, 1.0, 3.0, 2.0, 0.17900058], [\n 1.0, 0.0, 2.0, 3.0, 0.17900058], [1.0, 1.0, 3.0, 3.0, 0.17900058], [0.0,\n 2.0, 0.0, 2.0, 0.17900058], [0.0, 3.0, 1.0, 2.0, 0.17900058], [1.0, 2.0,\n 0.0, 3.0, 0.17900058], [1.0, 3.0, 1.0, 3.0, 0.17900058], [0.0, 2.0, 2.0,\n 0.0, 0.67073278], [0.0, 3.0, 3.0, 0.0, 0.67073278], [1.0, 2.0, 2.0, 1.0,\n 0.67073278], [1.0, 3.0, 3.0, 1.0, 0.67073278], [2.0, 0.0, 0.0, 2.0, \n 0.67073278], [2.0, 1.0, 1.0, 2.0, 0.67073278], [3.0, 0.0, 0.0, 3.0, \n 0.67073278], [3.0, 1.0, 1.0, 3.0, 0.67073278], [2.0, 0.0, 2.0, 0.0, \n 0.17900058], [2.0, 1.0, 3.0, 0.0, 0.17900058], [3.0, 0.0, 2.0, 1.0, \n 0.17900058], [3.0, 1.0, 3.0, 1.0, 0.17900058], [2.0, 2.0, 0.0, 0.0, \n 0.17900058], [2.0, 3.0, 1.0, 0.0, 0.17900058], [3.0, 2.0, 0.0, 1.0, \n 0.17900058], [3.0, 3.0, 1.0, 1.0, 0.17900058], [2.0, 2.0, 2.0, 2.0, \n 0.70510563], [2.0, 3.0, 3.0, 2.0, 0.70510563], [3.0, 2.0, 2.0, 3.0, \n 0.70510563], [3.0, 3.0, 3.0, 3.0, 0.70510563]])\n', (230, 1390), True, 'import numpy as np\n'), ((1601, 1752), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.70510563], [0.0, 1.0, 1.0, 0.0, 0.70510563], [1.0, \n 0.0, 0.0, 1.0, 0.70510563], [1.0, 1.0, 1.0, 1.0, 0.70510563]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.70510563], [0.0, 1.0, 1.0, 0.0, 0.70510563\n ], [1.0, 0.0, 0.0, 1.0, 0.70510563], [1.0, 1.0, 1.0, 1.0, 0.70510563]])\n', (1609, 1752), True, 'import numpy as np\n'), ((1804, 4156), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.48731097], [0.0, 1.0, 1.0, 0.0, 0.48731097], [1.0, \n 0.0, 0.0, 1.0, 0.48731097], [1.0, 1.0, 1.0, 1.0, 0.48731097], [0.0, 0.0,\n 0.0, 2.0, -0.04857958], [0.0, 1.0, 1.0, 2.0, -0.04857958], [1.0, 0.0, \n 0.0, 3.0, -0.04857958], [1.0, 1.0, 1.0, 3.0, -0.04857958], [0.0, 0.0, \n 2.0, 0.0, -0.04857958], [0.0, 1.0, 3.0, 0.0, -0.04857958], [1.0, 0.0, \n 2.0, 1.0, -0.04857958], [1.0, 1.0, 3.0, 1.0, -0.04857958], [0.0, 0.0, \n 2.0, 2.0, 0.01306398], [0.0, 1.0, 3.0, 2.0, 0.01306398], [1.0, 0.0, 2.0,\n 3.0, 0.01306398], [1.0, 1.0, 3.0, 3.0, 0.01306398], [0.0, 2.0, 0.0, 0.0,\n -0.04857958], [0.0, 3.0, 1.0, 0.0, -0.04857958], [1.0, 2.0, 0.0, 1.0, -\n 0.04857958], [1.0, 3.0, 1.0, 1.0, -0.04857958], [0.0, 2.0, 0.0, 2.0, \n 0.01306398], [0.0, 
3.0, 1.0, 2.0, 0.01306398], [1.0, 2.0, 0.0, 3.0, \n 0.01306398], [1.0, 3.0, 1.0, 3.0, 0.01306398], [0.0, 2.0, 2.0, 0.0, \n 0.22361004], [0.0, 3.0, 3.0, 0.0, 0.22361004], [1.0, 2.0, 2.0, 1.0, \n 0.22361004], [1.0, 3.0, 3.0, 1.0, 0.22361004], [0.0, 2.0, 2.0, 2.0, \n 0.00748417], [0.0, 3.0, 3.0, 2.0, 0.00748417], [1.0, 2.0, 2.0, 3.0, \n 0.00748417], [1.0, 3.0, 3.0, 3.0, 0.00748417], [2.0, 0.0, 0.0, 0.0, -\n 0.04857958], [2.0, 1.0, 1.0, 0.0, -0.04857958], [3.0, 0.0, 0.0, 1.0, -\n 0.04857958], [3.0, 1.0, 1.0, 1.0, -0.04857958], [2.0, 0.0, 0.0, 2.0, \n 0.22361004], [2.0, 1.0, 1.0, 2.0, 0.22361004], [3.0, 0.0, 0.0, 3.0, \n 0.22361004], [3.0, 1.0, 1.0, 3.0, 0.22361004], [2.0, 0.0, 2.0, 0.0, \n 0.01306398], [2.0, 1.0, 3.0, 0.0, 0.01306398], [3.0, 0.0, 2.0, 1.0, \n 0.01306398], [3.0, 1.0, 3.0, 1.0, 0.01306398], [2.0, 0.0, 2.0, 2.0, \n 0.00748417], [2.0, 1.0, 3.0, 2.0, 0.00748417], [3.0, 0.0, 2.0, 3.0, \n 0.00748417], [3.0, 1.0, 3.0, 3.0, 0.00748417], [2.0, 2.0, 0.0, 0.0, \n 0.01306398], [2.0, 3.0, 1.0, 0.0, 0.01306398], [3.0, 2.0, 0.0, 1.0, \n 0.01306398], [3.0, 3.0, 1.0, 1.0, 0.01306398], [2.0, 2.0, 0.0, 2.0, \n 0.00748417], [2.0, 3.0, 1.0, 2.0, 0.00748417], [3.0, 2.0, 0.0, 3.0, \n 0.00748417], [3.0, 3.0, 1.0, 3.0, 0.00748417], [2.0, 2.0, 2.0, 0.0, \n 0.00748417], [2.0, 3.0, 3.0, 0.0, 0.00748417], [3.0, 2.0, 2.0, 1.0, \n 0.00748417], [3.0, 3.0, 3.0, 1.0, 0.00748417], [2.0, 2.0, 2.0, 2.0, \n 0.33788228], [2.0, 3.0, 3.0, 2.0, 0.33788228], [3.0, 2.0, 2.0, 3.0, \n 0.33788228], [3.0, 3.0, 3.0, 3.0, 0.33788228]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.48731097], [0.0, 1.0, 1.0, 0.0, 0.48731097\n ], [1.0, 0.0, 0.0, 1.0, 0.48731097], [1.0, 1.0, 1.0, 1.0, 0.48731097],\n [0.0, 0.0, 0.0, 2.0, -0.04857958], [0.0, 1.0, 1.0, 2.0, -0.04857958], [\n 1.0, 0.0, 0.0, 3.0, -0.04857958], [1.0, 1.0, 1.0, 3.0, -0.04857958], [\n 0.0, 0.0, 2.0, 0.0, -0.04857958], [0.0, 1.0, 3.0, 0.0, -0.04857958], [\n 1.0, 0.0, 2.0, 1.0, -0.04857958], [1.0, 1.0, 3.0, 1.0, -0.04857958], [\n 0.0, 0.0, 2.0, 2.0, 0.01306398], [0.0, 1.0, 3.0, 2.0, 0.01306398], [1.0,\n 0.0, 2.0, 3.0, 0.01306398], [1.0, 1.0, 3.0, 3.0, 0.01306398], [0.0, 2.0,\n 0.0, 0.0, -0.04857958], [0.0, 3.0, 1.0, 0.0, -0.04857958], [1.0, 2.0, \n 0.0, 1.0, -0.04857958], [1.0, 3.0, 1.0, 1.0, -0.04857958], [0.0, 2.0, \n 0.0, 2.0, 0.01306398], [0.0, 3.0, 1.0, 2.0, 0.01306398], [1.0, 2.0, 0.0,\n 3.0, 0.01306398], [1.0, 3.0, 1.0, 3.0, 0.01306398], [0.0, 2.0, 2.0, 0.0,\n 0.22361004], [0.0, 3.0, 3.0, 0.0, 0.22361004], [1.0, 2.0, 2.0, 1.0, \n 0.22361004], [1.0, 3.0, 3.0, 1.0, 0.22361004], [0.0, 2.0, 2.0, 2.0, \n 0.00748417], [0.0, 3.0, 3.0, 2.0, 0.00748417], [1.0, 2.0, 2.0, 3.0, \n 0.00748417], [1.0, 3.0, 3.0, 3.0, 0.00748417], [2.0, 0.0, 0.0, 0.0, -\n 0.04857958], [2.0, 1.0, 1.0, 0.0, -0.04857958], [3.0, 0.0, 0.0, 1.0, -\n 0.04857958], [3.0, 1.0, 1.0, 1.0, -0.04857958], [2.0, 0.0, 0.0, 2.0, \n 0.22361004], [2.0, 1.0, 1.0, 2.0, 0.22361004], [3.0, 0.0, 0.0, 3.0, \n 0.22361004], [3.0, 1.0, 1.0, 3.0, 0.22361004], [2.0, 0.0, 2.0, 0.0, \n 0.01306398], [2.0, 1.0, 3.0, 0.0, 0.01306398], [3.0, 0.0, 2.0, 1.0, \n 0.01306398], [3.0, 1.0, 3.0, 1.0, 0.01306398], [2.0, 0.0, 2.0, 2.0, \n 0.00748417], [2.0, 1.0, 3.0, 2.0, 0.00748417], [3.0, 0.0, 2.0, 3.0, \n 0.00748417], [3.0, 1.0, 3.0, 3.0, 0.00748417], [2.0, 2.0, 0.0, 0.0, \n 0.01306398], [2.0, 3.0, 1.0, 0.0, 0.01306398], [3.0, 2.0, 0.0, 1.0, \n 0.01306398], [3.0, 3.0, 1.0, 1.0, 0.01306398], [2.0, 2.0, 0.0, 2.0, \n 0.00748417], [2.0, 3.0, 1.0, 2.0, 0.00748417], [3.0, 2.0, 0.0, 3.0, \n 0.00748417], [3.0, 3.0, 1.0, 3.0, 0.00748417], [2.0, 2.0, 2.0, 0.0, \n 
0.00748417], [2.0, 3.0, 3.0, 0.0, 0.00748417], [3.0, 2.0, 2.0, 1.0, \n 0.00748417], [3.0, 3.0, 3.0, 1.0, 0.00748417], [2.0, 2.0, 2.0, 2.0, \n 0.33788228], [2.0, 3.0, 3.0, 2.0, 0.33788228], [3.0, 2.0, 2.0, 3.0, \n 0.33788228], [3.0, 3.0, 3.0, 3.0, 0.33788228]])\n', (1812, 4156), True, 'import numpy as np\n'), ((4535, 4868), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('name', 'core', 'active', 'table_exp', 'v_core_exp')", "[('h2_pyscf', None, None, table_1, 0), ('h2_pyscf', [0], None, table_2, \n 0.6823895331520422), ('h2_pyscf', None, [0, 1], table_1, 0), (\n 'h2_pyscf', [0], [1], table_2, 0.6823895331520422), ('lih', [0], [1, 2],\n table_3, 1.6585666870874103)]"], {}), "(('name', 'core', 'active', 'table_exp',\n 'v_core_exp'), [('h2_pyscf', None, None, table_1, 0), ('h2_pyscf', [0],\n None, table_2, 0.6823895331520422), ('h2_pyscf', None, [0, 1], table_1,\n 0), ('h2_pyscf', [0], [1], table_2, 0.6823895331520422), ('lih', [0], [\n 1, 2], table_3, 1.6585666870874103)])\n", (4558, 4868), False, 'import pytest\n'), ((5425, 5447), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (5433, 5447), True, 'import numpy as np\n'), ((5458, 5484), 'numpy.full', 'np.full', (['(2, 2, 2, 2)', '(0.5)'], {}), '((2, 2, 2, 2), 0.5)\n', (5465, 5484), True, 'import numpy as np\n'), ((5488, 5958), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('v_me', 'core', 'active', 'msg_match')", '[(v_me_1D, [0], None, "\'matrix_elements\' must be a 4D array"), (v_me_4D, [-\n 1, 0, 1, 2], None, \'Indices of core orbitals must be between 0 and\'), (\n v_me_4D, [0, 1, 2, 3], None,\n \'Indices of core orbitals must be between 0 and\'), (v_me_4D, None, [-1,\n 0], \'Indices of active orbitals must be between 0 and\'), (v_me_4D, None,\n [2, 6], \'Indices of active orbitals must be between 0 and\')]'], {}), '((\'v_me\', \'core\', \'active\', \'msg_match\'), [(v_me_1D,\n [0], None, "\'matrix_elements\' must be a 4D array"), (v_me_4D, [-1, 0, 1,\n 2], None, \'Indices of core orbitals must be between 0 and\'), (v_me_4D,\n [0, 1, 2, 3], None, \'Indices of core orbitals must be between 0 and\'),\n (v_me_4D, None, [-1, 0],\n \'Indices of active orbitals must be between 0 and\'), (v_me_4D, None, [2,\n 6], \'Indices of active orbitals must be between 0 and\')])\n', (5511, 5958), False, 'import pytest\n'), ((5241, 5313), 'pennylane.qchem.two_particle', 'qchem.two_particle', (['hf_data.two_body_integrals'], {'core': 'core', 'active': 'active'}), '(hf_data.two_body_integrals, core=core, active=active)\n', (5259, 5313), False, 'from pennylane import qchem\n'), ((5326, 5362), 'numpy.allclose', 'np.allclose', (['table', 'table_exp'], {}), '(table, table_exp, **tol)\n', (5337, 5362), True, 'import numpy as np\n'), ((5374, 5412), 'numpy.allclose', 'np.allclose', (['v_core', 'v_core_exp'], {}), '(v_core, v_core_exp, **tol)\n', (5385, 5412), True, 'import numpy as np\n'), ((164, 190), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (180, 190), False, 'import os\n'), ((6277, 6319), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg_match'}), '(ValueError, match=msg_match)\n', (6290, 6319), False, 'import pytest\n'), ((6329, 6379), 'pennylane.qchem.two_particle', 'qchem.two_particle', (['v_me'], {'core': 'core', 'active': 'active'}), '(v_me, core=core, active=active)\n', (6347, 6379), False, 'from pennylane import qchem\n'), ((5191, 5218), 'os.path.join', 'os.path.join', (['ref_dir', 'name'], {}), '(ref_dir, name)\n', (5203, 5218), False, 'import os\n')]
|
import os
import sys
import time
import torch
import torch.nn as nn
import random
import numpy as np
import torchvision.transforms as transforms
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_ROOT = os.path.join(FILE_DIR, '../../../data')
sys.path.append(os.path.join(FILE_DIR, '../'))
sys.path.append(os.path.join(FILE_DIR, '../../'))
from dataset import CIFAR10, CIFAR100
from utils import BaseTrainer, Partition
class CIFARTrainer(BaseTrainer):
def set_dataloader(self):
"""The function to set the dataset parameters"""
if self.args.dataset == 'CIFAR10':
self.dataset = CIFAR10
self.num_classes = 10
self.dataset_size = 60000
elif self.args.dataset == 'CIFAR100':
self.dataset = CIFAR100
self.num_classes = 100
self.dataset_size = 60000
if self.args.if_data_augmentation:
print('With data augmentation')
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(), transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010))])
else:
print('Without data augmentation')
transform_train = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010))])
transform_test = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
self.transform_train = transform_train
self.transform_test = transform_test
### Set partition
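        # 'target' draws a fresh random split and saves the shuffled indices to disk;
        # 'shadow' tries to reuse the target model's saved indices so both models are
        # built from the same shuffling of the dataset.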
if self.args.partition == 'target':
indices = np.arange(self.dataset_size).astype(int)
np.random.shuffle(indices)
np.save(os.path.join(self.save_dir, 'full_idx'), indices)
partition = Partition(dataset_size=self.dataset_size, indices=indices)
self.partition = partition
self.trainset_idx, self.testset_idx = partition.get_target_indices()
elif self.args.partition == 'shadow':
try:
target_path = os.path.join(self.save_dir.replace("shadow", ""), 'full_idx.npy')
indices = np.load(target_path)
print('Load indices from target model:', target_path)
except:
print('Cannot find target model, reinitialize indices')
indices = np.arange(self.dataset_size).astype(int)
np.random.shuffle(indices)
np.save(os.path.join(self.save_dir, 'full_idx'), indices)
partition = Partition(dataset_size=self.dataset_size, indices=indices)
self.partition = partition
self.trainset_idx, self.testset_idx = partition.get_shadow_indices()
## Set dataloader
trainset = self.dataset(root=self.data_root, indices=self.trainset_idx,
download=True, transform=self.transform_train)
testset = self.dataset(root=self.data_root, indices=self.testset_idx,
download=True, transform=self.transform_test)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=self.args.train_batchsize,
shuffle=True, num_workers=self.args.num_workers)
testloader = torch.utils.data.DataLoader(testset, batch_size=self.args.test_batchsize,
shuffle=False, num_workers=self.args.num_workers)
self.trainset = trainset
self.trainloader = trainloader
self.testset = testset
self.testloader = testloader
|
[
"utils.Partition",
"os.path.join",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"os.path.abspath",
"torchvision.transforms.ToTensor",
"numpy.load",
"numpy.arange",
"numpy.random.shuffle"
] |
[((212, 251), 'os.path.join', 'os.path.join', (['FILE_DIR', '"""../../../data"""'], {}), "(FILE_DIR, '../../../data')\n", (224, 251), False, 'import os\n'), ((173, 198), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (188, 198), False, 'import os\n'), ((268, 297), 'os.path.join', 'os.path.join', (['FILE_DIR', '"""../"""'], {}), "(FILE_DIR, '../')\n", (280, 297), False, 'import os\n'), ((315, 347), 'os.path.join', 'os.path.join', (['FILE_DIR', '"""../../"""'], {}), "(FILE_DIR, '../../')\n", (327, 347), False, 'import os\n'), ((3511, 3639), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'self.args.train_batchsize', 'shuffle': '(True)', 'num_workers': 'self.args.num_workers'}), '(trainset, batch_size=self.args.train_batchsize,\n shuffle=True, num_workers=self.args.num_workers)\n', (3538, 3639), False, 'import torch\n'), ((3707, 3834), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': 'self.args.test_batchsize', 'shuffle': '(False)', 'num_workers': 'self.args.num_workers'}), '(testset, batch_size=self.args.test_batchsize,\n shuffle=False, num_workers=self.args.num_workers)\n', (3734, 3834), False, 'import torch\n'), ((2093, 2119), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2110, 2119), True, 'import numpy as np\n'), ((2214, 2272), 'utils.Partition', 'Partition', ([], {'dataset_size': 'self.dataset_size', 'indices': 'indices'}), '(dataset_size=self.dataset_size, indices=indices)\n', (2223, 2272), False, 'from utils import BaseTrainer, Partition\n'), ((1711, 1732), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1730, 1732), True, 'import torchvision.transforms as transforms\n'), ((1779, 1850), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (1799, 1850), True, 'import torchvision.transforms as transforms\n'), ((2140, 2179), 'os.path.join', 'os.path.join', (['self.save_dir', '"""full_idx"""'], {}), "(self.save_dir, 'full_idx')\n", (2152, 2179), False, 'import os\n'), ((2969, 3027), 'utils.Partition', 'Partition', ([], {'dataset_size': 'self.dataset_size', 'indices': 'indices'}), '(dataset_size=self.dataset_size, indices=indices)\n', (2978, 3027), False, 'from utils import BaseTrainer, Partition\n'), ((993, 1029), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (1014, 1029), True, 'import torchvision.transforms as transforms\n'), ((1081, 1114), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1112, 1114), True, 'import torchvision.transforms as transforms\n'), ((1116, 1137), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1135, 1137), True, 'import torchvision.transforms as transforms\n'), ((1189, 1260), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (1209, 1260), True, 'import torchvision.transforms as transforms\n'), ((1446, 1467), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1465, 1467), True, 'import torchvision.transforms as transforms\n'), ((1519, 1590), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), 
'((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (1539, 1590), True, 'import torchvision.transforms as transforms\n'), ((2040, 2068), 'numpy.arange', 'np.arange', (['self.dataset_size'], {}), '(self.dataset_size)\n', (2049, 2068), True, 'import numpy as np\n'), ((2578, 2598), 'numpy.load', 'np.load', (['target_path'], {}), '(target_path)\n', (2585, 2598), True, 'import numpy as np\n'), ((2844, 2870), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2861, 2870), True, 'import numpy as np\n'), ((2895, 2934), 'os.path.join', 'os.path.join', (['self.save_dir', '"""full_idx"""'], {}), "(self.save_dir, 'full_idx')\n", (2907, 2934), False, 'import os\n'), ((2787, 2815), 'numpy.arange', 'np.arange', (['self.dataset_size'], {}), '(self.dataset_size)\n', (2796, 2815), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""SPECFIT.PY - Generic stellar abundance determination software
"""
from __future__ import print_function
__authors__ = '<NAME> <<EMAIL>>'
__version__ = '20200711' # yyyymmdd
import os
import shutil
import contextlib, io, sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.table import Table
from dlnpyutils.minpack import curve_fit
from dlnpyutils.least_squares import least_squares
from scipy.interpolate import interp1d
from dlnpyutils import utils as dln, bindata, astro
import doppler
from doppler.spec1d import Spec1D
from doppler import (cannon,utils,reader)
import copy
import logging
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.legend import Legend
import tempfile
from . import models
from synple import synple
# Ignore these warnings, it's a bug
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
cspeed = 2.99792458e5 # speed of light in km/s
def synmodel(spec,params,alinefile=None,mlinefile=None,verbose=False,normalize=True):
"""
Synthetic spectrum model.
Parameters
----------
spec : Spec1D object or str
The observed Spec1D spectrum to match or the name of a spectrum file.
params : dict
Dictionary of initial values to use or parameters/elements to hold fixed.
normalize : bool, optional
Renormalize the model spectrum using the observed spectrum's continuum function. The
synthetic spectrum will already have been normalized using the "true" continuum. This
step is to simulate any systematic effects of the spectrum normalization algorithm that
the observed spectrum undergoes. Default is True.
verbose : int, optional
Verbosity level (0, 1, or 2). The default is 0 and verbose=2 is for debugging.
alinefile : str, optional
The atomic linelist to use. Default is None which means the default synple linelist is used.
mlinefile : str, optional
The molecular linelist to use. Default is None which means the default synple linelist is used.
Returns
-------
model : Spec1D object
The synthetic spectrum. The "true" continuum is in model.cont.
Example
-------
.. code-block:: python
model = synmodel(spec,params)
"""
# Read in the spectrum
if type(spec) is str:
filename = spec
spec = doppler.read(filename)
if spec is None:
print('Problem loading '+filename)
return
params = dict((key.upper(), value) for (key, value) in params.items()) # all CAPS
# Initialize the fitter
fitparams = ['TEFF'] # "dummy" fitting variable
spfitter = SpecFitter(spec,params,fitparams=fitparams,verbose=(verbose>=2),
alinefile=alinefile,mlinefile=mlinefile)
spfitter.norm = normalize # normalize the synthetic spectrum
model = spfitter.model(spec.wave.flatten(),params['TEFF'],retobj=True)
model.instrument = 'Model'
return model
class SpecFitter:
def __init__ (self,spec,params,fitparams=None,norm=True,verbose=False,
alinefile=None,mlinefile=None):
# Parameters
self.params = params
if fitparams is not None:
self.fitparams = fitparams
else:
self.fitparams = list(params.keys()) # by default fit all parameters
self.nsynfev = 0 # number of synthetic spectra made
self.njac = 0 # number of times jacobian called
# Save spectrum information
self.spec = spec.copy()
self.flux = spec.flux.flatten()
self.err = spec.err.flatten()
self.wave = spec.wave.flatten()
self.lsf = spec.lsf.copy()
self.lsf.wavevac = spec.wavevac # need this later for synspec prep
self.wavevac = spec.wavevac
self.verbose = verbose
self.norm = norm # normalize
self.continuum_func = spec.continuum_func
self.alinefile = alinefile
self.mlinefile = mlinefile
# Convert vacuum to air wavelengths
# synspec uses air wavelengths
if spec.wavevac is True:
wave = astro.vactoair(spec.wave.copy().flatten()).reshape(spec.wave.shape)
else:
wave = spec.wave.copy()
if wave.ndim==1:
wave = np.atleast_2d(wave).T
# Figure out the wavelength parameters
npix = spec.npix
norder = spec.norder
xp = np.arange(npix//20)*20
wr = np.zeros((spec.lsf.norder,2),np.float64)
dw = np.zeros(spec.lsf.norder,np.float64)
mindw = np.zeros(norder,np.float64)
for o in range(spec.norder):
dw[o] = np.median(dln.slope(wave[:,o]))
wr[o,0] = np.min(wave[:,o])
wr[o,1] = np.max(wave[:,o])
fwhm = spec.lsf.fwhm(wave[xp,o],xtype='Wave',order=o)
# FWHM is in units of lsf.xtype, convert to wavelength/angstroms, if necessary
if spec.lsf.xtype.lower().find('pix')>-1:
fwhm *= np.abs(dw[o])
# need at least ~4 pixels per LSF FWHM across the spectrum
# using 3 affects the final profile shape
mindw[o] = np.min(fwhm/4)
self._dwair = np.min(mindw) # IN AIR WAVELENGTHS!!
self._w0air = np.min(wave)
self._w1air = np.max(wave)
# parameters to save
self._all_pars = []
self._all_model = []
self._all_chisq = []
self._jac_array = None
@property
def params(self):
return self._params
@params.setter
def params(self,params):
""" Dictionary, keys must be all CAPS."""
self._params = dict((key.upper(), value) for (key, value) in params.items()) # all CAPS
@property
def fitparams(self):
return self._fitparams
@fitparams.setter
def fitparams(self,fitparams):
""" list, keys must be all CAPS."""
self._fitparams = [v.upper() for v in fitparams] # all CAPS
def mkinputs(self,args):
""" Make INPUTS dictionary."""
# Create INPUTS with all arguments needed to make the spectrum
inputs = self.params.copy() # initialize with initial/fixed values
for k in range(len(self.fitparams)): # this overwrites the values for the fitted values
inputs[self.fitparams[k]] = args[k]
inputs['DW'] = self._dwair # add in wavelength parameters
inputs['W0'] = self._w0air
inputs['W1'] = self._w1air
return inputs
def chisq(self,model):
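        # Root of the mean chi-square: sqrt( sum((flux - model)**2 / err**2) / Npix ).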
return np.sqrt( np.sum( (self.flux-model)**2/self.err**2 )/len(self.flux) )
def model(self, xx, *args, retobj=False):
""" Return a model spectrum flux with the given input arguments."""
# The input arguments correspond to FITPARAMS
# This corrects for air/vacuum wavelength differences
if self.verbose:
print(args)
# The arguments correspond to the fitting parameters
inputs = self.mkinputs(args)
if self.verbose:
print(inputs)
# Create the synthetic spectrum
synspec = model_spectrum(inputs,verbose=self.verbose, # always returns air wavelengths
alinefile=self.alinefile,mlinefile=self.mlinefile)
self.nsynfev += 1
# Convolve with the LSF and do air/vacuum wave conversion
pspec = prepare_synthspec(synspec,self.lsf,norm=self.norm,
continuum_func=self.continuum_func)
# Save models/pars/chisq
self._all_pars.append(list(args).copy())
self._all_model.append(pspec.flux.flatten().copy())
self._all_chisq.append(self.chisq(pspec.flux.flatten()))
# Return flattened spectrum
if retobj:
return pspec
else:
return pspec.flux.flatten()
def getstep(self,name,val,relstep=0.02):
""" Calculate step for a parameter."""
# It mainly deals with edge cases
#if val != 0.0:
# step = relstep*val
#else:
# if name=='RV':
# step = 1.0
# elif name=='VROT':
# step = 0.5
# elif name=='VMICRO':
# step = 0.5
# elif name.endswith('_H'):
# step = 0.02
# else:
# step = 0.02
if name=='TEFF':
step = 5.0
elif name=='RV':
step = 0.1
elif name=='VROT':
step = 0.5
elif name=='VMICRO':
step = 0.5
elif name.endswith('_H'):
step = 0.01
else:
step = 0.01
return step
def jac(self,x,*args):
""" Compute the Jacobian matrix (an m-by-n matrix, where element (i, j)
is the partial derivative of f[i] with respect to x[j]). """
        if not hasattr(self,'logger'):
logger = dln.basiclogger()
else:
logger = self.logger
logger.info(args)
if self.verbose:
logger.info(' ')
logger.info('##### Calculating Jacobian Matrix #####')
logger.info(' ')
        # A new synthetic spectrum does not need to be generated for RV, vmicro or vsini.
# Some time can be saved by not remaking those.
# Use a one-sided derivative.
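        # i.e. each Jacobian column is approximated as
        #   jac[:, i] = (f(pars with pars[i] + step) - f(pars)) / step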
# Boundaries
lbounds,ubounds = mkbounds(self.fitparams)
relstep = 0.02
npix = len(x)
npar = len(args)
# Get INPUTS dictionary and make keys all CAPS
inputs = self.mkinputs(args)
inputs = dict((key.upper(), value) for (key, value) in inputs.items())
# Some important parameters
w0 = inputs['W0']
w1 = inputs['W1']
dw = inputs['DW']
rv = inputs.get('RV')
vrot = inputs.get('VROT')
vmicro = inputs.get('VMICRO')
# Create synthetic spectrum at current values
# set vrot=vmicro=rv=0, will modify later if necessary
if self.verbose:
logger.info('--- Current values ---')
logger.info(args)
tinputs = inputs.copy()
tinputs['VMICRO'] = 0
tinputs['VROT'] = 0
tinputs['RV'] = 0
        origspec = model_spectrum(tinputs,keepextend=True, # always returns air wavelengths
alinefile=self.alinefile,mlinefile=self.mlinefile)
self.nsynfev += 1
# Smooth and shift
smorigspec = smoothshift_spectrum(origspec,vrot=vrot,vmicro=vmicro,rv=rv)
# Trim to final wavelengths
smorigspec = trim_spectrum(smorigspec,w0,w1)
# Convolve with the LSF and do air/vacuum wave conversion
pspec = prepare_synthspec(smorigspec,self.lsf,norm=self.norm,
continuum_func=self.continuum_func)
# Flatten the spectrum
f0 = pspec.flux.flatten()
# Save models/pars/chisq
self._all_pars.append(list(args).copy())
self._all_model.append(f0.copy())
self._all_chisq.append(self.chisq(f0))
chisq = np.sqrt( np.sum( (self.flux-f0)**2/self.err**2 )/len(self.flux) )
if self.verbose:
logger.info('chisq = '+str(chisq))
# MASK PIXELS!?
# Initialize jacobian matrix
jac = np.zeros((npix,npar),np.float64)
# Loop over parameters
for i in range(npar):
pars = np.array(copy.deepcopy(args))
step = self.getstep(self.fitparams[i],pars[i],relstep)
# Check boundaries, if above upper boundary
# go the opposite way
if pars[i]>ubounds[i]:
step *= -1
pars[i] += step
tinputs = self.mkinputs(pars)
if self.verbose:
logger.info(' ')
logger.info('--- '+str(i+1)+' '+self.fitparams[i]+' '+str(pars[i])+' ---')
logger.info(pars)
# VROT/VMICRO/RV, just shift/smooth original spectrum
if self.fitparams[i]=='VROT' or self.fitparams[i]=='VMICRO' or self.fitparams[i]=='RV':
tvrot = tinputs.get('VROT')
tvmicro = tinputs.get('VMICRO')
trv = tinputs.get('RV')
#import pdb; pdb.set_trace()
# Smooth and shift
synspec = smoothshift_spectrum(origspec,vrot=tvrot,vmicro=tvmicro,rv=trv)
# Trim to final wavelengths
synspec = trim_spectrum(synspec,w0,w1)
else:
synspec = model_spectrum(tinputs,alinefile=self.alinefile,
mlinefile=self.mlinefile) # always returns air wavelengths
self.nsynfev += 1
# Convert to vacuum wavelengths if necessary
if self.wavevac:
synspec.wave = astro.airtovac(synspec.wave)
synspec.wavevac = True
# Convolve with the LSF and do air/vacuum wave conversion
pspec = prepare_synthspec(synspec,self.lsf,norm=self.norm,
continuum_func=self.continuum_func)
# Flatten the spectrum
f1 = pspec.flux.flatten()
# Save models/pars/chisq
self._all_pars.append(list(pars).copy())
self._all_model.append(f1.copy())
self._all_chisq.append(self.chisq(f1))
if np.sum(~np.isfinite(f1))>0:
print('some nans/infs')
import pdb; pdb.set_trace()
jac[:,i] = (f1-f0)/step
if np.sum(~np.isfinite(jac))>0:
print('some nans/infs')
import pdb; pdb.set_trace()
self._jac_array = jac.copy() # keep a copy
self.njac += 1
return jac
def trim_spectrum(spec,w0,w1):
""" Trim a synthetic spectrum to [w0,w1]."""
# This assumes that the spectrum has a single order
wv1, ind1 = dln.closest(spec.wave,w0)
wv2, ind2 = dln.closest(spec.wave,w1)
# Nothing to do
if ind1==0 and ind2==(spec.npix-1):
return spec
outspec = spec.copy()
outspec.flux = outspec.flux[ind1:ind2+1]
outspec.wave = outspec.wave[ind1:ind2+1]
if outspec.err is not None:
outspec.err = outspec.err[ind1:ind2+1]
if outspec.mask is not None:
outspec.mask = outspec.mask[ind1:ind2+1]
if hasattr(outspec,'cont'):
if outspec.cont is not None:
outspec.cont = outspec.cont[ind1:ind2+1]
outspec.npix = len(outspec.flux)
return outspec
def getabund(inputs,verbose=False):
""" Grab the abundances out of the input file and return array of abundances."""
# Create the input 99-element abundance array
codedir = os.path.dirname(os.path.abspath(__file__))
pertab = Table.read(codedir+'/data/periodic_table.txt',format='ascii')
feh = inputs.get('FEH')
if feh is None:
feh = inputs.get('FE_H')
if feh is None:
raise ValueError('FE_H missing from inputs')
# Read model atmosphere
modelfile = inputs.get('modelfile')
if modelfile is None:
raise ValueError('modelfile missing from inputs')
atmostype, teff, logg, vmicro2, mabu, nd, atmos = synple.read_model(modelfile,verbose=verbose)
mlines = dln.readlines(modelfile)
# solar abundances
# first two are Teff and logg
# last two are Hydrogen and Helium
solar_abund = np.array([ 4750., 2.5,
-10.99, -10.66, -9.34, -3.61, -4.21,
-3.35, -7.48, -4.11, -5.80, -4.44,
-5.59, -4.53, -6.63, -4.92, -6.54,
-5.64, -7.01, -5.70, -8.89, -7.09,
-8.11, -6.40, -6.61, -4.54, -7.05,
-5.82, -7.85, -7.48, -9.00, -8.39,
-9.74, -8.70, -9.50, -8.79, -9.52,
-9.17, -9.83, -9.46, -10.58, -10.16,
-20.00, -10.29, -11.13, -10.47, -11.10,
-10.33, -11.24, -10.00, -11.03, -9.86,
-10.49, -9.80, -10.96, -9.86, -10.94,
-10.46, -11.32, -10.62, -20.00, -11.08,
-11.52, -10.97, -11.74, -10.94, -11.56,
-11.12, -11.94, -11.20, -11.94, -11.19,
-12.16, -11.19, -11.78, -10.64, -10.66,
-10.42, -11.12, -10.87, -11.14, -10.29,
-11.39, -20.00, -20.00, -20.00, -20.00,
-20.00, -20.00, -12.02, -20.00, -12.58,
-20.00, -20.00, -20.00, -20.00, -20.00,
-20.00, -20.00])
# Deal with alpha abundances
# only add the individual alpha abundance if it's not already there
# sometimes we might fit a single alpha element but want to use
# ALPHA_H to set the rest of them
if inputs.get('ALPHA_H') is not None:
alpha = inputs['ALPHA_H']
elem = ['O','MG','SI','S','CA','TI']
for k in range(len(elem)):
if inputs.get(elem[k]+'_H') is None:
inputs[elem[k]+'_H'] = alpha
# Scale global metallicity
abu = solar_abund.copy()
abu[2:] += feh
# Now offset the elements with [X/Fe], [X/Fe]=[X/H]-[Fe/H]
g, = np.where( (np.char.array(list(inputs.keys())).find('_H') != -1) &
(np.char.array(list(inputs.keys())) != 'FE_H') )
if len(g)>0:
ind1,ind2 = dln.match(np.char.array(list(inputs.keys()))[g],np.char.array(pertab['symbol']).upper()+'_H')
for k in range(len(ind1)):
key1 = np.char.array(list(inputs.keys()))[g[ind1[k]]]
abu[ind2[k]] += float(inputs[key1]) - feh
if verbose:
print('%s %f' % (key1,float(inputs[key1])))
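    # Net effect for any element X given here: abu[X] = solar(X) + [X/H].  For example,
    # FE_H=-0.5 with MG_H=+0.3 first scales all metals by -0.5 dex and then adds
    # +0.8 dex to Mg, leaving Mg at +0.3 dex relative to solar.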
# convert to linear
abu[2:] = 10**abu[2:]
# Divide by N(H)
g, = np.where(np.char.array(mlines).find('ABUNDANCE SCALE') != -1)
nhtot = np.float64(mlines[g[0]].split()[6])
abu[2:] /= nhtot
# use model values for H and He
abu[0:2] = mabu[0:2]
return abu
def synple_wrapper(inputs,verbose=False,tmpbase='/tmp',alinefile=None,mlinefile=None):
""" This is a wrapper around synple to generate a new synthetic spectrum."""
# Wavelengths are all AIR!!
# inputs is a dictionary with all of the inputs
# Teff, logg, [Fe/H], some [X/Fe], and the wavelength parameters (w0, w1, dw).
# Make temporary directory for synple to work in
curdir = os.path.abspath(os.curdir)
tdir = os.path.abspath(tempfile.mkdtemp(prefix="syn",dir=tmpbase))
os.chdir(tdir)
# Linelists to use
linelist = ['gfallx3_bpo.19','kmol3_0.01_30.20'] # default values
if alinefile is not None: # atomic linelist input
linelist[0] = alinefile
if mlinefile is not None: # molecular linelist input
linelist[1] = mlinefile
if verbose:
print('Using linelist: ',linelist)
# Make key names all CAPS
inputs = dict((key.upper(), value) for (key, value) in inputs.items())
# Make the model atmosphere file
teff = inputs['TEFF']
logg = inputs['LOGG']
metal = inputs['FE_H']
tid,modelfile = tempfile.mkstemp(prefix="mod",dir=".")
os.close(tid) # close the open file
# Limit values
# of course the logg/feh ranges vary with Teff
mteff = dln.limit(teff,3500.0,60000.0)
mlogg = dln.limit(logg,0.0,5.0)
mmetal = dln.limit(metal,-2.5,0.5)
model, header, tail = models.mkmodel(mteff,mlogg,mmetal,modelfile)
inputs['modelfile'] = modelfile
    if not os.path.exists(modelfile) or os.stat(modelfile).st_size==0:
print('model atmosphere file does NOT exist')
import pdb; pdb.set_trace()
# Create the synspec synthetic spectrum
w0 = inputs['W0']
w1 = inputs['W1']
dw = inputs['DW']
vmicro = inputs.get('VMICRO')
vrot = inputs.get('VROT')
if vrot is None:
vrot = 0.0
# Get the abundances
abu = getabund(inputs,verbose=verbose)
wave,flux,cont = synple.syn(modelfile,(w0,w1),dw,vmicro=vmicro,vrot=vrot,
abu=list(abu),verbose=verbose,linelist=linelist)
# Delete temporary files
shutil.rmtree(tdir)
os.chdir(curdir)
return (wave,flux,cont)
def smoothshift_spectrum(inpspec,vmicro=None,vrot=None,rv=None):
""" This smoothes the spectrum by Vrot+Vmicro and
shifts it by RV."""
#vmicro = inputs.get('VMICRO')
#vrot = inputs.get('VROT')
#rv = inputs.get('RV')
# Nothing to do
if vmicro is None and vrot is None and rv is None:
return inpspec.copy()
# Initialize output spectrum
spec = inpspec.copy()
# Some broadening
if vmicro is not None or vrot is not None:
flux = utils.broaden(spec.wave,spec.flux,vgauss=vmicro,vsini=vrot)
spec.flux = flux
## Vrot/Vsini (km/s) and Vmicro (in km/s)
#if vrot is not None or vmicro is not None:
# wave, flux = synple.call_rotin(wave, flux, vrot, fwhm, space, steprot, stepfwhm, clean=False, reuseinputfiles=True)
# Doppler shift only (in km/s)
if rv is not None:
if rv != 0.0:
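            # Non-relativistic Doppler shift: lambda_obs = lambda_rest * (1 + v/c).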
shiftwave = spec.wave*(1+rv/cspeed)
gd,ngd,bd,nbd = dln.where( (spec.wave >= np.min(shiftwave)) & (spec.wave <= np.max(shiftwave)), comp=True)
# Doppler shift and interpolate onto wavelength array
if hasattr(spec,'cont'):
cont = synple.interp_spl(spec.wave[gd], shiftwave, spec.cont)
spec.cont *= 0
spec.cont[gd] = cont
                # interpolate the continuum onto the missing pixels
if nbd>0:
contmissing = dln.interp(spec.wave[gd],spec.cont[gd],spec.wave[bd],kind='linear',assume_sorted=False)
spec.cont[bd] = contmissing
flux = synple.interp_spl(spec.wave[gd], shiftwave, spec.flux)
spec.flux *= 0
spec.flux[gd] = flux
if nbd>0:
# Fill in missing values with interpolated values
if np.sum(np.isfinite(spec.flux[gd]))>0:
coef = dln.poly_fit(spec.wave[gd],spec.flux[gd],2)
fluxmissing = dln.poly(spec.wave[bd],coef)
spec.flux[bd] = fluxmissing
# Mask these pixels
if spec.mask is None:
spec.mask = np.zeros(len(spec.flux),bool)
spec.mask[bd] = True
return spec
def model_spectrum(inputs,verbose=False,keepextend=False,alinefile=None,mlinefile=None):
"""
This creates a model spectrum given the inputs:
RV, Teff, logg, vmicro, vsini, [Fe/H], [X/Fe], w0, w1, dw.
This creates the new synthetic spectrum and then convolves with vmicro, vsini and
shifts to velocity RV.
The returned spectrum always uses AIR wavelengths!!!
Parameters
----------
inputs : dictionary
Input parameters, stellar parameters, abundances.
keepextend : bool, optional
Keep the extensions on the ends. Default is False.
alinefile : str, optional
Atomic linelist filename. Default is None (use synple's default one).
mlinefile : str, optional
Molecular linelist filename. Default is None (use synple's default one).
verbose : bool, optional
Verbose output. Default is False.
Returns
-------
synspec : Spec1D
The synthetic spectrum as Spec1D object.
"""
# Make key names all CAPS
inputs = dict((key.upper(), value) for (key, value) in inputs.items())
# Extend on the ends for RV/convolution purposes
w0 = inputs['W0']
w1 = inputs['W1']
dw = inputs['DW']
rv = inputs.get('RV')
vrot = inputs.get('VROT')
vmicro = inputs.get('VMICRO')
inputsext = inputs.copy()
if rv is not None or vrot is not None or vmicro is not None:
numext = int(np.ceil(w1*(1.0+1500/cspeed)-w1))
inputsext['W0'] = w0-numext*dw
inputsext['W1'] = w1+numext*dw
if verbose:
print('Extending wavelength by '+str(numext)+' pixels on each end')
# Create the synthetic spectrum
# set vrot=vmicro=0, will convolve later if necessary
inputsext['VMICRO'] = 0
inputsext['VROT'] = 0
wave1,flux1,cont1 = synple_wrapper(inputsext,verbose=verbose,alinefile=alinefile,
mlinefile=mlinefile)
# Get final wavelength array
wv1, ind1 = dln.closest(wave1,w0)
wv2, ind2 = dln.closest(wave1,w1)
synspec = Spec1D(flux1/cont1,err=flux1*0,wave=wave1,lsfpars=np.array(0.0))
synspec.cont = cont1
synspec.wavevac = False
# Smooth and shift
if rv is not None or vrot is not None or vmicro is not None:
synspec = smoothshift_spectrum(synspec,vrot=vrot,vmicro=vmicro,rv=rv)
# Trim to final wavelengths
if keepextend is False:
synspec = trim_spectrum(synspec,w0,w1)
return synspec
def prepare_synthspec(synspec,lsf,norm=True,continuum_func=None):
""" Prepare a synthetic spectrum to be compared to an observed spectrum."""
# Convolve with LSF and do air<->vacuum wavelength conversion
# Convert wavelength from air->vacuum or vice versa
if synspec.wavevac != lsf.wavevac:
# Air -> Vacuum
if synspec.wavevac is False:
synspec.wave = astro.airtovac(synspec.wave)
synspec.wavevac = True
# Vacuum -> Air
else:
            synspec.wave = astro.vactoair(synspec.wave)
synspec.wavevac = False
# Initialize the output spectrum
if lsf.wave.ndim==2:
npix,norder = lsf.wave.shape
else:
npix = len(lsf.wave)
norder = 1
pspec = Spec1D(np.zeros((npix,norder),np.float32),err=np.zeros((npix,norder),np.float32),
wave=lsf.wave,lsfpars=lsf.pars,lsftype=lsf.lsftype,lsfxtype=lsf.xtype)
pspec.cont = np.zeros((npix,norder),np.float32)
if continuum_func is not None:
pspec.continuum_func = continuum_func
# Loop over orders
if lsf.wave.ndim==1:
wave = np.atleast_2d(lsf.wave.copy()).T
else:
wave = lsf.wave.copy()
for o in range(lsf.norder):
wobs = wave[:,o]
dw = np.median(dln.slope(wobs))
wv1,ind1 = dln.closest(synspec.wave,np.min(wobs)-2*np.abs(dw))
wv2,ind2 = dln.closest(synspec.wave,np.max(wobs)+2*np.abs(dw))
modelflux = synspec.flux[ind1:ind2+1]
modelwave = synspec.wave[ind1:ind2+1]
modelcont = synspec.cont[ind1:ind2+1]
# Rebin, if necessary
# get LSF FWHM (A) for a handful of positions across the spectrum
xp = np.arange(npix//20)*20
fwhm = lsf.fwhm(wobs[xp],xtype='Wave',order=o)
# FWHM is in units of lsf.xtype, convert to wavelength/angstroms, if necessary
if lsf.xtype.lower().find('pix')>-1:
fwhm *= np.abs(dw)
# convert FWHM (A) in number of model pixels at those positions
dwmod = dln.slope(modelwave)
dwmod = np.hstack((dwmod,dwmod[-1]))
xpmod = dln.interp(modelwave,np.arange(len(modelwave)),wobs[xp],kind='cubic',assume_sorted=False,extrapolate=True)
xpmod = np.round(xpmod).astype(int)
fwhmpix = np.abs(fwhm/dwmod[xpmod])
# need at least ~4 pixels per LSF FWHM across the spectrum
# using 3 affects the final profile shape
nbin = np.round(np.min(fwhmpix)//4).astype(int)
if np.min(fwhmpix) < 3.7:
warnings.warn('Model has lower resolution than the observed spectrum. Only '+str(np.min(fwhmpix))+' model pixels per resolution element')
if np.min(fwhmpix) < 2.8:
raise Exception('Model has lower resolution than the observed spectrum. Only '+str(np.min(fwhmpix))+' model pixels per resolution element')
if nbin>1:
npix2 = np.round(len(modelflux) // nbin).astype(int)
modelflux = dln.rebin(modelflux[0:npix2*nbin],npix2)
modelwave = dln.rebin(modelwave[0:npix2*nbin],npix2)
modelcont = dln.rebin(modelcont[0:npix2*nbin],npix2)
# Convolve
lsf2d = lsf.anyarray(modelwave,xtype='Wave',order=o,original=False)
cflux = utils.convolve_sparse(modelflux,lsf2d)
# Interpolate onto final wavelength array
flux = synple.interp_spl(wobs, modelwave, cflux)
cont = synple.interp_spl(wobs, modelwave, modelcont)
pspec.flux[:,o] = flux
pspec.cont[:,o] = cont
pspec.normalized = True
# Normalize
if norm is True:
newcont = pspec.continuum_func(pspec)
pspec.flux /= newcont
pspec.cont *= newcont
return pspec
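# Illustrative sketch (not part of the original module): convolving a synthetic
# spectrum with an observed spectrum's LSF.  'spectrum.fits' is a placeholder
# filename and the stellar parameters and DW value are hypothetical examples.
def _example_prepare_synthspec():
    spec = doppler.read('spectrum.fits')      # placeholder filename
    spec.normalize()
    inputs = {'TEFF':4800.0, 'LOGG':2.5, 'FE_H':-0.3, 'RV':0.0,
              'W0':float(np.min(spec.wave)), 'W1':float(np.max(spec.wave)), 'DW':0.05}
    synspec = model_spectrum(inputs)
    # norm=False here to avoid needing a continuum_func for this sketch
    pspec = prepare_synthspec(synspec, spec.lsf, norm=False)
    return pspec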
def mkbounds(params,paramlims=None):
""" Make lower and upper boundaries for parameters """
params = np.char.array(params).upper()
if paramlims is not None:
limkeys = np.char.array(list(paramlims.keys())).upper()
n = len(params)
lbounds = np.zeros(n,np.float64)
ubounds = np.zeros(n,np.float64)
# Teff
g, = np.where(params=='TEFF')
if len(g)>0:
lbounds[g[0]] = 3500
ubounds[g[0]] = 60000
# logg
g, = np.where(params=='LOGG')
if len(g)>0:
lbounds[g[0]] = 0
ubounds[g[0]] = 5
# fe_h
g, = np.where(params=='FE_H')
if len(g)>0:
lbounds[g[0]] = -3
ubounds[g[0]] = 1
# Vmicro
g, = np.where(params=='VMICRO')
if len(g)>0:
lbounds[g[0]] = 0
ubounds[g[0]] = 5
# Vsini/vrot
g, = np.where(params=='VROT')
if len(g)>0:
lbounds[g[0]] = 0
ubounds[g[0]] = 500
# RV
g, = np.where(params=='RV')
if len(g)>0:
lbounds[g[0]] = -1500
ubounds[g[0]] = 1500
# abundances
g, = np.where( (params.find('_H') != -1) & (params != 'FE_H') )
if len(g)>0:
lbounds[g] = -3
ubounds[g] = 10
# Use input parameter limits
if paramlims is not None:
for i,f in enumerate(params):
g, = np.where(limkeys==f)
if len(g)>0:
lbounds[i] = paramlims[limkeys[g[0]]][0]
ubounds[i] = paramlims[limkeys[g[0]]][1]
bounds = (lbounds,ubounds)
return bounds
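# Illustrative sketch (not part of the original module): default bounds for a few
# parameters, with a hypothetical user-supplied limit overriding the Teff range.
def _example_mkbounds():
    bounds = mkbounds(['TEFF','LOGG','FE_H','RV'],
                      paramlims={'TEFF':[4000.0,7000.0]})
    # bounds[0] holds the lower limits, bounds[1] the upper limits
    return bounds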
def mkdxlim(fitparams):
""" Make array of parameter changes at which curve_fit should finish."""
npar = len(fitparams)
dx_lim = np.zeros(npar,float)
for k in range(npar):
if fitparams[k]=='TEFF':
dx_lim[k] = 1.0
elif fitparams[k]=='LOGG':
dx_lim[k] = 0.005
elif fitparams[k]=='VMICRO':
dx_lim[k] = 0.1
elif fitparams[k]=='VROT':
dx_lim[k] = 0.1
elif fitparams[k]=='RV':
dx_lim[k] = 0.01
elif fitparams[k].endswith('_H'):
dx_lim[k] = 0.005
else:
dx_lim[k] = 0.01
return dx_lim
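# Illustrative sketch (not part of the original module): convergence thresholds for
# a typical parameter set; abundance parameters (e.g. the hypothetical CA_H below)
# use the '_H' rule.
def _example_mkdxlim():
    dx_lim = mkdxlim(['TEFF','LOGG','FE_H','RV','CA_H'])
    return dx_lim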
def initpars(params,fitparams,bounds=None):
""" Make initial set of parameters given PARAMS and
FITPARAMS."""
params = dict((key.upper(), value) for (key, value) in params.items()) # all CAPS
fitparams = [v.upper() for v in fitparams] # all CAPS
npars = len(fitparams)
pinit = np.zeros(npars,np.float64)
# Loop over parameters
for k in range(npars):
ind, = np.where(np.char.array(list(params.keys()))==fitparams[k])
# This parameter is in PARAMS
if len(ind)>0:
pinit[k] = params[fitparams[k]]
# Not in PARAMS
else:
if fitparams[k]=='RV':
pinit[k] = 0.0
elif fitparams[k]=='VMICRO':
pinit[k] = 2.0
elif fitparams[k]=='VROT':
pinit[k] = 0.0
elif fitparams[k]=='TEFF':
pinit[k] = 5000.0
elif fitparams[k]=='LOGG':
pinit[k] = 3.0
elif fitparams[k].endswith('_H'):
# Abundances, use FE_H if possible
if 'FE_H' in params.keys():
pinit[k] = params['FE_H']
else:
pinit[k] = 0.0
else:
pinit[k] = 0.0
# Make sure inital parameters are within the boundary limits
if bounds is not None:
for k in range(npars):
pinit[k] = dln.limit(pinit[k],bounds[0][k],bounds[1][k])
return pinit
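# Illustrative sketch (not part of the original module): combining mkbounds() and
# initpars().  The parameter values are hypothetical; CA_H is not in params, so it
# is initialized from FE_H.
def _example_initpars():
    params = {'TEFF':5800.0, 'LOGG':4.4, 'FE_H':-0.2, 'RV':0.0}
    fitparams = ['TEFF','LOGG','FE_H','RV','CA_H']
    bounds = mkbounds(fitparams)
    pinit = initpars(params, fitparams, bounds)
    return pinit, bounds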
def specfigure(figfile,spec,fmodel,out,original=None,verbose=True,figsize=10):
""" Make diagnostic figure."""
#import matplotlib
matplotlib.use('Agg')
#import matplotlib.pyplot as plt
if os.path.exists(figfile): os.remove(figfile)
norder = spec.norder
nlegcol = 2
if original is not None: nlegcol=3
# Single-order plot
if norder==1:
fig,ax = plt.subplots()
fig.set_figheight(figsize*0.5)
fig.set_figwidth(figsize)
if original is not None:
plt.plot(original.wave,original.flux,color='green',label='Original',linewidth=1)
plt.plot(spec.wave,spec.flux,'b',label='Masked Data',linewidth=0.5)
plt.plot(fmodel.wave,fmodel.flux,'r',label='Model',linewidth=0.5,alpha=0.8)
leg = ax.legend(loc='upper left', frameon=True, framealpha=0.8, ncol=nlegcol)
plt.xlabel('Wavelength (Angstroms)')
plt.ylabel('Normalized Flux')
xr = dln.minmax(spec.wave)
yr = [np.min([spec.flux,fmodel.flux]), np.max([spec.flux,fmodel.flux])]
if original is not None:
yr = [np.min([original.flux,spec.flux,fmodel.flux]), np.max([spec.flux,fmodel.flux])]
yr = [yr[0]-dln.valrange(yr)*0.15,yr[1]+dln.valrange(yr)*0.005]
yr = [np.max([yr[0],-0.2]), np.min([yr[1],2.0])]
plt.xlim(xr)
plt.ylim(yr)
snr = np.nanmedian(spec.flux/spec.err)
plt.title(spec.filename)
#ax.annotate(r'S/N=%5.1f Teff=%5.1f$\pm$%5.1f logg=%5.2f$\pm$%5.2f [Fe/H]=%5.2f$\pm$%5.2f Vrel=%5.2f$\pm$%5.2f chisq=%5.2f' %
# (snr, out['TEFF'], out['tefferr'], out['LOGG'], out['loggerr'], out['FE_H'], out['feherr'], out['RV'], out['vrelerr'], out['chisq']),
# xy=(np.mean(xr), yr[0]+dln.valrange(yr)*0.05),ha='center')
# Multi-order plot
else:
fig,ax = plt.subplots(norder)
fig.set_figheight(figsize)
fig.set_figwidth(figsize)
for i in range(norder):
if original is not None:
ax[i].plot(original.wave[:,i],original.flux[:,i],color='green',label='Original',linewidth=1)
ax[i].plot(spec.wave[:,i],spec.flux[:,i],'b',label='Masked Data',linewidth=0.5)
ax[i].plot(fmodel.wave[:,i],fmodel.flux[:,i],'r',label='Model',linewidth=0.5,alpha=0.8)
if i==0:
leg = ax[i].legend(loc='upper left', frameon=True, framealpha=0.8, ncol=nlegcol)
ax[i].set_xlabel('Wavelength (Angstroms)')
ax[i].set_ylabel('Normalized Flux')
xr = dln.minmax(spec.wave[:,i])
yr = [np.min([spec.flux[:,i],fmodel.flux[:,i]]), np.max([spec.flux[:,i],fmodel.flux[:,i]])]
if original is not None:
yr = [np.min([original.flux[:,i],spec.flux[:,i],fmodel.flux[:,i]]), np.max([spec.flux[:,i],fmodel.flux[:,i]])]
yr = [yr[0]-dln.valrange(yr)*0.05,yr[1]+dln.valrange(yr)*0.05]
if i==0:
yr = [yr[0]-dln.valrange(yr)*0.15,yr[1]+dln.valrange(yr)*0.05]
yr = [np.max([yr[0],-0.2]), np.min([yr[1],2.0])]
ax[i].set_xlim(xr)
ax[i].set_ylim(yr)
# legend
if i==0:
snr = np.nanmedian(spec.flux/spec.err)
ax[i].set_title(spec.filename)
#ax[i].annotate(r'S/N=%5.1f Teff=%5.1f$\pm$%5.1f logg=%5.2f$\pm$%5.2f [Fe/H]=%5.2f$\pm$%5.2f Vrel=%5.2f$\pm$%5.2f chisq=%5.2f' %
# (snr,out['teff'],out['tefferr'],out['logg'],out['loggerr'],out['feh'],out['feherr'],out['vrel'],out['vrelerr'],out['chisq']),
# xy=(np.mean(xr), yr[0]+dln.valrange(yr)*0.05),ha='center')
plt.savefig(figfile,bbox_inches='tight')
plt.close(fig)
if verbose is True: print('Figure saved to '+figfile)
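# Illustrative sketch (not part of the original module): making the diagnostic plot
# after a least-squares fit.  'spectrum.fits' and 'specfit.png' are placeholder
# filenames and the starting parameters are hypothetical.
def _example_specfigure():
    spec = doppler.read('spectrum.fits')      # placeholder filename
    spec.normalize()
    params = {'TEFF':5800.0, 'LOGG':4.4, 'FE_H':-0.2, 'RV':0.0}
    out, model = fit_lsq(spec, params, verbose=1)
    # wrap the model flux in a Spec1D object, following the pattern used in fit()
    fmodel = Spec1D(model, wave=spec.wave.copy(), lsfpars=np.array(0.0))
    specfigure('specfit.png', spec, fmodel, out)
    return out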
def dopvrot_lsq(spec,models=None,initpar=None,verbose=False,logger=None):
"""
Least Squares fitting with forward modeling of the spectrum.
Parameters
----------
spec : Spec1D object
The observed spectrum to match.
models : list of Cannon models, optional
A list of Cannon models to use. The default is to load all of the Cannon
models in the data/ directory and use those.
initpar : numpy array, optional
Initial estimate for [teff, logg, feh, RV, vsini], optional.
verbose : bool, optional
Verbose output of the various steps. This is False by default.
Returns
-------
out : numpy structured array
The output structured array of the final derived RVs, stellar parameters and errors.
bmodel : numpy array
The best-fitting model spectrum (flattened flux array).
Example
-------
.. code-block:: python
out, bmodel = dopvrot_lsq(spec)
"""
if logger is None:
logger = dln.basiclogger()
# Load and prepare the Cannon models
#-------------------------------------------
if models is None:
models = cannon.models.copy()
models.prepare(spec)
# Get initial estimates
if initpar is None:
initpar = np.array([6000.0, 2.5, -0.5, 0.0, 0.0])
initpar = np.array(initpar).flatten()
# Calculate the bounds
lbounds = np.zeros(5,float)+1e5
ubounds = np.zeros(5,float)-1e5
for p in models:
lbounds[0:3] = np.minimum(lbounds[0:3],np.min(p.ranges,axis=1))
ubounds[0:3] = np.maximum(ubounds[0:3],np.max(p.ranges,axis=1))
lbounds[3] = -1000
ubounds[3] = 1000
lbounds[4] = 0.0
ubounds[4] = 500.0
bounds = (lbounds, ubounds)
# function to use with curve_fit
def spec_interp_vsini(x,teff,logg,feh,rv,vsini):
""" This returns the interpolated model for a given spectrum."""
# The "models" and "spec" must already exist outside of this function
m = models(teff=teff,logg=logg,feh=feh,rv=rv)
if m is None: # there was a problem
return np.zeros(spec.flux.shape,float).flatten()+1e30
# Broaden to vsini
if spec.norder>1:
smflux = spec.flux*0
for k in range(spec.norder):
smflux[:,k] = utils.broaden(m.wave[:,k],m.flux[:,k],vsini=vsini)
else:
smflux = utils.broaden(m.wave.flatten(),m.flux.flatten(),vsini=vsini)
return smflux.flatten()
def spec_interp_vsini_jac(x,*args):
""" Compute the Jacobian matrix (an m-by-n matrix, where element (i, j)
is the partial derivative of f[i] with respect to x[j]). """
relstep = 0.02
npix = len(x)
npar = len(args)
# Current values
f0 = spec_interp_vsini(x,*args)
# Initialize jacobian matrix
jac = np.zeros((npix,npar),np.float64)
# Loop over parameters
for i in range(npar):
pars = np.array(copy.deepcopy(args))
step = relstep*pars[i]
if step<=0.0:
step = 0.02
pars[i] += step
f1 = spec_interp_vsini(x,*pars)
jac[:,i] = (f1-f0)/step
return jac
# Use curve_fit
lspars, lscov = curve_fit(spec_interp_vsini, spec.wave.flatten(), spec.flux.flatten(), sigma=spec.err.flatten(),
p0=initpar, bounds=bounds, jac=spec_interp_vsini_jac)
# If it hits a boundary then the solution won't change much compared to initpar
# setting absolute_sigma=True gives crazy low lsperror values
lsperror = np.sqrt(np.diag(lscov))
if verbose is True:
logger.info('Least Squares RV and stellar parameters:')
for k,n in enumerate(['Teff','logg','[Fe/H]','RV','Vsini']):
logger.info('%s = %f' % (n,lspars[k]))
lsmodel = spec_interp_vsini(spec.wave,teff=lspars[0],logg=lspars[1],feh=lspars[2],rv=lspars[3],vsini=lspars[4])
lschisq = np.sqrt(np.sum(((spec.flux.flatten()-lsmodel)/spec.err.flatten())**2)/len(lsmodel))
if verbose is True: logger.info('chisq = %5.2f' % lschisq)
# Put it into the output structure
npar = len(lspars)
dtype = np.dtype([('pars',float,npar),('parerr',float,npar),('parcov',float,(npar,npar)),('chisq',float)])
out = np.zeros(1,dtype=dtype)
out['pars'] = lspars
out['parerr'] = lsperror
out['parcov'] = lscov
out['chisq'] = lschisq
return out, lsmodel
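# Illustrative sketch (not part of the original module): a quick Cannon-based
# Teff/logg/[Fe/H]/RV/vsini fit.  'spectrum.fits' is a placeholder filename and
# the initial-guess values are hypothetical.
def _example_dopvrot_lsq():
    spec = doppler.read('spectrum.fits')      # placeholder filename
    spec.normalize()
    out, bmodel = dopvrot_lsq(spec, initpar=[5500.0, 4.0, -0.5, 0.0, 10.0], verbose=True)
    return out, bmodel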
def fit_elem(spec,params,elem,verbose=0,alinefile=None,mlinefile=None,logger=None):
""" Fit an individual element."""
t0 = time.time()
if logger is None:
logger = dln.basiclogger()
# Create fitparams
#fitparams = [e+'_H' for e in elem]
fitparams = elem.copy()
if verbose>0:
logger.info('Fitting: '+', '.join(fitparams))
# Initialize the fitter
spfitter = SpecFitter(spec,params,fitparams=fitparams,verbose=(verbose>=2),
alinefile=alinefile,mlinefile=mlinefile)
spfitter.logger = logger
spfitter.norm = True # normalize the synthetic spectrum
#spfitter.verbose = True
bounds = mkbounds(elem)
pinit = initpars(params,elem,bounds)
# Initalize output
npar = len(fitparams)
dtyp = []
for f in fitparams:
dtyp += [(f,float)]
dtyp += [('pars',float,npar),('chisq',float),('nsynfev',int)]
dtype = np.dtype(dtyp)
out = np.zeros(1,dtype=dtype)
# Loop over elemental abundances
flag = 0
abund = -2.0
dabund = 1.0
count = 0
abundarr = []
chisq = []
modelarr = []
# Loop from -2 to +1 or until we get through the minimum
while (flag==0):
model = spfitter.model(spec.wave.flatten(),abund)
chisq1 = spfitter.chisq(model)
abundarr.append(abund)
modelarr.append(model)
chisq.append(chisq1)
if verbose>0:
logger.info('%f %f' % (abund,chisq1))
# Are we done?
if (abund>=1) and (chisq1 != np.min(np.array(chisq))):
flag = 1
if (abund >= 10):
flag = 1
# Increment the abundance
abund += dabund
count += 1
# If the best value is at either end of the grid, just return that value
bestind = np.argmin(chisq)
if (bestind==0) or (bestind==len(chisq)-1):
bestabund = abundarr[bestind]
for k,f in enumerate(fitparams):
out[f] = bestabund
out['pars'] = bestabund
out['chisq'] = np.min(chisq)
out['nsynfev'] = spfitter.nsynfev
model = modelarr[bestind]
if verbose>0:
logger.info('%f %f' % (bestabund,np.min(chisq)))
logger.info('nfev = %i' % spfitter.nsynfev)
logger.info('dt = %.2f sec.' % (time.time()-t0))
logger.info(' ')
return out, model
# Now refine twice
for i in range(2):
# Get best value
bestind = np.argmin(np.array(chisq))
# get values half-way to left and right
# Left
lftind = bestind-1
lftabund = np.mean([abundarr[lftind],abundarr[bestind]])
lftmodel = spfitter.model(spec.wave.flatten(),lftabund)
lftchisq = spfitter.chisq(lftmodel)
abundarr.append(lftabund)
modelarr.append(lftmodel)
chisq.append(lftchisq)
if verbose>0:
logger.info('%f %f' % (lftabund,lftchisq))
# Right
rgtind = bestind+1
rgtabund = np.mean([abundarr[bestind],abundarr[rgtind]])
rgtmodel = spfitter.model(spec.wave.flatten(),rgtabund)
rgtchisq = spfitter.chisq(rgtmodel)
abundarr.append(rgtabund)
modelarr.append(rgtmodel)
chisq.append(rgtchisq)
if verbose>0:
logger.info('%f %f' % (rgtabund,rgtchisq))
# Sort arrays
si = np.argsort(abundarr)
abundarr = [abundarr[k] for k in si]
chisq = [chisq[k] for k in si]
modelarr = [modelarr[k] for k in si]
# Now interpolate to find the best value
abundarr2 = np.linspace(np.min(abundarr),np.max(abundarr),1000)
chisq2 = interp1d(abundarr,chisq,kind='quadratic')(abundarr2)
bestind = np.argmin(chisq2)
bestabund = abundarr2[bestind]
# Get the model at the best value
model = spfitter.model(spec.wave.flatten(),bestabund)
bestchisq = spfitter.chisq(model)
# Populate output structure
for k,f in enumerate(fitparams):
out[f] = bestabund
out['pars'] = bestabund
out['chisq'] = bestchisq
out['nsynfev'] = spfitter.nsynfev
if verbose>0:
logger.info('%f %f' % (bestabund,bestchisq))
logger.info('nfev = %i' % spfitter.nsynfev)
logger.info('dt = %.2f sec.' % (time.time()-t0))
logger.info(' ')
return out, model
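# Illustrative sketch (not part of the original module): fitting a single element
# with everything else held fixed.  'spectrum.fits' is a placeholder filename and
# the parameter values (including the starting CA_H) are hypothetical.
def _example_fit_elem():
    spec = doppler.read('spectrum.fits')      # placeholder filename
    spec.normalize()
    params = {'TEFF':4800.0, 'LOGG':2.5, 'FE_H':-0.3, 'RV':5.0, 'CA_H':-0.3}
    out, model = fit_elem(spec, params, ['CA_H'], verbose=1)
    return out, model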
def fit_lsq(spec,params,fitparams=None,fparamlims=None,verbose=0,alinefile=None,mlinefile=None,logger=None):
"""
Fit a spectrum with a synspec synthetic spectrum and determine stellar parameters and
abundances using least-squares.
Parameters
----------
spec : Spec1D object
The observed spectrum to match.
params : dict
Dictionary of initial values to use or parameters/elements to hold fixed.
fitparams : list, optional
List of parameter names to fit (e.g., TEFF, LOGG, FE_H, RV). By default all values
in PARAMS are fit.
fparamlims : dict, optional
Dictionary of lower and upper limits for each of the fitparams.
verbose : int, optional
Verbosity level (0, 1, or 2). The default is 0 and verbose=2 is for debugging.
alinefile : str, optional
The atomic linelist to use. Default is None which means the default synple linelist is used.
mlinefile : str, optional
The molecular linelist to use. Default is None which means the default synple linelist is used.
logger : logging object, optional
Logging object.
Returns
-------
out : numpy structured array
Catalog of best-fit values.
model : numpy array
The best-fit synthetic stellar spectrum.
Example
-------
.. code-block:: python
spec = doppler.read(file)
params = {'teff':5500,'logg':3.0,'fe_h':-1.0,'rv':0.0,'ca_h':-1.0}
fitparams = ['teff','logg','fe_h','rv','ca_h']
out,model = specfit.fit_lsq(spec,params,fitparams=fitparams)
"""
t0 = time.time()
if logger is None:
logger = dln.basiclogger()
# Normalize the spectrum
if spec.normalized==False:
spec.normalize()
# Capitalize the inputs
# Make key names all CAPS
params = dict((key.upper(), value) for (key, value) in params.items())
# Fitting parameters
if fitparams is None:
fitparams = list(params.keys())
fitparams = [v.upper() for v in fitparams] # all CAPS
npar = len(fitparams)
# Initialize the fitter
spfitter = SpecFitter(spec,params,fitparams=fitparams,verbose=(verbose>=2),
alinefile=alinefile,mlinefile=mlinefile)
spfitter.logger = logger
spfitter.norm = True # normalize the synthetic spectrum
bounds = mkbounds(fitparams,fparamlims)
pinit = initpars(params,fitparams,bounds)
if verbose>0:
logger.info('Fitting: '+', '.join(fitparams))
# Fit the spectrum using curve_fit
dx_lim = mkdxlim(fitparams)
pars, cov = curve_fit(spfitter.model,spfitter.wave,spfitter.flux,dx_lim=dx_lim,
sigma=spfitter.err,p0=pinit,bounds=bounds,jac=spfitter.jac)
error = np.sqrt(np.diag(cov))
if verbose>0:
logger.info('Best values:')
for k in range(npar):
logger.info('%s = %.3f +/- %.3f' % (fitparams[k],pars[k],error[k]))
model = spfitter.model(spfitter.wave,*pars)
chisq = np.sqrt(np.sum(((spfitter.flux-model)/spfitter.err)**2)/len(model))
if verbose>0:
logger.info('chisq = %.2f' % chisq)
logger.info('nfev = %i' % spfitter.nsynfev)
logger.info('dt = %.2f sec.' % (time.time()-t0))
# Put it into the output structure
dtyp = []
for f in fitparams:
dtyp += [(f,float),(f+'_ERR',float)]
dtyp += [('pars',float,npar),('parerr',float,npar),('parcov',float,(npar,npar)),('chisq',float),('nsynfev',int)]
dtype = np.dtype(dtyp)
out = np.zeros(1,dtype=dtype)
for k,f in enumerate(fitparams):
out[f] = pars[k]
out[f+'_ERR'] = error[k]
out['pars'] = pars
out['parerr'] = error
out['parcov'] = cov
out['chisq'] = chisq
out['nsynfev'] = spfitter.nsynfev
# Reshape final model spectrum
model = model.reshape(spec.flux.shape)
return out, model
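# Illustrative sketch (not part of the original module): restricting the search
# range of individual parameters with fparamlims.  The filename, parameter values,
# and limits below are hypothetical examples.
def _example_fit_lsq_limits():
    spec = doppler.read('spectrum.fits')      # placeholder filename
    spec.normalize()
    params = {'TEFF':9000.0, 'LOGG':4.0, 'FE_H':0.0, 'RV':-16.0}
    fparamlims = {'TEFF':[8000.0,10000.0], 'LOGG':[3.5,4.5],
                  'FE_H':[-0.5,0.5], 'RV':[-20.0,-12.0]}
    out, model = fit_lsq(spec, params, fparamlims=fparamlims, verbose=1)
    return out, model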
def fit(spec,params=None,elem=None,figfile=None,fitvsini=False,fitvmicro=False,
fparamlims=None,verbose=1,alinefile=None,mlinefile=None,logger=None):
"""
Fit a spectrum with a synspec synthetic spectrum and determine stellar parameters and
abundances using a multi-step iterative method.
Step 1: Fit Teff/logg/[Fe/H]/RV using Doppler
Step 2: Fit Teff/logg/[Fe/H]/RV + vsini with Doppler model
Step 3: Fit stellar parameters (Teff/logg/[Fe/H]/[alpha/H]), RV and broadening (Vrot/Vmicro)
Step 4: Fit each element one at a time holding everything else fixed.
Step 5: Fit everything simultaneously
Parameters
----------
spec : Spec1D object
The observed spectrum to match.
params : dict, optional
Dictionary of initial values to use or parameters/elements to hold fixed.
elem : list, optional
List of elements to fit. The default is:
elem = ['C','N','O','NA','MG','AL','SI','K','CA','TI','V','CR','MN','CO','NI','CU','SR','CE','ND']
Input an empty list [] to fit no elements.
figfile : string, optional
The filename for a diagnostic plot showing the observed spectrum and model spectrum.
fitvsini : bool, optional
Fit rotational velocity (vsini). By default, Vsini will be fit initially with a Doppler
model, but only included in the final fit if it improved chisq.
fitvmicro : bool, optional
Fit Vmicro. Default is False. If VMICRO is not included in PARAMS, it is set by default to:
logg>=3.8: vmicro = 2.0
logg<3.8:  vmicro = 10^(0.226 - 0.0228*logg + 0.0297*logg^2 - 0.0113*logg^3)
fparamlims : dict, optional
Dictionary of lower and upper limits for each of the fitted parameters.
For example, if params is {'teff': 9000, 'logg': 4.00, 'rv': -16.124}, fparamlims
could be {'teff': [8000,10000], 'logg': [3.50,4.50], 'rv': [-20.124,-12.124]}.
verbose : int, optional
Verbosity level (0, 1, or 2). The default is 1 and verbose=2 is for debugging.
alinefile : str, optional
The atomic linelist to use. Default is None which means the default synple linelist is used.
mlinefile : str, optional
The molecular linelist to use. Default is None which means the default synple linelist is used.
logger : logging object, optional
Logging object.
Returns
-------
out : numpy structured array
Catalog of best-fit values.
model : numpy array
The best-fit synthetic stellar spectrum.
Example
-------
.. code-block:: python
spec = doppler.read(file)
out,model = specfit.fit(spec)
"""
t0 = time.time()
if logger is None:
logger = dln.basiclogger()
logger.handlers[0].setFormatter(logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s"))
logger.handlers[0].setStream(sys.stdout)
# Default set of elements
if elem is None:
elem = ['C','N','O','NA','MG','AL','SI','K','CA','TI','V','CR','MN','CO','NI','CU','SR','CE','ND']
# Normalize the spectrum
if spec.normalized==False:
spec.normalize()
# Print out inputs
if verbose>0:
logger.info('Inputs:')
if params is not None:
logger.info('PARAMS:')
for k,n in enumerate(params.keys()):
logger.info('%s = %f' % (n,params[n]))
else:
logger.info('PARAMS: None')
if fitvmicro:
logger.info('Fitting VMICRO')
if fitvsini:
logger.info('Fitting VSINI')
if len(elem)>0:
logger.info('Elements to fit: '+', '.join(elem))
else:
logger.info('No elements to fit')
logger.info(' ')
# Input linelists
if verbose and alinefile is not None:
logger.info('Using input atomic linelist: '+str(alinefile))
if verbose and mlinefile is not None:
logger.info('Using input molecular linelist: '+str(mlinefile))
# 1) Doppler (Teff, logg, feh, RV)
#---------------------------------
t1 = time.time()
if verbose>0:
logger.info('Step 1: Running Doppler')
# Use Doppler to get initial guess of stellar parameters and RV
dopout, dopfmodel, dopspecm = doppler.fit(spec)
if verbose>0:
logger.info('Teff = %.2f +/- %.2f' % (dopout['teff'][0],dopout['tefferr'][0]))
logger.info('logg = %.3f +/- %.3f' % (dopout['logg'][0],dopout['loggerr'][0]))
logger.info('[Fe/H] = %.3f +/- %.3f' % (dopout['feh'][0],dopout['feherr'][0]))
logger.info('Vrel = %.4f +/- %.4f' % (dopout['vrel'][0],dopout['vrelerr'][0]))
logger.info('chisq = %.3f' % dopout['chisq'][0])
logger.info('dt = %.2f sec.' % (time.time()-t1))
# typically 5 sec
# 2) Fit vsini as well with Doppler model
#-----------------------------------------
t2 = time.time()
if verbose>0:
logger.info(' ')
logger.info('Step 2: Fitting vsini with Doppler model')
# For APOGEE resolution you need vsini~4 km/s or greater to see an effect
initpar2 = [dopout['teff'][0], dopout['logg'][0], dopout['feh'][0], dopout['vrel'][0], 5.0]
out2, model2 = dopvrot_lsq(spec,initpar=initpar2,verbose=verbose,logger=logger)
if verbose>0:
logger.info('Teff = %.2f +/- %.2f' % (out2['pars'][0][0],out2['parerr'][0][0]))
logger.info('logg = %.3f +/- %.3f' % (out2['pars'][0][1],out2['parerr'][0][1]))
logger.info('[Fe/H] = %.3f +/- %.3f' % (out2['pars'][0][2],out2['parerr'][0][2]))
logger.info('Vrel = %.4f +/- %.4f' % (out2['pars'][0][3],out2['parerr'][0][3]))
logger.info('Vsini = %.3f +/- %.3f' % (out2['pars'][0][4],out2['parerr'][0][4]))
logger.info('chisq = %.3f' % out2['chisq'][0])
logger.info('dt = %.2f sec.' % (time.time()-t2))
# typically 5 sec
if out2['chisq'][0] > dopout['chisq'][0]:
if verbose>0:
logger.info('Doppler Vrot=0 chisq is better')
out2['pars'][0] = [dopout['teff'][0],dopout['logg'][0],dopout['feh'][0],dopout['vrel'][0],0.0]
# Initialize params
if params is None:
params = {}
else:
params = dict((key.upper(), value) for (key, value) in params.items()) # all CAPS
# Using input values when possible, otherwise Doppler values
for k,name in enumerate(['TEFF','LOGG','FE_H','RV','VROT']):
if params.get(name) is None:
params[name] = out2['pars'][0][k]
# Get Vmicro using Teff/logg relation
# APOGEE DR14 vmicro relation (Holtzman et al. 2018)
# for stars with [M/H]>-1 and logg<3.8
# vmicro = 10^(0.226 - 0.0228*logg + 0.0297*logg^2 - 0.0113*logg^3)
# coef = [0.226, -0.0228, 0.0297, -0.0113]
# relation derived for giants only; it was not fit for dwarfs
if params.get('VMICRO') is None:
vmicro = 2.0 # default
if params['LOGG']<3.8:
vmcoef = [0.226,-0.0228,0.0297,-0.0113]   # lowest order first; reversed below for dln.poly
vmicro = 10**dln.poly(params['LOGG'],vmcoef[::-1])
params['VMICRO'] = vmicro
# for giants
# vmacro = 10^(0.741−0.0998*logg−0.225[M/H])
# maximum of 15 km/s
# 3) Fit stellar parameters (Teff, logg, feh, alpha, RV, Vsini)
#--------------------------------------------------------------
t3 = time.time()
if verbose>0:
logger.info(' ')
logger.info('Step 3: Fitting stellar parameters, RV and broadening')
params3 = params.copy()
fitparams3 = ['TEFF','LOGG','FE_H','ALPHA_H','RV']
if params3['VROT']>0 or fitvsini is True:
fitparams3.append('VROT')
# Fit Vmicro as well if it's a dwarf
if params3['LOGG']>3.8 or params3['TEFF']>8000 or fitvmicro is True:
fitparams3.append('VMICRO')
out3, model3 = fit_lsq(spec,params3,fitparams3,fparamlims,verbose=verbose,
alinefile=alinefile,mlinefile=mlinefile,logger=logger)
# typically 9 min.
# Should we fit C_H and N_H as well??
# Tweak the continuum
if verbose>0:
logger.info('Tweaking continuum using best-fit synthetic model')
tmodel = Spec1D(model3,wave=spec.wave.copy(),lsfpars=np.array(0.0))
spec = doppler.rv.tweakcontinuum(spec,tmodel)
# 4) Fit each element separately
#-------------------------------
t4 = time.time()
if verbose>0:
logger.info(' ')
logger.info('Step 4: Fitting each element separately')
params4 = params3.copy()
for k in range(len(fitparams3)):
params4[fitparams3[k]] = out3['pars'][0][k]
nelem = len(elem)
if nelem>0:
if verbose>0:
logger.info('Elements: '+', '.join(elem))
elemcat = np.zeros(nelem,dtype=np.dtype([('name','U10'),('par',np.float64),('parerr',np.float64)]))
elemcat['name'] = elem
for k in range(nelem):
t4b = time.time()
parselem = params4.copy()
if elem[k] in ['O','MG','SI','S','CA','TI']:
parselem[elem[k]+'_H'] = params4['ALPHA_H']
else:
parselem[elem[k]+'_H'] = params4['FE_H']
fitparselem = [elem[k]+'_H']
#out4, model4 = fit_lsq(spec,parselem,fitparselem,verbose=verbose,logger=logger)
out4, model4 = fit_elem(spec,parselem,fitparselem,verbose=verbose,
alinefile=alinefile,mlinefile=mlinefile,logger=logger)
elemcat['par'][k] = out4['pars'][0]
#elemcat['parerr'][k] = out4['parerr'][0]
if verbose>0:
logger.info('dt = %f sec.' % (time.time()-t4b))
logger.info(' ')
else:
if verbose>0:
logger.info('No elements to fit')
# about 50 min.
# 5) Fit all parameters simultaneously
#---------------------------------------
# if NO elements to fit, then nothing to do
if nelem>0:
t5 = time.time()
if verbose>0:
logger.info('Step 5: Fit all parameters simultaneously')
params5 = params4.copy()
for k in range(nelem):
params5[elem[k]+'_H'] = elemcat['par'][k]
if params5.get('ALPHA_H') is not None:
del params5['ALPHA_H']
fitparams5 = ['TEFF','LOGG','FE_H','RV']
if 'VROT' in fitparams3 or fitvsini is True:
fitparams5.append('VROT')
if 'VMICRO' in fitparams3 or fitvmicro is True:
fitparams5.append('VMICRO')
fitparams5 = fitparams5+list(np.char.array(elem)+'_H')
out5, model5 = fit_lsq(spec,params5,fitparams5,fparamlims,verbose=verbose,
alinefile=alinefile,mlinefile=mlinefile,logger=logger)
else:
out5 = out3
model5 = model3
fitparams5 = fitparams3
# Make final structure and save the figure
out = out5
dtyp = []
npar = len(fitparams5)
for f in fitparams5:
dtyp += [(f,float),(f+'_ERR',float)]
dtyp += [('pars',float,npar),('parerr',float,npar),('parcov',float,(npar,npar)),('chisq',float),('vhelio',float)]
dtype = np.dtype(dtyp)
out = np.zeros(1,dtype=dtype)
for k,f in enumerate(fitparams5):
out[f] = out5['pars'][0][k]
out[f+'_ERR'] = out5['parerr'][0][k]
out['pars'] = out5['pars'][0]
out['parerr'] = out5['parerr'][0]
out['parcov'] = out5['parcov'][0]
out['chisq'] = out5['chisq'][0]
out['vhelio'] = out5['RV']+spec.barycorr()
if verbose>0:
logger.info('Vhelio = %.3f' % out['vhelio'])
# Final model
model = Spec1D(model5,wave=spec.wave.copy(),lsfpars=np.array(0.0))
model.lsf = spec.lsf.copy()
# Make figure
if figfile is not None:
specfigure(figfile,spec,model,out,verbose=(verbose>=2))
if verbose>0:
logger.info('dt = %.2f sec.' % (time.time()-t0))
return out, model
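# Illustrative sketch (not part of the original module): the full multi-step fit,
# restricted to a few elements and saving a diagnostic figure.  The filenames and
# the element list are hypothetical choices, and a full run can take a long time.
def _example_fit_full():
    spec = doppler.read('spectrum.fits')      # placeholder filename
    out, model = fit(spec, elem=['MG','SI','CA'], figfile='specfit.png', verbose=1)
    return out, model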
|
[
"numpy.hstack",
"matplotlib.pyplot.ylabel",
"scipy.interpolate.interp1d",
"dlnpyutils.utils.basiclogger",
"numpy.array",
"numpy.argsort",
"dlnpyutils.utils.interp",
"numpy.isfinite",
"copy.deepcopy",
"synple.synple.read_model",
"doppler.rv.tweakcontinuum",
"numpy.arange",
"os.remove",
"os.path.exists",
"numpy.mean",
"numpy.atleast_2d",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"dlnpyutils.utils.minmax",
"dlnpyutils.utils.valrange",
"dlnpyutils.astro.vactoair",
"numpy.max",
"matplotlib.pyplot.close",
"doppler.utils.convolve_sparse",
"synple.synple.interp_spl",
"numpy.min",
"numpy.argmin",
"matplotlib.pyplot.ylim",
"numpy.dtype",
"numpy.round",
"doppler.fit",
"numpy.abs",
"dlnpyutils.utils.limit",
"matplotlib.pyplot.savefig",
"numpy.ceil",
"matplotlib.use",
"os.close",
"doppler.read",
"dlnpyutils.utils.poly_fit",
"dlnpyutils.utils.poly",
"dlnpyutils.utils.readlines",
"dlnpyutils.utils.slope",
"tempfile.mkdtemp",
"dlnpyutils.minpack.curve_fit",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.title",
"tempfile.mkstemp",
"warnings.filterwarnings",
"time.time",
"astropy.table.Table.read",
"dlnpyutils.utils.rebin",
"os.stat",
"numpy.nanmedian",
"logging.Formatter",
"numpy.diag",
"os.chdir",
"dlnpyutils.astro.airtovac",
"numpy.zeros",
"numpy.char.array",
"doppler.cannon.models.copy",
"numpy.sum",
"dlnpyutils.utils.closest",
"shutil.rmtree",
"os.path.abspath",
"pdb.set_trace",
"matplotlib.pyplot.subplots",
"doppler.utils.broaden"
]
|
import numpy as np
import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import matplotlib.pyplot as plt
import contractions # Expanding contractions
from nltk.stem.wordnet import WordNetLemmatizer
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
print(' ------------------------------------------')
print('| Classifying Gender Dysphoria Disclosures |')
print('| on Social Media with Machine Learning. |')
print(' ------------------------------------------')
print()
print('Team members: <NAME>')
print(' <NAME> ')
print(' <NAME>')
print()
print('Data Processing....')
print()
#num_of_lines = 2
dataset = pd.read_csv('df_truth.csv')
dataset.tail()
#print('Dataset size: ',dataset.shape)
# ------ ORIGINAL DATA --------
#print('Original Dataset: \n',dataset)
headers = list(dataset.columns.values)
#print(headers)
text = dataset.iloc[:,1] # text = dataset['text']
#print(text.shape)
#print(text)
# ---------------- EXPANDING CONTRACTIONS -------------------
n_text = []
expanded_words = []
for i in range(len(text)):
a = str(text[i])
# -------------- LOWERCASE ----------
a_lower = a.lower()
line = a_lower.split()
for h in line:
expanded_words.append(contractions.fix(h))
expanded_text = ' '.join(expanded_words)
n_text.append(expanded_text)
expanded_words.clear() # Clearing List
#print(n_text)
#print('Original text: ' + text)
#print('Expanded_text: ' + n_text)
mySeries = pd.Series(n_text)
#print(mySeries)
# ----------------------------------------------------------
new_text = []
w_stopwords_text = []
for k in range(len(mySeries)):
a = str(mySeries[k])
# ----------------- REMOVING NUMBERS --------
text_ = ''.join([i for i in a if not i.isdigit()])
# -------- REMOVING SPECIAL CHARACTERS AND PUNCTUATION --------
punc = '''!()-[]{};:'"\,“”<>’./?@#$%^&*ðÿ˜=∆+_~'''
for j in text_:
if j in punc:
text_ = text_.replace(j,'')
#print(text_)
new_text.append(text_)
#print(new_text)
# -------------------- REMOVING STOP WORDS -------------------
for j in range(len(new_text)):
text_tokens = word_tokenize(new_text[j])
tokens_without_sw = [word for word in text_tokens if not word in stopwords.words('english')]
filtered_sentence = (" ").join(tokens_without_sw)
w_stopwords_text.append(filtered_sentence)
#print(w_stopwords_text)
col_text = pd.DataFrame(w_stopwords_text)
final_text = col_text[0]
#print(final_text)
# -------------------------------- NORMALIZING WORDS VIA LEMMATIZATION ---------------------------------
f_sent = []
xxx = []
yyy = []
for count in range(len(w_stopwords_text)):
b = str(w_stopwords_text[count])
words_sent = b.split()
for j in words_sent:
lemmatizer = WordNetLemmatizer()
lem_sent = lemmatizer.lemmatize(j)
f_sent.append(lem_sent)
xxx = ' '.join(f_sent)
yyy.append(xxx)
f_sent.clear()
#print(yyy)
col_text = pd.DataFrame(yyy)
final_text = col_text[0]
# --------------- CLEANED DATA PLACED IN COLUMN #2 -----------
dataset.insert(2,'new_text',final_text)
#print('Clean Dataset: \n',dataset['new_text'].values)
print('1. Text Preprocessing Done!')
X = dataset['new_text'].values
y = dataset['dysphoria'].values
y_labels = np.unique(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=1)
#print(X_train.shape)
#print(X_test.shape)
vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(X_train)
X_test = vectorizer.transform(X_test)
# ---------------------------------------------------------------------------------
print('2. Classifiers')
print()
# ---------------------------------------------------------------------------------
print('2.1. Support Vector Machine (SVM - RBF)')
print()
svm = SVC(kernel = 'rbf', gamma = 0.1, C = 10.0, random_state = 1)
svm.fit(X_train,y_train)
y_pred = svm.predict(X_test)
svm_predictions = svm.predict(X_test)
print(' Misclassified samples (linear model): %d'%(y_test!=y_pred).sum())
print(' Accuracy: %.3f'%accuracy_score(y_test,y_pred))
print(classification_report(y_test, svm_predictions))
# ---------------------------------------------------------------------------------
print('2.2. Decision Tree')
print()
dt = DecisionTreeClassifier(criterion="entropy", random_state = 1)
dt.fit(X_train,y_train)
y_pred = dt.predict(X_test)
dt_predictions = dt.predict(X_test)
print(' Misclassified samples: %d'%(y_test!=y_pred).sum())
print(' Accuracy: %.2f'%accuracy_score(y_test,y_pred))
print(classification_report(y_test, dt_predictions))
print()
# ---------------------------------------------------------------------------------
print('2.3. Logistic Regression')
print()
log_reg = LogisticRegression(penalty='l2', C = 10, random_state = 1)
log_reg.fit(X_train, y_train)
y_pred = log_reg.predict(X_test)
log_reg_predictions = log_reg.predict(X_test)
print(' Misclassified samples: %d'%(y_test!=y_pred).sum())
print(' Accuracy: %.2f'%accuracy_score(y_test,y_pred))
print(classification_report(y_test, log_reg_predictions))
print()
# ---------------------------------------------------------------------------------
#print('2.4. Linear Regression')
#print()
#lr = LogisticRegression()
#lr.fit(X_train, y_train)
#y_pred = lr.predict(X_test)
#lr_predictions = lr.predict(X_test)
#print(' Misclassified samples: %d'%(y_test!=y_pred).sum())
#print(' Accuracy: %.2f'%accuracy_score(y_test,y_pred))
#print(classification_report(y_test, lr_predictions))
#print()
# ---------------------------------------------------------------------------------
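# ---------------------------------------------------------------------------------
# Illustrative only: a minimal sketch of scoring a new, unseen post with the
# fitted TF-IDF vectorizer and logistic regression model above. The sample text
# is a made-up placeholder, and the full preprocessing pipeline (contraction
# expansion, stop-word removal, lemmatization) is skipped here for brevity.
sample_post = ['this is a short example post used only to demonstrate inference']
sample_features = vectorizer.transform(sample_post)
print('Predicted label for the sample post:', log_reg.predict(sample_features)[0])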
|
[
"pandas.Series",
"numpy.unique",
"pandas.read_csv",
"nltk.corpus.stopwords.words",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.classification_report",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.linear_model.LogisticRegression",
"nltk.tokenize.word_tokenize",
"sklearn.feature_extraction.text.TfidfVectorizer",
"nltk.stem.wordnet.WordNetLemmatizer",
"pandas.DataFrame",
"contractions.fix",
"sklearn.metrics.accuracy_score",
"sklearn.svm.SVC"
] |
[((1164, 1191), 'pandas.read_csv', 'pd.read_csv', (['"""df_truth.csv"""'], {}), "('df_truth.csv')\n", (1175, 1191), True, 'import pandas as pd\n'), ((2005, 2022), 'pandas.Series', 'pd.Series', (['n_text'], {}), '(n_text)\n', (2014, 2022), True, 'import pandas as pd\n'), ((3065, 3095), 'pandas.DataFrame', 'pd.DataFrame', (['w_stopwords_text'], {}), '(w_stopwords_text)\n', (3077, 3095), True, 'import pandas as pd\n'), ((3617, 3634), 'pandas.DataFrame', 'pd.DataFrame', (['yyy'], {}), '(yyy)\n', (3629, 3634), True, 'import pandas as pd\n'), ((3933, 3945), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3942, 3945), True, 'import numpy as np\n'), ((3982, 4035), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(1)'}), '(X, y, test_size=0.3, random_state=1)\n', (3998, 4035), False, 'from sklearn.model_selection import train_test_split\n'), ((4094, 4111), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (4109, 4111), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((4460, 4512), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'gamma': '(0.1)', 'C': '(10.0)', 'random_state': '(1)'}), "(kernel='rbf', gamma=0.1, C=10.0, random_state=1)\n", (4463, 4512), False, 'from sklearn.svm import SVC\n'), ((4931, 4990), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': '"""entropy"""', 'random_state': '(1)'}), "(criterion='entropy', random_state=1)\n", (4953, 4990), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((5402, 5456), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l2"""', 'C': '(10)', 'random_state': '(1)'}), "(penalty='l2', C=10, random_state=1)\n", (5420, 5456), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2803, 2829), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['new_text[j]'], {}), '(new_text[j])\n', (2816, 2829), False, 'from nltk.tokenize import word_tokenize\n'), ((4757, 4803), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'svm_predictions'], {}), '(y_test, svm_predictions)\n', (4778, 4803), False, 'from sklearn.metrics import classification_report\n'), ((5210, 5255), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'dt_predictions'], {}), '(y_test, dt_predictions)\n', (5231, 5255), False, 'from sklearn.metrics import classification_report\n'), ((5699, 5749), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'log_reg_predictions'], {}), '(y_test, log_reg_predictions)\n', (5720, 5749), False, 'from sklearn.metrics import classification_report\n'), ((3430, 3449), 'nltk.stem.wordnet.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (3447, 3449), False, 'from nltk.stem.wordnet import WordNetLemmatizer\n'), ((4720, 4750), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (4734, 4750), False, 'from sklearn.metrics import accuracy_score\n'), ((5173, 5203), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (5187, 5203), False, 'from sklearn.metrics import accuracy_score\n'), ((5662, 5692), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (5676, 5692), False, 'from sklearn.metrics import accuracy_score\n'), ((1751, 1770), 'contractions.fix', 'contractions.fix', (['h'], {}), '(h)\n', (1767, 1770), False, 
'import contractions\n'), ((2899, 2925), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2914, 2925), False, 'from nltk.corpus import stopwords\n')]
|
import numpy as np
from scipy import stats
import pandas as pd
from sklearn.cross_decomposition import PLSRegression
def standardize_vector(v, center=True, scale=False):
    if center:
        v = v - np.mean(v)
    if scale:
        if np.std(v) == 0:
            return v
        return (v + 0.0) / np.std(v)
    # return the (possibly centered) vector when scaling is not requested
    return v
def standardize_vec(v, center='mean', scale='std'):
    """
Standardizes a vector by centering and scaling it
This function will ignore scaling if the scale value is zero and will
instead set the scale value to 1
"""
# choose the center value
if not center:
cent_val = 0.0
elif center == 'mean':
cent_val = np.mean(v)
elif center == 'median':
cent_val = np.median(v)
elif type(center) in [float, int]:
cent_val = center
else:
raise ValueError('improper center value')
# choose the scale value
if not scale:
        scale_val = 1.0
elif scale == 'max':
scale_val = max(v)
elif scale == 'std':
scale_val = np.std(v)
elif scale == 'mean':
scale_val = np.mean(v)
elif scale == 'median':
scale_val = np.median(v)
elif type(scale) in [float, int]:
scale_val = scale
else:
raise ValueError('improper scale value')
# don't scale if scale value is zero
if scale_val == 0:
scale_val = 1
return (v - cent_val + 0.0) / scale_val
def get_PCA(X, scale=False):
"""
Returns the PCA decomposition of data frame X.
Rows of X are observations and columns are features.
Centers columns then performs PCA.
Optionally scales columns by standard deviation
X = U D V^t
Output
------
U, D, V
"""
if type(X) == np.ndarray:
X = pd.DataFrame(X)
# center columns
X_stand = X.apply(lambda c: standardize_vector(c,
center=True, scale=scale))
# do SVD
return np.linalg.svd(X_stand, full_matrices=False)
def get_pls(X, Y, n_comp):
"""
returns the PLS scores
parameters
----------
X: pandas data frame
Y: list
"""
# center and scale both X and y data
x = np.array(X.apply(lambda c: standardize_vector(c, center=True,
scale=True)))
y = standardize_vector(Y, center=True, scale=True)
# compute PLS direcections
pls = PLSRegression(n_components=int(n_comp), scale=True)
pls.fit(x, y)
return np.array(pls.x_scores_), pls.x_loadings_
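if __name__ == "__main__":
    # Illustrative only: a minimal usage sketch on random data, not part of the
    # original module. get_PCA returns the SVD factors U, D, V of the centered
    # data, so the principal-component scores are U * D; get_pls returns the
    # PLS x-scores and x-loadings.
    rng = np.random.RandomState(0)
    X_demo = pd.DataFrame(rng.normal(size=(50, 4)))
    y_demo = rng.normal(size=50)
    U, D, V = get_PCA(X_demo)
    pc_scores = U * D  # each column of U scaled by its singular value
    pls_scores, pls_loadings = get_pls(X_demo, y_demo, n_comp=2)
    print(pc_scores.shape, pls_scores.shape, pls_loadings.shape)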
|
[
"numpy.mean",
"numpy.median",
"pandas.DataFrame",
"numpy.array",
"numpy.std",
"numpy.linalg.svd"
] |
[((1965, 2008), 'numpy.linalg.svd', 'np.linalg.svd', (['X_stand'], {'full_matrices': '(False)'}), '(X_stand, full_matrices=False)\n', (1978, 2008), True, 'import numpy as np\n'), ((1770, 1785), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (1782, 1785), True, 'import pandas as pd\n'), ((2507, 2530), 'numpy.array', 'np.array', (['pls.x_scores_'], {}), '(pls.x_scores_)\n', (2515, 2530), True, 'import numpy as np\n'), ((204, 214), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (211, 214), True, 'import numpy as np\n'), ((241, 250), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (247, 250), True, 'import numpy as np\n'), ((687, 697), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (694, 697), True, 'import numpy as np\n'), ((323, 332), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (329, 332), True, 'import numpy as np\n'), ((746, 758), 'numpy.median', 'np.median', (['v'], {}), '(v)\n', (755, 758), True, 'import numpy as np\n'), ((1049, 1058), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (1055, 1058), True, 'import numpy as np\n'), ((1105, 1115), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (1112, 1115), True, 'import numpy as np\n'), ((1164, 1176), 'numpy.median', 'np.median', (['v'], {}), '(v)\n', (1173, 1176), True, 'import numpy as np\n')]
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
utility tools.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import distutils.util
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import os
import time
OUTPUT = './output/'
img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
class bcolors:
RED = "\033[1;31m"
BLUE = "\033[1;34m"
CYAN = "\033[1;36m"
GREEN = "\033[1;32m"
RESET = "\033[0;0m"
BOLD = "\033[;1m"
REVERSE = "\033[;7m"
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def print_arguments(args):
"""Print argparse's arguments.
Usage:
.. code-block:: python
parser = argparse.ArgumentParser()
parser.add_argument("name", default="Jonh", type=str, help="User name.")
args = parser.parse_args()
print_arguments(args)
:param args: Input argparse.Namespace for printing.
:type args: argparse.Namespace
"""
print("----------- Configuration Arguments -----------")
for arg, value in sorted(vars(args).items()):
print("%s: %s" % (arg, value))
print("------------------------------------------------")
def add_arguments(argname, type, default, help, argparser, **kwargs):
"""Add argparse's argument.
Usage:
.. code-block:: python
parser = argparse.ArgumentParser()
add_argument("name", str, "Jonh", "User name.", parser)
args = parser.parse_args()
"""
type = distutils.util.strtobool if type == bool else type
argparser.add_argument(
"--" + argname,
default=default,
type=type,
help=help + ' Default: %(default)s.',
**kwargs)
def check_output_directory(type):
"""
create output directory
Args:
type: name of picture set for test
"""
if not os.path.exists(OUTPUT):
os.mkdir(OUTPUT, 0o755)
if not os.path.exists(OUTPUT + "/" + type):
os.mkdir(OUTPUT + "/" + type, 0o755)
def convert_net(img_example):
"""
    Convert a normalized image array back to the original uint8 HWC image.
Args:
img_example: array data of img
"""
#reshape img_example
output_img = np.reshape(img_example.astype('float32'), (3, 224, 224))
output_img *= img_std
output_img += img_mean
output_img *= 255
output_img = np.reshape(output_img.astype(np.uint8), (3, 224, 224))
#convert C,H,W to H,W,C
output_img = output_img.transpose((1, 2, 0))
return output_img
def save_image(output_img, path):
"""
    Save an image array (original or adversarial) as a PNG file.
    Args:
        output_img: array data of img
path: directory and filename
"""
im = Image.fromarray(output_img)
im.save(path, 'png')
def generation_image(id, org_img, org_label, adv_img, adv_label, attack_method='FGSM'):
"""
    Save the original image, the adversarial image, and their difference
    for the ImageNet data set.
    Args:
        org_img: array data of test img
        adv_img: array data of adv img
        org_label: the inference label of test image
        adv_label: the adversarial label of adv image
        attack_method: the adversarial example generation method
"""
DATA_TYPE = "imagenet"
check_output_directory(DATA_TYPE)
org_path= OUTPUT + DATA_TYPE + "/%d_original-%d-by-%s.png" \
% (id, org_label, attack_method)
adv_path= OUTPUT + DATA_TYPE + "/%d_adversary-%d-by-%s.png" \
% (id, adv_label, attack_method)
diff_path= OUTPUT + DATA_TYPE + "/%d_diff-x-by-%s.png" % (id, attack_method)
org_output = convert_net(org_img)
adv_output = convert_net(adv_img)
diff_output = abs(adv_output - org_output)
save_image(org_output, org_path)
save_image(adv_output, adv_path)
save_image(diff_output, diff_path)
print("--------------------------------------------------")
def show_images_diff(original_img, original_label, adversarial_img, adversarial_label):
"""
show original image, adversarial image and their difference
Args:
original_img: original image, numpy
        original_label: original label, int
adversarial_img: adversarial image
adversarial_label: adversarial label
Returns:
"""
plt.figure()
plt.subplot(131)
plt.title('Original')
plt.imshow(original_img)
plt.axis('off')
plt.subplot(132)
plt.title('Adversarial')
plt.imshow(adversarial_img)
plt.axis('off')
plt.subplot(133)
plt.title('Adversarial-Original')
difference = adversarial_img - original_img
l0 = np.where(difference != 0)[0].shape[0]
l2 = np.linalg.norm(difference)
print("l0={} l2={}".format(l0, l2))
#(-1,1) -> (0,1)
difference = difference / abs(difference).max() / 2.0 + 0.5
plt.imshow(difference, cmap=plt.cm.gray)
plt.axis('off')
plt.tight_layout()
ts = time.localtime(time.time())
ts = time.strftime("%Y-%m-%d %H:%M:%S", ts)
if not os.path.exists('output'):
os.makedirs('output')
plt.savefig("output/orig_adv_diff_{}_{}.png".format(adversarial_label, ts))
plt.show()
def show_images_diff_denoising(image_a, image_a_label, image_b, image_b_label, image_a_title='Input', image_b_title='output'):
"""
    Show two images (e.g., an input and its denoised output) and their difference.
    Args:
        image_a: first image, ndarray
        image_a_label: label of the first image, int
        image_b: second image, ndarray
        image_b_label: label of the second image
        image_a_title: the title of image a
        image_b_title: the title of image b
Returns:
"""
plt.figure()
plt.subplot(131)
plt.title(image_a_title)
plt.imshow(image_a)
plt.axis('off')
plt.subplot(132)
plt.title(image_b_title)
plt.imshow(image_b)
plt.axis('off')
plt.subplot(133)
plt.title(image_a_title+'-'+image_b_title)
difference = image_a - image_b
l0 = np.where(difference != 0)[0].shape[0]
l2 = np.linalg.norm(difference)
print("l0={} l2={}".format(l0, l2))
#(-1,1) -> (0,1)
difference = difference / abs(difference).max() / 2.0 + 0.5
plt.imshow(difference, cmap=plt.cm.gray)
plt.axis('off')
plt.tight_layout()
ts = time.localtime(time.time())
ts = time.strftime("%Y-%m-%d %H:%M:%S", ts)
    if not os.path.exists('output'):
os.makedirs('output')
plt.savefig("output/{}_{}_diff_{}_{}_{}.png".format(image_a_title, image_b_title, image_a_label, image_b_label, ts))
plt.show()
def show_input_adv_and_denoise(image_a, image_b, image_c, image_d, \
image_a_label, image_b_label, image_c_label, image_d_label, \
image_a_title='Input', image_b_title='Adversary', \
image_c_title='Adv-Denoise', image_d_title='In-Denoise',method='Default'
):
"""
show original image, adversarial image, and their denoising results, respectively
Args:
image_a: original image, ndarray
image_a_label: original label, str
image_a_title: the title of the image a
image_b: adversarial image, ndarray
image_b_label: adversarial label
image_b_title: the title of the image b
image_c: denoising result of the adversarial image, ndarray
image_c_label: the predicted class label after denoising of the adv-image
image_c_title: the title of the image c
image_d: denoising result of the original input image, ndarray
image_d_label: the predicted class label after denoising of the input image
image_d_title: the title of the image d
Returns:
"""
    # Shorten each label for display: keep the text before the first comma,
    # and if that is still longer than 10 characters keep only its last word.
    def shorten_label(label):
        short = label.split(',')[0]
        if len(short) > 10:
            short = short.split(' ')[-1]
        return short
    a_label = shorten_label(image_a_label)
    b_label = shorten_label(image_b_label)
    c_label = shorten_label(image_c_label)
    d_label = shorten_label(image_d_label)
# define the plot position
w = image_c.shape[0] if image_c.shape[0] > image_d.shape[0] else image_d.shape[0]
h = image_c.shape[1] if image_c.shape[1] > image_d.shape[1] else image_d.shape[1]
x = 0 # initial horizontal position of the first line
y = h + 10 # initial vertical position of the first line
xos = 15 # offset to x of the second line
yos = 10 # offset to y of the second line
fig = plt.figure()
title = 'Denoise method: ' + method
fig.suptitle(title, fontsize=12, fontweight='bold', y=0.80)
plt.subplot(141)
plt.title(image_a_title)
plt.imshow(image_a)
plt.text(x, y, 'Top1 label:')
plt.text(x+xos, y+yos, a_label)
plt.axis('off')
plt.subplot(142)
plt.title(image_b_title)
plt.imshow(image_b)
plt.text(x, y, 'Top1 label:')
plt.text(x+xos, y+yos, b_label)
plt.axis('off')
plt.subplot(143)
plt.title(image_c_title)
plt.imshow(image_c)
plt.text(x, y, 'Top1 label:')
plt.text(x+xos, y+yos, c_label)
plt.axis('off')
plt.subplot(144)
plt.title(image_d_title)
plt.imshow(image_d)
plt.text(x, y, 'Top1 label:')
plt.text(x+xos, y+yos, d_label)
plt.axis('off')
plt.tight_layout()
    if not os.path.exists('output'):
os.makedirs('output')
plt.savefig("output/{}_Denoising_Comparison.png".format(method))
plt.show()
def get_best_weigthts_from_folder(folder, pdparams_file_starter):
pdparams_files = [filename for filename in os.listdir(folder) if filename.lower().endswith('.pdparams')
and filename.lower().startswith(pdparams_file_starter.lower())]
if not pdparams_files:
return None
else:
acc_list = [filename.split('.')[1] for filename in pdparams_files]
max_index = acc_list.index(max(acc_list))
best_weight_path = os.path.join(folder, pdparams_files[max_index])
print('Loaded: ', best_weight_path)
return best_weight_path
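if __name__ == '__main__':
    # Illustrative only: a small self-contained demo, not part of the original
    # module. It builds two random HxWx3 images standing in for an original and
    # an adversarial example, then visualizes them with show_images_diff.
    demo_original = np.random.rand(224, 224, 3)
    demo_noise = np.random.normal(scale=0.05, size=demo_original.shape)
    demo_adversarial = np.clip(demo_original + demo_noise, 0.0, 1.0)
    show_images_diff(demo_original, 0, demo_adversarial, 1)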
|
[
"numpy.array",
"numpy.linalg.norm",
"matplotlib.pyplot.imshow",
"os.path.exists",
"os.listdir",
"numpy.where",
"os.mkdir",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.title",
"time.time",
"matplotlib.pyplot.show",
"matplotlib.pyplot.text",
"PIL.Image.fromarray",
"os.makedirs",
"time.strftime",
"os.path.join",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot"
] |
[((3471, 3498), 'PIL.Image.fromarray', 'Image.fromarray', (['output_img'], {}), '(output_img)\n', (3486, 3498), False, 'from PIL import Image\n'), ((5021, 5033), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5031, 5033), True, 'import matplotlib.pyplot as plt\n'), ((5039, 5055), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (5050, 5055), True, 'import matplotlib.pyplot as plt\n'), ((5060, 5081), 'matplotlib.pyplot.title', 'plt.title', (['"""Original"""'], {}), "('Original')\n", (5069, 5081), True, 'import matplotlib.pyplot as plt\n'), ((5086, 5110), 'matplotlib.pyplot.imshow', 'plt.imshow', (['original_img'], {}), '(original_img)\n', (5096, 5110), True, 'import matplotlib.pyplot as plt\n'), ((5115, 5130), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5123, 5130), True, 'import matplotlib.pyplot as plt\n'), ((5136, 5152), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (5147, 5152), True, 'import matplotlib.pyplot as plt\n'), ((5157, 5181), 'matplotlib.pyplot.title', 'plt.title', (['"""Adversarial"""'], {}), "('Adversarial')\n", (5166, 5181), True, 'import matplotlib.pyplot as plt\n'), ((5186, 5213), 'matplotlib.pyplot.imshow', 'plt.imshow', (['adversarial_img'], {}), '(adversarial_img)\n', (5196, 5213), True, 'import matplotlib.pyplot as plt\n'), ((5218, 5233), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5226, 5233), True, 'import matplotlib.pyplot as plt\n'), ((5239, 5255), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (5250, 5255), True, 'import matplotlib.pyplot as plt\n'), ((5260, 5293), 'matplotlib.pyplot.title', 'plt.title', (['"""Adversarial-Original"""'], {}), "('Adversarial-Original')\n", (5269, 5293), True, 'import matplotlib.pyplot as plt\n'), ((5399, 5425), 'numpy.linalg.norm', 'np.linalg.norm', (['difference'], {}), '(difference)\n', (5413, 5425), True, 'import numpy as np\n'), ((5557, 5597), 'matplotlib.pyplot.imshow', 'plt.imshow', (['difference'], {'cmap': 'plt.cm.gray'}), '(difference, cmap=plt.cm.gray)\n', (5567, 5597), True, 'import matplotlib.pyplot as plt\n'), ((5602, 5617), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5610, 5617), True, 'import matplotlib.pyplot as plt\n'), ((5622, 5640), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5638, 5640), True, 'import matplotlib.pyplot as plt\n'), ((5687, 5725), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""', 'ts'], {}), "('%Y-%m-%d %H:%M:%S', ts)\n", (5700, 5725), False, 'import time\n'), ((5878, 5888), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5886, 5888), True, 'import matplotlib.pyplot as plt\n'), ((6391, 6403), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6401, 6403), True, 'import matplotlib.pyplot as plt\n'), ((6409, 6425), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (6420, 6425), True, 'import matplotlib.pyplot as plt\n'), ((6430, 6454), 'matplotlib.pyplot.title', 'plt.title', (['image_a_title'], {}), '(image_a_title)\n', (6439, 6454), True, 'import matplotlib.pyplot as plt\n'), ((6459, 6478), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_a'], {}), '(image_a)\n', (6469, 6478), True, 'import matplotlib.pyplot as plt\n'), ((6483, 6498), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6491, 6498), True, 'import matplotlib.pyplot as plt\n'), ((6504, 6520), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), 
'(132)\n', (6515, 6520), True, 'import matplotlib.pyplot as plt\n'), ((6525, 6549), 'matplotlib.pyplot.title', 'plt.title', (['image_b_title'], {}), '(image_b_title)\n', (6534, 6549), True, 'import matplotlib.pyplot as plt\n'), ((6554, 6573), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_b'], {}), '(image_b)\n', (6564, 6573), True, 'import matplotlib.pyplot as plt\n'), ((6578, 6593), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6586, 6593), True, 'import matplotlib.pyplot as plt\n'), ((6599, 6615), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (6610, 6615), True, 'import matplotlib.pyplot as plt\n'), ((6620, 6666), 'matplotlib.pyplot.title', 'plt.title', (["(image_a_title + '-' + image_b_title)"], {}), "(image_a_title + '-' + image_b_title)\n", (6629, 6666), True, 'import matplotlib.pyplot as plt\n'), ((6755, 6781), 'numpy.linalg.norm', 'np.linalg.norm', (['difference'], {}), '(difference)\n', (6769, 6781), True, 'import numpy as np\n'), ((6913, 6953), 'matplotlib.pyplot.imshow', 'plt.imshow', (['difference'], {'cmap': 'plt.cm.gray'}), '(difference, cmap=plt.cm.gray)\n', (6923, 6953), True, 'import matplotlib.pyplot as plt\n'), ((6958, 6973), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6966, 6973), True, 'import matplotlib.pyplot as plt\n'), ((6978, 6996), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6994, 6996), True, 'import matplotlib.pyplot as plt\n'), ((7043, 7081), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""', 'ts'], {}), "('%Y-%m-%d %H:%M:%S', ts)\n", (7056, 7081), False, 'import time\n'), ((7294, 7304), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7302, 7304), True, 'import matplotlib.pyplot as plt\n'), ((10164, 10176), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10174, 10176), True, 'import matplotlib.pyplot as plt\n'), ((10287, 10303), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(141)'], {}), '(141)\n', (10298, 10303), True, 'import matplotlib.pyplot as plt\n'), ((10308, 10332), 'matplotlib.pyplot.title', 'plt.title', (['image_a_title'], {}), '(image_a_title)\n', (10317, 10332), True, 'import matplotlib.pyplot as plt\n'), ((10337, 10356), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_a'], {}), '(image_a)\n', (10347, 10356), True, 'import matplotlib.pyplot as plt\n'), ((10361, 10390), 'matplotlib.pyplot.text', 'plt.text', (['x', 'y', '"""Top1 label:"""'], {}), "(x, y, 'Top1 label:')\n", (10369, 10390), True, 'import matplotlib.pyplot as plt\n'), ((10395, 10430), 'matplotlib.pyplot.text', 'plt.text', (['(x + xos)', '(y + yos)', 'a_label'], {}), '(x + xos, y + yos, a_label)\n', (10403, 10430), True, 'import matplotlib.pyplot as plt\n'), ((10431, 10446), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10439, 10446), True, 'import matplotlib.pyplot as plt\n'), ((10452, 10468), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(142)'], {}), '(142)\n', (10463, 10468), True, 'import matplotlib.pyplot as plt\n'), ((10473, 10497), 'matplotlib.pyplot.title', 'plt.title', (['image_b_title'], {}), '(image_b_title)\n', (10482, 10497), True, 'import matplotlib.pyplot as plt\n'), ((10502, 10521), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_b'], {}), '(image_b)\n', (10512, 10521), True, 'import matplotlib.pyplot as plt\n'), ((10526, 10555), 'matplotlib.pyplot.text', 'plt.text', (['x', 'y', '"""Top1 label:"""'], {}), "(x, y, 'Top1 label:')\n", (10534, 10555), True, 'import matplotlib.pyplot as plt\n'), 
((10560, 10595), 'matplotlib.pyplot.text', 'plt.text', (['(x + xos)', '(y + yos)', 'b_label'], {}), '(x + xos, y + yos, b_label)\n', (10568, 10595), True, 'import matplotlib.pyplot as plt\n'), ((10596, 10611), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10604, 10611), True, 'import matplotlib.pyplot as plt\n'), ((10617, 10633), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(143)'], {}), '(143)\n', (10628, 10633), True, 'import matplotlib.pyplot as plt\n'), ((10638, 10662), 'matplotlib.pyplot.title', 'plt.title', (['image_c_title'], {}), '(image_c_title)\n', (10647, 10662), True, 'import matplotlib.pyplot as plt\n'), ((10667, 10686), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_c'], {}), '(image_c)\n', (10677, 10686), True, 'import matplotlib.pyplot as plt\n'), ((10691, 10720), 'matplotlib.pyplot.text', 'plt.text', (['x', 'y', '"""Top1 label:"""'], {}), "(x, y, 'Top1 label:')\n", (10699, 10720), True, 'import matplotlib.pyplot as plt\n'), ((10725, 10760), 'matplotlib.pyplot.text', 'plt.text', (['(x + xos)', '(y + yos)', 'c_label'], {}), '(x + xos, y + yos, c_label)\n', (10733, 10760), True, 'import matplotlib.pyplot as plt\n'), ((10761, 10776), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10769, 10776), True, 'import matplotlib.pyplot as plt\n'), ((10782, 10798), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(144)'], {}), '(144)\n', (10793, 10798), True, 'import matplotlib.pyplot as plt\n'), ((10803, 10827), 'matplotlib.pyplot.title', 'plt.title', (['image_d_title'], {}), '(image_d_title)\n', (10812, 10827), True, 'import matplotlib.pyplot as plt\n'), ((10832, 10851), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_d'], {}), '(image_d)\n', (10842, 10851), True, 'import matplotlib.pyplot as plt\n'), ((10856, 10885), 'matplotlib.pyplot.text', 'plt.text', (['x', 'y', '"""Top1 label:"""'], {}), "(x, y, 'Top1 label:')\n", (10864, 10885), True, 'import matplotlib.pyplot as plt\n'), ((10890, 10925), 'matplotlib.pyplot.text', 'plt.text', (['(x + xos)', '(y + yos)', 'd_label'], {}), '(x + xos, y + yos, d_label)\n', (10898, 10925), True, 'import matplotlib.pyplot as plt\n'), ((10926, 10941), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10934, 10941), True, 'import matplotlib.pyplot as plt\n'), ((10947, 10965), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10963, 10965), True, 'import matplotlib.pyplot as plt\n'), ((11126, 11136), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11134, 11136), True, 'import matplotlib.pyplot as plt\n'), ((892, 923), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (900, 923), True, 'import numpy as np\n'), ((953, 984), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (961, 984), True, 'import numpy as np\n'), ((2636, 2658), 'os.path.exists', 'os.path.exists', (['OUTPUT'], {}), '(OUTPUT)\n', (2650, 2658), False, 'import os\n'), ((2668, 2689), 'os.mkdir', 'os.mkdir', (['OUTPUT', '(493)'], {}), '(OUTPUT, 493)\n', (2676, 2689), False, 'import os\n'), ((2703, 2738), 'os.path.exists', 'os.path.exists', (["(OUTPUT + '/' + type)"], {}), "(OUTPUT + '/' + type)\n", (2717, 2738), False, 'import os\n'), ((2748, 2782), 'os.mkdir', 'os.mkdir', (["(OUTPUT + '/' + type)", '(493)'], {}), "(OUTPUT + '/' + type, 493)\n", (2756, 2782), False, 'import os\n'), ((5665, 5676), 'time.time', 'time.time', ([], {}), '()\n', (5674, 5676), False, 'import time\n'), ((5738, 5762), 
'os.path.exists', 'os.path.exists', (['"""output"""'], {}), "('output')\n", (5752, 5762), False, 'import os\n'), ((5772, 5793), 'os.makedirs', 'os.makedirs', (['"""output"""'], {}), "('output')\n", (5783, 5793), False, 'import os\n'), ((7021, 7032), 'time.time', 'time.time', ([], {}), '()\n', (7030, 7032), False, 'import time\n'), ((7094, 7137), 'os.path.exists', 'os.path.exists', (['"""examples/image_cls/output"""'], {}), "('examples/image_cls/output')\n", (7108, 7137), False, 'import os\n'), ((7147, 7168), 'os.makedirs', 'os.makedirs', (['"""output"""'], {}), "('output')\n", (7158, 7168), False, 'import os\n'), ((10978, 11021), 'os.path.exists', 'os.path.exists', (['"""examples/image_cls/output"""'], {}), "('examples/image_cls/output')\n", (10992, 11021), False, 'import os\n'), ((11031, 11052), 'os.makedirs', 'os.makedirs', (['"""output"""'], {}), "('output')\n", (11042, 11052), False, 'import os\n'), ((11608, 11655), 'os.path.join', 'os.path.join', (['folder', 'pdparams_files[max_index]'], {}), '(folder, pdparams_files[max_index])\n', (11620, 11655), False, 'import os\n'), ((11252, 11270), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (11262, 11270), False, 'import os\n'), ((5352, 5377), 'numpy.where', 'np.where', (['(difference != 0)'], {}), '(difference != 0)\n', (5360, 5377), True, 'import numpy as np\n'), ((6708, 6733), 'numpy.where', 'np.where', (['(difference != 0)'], {}), '(difference != 0)\n', (6716, 6733), True, 'import numpy as np\n')]
|
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import cmasher as cmr
import astropy.units as u
import astropy.coordinates as coord
from astropy.io import ascii
from astropy.io import fits
from astropy.wcs import WCS
from functions import *
plt.rcParams['mathtext.fontset'] = 'cm'
plt.rcParams['font.family'] = 'cmu serif'
SMALL_SIZE = 8
MEDIUM_SIZE = 8
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
fig_directory='/Users/emma/OneDrive/PhD/thesis/Figures/'
cmap_blue = cmr.get_sub_cmap('twilight_shifted', 0, 0.5)
cmap_red = cmr.get_sub_cmap('twilight', 0.5, 1)
cmap_redblue=cmr.get_sub_cmap('twilight_shifted', 0.1, 0.9)
cmap=plt.cm.twilight_shifted
def main():
faradaysky,header=fitsopen('/Volumes/TARDIS/Work/askap/Faraday_cutout_pilot.fits')
faradayuncertainty,header2=fitsopen('/Volumes/TARDIS/Work/askap/Faraday_error_pilot.fits')
print(faradaysky.shape)
wcs=WCS(header)
sources=np.loadtxt('source_coords.txt',dtype='str')
plt.figure()
ax=plt.subplot(projection=wcs)
c=ax.imshow(faradaysky, origin='lower', cmap=cmap_redblue,vmin=-50,vmax=50)
cbar=plt.colorbar(c,fraction=0.046, pad=0.04)
for i in range(0,sources.shape[0]):
ra_ha=coord.Angle(sources[i,0],unit=u.hourangle)
ra = coord.Angle(ra_ha,unit=u.degree)
dec = coord.Angle(sources[i,1],unit=u.degree)
coords=coord.SkyCoord(ra=ra,dec=dec)
pixcoords=wcs.world_to_pixel(coords)
x=int(round(float(pixcoords[0])))
y=int(round(float(pixcoords[1])))
plt.scatter(pixcoords[0],pixcoords[1],marker='.',color='k')
RM=faradaysky[y,x]
RMerr=faradayuncertainty[y,x]
print(sources[i,0],sources[i,1])
print('{} +/- {}'.format(RM,RMerr))
plt.show()
if __name__ == "__main__":
main()
|
[
"astropy.coordinates.Angle",
"matplotlib.pyplot.colorbar",
"astropy.coordinates.SkyCoord",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"numpy.loadtxt",
"cmasher.get_sub_cmap",
"matplotlib.pyplot.subplot",
"astropy.wcs.WCS",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show"
] |
[((399, 430), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'SMALL_SIZE'}), "('font', size=SMALL_SIZE)\n", (405, 430), True, 'import matplotlib.pyplot as plt\n'), ((470, 507), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'MEDIUM_SIZE'}), "('axes', titlesize=MEDIUM_SIZE)\n", (476, 507), True, 'import matplotlib.pyplot as plt\n'), ((541, 578), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'MEDIUM_SIZE'}), "('axes', labelsize=MEDIUM_SIZE)\n", (547, 578), True, 'import matplotlib.pyplot as plt\n'), ((615, 652), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'SMALL_SIZE'}), "('xtick', labelsize=SMALL_SIZE)\n", (621, 652), True, 'import matplotlib.pyplot as plt\n'), ((686, 723), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'SMALL_SIZE'}), "('ytick', labelsize=SMALL_SIZE)\n", (692, 723), True, 'import matplotlib.pyplot as plt\n'), ((757, 794), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': 'SMALL_SIZE'}), "('legend', fontsize=SMALL_SIZE)\n", (763, 794), True, 'import matplotlib.pyplot as plt\n'), ((886, 930), 'cmasher.get_sub_cmap', 'cmr.get_sub_cmap', (['"""twilight_shifted"""', '(0)', '(0.5)'], {}), "('twilight_shifted', 0, 0.5)\n", (902, 930), True, 'import cmasher as cmr\n'), ((942, 978), 'cmasher.get_sub_cmap', 'cmr.get_sub_cmap', (['"""twilight"""', '(0.5)', '(1)'], {}), "('twilight', 0.5, 1)\n", (958, 978), True, 'import cmasher as cmr\n'), ((992, 1038), 'cmasher.get_sub_cmap', 'cmr.get_sub_cmap', (['"""twilight_shifted"""', '(0.1)', '(0.9)'], {}), "('twilight_shifted', 0.1, 0.9)\n", (1008, 1038), True, 'import cmasher as cmr\n'), ((1290, 1301), 'astropy.wcs.WCS', 'WCS', (['header'], {}), '(header)\n', (1293, 1301), False, 'from astropy.wcs import WCS\n'), ((1312, 1356), 'numpy.loadtxt', 'np.loadtxt', (['"""source_coords.txt"""'], {'dtype': '"""str"""'}), "('source_coords.txt', dtype='str')\n", (1322, 1356), True, 'import numpy as np\n'), ((1358, 1370), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1368, 1370), True, 'import matplotlib.pyplot as plt\n'), ((1375, 1402), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'projection': 'wcs'}), '(projection=wcs)\n', (1386, 1402), True, 'import matplotlib.pyplot as plt\n'), ((1486, 1527), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['c'], {'fraction': '(0.046)', 'pad': '(0.04)'}), '(c, fraction=0.046, pad=0.04)\n', (1498, 1527), True, 'import matplotlib.pyplot as plt\n'), ((2047, 2057), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2055, 2057), True, 'import matplotlib.pyplot as plt\n'), ((1573, 1617), 'astropy.coordinates.Angle', 'coord.Angle', (['sources[i, 0]'], {'unit': 'u.hourangle'}), '(sources[i, 0], unit=u.hourangle)\n', (1584, 1617), True, 'import astropy.coordinates as coord\n'), ((1623, 1656), 'astropy.coordinates.Angle', 'coord.Angle', (['ra_ha'], {'unit': 'u.degree'}), '(ra_ha, unit=u.degree)\n', (1634, 1656), True, 'import astropy.coordinates as coord\n'), ((1664, 1705), 'astropy.coordinates.Angle', 'coord.Angle', (['sources[i, 1]'], {'unit': 'u.degree'}), '(sources[i, 1], unit=u.degree)\n', (1675, 1705), True, 'import astropy.coordinates as coord\n'), ((1713, 1743), 'astropy.coordinates.SkyCoord', 'coord.SkyCoord', ([], {'ra': 'ra', 'dec': 'dec'}), '(ra=ra, dec=dec)\n', (1727, 1743), True, 'import astropy.coordinates as coord\n'), ((1857, 1919), 'matplotlib.pyplot.scatter', 'plt.scatter', (['pixcoords[0]', 'pixcoords[1]'], {'marker': '"""."""', 'color': '"""k"""'}), "(pixcoords[0], 
pixcoords[1], marker='.', color='k')\n", (1868, 1919), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import pymc as pm
import networkx as nx
from matplotlib import pyplot as plt
alpha = 0.5
beta = 0.1
L= 9.0
G0 = nx.Graph()
for i in range(1, 10):
for j in range(i + 1, 11):
G0.add_edge(i, j)
#G0.add_path(range(1, 11))
#G0.add_path(range(1, 11))
#G0.remove_edge(2, 3)
#G0.remove_edge(3, 4)
#G0.add_edge(2, 4)
#G0.add_edge(3, 7)
#G0.add_edge(8, 10)
# nx.draw(G0, with_labels=True, font_weight='bold')
# plt.show()
@pm.stochastic(dtype=nx.Graph)
def cwg(value = G0, alpha = alpha, beta = beta, L = L):
tmp = 0
for i in range(1, len(value)):
for j in range(i + 1, len(value)+1):
if value.has_edge(i, j):
tmp += np.log(alpha) - ((j - i) / (beta * L))
else:
tmp += np.log(1 - alpha * np.exp((i - j) / (beta * L)))
return tmp
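# Illustrative helper, not used by the sampler: the log-likelihood above is the
# Waxman model, in which nodes i < j are linked independently with probability
# p(i, j) = alpha * exp(-(j - i) / (beta * L)).
def waxman_edge_prob(i, j, alpha=alpha, beta=beta, L=L):
    """Probability that the edge (i, j) is present under the Waxman model."""
    return alpha * np.exp(-abs(j - i) / (beta * L))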
class CWGMetropolis(pm.Metropolis):
    """ A PyMC Step Method that walks on connected Waxman graphs by
    choosing two distinct nodes at random and considering the
    possible link between them. If the link is already in the
    graph, it considers it for deletion, and if the link is not in
    the graph, it considers it for inclusion, keeping it with the
    appropriate Metropolis probability (no Hastings factor is needed
    because the proposal is symmetric).
    """
def __init__(self, stochastic):
# Initialize superclass
pm.Metropolis.__init__(self, stochastic, scale=1., verbose=0, tally=False)
def propose(self):
""" Add an edge or remove an edge"""
G = self.stochastic.value
G.u_new = np.random.choice(G.nodes()); G.v_new = np.random.choice(G.nodes())
while G.u_new == G.v_new:
G.v_new = np.random.choice(G.nodes())
if G.has_edge(G.u_new, G.v_new):
G.remove_edge(G.u_new, G.v_new)
if not nx.is_connected(G):
G.add_edge(G.u_new, G.v_new)
else:
G.add_edge(G.u_new, G.v_new)
self.stochastic.value = G
def reject(self):
""" Restore the graph"""
G = self.stochastic.value
if G.has_edge(G.u_new, G.v_new):
G.remove_edge(G.u_new, G.v_new)
else:
G.add_edge(G.u_new, G.v_new)
self.rejected += 1
self.stochastic.value = G
@pm.deterministic
def average_degree(G = cwg):
# return np.sum([t[1] for t in list(G.degree())]) / len(G)
return np.sum(list(G.degree().values())) / len(G)
mcmc = pm.MCMC([cwg, average_degree])
mcmc.use_step_method(CWGMetropolis, cwg)
mcmc.sample(100000)
avgd_samples = mcmc.trace("average_degree")[:]
plt.hist(avgd_samples[90000:])
plt.show()
nx.draw(cwg.value, with_labels=True, font_weight='bold')
plt.show()
mcmc.sample(100)
nx.draw(cwg.value, with_labels=True, font_weight='bold')
plt.show()
mcmc.sample(100)
nx.draw(cwg.value, with_labels=True, font_weight='bold')
plt.show()
|
[
"matplotlib.pyplot.hist",
"networkx.is_connected",
"numpy.log",
"networkx.Graph",
"numpy.exp",
"pymc.Metropolis.__init__",
"pymc.MCMC",
"pymc.stochastic",
"networkx.draw",
"matplotlib.pyplot.show"
] |
[((142, 152), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (150, 152), True, 'import networkx as nx\n'), ((478, 507), 'pymc.stochastic', 'pm.stochastic', ([], {'dtype': 'nx.Graph'}), '(dtype=nx.Graph)\n', (491, 507), True, 'import pymc as pm\n'), ((2567, 2597), 'pymc.MCMC', 'pm.MCMC', (['[cwg, average_degree]'], {}), '([cwg, average_degree])\n', (2574, 2597), True, 'import pymc as pm\n'), ((2708, 2738), 'matplotlib.pyplot.hist', 'plt.hist', (['avgd_samples[90000:]'], {}), '(avgd_samples[90000:])\n', (2716, 2738), True, 'from matplotlib import pyplot as plt\n'), ((2739, 2749), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2747, 2749), True, 'from matplotlib import pyplot as plt\n'), ((2751, 2807), 'networkx.draw', 'nx.draw', (['cwg.value'], {'with_labels': '(True)', 'font_weight': '"""bold"""'}), "(cwg.value, with_labels=True, font_weight='bold')\n", (2758, 2807), True, 'import networkx as nx\n'), ((2809, 2819), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2817, 2819), True, 'from matplotlib import pyplot as plt\n'), ((2840, 2896), 'networkx.draw', 'nx.draw', (['cwg.value'], {'with_labels': '(True)', 'font_weight': '"""bold"""'}), "(cwg.value, with_labels=True, font_weight='bold')\n", (2847, 2896), True, 'import networkx as nx\n'), ((2898, 2908), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2906, 2908), True, 'from matplotlib import pyplot as plt\n'), ((2929, 2985), 'networkx.draw', 'nx.draw', (['cwg.value'], {'with_labels': '(True)', 'font_weight': '"""bold"""'}), "(cwg.value, with_labels=True, font_weight='bold')\n", (2936, 2985), True, 'import networkx as nx\n'), ((2987, 2997), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2995, 2997), True, 'from matplotlib import pyplot as plt\n'), ((1470, 1545), 'pymc.Metropolis.__init__', 'pm.Metropolis.__init__', (['self', 'stochastic'], {'scale': '(1.0)', 'verbose': '(0)', 'tally': '(False)'}), '(self, stochastic, scale=1.0, verbose=0, tally=False)\n', (1492, 1545), True, 'import pymc as pm\n'), ((1935, 1953), 'networkx.is_connected', 'nx.is_connected', (['G'], {}), '(G)\n', (1950, 1953), True, 'import networkx as nx\n'), ((722, 735), 'numpy.log', 'np.log', (['alpha'], {}), '(alpha)\n', (728, 735), True, 'import numpy as np\n'), ((823, 851), 'numpy.exp', 'np.exp', (['((i - j) / (beta * L))'], {}), '((i - j) / (beta * L))\n', (829, 851), True, 'import numpy as np\n')]
|
import argparse
import json
import numpy as np
import typing
from blockfs.directory import Directory
import logging
from precomputed_tif.blockfs_stack import BlockfsStack
from precomputed_tif.ngff_stack import NGFFStack
import os
import sys
from spimstitch.ngff import NGFFDirectory
from ..stitch import get_output_size, StitchSrcVolume, run
def parse_args(args=sys.argv[1:]):
parser = argparse.ArgumentParser()
parser.add_argument(
"--input",
help="The root directory of the oblique volume tree. The program expects "
"blockfs Neuroglancer volumes in directories whose name is in the "
"format, <x>_<y> where <x> and <y> are the X and Y coordinates of "
"the top left corner of the volume.",
required=True)
parser.add_argument(
"--output",
help="The directory for the precomputed volume output"
)
parser.add_argument(
"--levels",
help="The number of mipmap levels in the precomputed volume",
default=5,
type=int)
parser.add_argument(
"--log-level",
help="The log level for logging",
default="WARNING")
parser.add_argument(
"--n-writers",
help="The number of writer processes for writing blockfs files",
default=min(12, os.cpu_count()),
type=int)
parser.add_argument(
"--n-workers",
help="The number of worker processes for the processing pipeline",
default=min(12, os.cpu_count()),
type=int)
parser.add_argument(
"--silent",
help="Turn off progress bars",
action="store_true")
parser.add_argument(
"--x-step-size",
help="X step size in microns",
default=1.28,
type=float)
parser.add_argument(
"--y-voxel-size",
help="Size of a voxel in the Y direction in microns",
default=1.8,
type=float)
parser.add_argument(
"--z-offset",
help="# of voxels of offset between the start of the stack above "
"in Z and the stack underneath it",
default=2048,
type=int
)
parser.add_argument(
"--output-size",
help="Size of the output volume (x,y,z). Defaults to the extent of all "
"prestitched volumes.")
parser.add_argument(
"--output-offset",
help="Offset of the output volume. Only use with --output-size. ")
parser.add_argument(
"--alignment",
        help="Alignment file from oblique-align. Default is to use the "
             "static alignment"
)
parser.add_argument(
"--y-illum-corr",
help="Fractional brightness of y[2047] with respect to y[0] for "
"each subvolume. Default is properly corrected",
type=float
)
parser.add_argument(
"--compute-y-illum-corr",
help="If present, compute fractional brightness at overlaps "
"between volumes",
action="store_true"
)
parser.add_argument(
"--n-y-illum-patches",
help="Number of patches to take to compute the y illumination "
"correction",
type=int,
default=1000
)
parser.add_argument(
"--min-y-illum-mean",
help="For an illum patch, the minimum allowed value of the mean "
"intensity of the patch",
type=int,
default=100
)
parser.add_argument(
"--min-y-illum-corr-coef",
help="The two overlapping volumes in an illumination patch must "
"have at least this correlation coefficient "
"(0 <= min-y-illum-corr-coef < 1) to be included",
type=float,
default=.80
)
parser.add_argument(
"--ngff",
help="Output an NGFF volume instead of blockfs",
action="store_true"
)
return parser.parse_args(args)
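# Illustrative only (not part of the original module): parse_args can be
# exercised directly with a synthetic argument list; the paths below are
# hypothetical placeholders.
def _demo_parse_args():
    demo_opts = parse_args(["--input", "/path/to/oblique/volumes",
                            "--output", "/path/to/precomputed"])
    return demo_opts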
def main(args=sys.argv[1:]):
opts = parse_args(args)
logging.basicConfig(level=getattr(logging,opts.log_level))
volume_paths = []
zs = []
for root, folders, files in os.walk(opts.input, followlinks=True):
if os.path.split(root)[-1] == "1_1_1":
for file in files:
if file == BlockfsStack.DIRECTORY_FILENAME:
volume_paths.append(os.path.join(root, file))
try:
zs.append(int(os.path.split(os.path.dirname(root))[1]))
except ValueError:
logging.warning(
"Non-numeric Z found in stack path: %s" % root)
all_z = sorted(set(zs))
if opts.alignment is not None:
with open(opts.alignment) as fd:
align_z = json.load(fd)["align-z"]
else:
align_z = False
if align_z:
z_offsets = [z / 10 for z in zs]
else:
z_offsets = [opts.z_offset * all_z.index(z) * opts.x_step_size for z in zs]
volumes = [
StitchSrcVolume(volume_path,
opts.x_step_size,
opts.y_voxel_size,
z_offset)
for volume_path, z_offset in zip(volume_paths, z_offsets)]
z_too = adjust_alignments(opts, volumes)
StitchSrcVolume.rebase_all(volumes, z_too=z_too)
if opts.compute_y_illum_corr:
y_illum_corr = StitchSrcVolume.compute_illum_corr(
volumes,
n_patches=opts.n_y_illum_patches,
min_mean=opts.min_y_illum_mean,
min_corr_coef=opts.min_y_illum_corr_coef,
n_workers=opts.n_workers
)
elif opts.y_illum_corr is not None:
y_illum_corr = opts.y_illum_corr
else:
y_illum_corr = None
if y_illum_corr is not None:
y_illum_corr = \
(1 - y_illum_corr) * (2047 - np.arange(2048)) / 2047 + \
y_illum_corr
if opts.output_size is None:
zs, ys, xs = get_output_size(volumes)
x0 = y0 = z0 = 0
else:
xs, ys, zs = [int(_) for _ in opts.output_size.split(",")]
if opts.output_offset is None:
x0 = y0 = z0 = 0
else:
x0, y0, z0 = [int(_) for _ in opts.output_offset.split(",")]
if not os.path.exists(opts.output):
os.mkdir(opts.output)
l1_dir = os.path.join(opts.output, "1_1_1")
if not os.path.exists(l1_dir):
os.mkdir(l1_dir)
if opts.ngff:
        output = NGFFStack((zs, ys, xs), opts.output)
output.create()
else:
output = BlockfsStack((zs, ys, xs), opts.output)
voxel_size = (opts.x_step_size * 1000,
opts.y_voxel_size * 1000,
opts.x_step_size * 1000)
output.write_info_file(opts.levels, voxel_size)
if opts.ngff:
directory = NGFFDirectory(output)
directory.create()
else:
directory_path = os.path.join(l1_dir, BlockfsStack.DIRECTORY_FILENAME)
directory = Directory(xs, ys, zs, volumes[0].directory.dtype,
directory_path,
n_filenames=opts.n_writers)
directory.create()
directory.start_writer_processes()
run(volumes, directory, x0, y0, z0, opts.n_workers, opts.silent,
y_illum_corr)
directory.close()
for level in range(2, opts.levels + 1):
output.write_level_n(level,
silent=opts.silent,
n_cores=opts.n_writers)
def adjust_alignments(opts, volumes:typing.Sequence[StitchSrcVolume]):
"""
Adjust the volume coordinates based on alignments recorded by
oblique-align or similar.
:param opts: The command-line options - we take the --alignment arg
as a json file.
:param volumes: The volumes to be adjusted
"""
if opts.alignment is not None:
alignments = {}
with open(opts.alignment) as fd:
d:dict = json.load(fd)
if "alignments" in d:
for k, v in d["alignments"].items():
alignments[tuple(json.loads(k)[:-1])] = v
align_z = d.get("align-z", False)
for volume in volumes:
k = (volume.x0, volume.y0)
if k in alignments:
if align_z:
volume.x0, volume.y0, volume.z0 = alignments[k]
else:
volume.x0, volume.y0, _ = alignments[k]
return align_z
if __name__ == "__main__":
main()
|
[
"os.path.exists",
"json.loads",
"spimstitch.ngff.NGFFDirectory",
"argparse.ArgumentParser",
"precomputed_tif.blockfs_stack.BlockfsStack",
"numpy.arange",
"os.path.join",
"logging.warning",
"os.path.split",
"os.path.dirname",
"blockfs.directory.Directory",
"os.mkdir",
"os.cpu_count",
"json.load",
"os.walk",
"precomputed_tif.ngff_stack.NGFFStack"
] |
[((394, 419), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (417, 419), False, 'import argparse\n'), ((4039, 4076), 'os.walk', 'os.walk', (['opts.input'], {'followlinks': '(True)'}), '(opts.input, followlinks=True)\n', (4046, 4076), False, 'import os\n'), ((6213, 6247), 'os.path.join', 'os.path.join', (['opts.output', '"""1_1_1"""'], {}), "(opts.output, '1_1_1')\n", (6225, 6247), False, 'import os\n'), ((6141, 6168), 'os.path.exists', 'os.path.exists', (['opts.output'], {}), '(opts.output)\n', (6155, 6168), False, 'import os\n'), ((6178, 6199), 'os.mkdir', 'os.mkdir', (['opts.output'], {}), '(opts.output)\n', (6186, 6199), False, 'import os\n'), ((6259, 6281), 'os.path.exists', 'os.path.exists', (['l1_dir'], {}), '(l1_dir)\n', (6273, 6281), False, 'import os\n'), ((6291, 6307), 'os.mkdir', 'os.mkdir', (['l1_dir'], {}), '(l1_dir)\n', (6299, 6307), False, 'import os\n'), ((6343, 6379), 'precomputed_tif.ngff_stack.NGFFStack', 'NGFFStack', (['(xs, ys, xs)', 'opts.output'], {}), '((xs, ys, xs), opts.output)\n', (6352, 6379), False, 'from precomputed_tif.ngff_stack import NGFFStack\n'), ((6431, 6470), 'precomputed_tif.blockfs_stack.BlockfsStack', 'BlockfsStack', (['(zs, ys, xs)', 'opts.output'], {}), '((zs, ys, xs), opts.output)\n', (6443, 6470), False, 'from precomputed_tif.blockfs_stack import BlockfsStack\n'), ((6691, 6712), 'spimstitch.ngff.NGFFDirectory', 'NGFFDirectory', (['output'], {}), '(output)\n', (6704, 6712), False, 'from spimstitch.ngff import NGFFDirectory\n'), ((6775, 6828), 'os.path.join', 'os.path.join', (['l1_dir', 'BlockfsStack.DIRECTORY_FILENAME'], {}), '(l1_dir, BlockfsStack.DIRECTORY_FILENAME)\n', (6787, 6828), False, 'import os\n'), ((6849, 6946), 'blockfs.directory.Directory', 'Directory', (['xs', 'ys', 'zs', 'volumes[0].directory.dtype', 'directory_path'], {'n_filenames': 'opts.n_writers'}), '(xs, ys, zs, volumes[0].directory.dtype, directory_path,\n n_filenames=opts.n_writers)\n', (6858, 6946), False, 'from blockfs.directory import Directory\n'), ((7814, 7827), 'json.load', 'json.load', (['fd'], {}), '(fd)\n', (7823, 7827), False, 'import json\n'), ((1296, 1310), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (1308, 1310), False, 'import os\n'), ((1478, 1492), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (1490, 1492), False, 'import os\n'), ((4089, 4108), 'os.path.split', 'os.path.split', (['root'], {}), '(root)\n', (4102, 4108), False, 'import os\n'), ((4669, 4682), 'json.load', 'json.load', (['fd'], {}), '(fd)\n', (4678, 4682), False, 'import json\n'), ((4256, 4280), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (4268, 4280), False, 'import os\n'), ((5740, 5755), 'numpy.arange', 'np.arange', (['(2048)'], {}), '(2048)\n', (5749, 5755), True, 'import numpy as np\n'), ((4450, 4513), 'logging.warning', 'logging.warning', (["('Non-numeric Z found in stack path: %s' % root)"], {}), "('Non-numeric Z found in stack path: %s' % root)\n", (4465, 4513), False, 'import logging\n'), ((7940, 7953), 'json.loads', 'json.loads', (['k'], {}), '(k)\n', (7950, 7953), False, 'import json\n'), ((4359, 4380), 'os.path.dirname', 'os.path.dirname', (['root'], {}), '(root)\n', (4374, 4380), False, 'import os\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Jan 5 2021
@author: <NAME>
based on the Iso-MPS codes
"""
#%% -- IMPORTS --
import sys
sys.path.append("..") # import one subdirectory up in files
# external packages
import numpy as np
import qiskit as qk
import networkx as nx
import tenpy
# custom things
from networks.isonetwork import IsoTensor, IsoNetwork, QKParamCircuit
import mps.mps as mps
#%%
class IsoMERA(IsoNetwork):
"""
MPS defined by
- number of physical and bond qubits (sets up associated quantum registers accordingly)
- l_uc - length of unit cell
- L number of times to repeat unit cell
- circuits for each site in the unit cell, and initial state of bond-qubits
"""
def __init__(self,
preg,
breg,
pcircs,
smax, #
**kwargs):
"""
inputs:
preg, list of lists of physical qubit registers on each site;
notice that in MERA setting we require len(preg) = 2^(smax-1)
            breg, list of lists of bond qubit registers on each layer;
                notice that in MERA setting we require len(breg) = smax
(for qiskit: register= quantum register)
smax, # of layers; count from 0 to smax-1; total smax layers
pcircs, list, of parameterized circuit objects:
pcircs[0] - boundary circuit (acting only on bond-qubits)
pcircs[1...l_uc] for each site in unit-cell
param_names,list of sympy symbols, parameterized gate parameters (shared by all tensors)
L, int (default=1), Length of System (number of times to repeat unit cell)
bdry_circ, boundary vector circuit for prepping initial state of bond-qubits
circuit_format, str, (default='cirq'), type of circuit editor/simulator used
"""
# here, pcircs is a list of lists with length 1,2,4...2^(smax-1), respectively
# self.n_params = len(param_names)
# parse kwargs that don't depend on circuit_format
if 'circuit_format' in kwargs.keys():
self.circuit_format = kwargs['circuit_format']
else:
self.circuit_format = 'qiskit'
if 'L' in kwargs.keys():
self.L = kwargs['L']
else:
self.L=1
if self.circuit_format == 'qiskit':
# setup classical registers for measurement outcomes
self.cregs = [[qk.ClassicalRegister(len(preg[z]))for z in range(2**(smax-1))]#label the thing on each layer
for x in range(self.L)]
self.nphys = 0
self.nbond = 0
for i in range(len(preg)):
self.nphys += len(preg[i]) # number of physical qubits
for i in range(len(breg)):
self.nbond += len(breg[i]) # number of bond qubits
if 'boundary_circuit' in kwargs.keys():
bdry_circ = kwargs['boundary_circuit'] #this, as well, has to be a list
else:
bdry_circ = [QKParamCircuit(qk.QuantumCircuit(), []) for i in range(smax)]
# make the MPS/tensor-train -- same qubits used by each tensor
self.bdry_tensor = [IsoTensor('v_L'+str(i),
[breg[i]],
bdry_circ[i]) for i in range(smax)]
def mlist(preg,x,y,z):
if y == smax-1:
meas_list=[(preg,self.cregs[x][z],qk.QuantumCircuit())]
else:
meas_list=[]
return meas_list
self.sites= [[[IsoTensor('A'+str(x)+str(y)+str(z),
[preg[z],breg[y]],
pcircs[y][z],
meas_list=mlist(preg[z],x,y,z) )
for z in range(2**(y))]#label the nodes on each layer
for y in range(smax)]#label the layers
for x in range(self.L)]
# setup IsoNetwork
# make a flat list of nodes
self.nodes = self.bdry_tensor
for x in range(self.L):
for y in range(smax):
self.nodes += self.sites[x][y]
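            # Edges: boundary tensors feed the first node of each layer; bond registers
            # link neighbouring nodes within a layer; physical registers pass a node's
            # output down to the next layer; the last bond of each layer wraps around to
            # the next unit cell.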
self.edges = [(self.bdry_tensor[i],self.sites[0][i][0],{'qreg':breg[i]}) for i in range(smax)]
self.edges+=[(self.sites[x][y][z],self.sites[x][y][z+1],{'qreg':breg[y]}) for x in range(self.L) for y in range(smax) for z in range (int(2**(y)-1))]
self.edges+=[(self.sites[x][y][z],self.sites[x][y+1][int(2*z)],{'qreg':preg[z]}) for x in range(self.L) for y in range(int(smax-1)) for z in range(int(2**(y)))]
self.edges+=[(self.sites[x][y][int(2**(y-1)-1)],self.sites[x+1][y][0],{'qreg':breg[y]})for x in range(self.L-1) for y in range(int(smax-1))]
self.qregs = breg+preg
# construct graph and check that is a DAG
# check for repeated node names
self.graph = nx.DiGraph()
self.graph.add_nodes_from(self.nodes)
self.graph.add_edges_from(self.edges)
# check that graph is directed & acyclic (DAG)
if nx.algorithms.dag.is_directed_acyclic_graph(self.graph) != True:
raise RuntimeError('Graph must be directed and acyclic')
# store node information
# self.creg_dict = creg_dict
self.node_names = [node.name for node in self.nodes]
if len(self.node_names) != len(set(self.node_names)):
raise ValueError('Tensor nodes must have unique names')
# store variational parameter info
self.param_assignments = {}
for node in self.nodes:
self.param_assignments[node]=node.param_names
# topologically sort nodes in order of execution
self.sorted_nodes = [node for node in nx.topological_sort(self.graph)]
else:
raise NotImplementedError('only qiskit implemented')
## cpu simulation ##
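    # NOTE: the methods below appear to be carried over from the Iso-MPS code and still
    # reference MPS-style attributes (a single bdry_tensor, self.l_uc, a flat
    # self.sites list) that this MERA class does not define in the same way.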
def left_bdry_vector(self,params):
"""
computes full unitaries for each state (any initial state for physicalqubit)
inputs:
params, dictionary of parameters {'name':numerical-value}
returns:
bdry_vec, unitary correspond to boundary
ulist, list of unitaries for tensors in unit cell
"""
bvec_l = self.bdry_tensor.unitary(params)[:,0] # boundary circuit tensor
return bvec_l
def unitaries(self,params):
"""
computes full unitaries for each state (any initial state for physicalqubit)
inputs:
params, dictionary of parameters {'name':numerical-value}
returns:
ulist, list of rank-4 tensors for each site in unit cell
"""
ulist = [self.sites[j].unitary(params) for j in range(self.l_uc)]
return ulist
def tensors(self,params):
"""
computes tensors for fixed initial state of physical qubit = |0>
inputs:
params, dictionary of parameters {'name':numerical-value}
returns:
tensors, list of rank-3 tensors for each site in unit cell
"""
tensors = [self.sites[j].unitary(params)[:,:,0,:] for j in range(self.l_uc)]
return tensors
## Convert to other format(s) ##
def to_tenpy(self,params,L=1):
"""
inputs:
params, dictionary of parameters {'name':numerical-value}
L, int, number of repetitions of unit cell,
set to np.inf for iMPS
TODO: add any other args needed to specify, symmetries, site-type etc...
outputs:
tenpy MPS object created from cirq description
"""
site = tenpy.networks.site.SpinHalfSite(conserve=None)
if (L==np.inf) and (self.l_uc==1) and (self.nphys==1):
B = np.swapaxes(self.tensors(params)[0],1,2)
psi = tenpy.networks.mps.MPS.from_Bflat([site],
[B],
bc='infinite',
dtype=complex,
form=None)
else:
B_arrs = [np.swapaxes(tensor,1,2) for tensor in self.tensors(params)]
B_arrs[0] = B_arrs[0][:,0:1,:]
B_arrs[-1] = B_arrs[-1][:,:,0:1]
psi = tenpy.networks.mps.MPS.from_Bflat([site]*L,
B_arrs,
bc = 'finite',
dtype=complex,
form=None)
psi.canonical_form()
psi.convert_form(psi.form)
return psi
def as_mps(self,params,L=1):
"""
converts to custom MPS class object
inputs:
params, dictionary of parameters {'name':numerical-value}
L, int, number of repetitions of unit cell,
set to np.inf for iMPS
outputs:
custom MPS object created from cirq description
"""
tensors = self.tensors(params)
bvecl = self.left_bdry_vector(params)
state = mps.MPS(tensors,L=L,bdry_vecs=[bvecl,None], rcf = True)
return state
def as_mpo(self,params):
"""
converts to custom MPO class object
inputs:
params, dictionary of parameters {'name':numerical-value}
outputs:
custom MPS object created from cirq description
"""
        tensors = self.unitaries(params)
        bvecl = self.left_bdry_vector(params)
op = mps.MPO(tensors,L=self.L,bdry_vecs=[bvecl,None], rcf = True)
return op
## correlation function sampling ##
def sample_correlations(self,L,bases,N_samples):
"""
basis: measurement basis for each site
possible formats:
- cirq circuit for physical qubits that maps physical qubits to measurement basis
- string of
possible backends:
'tenpy' - uses
'qasm' - output qasm script to measure
inputs:
options: dictionary with entries specifying:
burn-in length,
unit cell length,
basis to measure in for each site,
number of samples to take (could be infinite for cpu-simulations)
backend: whether to run as
"""
raise NotImplementedError
#%%
|
[
"mps.mps.MPO",
"tenpy.networks.site.SpinHalfSite",
"networkx.topological_sort",
"networkx.DiGraph",
"networkx.algorithms.dag.is_directed_acyclic_graph",
"numpy.swapaxes",
"tenpy.networks.mps.MPS.from_Bflat",
"mps.mps.MPS",
"qiskit.QuantumCircuit",
"sys.path.append"
] |
[((188, 209), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (203, 209), False, 'import sys\n'), ((8075, 8122), 'tenpy.networks.site.SpinHalfSite', 'tenpy.networks.site.SpinHalfSite', ([], {'conserve': 'None'}), '(conserve=None)\n', (8107, 8122), False, 'import tenpy\n'), ((9621, 9677), 'mps.mps.MPS', 'mps.MPS', (['tensors'], {'L': 'L', 'bdry_vecs': '[bvecl, None]', 'rcf': '(True)'}), '(tensors, L=L, bdry_vecs=[bvecl, None], rcf=True)\n', (9628, 9677), True, 'import mps.mps as mps\n'), ((10079, 10140), 'mps.mps.MPO', 'mps.MPO', (['tensors'], {'L': 'self.L', 'bdry_vecs': '[bvecl, None]', 'rcf': '(True)'}), '(tensors, L=self.L, bdry_vecs=[bvecl, None], rcf=True)\n', (10086, 10140), True, 'import mps.mps as mps\n'), ((5225, 5237), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (5235, 5237), True, 'import networkx as nx\n'), ((8261, 8352), 'tenpy.networks.mps.MPS.from_Bflat', 'tenpy.networks.mps.MPS.from_Bflat', (['[site]', '[B]'], {'bc': '"""infinite"""', 'dtype': 'complex', 'form': 'None'}), "([site], [B], bc='infinite', dtype=complex,\n form=None)\n", (8294, 8352), False, 'import tenpy\n'), ((8760, 8857), 'tenpy.networks.mps.MPS.from_Bflat', 'tenpy.networks.mps.MPS.from_Bflat', (['([site] * L)', 'B_arrs'], {'bc': '"""finite"""', 'dtype': 'complex', 'form': 'None'}), "([site] * L, B_arrs, bc='finite', dtype=\n complex, form=None)\n", (8793, 8857), False, 'import tenpy\n'), ((5412, 5467), 'networkx.algorithms.dag.is_directed_acyclic_graph', 'nx.algorithms.dag.is_directed_acyclic_graph', (['self.graph'], {}), '(self.graph)\n', (5455, 5467), True, 'import networkx as nx\n'), ((8594, 8619), 'numpy.swapaxes', 'np.swapaxes', (['tensor', '(1)', '(2)'], {}), '(tensor, 1, 2)\n', (8605, 8619), True, 'import numpy as np\n'), ((6166, 6197), 'networkx.topological_sort', 'nx.topological_sort', (['self.graph'], {}), '(self.graph)\n', (6185, 6197), True, 'import networkx as nx\n'), ((3196, 3215), 'qiskit.QuantumCircuit', 'qk.QuantumCircuit', ([], {}), '()\n', (3213, 3215), True, 'import qiskit as qk\n'), ((3641, 3660), 'qiskit.QuantumCircuit', 'qk.QuantumCircuit', ([], {}), '()\n', (3658, 3660), True, 'import qiskit as qk\n')]
|
"""
Module for Gemini FLAMINGOS.
.. include:: ../include/links.rst
"""
import os
from pkg_resources import resource_filename
from IPython import embed
import numpy as np
from pypeit import msgs
from pypeit import telescopes
from pypeit.core import framematch
from pypeit.images import detector_container
from pypeit.spectrographs import spectrograph
class GeminiFLAMINGOSSpectrograph(spectrograph.Spectrograph):
"""
Base class for the Gemini FLAMINGOS spectrograph.
"""
ndet = 1
telescope = telescopes.GeminiSTelescopePar()
def init_meta(self):
"""
Define how metadata are derived from the spectrograph files.
That is, this associates the ``PypeIt``-specific metadata keywords
with the instrument-specific header cards using :attr:`meta`.
"""
self.meta = {}
# Required (core)
self.meta['ra'] = dict(ext=0, card='RA')
self.meta['dec'] = dict(ext=0, card='DEC')
self.meta['target'] = dict(ext=0, card='OBJECT')
self.meta['decker'] = dict(ext=0, card='MASKNAME')
self.meta['dichroic'] = dict(ext=0, card='FILTER')
self.meta['binning'] = dict(ext=0, card=None, default='1,1')
self.meta['mjd'] = dict(ext=0, card='MJD-OBS')
self.meta['exptime'] = dict(ext=0, card='EXPTIME')
self.meta['airmass'] = dict(ext=0, card='AIRMASS')
# Extras for config and frametyping
self.meta['dispname'] = dict(ext=0, card='GRISM')
self.meta['idname'] = dict(ext=0, card='OBSTYPE')
class GeminiFLAMINGOS2Spectrograph(GeminiFLAMINGOSSpectrograph):
"""
Gemini/Flamingos2 Echelle spectrograph methods.
"""
name = 'gemini_flamingos2'
camera = 'FLAMINGOS'
supported = True
comment = 'Flamingos-2 NIR spectrograph'
def get_detector_par(self, hdu, det):
"""
Return metadata for the selected detector.
Args:
hdu (`astropy.io.fits.HDUList`_):
The open fits file with the raw image of interest.
det (:obj:`int`):
1-indexed detector number.
Returns:
:class:`~pypeit.images.detector_container.DetectorContainer`:
Object with the detector metadata.
"""
# Detector 1
detector_dict = dict(
binning = '1,1',
det = 1,
dataext = 1,
specaxis = 0,
specflip = True,
spatflip = False,
platescale = 0.1787,
darkcurr = 0.5,
saturation = 700000., #155400.,
nonlinear = 1.0,
mincounts = -1e10,
numamplifiers = 1,
gain = np.atleast_1d(4.44),
ronoise = np.atleast_1d(5.0), #8 CDS read
datasec = np.atleast_1d('[:,:]'),
oscansec = np.atleast_1d('[:,:]'),
)
return detector_container.DetectorContainer(**detector_dict)
@classmethod
def default_pypeit_par(cls):
"""
Return the default parameters to use for this instrument.
Returns:
:class:`~pypeit.par.pypeitpar.PypeItPar`: Parameters required by
all of ``PypeIt`` methods.
"""
par = super().default_pypeit_par()
# Image processing steps
turn_off = dict(use_illumflat=False, use_biasimage=False, use_overscan=False,
use_darkimage=False)
par.reset_all_processimages_par(**turn_off)
# Wavelengths
# 1D wavelength solution with arc lines
par['calibrations']['wavelengths']['rms_threshold'] = 0.5
par['calibrations']['wavelengths']['sigdetect']=5
par['calibrations']['wavelengths']['fwhm'] = 5
par['calibrations']['wavelengths']['n_first']=2
par['calibrations']['wavelengths']['n_final']=4
par['calibrations']['wavelengths']['lamps'] = ['OH_NIRES']
par['calibrations']['wavelengths']['match_toler']=5.0
# Set slits and tilts parameters
par['calibrations']['tilts']['tracethresh'] = 5
par['calibrations']['tilts']['spat_order'] = 4
par['calibrations']['slitedges']['trace_thresh'] = 10.
par['calibrations']['slitedges']['edge_thresh'] = 200.
par['calibrations']['slitedges']['fit_min_spec_length'] = 0.4
par['calibrations']['slitedges']['sync_predict'] = 'nearest'
# Set the default exposure time ranges for the frame typing
par['calibrations']['standardframe']['exprng'] = [None, 30]
par['calibrations']['tiltframe']['exprng'] = [50, None]
par['calibrations']['arcframe']['exprng'] = [50, None]
par['calibrations']['darkframe']['exprng'] = [20, None]
par['scienceframe']['exprng'] = [20, None]
# Scienceimage parameters
par['reduce']['findobj']['sig_thresh'] = 5.0
par['reduce']['skysub']['sky_sigrej'] = 5.0
par['reduce']['findobj']['find_trim_edge'] = [10,10]
# Do not correct for flexure
par['flexure']['spec_method'] = 'skip'
# Sensitivity function parameters
par['sensfunc']['algorithm'] = 'IR'
par['sensfunc']['polyorder'] = 8
# TODO: replace the telluric grid file for Gemini-S site.
par['sensfunc']['IR']['telgridfile'] \
= os.path.join(par['sensfunc']['IR'].default_root,
'TelFit_LasCampanas_3100_26100_R20000.fits')
return par
def config_specific_par(self, scifile, inp_par=None):
"""
Modify the ``PypeIt`` parameters to hard-wired values used for
specific instrument configurations.
Args:
scifile (:obj:`str`):
File to use when determining the configuration and how
to adjust the input parameters.
inp_par (:class:`~pypeit.par.parset.ParSet`, optional):
Parameter set used for the full run of PypeIt. If None,
use :func:`default_pypeit_par`.
Returns:
:class:`~pypeit.par.parset.ParSet`: The PypeIt parameter set
adjusted for configuration specific parameter values.
"""
par = super().config_specific_par(scifile, inp_par=inp_par)
# TODO: Should we allow the user to override these?
if self.get_meta_value(scifile, 'dispname') == 'JH_G5801':
par['calibrations']['wavelengths']['method'] = 'full_template'
par['calibrations']['wavelengths']['reid_arxiv'] = 'Flamingos2_JH_JH.fits'
elif self.get_meta_value(scifile, 'dispname') == 'HK_G5802':
par['calibrations']['wavelengths']['method'] = 'full_template'
par['calibrations']['wavelengths']['reid_arxiv'] = 'Flamingos2_HK_HK.fits'
return par
def check_frame_type(self, ftype, fitstbl, exprng=None):
"""
Check for frames of the provided type.
Args:
ftype (:obj:`str`):
Type of frame to check. Must be a valid frame type; see
frame-type :ref:`frame_type_defs`.
fitstbl (`astropy.table.Table`_):
The table with the metadata for one or more frames to check.
exprng (:obj:`list`, optional):
Range in the allowed exposure time for a frame of type
``ftype``. See
:func:`pypeit.core.framematch.check_frame_exptime`.
Returns:
`numpy.ndarray`_: Boolean array with the flags selecting the
exposures in ``fitstbl`` that are ``ftype`` type frames.
"""
good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)
if ftype in ['pinhole', 'bias']:
# No pinhole or bias frames
return np.zeros(len(fitstbl), dtype=bool)
if ftype in ['pixelflat', 'trace']:
return good_exp & (fitstbl['idname'] == 'FLAT')
if ftype == 'standard':
return good_exp & (fitstbl['idname'] == 'OBJECT')
if ftype == 'science':
return good_exp & (fitstbl['idname'] == 'OBJECT')
if ftype in ['arc', 'tilt']:
return good_exp & (fitstbl['idname'] == 'OBJECT')
msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
return np.zeros(len(fitstbl), dtype=bool)
class GeminiFLAMINGOS1Spectrograph(GeminiFLAMINGOSSpectrograph):
"""
Gemini/Flamingos1 Echelle spectrograph methods.
.. todo::
This is a placeholder class that is not yet supported.
"""
name = 'gemini_flamingos1'
camera = 'FLAMINGOS'
def get_detector_par(self, hdu, det):
"""
Return metadata for the selected detector.
Args:
hdu (`astropy.io.fits.HDUList`_):
The open fits file with the raw image of interest.
det (:obj:`int`):
1-indexed detector number.
Returns:
:class:`~pypeit.images.detector_container.DetectorContainer`:
Object with the detector metadata.
"""
# Detector 1
detector_dict = dict(
binning='1,1',
det = 1,
dataext = 1,
specaxis = 0,
specflip = False,
spatflip = False,
platescale = 0.15,
darkcurr = 0.01,
saturation = 320000., #155400.,
nonlinear = 0.875,
mincounts = -1e10,
numamplifiers = 1,
gain = np.atleast_1d(3.8),
ronoise = np.atleast_1d(6.0), # SUTR readout
datasec= np.atleast_1d('[5:2044, 900:1250]'),
oscansec= np.atleast_1d('[:5, 900:1250]'),
)
return detector_container.DetectorContainer(**detector_dict)
@classmethod
def default_pypeit_par(cls):
"""
Return the default parameters to use for this instrument.
Returns:
:class:`~pypeit.par.pypeitpar.PypeItPar`: Parameters required by
all of ``PypeIt`` methods.
"""
par = super().default_pypeit_par()
# Image processing steps
turn_off = dict(use_illumflat=False, use_biasimage=False, use_overscan=False,
use_darkimage=False)
par.reset_all_processimages_par(**turn_off)
# Wavelengths
# 1D wavelength solution with arc lines
par['calibrations']['wavelengths']['rms_threshold'] = 1.0
par['calibrations']['wavelengths']['sigdetect']=3
par['calibrations']['wavelengths']['fwhm'] = 20
par['calibrations']['wavelengths']['n_first']=2
par['calibrations']['wavelengths']['n_final']=4
par['calibrations']['wavelengths']['lamps'] = ['ArI', 'ArII', 'ThAr', 'NeI']
par['calibrations']['wavelengths']['method'] = 'full_template'
par['calibrations']['wavelengths']['reid_arxiv'] = 'magellan_fire_long.fits'
par['calibrations']['wavelengths']['match_toler']=5.0
# Set slits and tilts parameters
par['calibrations']['tilts']['tracethresh'] = 5
par['calibrations']['slitedges']['trace_thresh'] = 5.
par['calibrations']['slitedges']['sync_predict'] = 'nearest'
# Scienceimage parameters
par['reduce']['findobj']['sig_thresh'] = 5.0
# TODO: I think this parameter was removed
par['reduce']['findobj']['find_trim_edge'] = [50,50]
# Do not correct for flexure
par['flexure']['spec_method'] = 'skip'
# Set the default exposure time ranges for the frame typing
par['calibrations']['standardframe']['exprng'] = [None, 60]
par['calibrations']['arcframe']['exprng'] = [1, 50]
par['calibrations']['darkframe']['exprng'] = [20, None]
par['scienceframe']['exprng'] = [20, None]
return par
def check_frame_type(self, ftype, fitstbl, exprng=None):
"""
Check for frames of the provided type.
Args:
ftype (:obj:`str`):
Type of frame to check. Must be a valid frame type; see
frame-type :ref:`frame_type_defs`.
fitstbl (`astropy.table.Table`_):
The table with the metadata for one or more frames to check.
exprng (:obj:`list`, optional):
Range in the allowed exposure time for a frame of type
``ftype``. See
:func:`pypeit.core.framematch.check_frame_exptime`.
Returns:
`numpy.ndarray`_: Boolean array with the flags selecting the
exposures in ``fitstbl`` that are ``ftype`` type frames.
"""
good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)
if ftype in ['pinhole', 'bias']:
# No pinhole or bias frames
return np.zeros(len(fitstbl), dtype=bool)
if ftype in ['pixelflat', 'trace']:
return good_exp & (fitstbl['idname'] == 'PixFlat')
if ftype == 'standard':
return good_exp & (fitstbl['idname'] == 'Telluric')
if ftype == 'science':
return good_exp & (fitstbl['idname'] == 'Science')
if ftype in ['arc', 'tilt']:
return good_exp & (fitstbl['idname'] == 'Arc')
msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
return np.zeros(len(fitstbl), dtype=bool)
|
[
"os.path.join",
"pypeit.images.detector_container.DetectorContainer",
"pypeit.telescopes.GeminiSTelescopePar",
"pypeit.core.framematch.check_frame_exptime",
"numpy.atleast_1d"
] |
[((517, 549), 'pypeit.telescopes.GeminiSTelescopePar', 'telescopes.GeminiSTelescopePar', ([], {}), '()\n', (547, 549), False, 'from pypeit import telescopes\n'), ((2994, 3047), 'pypeit.images.detector_container.DetectorContainer', 'detector_container.DetectorContainer', ([], {}), '(**detector_dict)\n', (3030, 3047), False, 'from pypeit.images import detector_container\n'), ((5423, 5520), 'os.path.join', 'os.path.join', (["par['sensfunc']['IR'].default_root", '"""TelFit_LasCampanas_3100_26100_R20000.fits"""'], {}), "(par['sensfunc']['IR'].default_root,\n 'TelFit_LasCampanas_3100_26100_R20000.fits')\n", (5435, 5520), False, 'import os\n'), ((7707, 7765), 'pypeit.core.framematch.check_frame_exptime', 'framematch.check_frame_exptime', (["fitstbl['exptime']", 'exprng'], {}), "(fitstbl['exptime'], exprng)\n", (7737, 7765), False, 'from pypeit.core import framematch\n'), ((9884, 9937), 'pypeit.images.detector_container.DetectorContainer', 'detector_container.DetectorContainer', ([], {}), '(**detector_dict)\n', (9920, 9937), False, 'from pypeit.images import detector_container\n'), ((12811, 12869), 'pypeit.core.framematch.check_frame_exptime', 'framematch.check_frame_exptime', (["fitstbl['exptime']", 'exprng'], {}), "(fitstbl['exptime'], exprng)\n", (12841, 12869), False, 'from pypeit.core import framematch\n'), ((2774, 2793), 'numpy.atleast_1d', 'np.atleast_1d', (['(4.44)'], {}), '(4.44)\n', (2787, 2793), True, 'import numpy as np\n'), ((2825, 2843), 'numpy.atleast_1d', 'np.atleast_1d', (['(5.0)'], {}), '(5.0)\n', (2838, 2843), True, 'import numpy as np\n'), ((2887, 2909), 'numpy.atleast_1d', 'np.atleast_1d', (['"""[:,:]"""'], {}), "('[:,:]')\n", (2900, 2909), True, 'import numpy as np\n'), ((2941, 2963), 'numpy.atleast_1d', 'np.atleast_1d', (['"""[:,:]"""'], {}), "('[:,:]')\n", (2954, 2963), True, 'import numpy as np\n'), ((9657, 9675), 'numpy.atleast_1d', 'np.atleast_1d', (['(3.8)'], {}), '(3.8)\n', (9670, 9675), True, 'import numpy as np\n'), ((9707, 9725), 'numpy.atleast_1d', 'np.atleast_1d', (['(6.0)'], {}), '(6.0)\n', (9720, 9725), True, 'import numpy as np\n'), ((9763, 9798), 'numpy.atleast_1d', 'np.atleast_1d', (['"""[5:2044, 900:1250]"""'], {}), "('[5:2044, 900:1250]')\n", (9776, 9798), True, 'import numpy as np\n'), ((9822, 9853), 'numpy.atleast_1d', 'np.atleast_1d', (['"""[:5, 900:1250]"""'], {}), "('[:5, 900:1250]')\n", (9835, 9853), True, 'import numpy as np\n')]
|
from precise.skaters.covariance.allcovskaters import ALL_D0_SKATERS
from precise.skaters.covarianceutil.likelihood import cov_skater_loglikelihood
from uuid import uuid4
import os
import json
import pathlib
from pprint import pprint
import traceback
from collections import Counter
from momentum.functions import rvar
from precise.skatertools.data.equity import random_m6_returns
from precise.whereami import SKATER_WIN_DATA
import numpy as np
import time
DEFAULT_M6_PARAMS = {'n_dim': 25,
'n_obs': 356,
'n_burn':300,
'atol': 1,
'lb':-1000,
'ub':1000,
'interval':'d'}
def params_category_and_data(params:dict):
"""
Supplement params (usually inferred from battle script file names) with defaults
"""
if params['topic']== 'm6':
        combined_params = dict(DEFAULT_M6_PARAMS)  # copy so the module-level defaults are not mutated
combined_params.update(params)
descriptions = {'m': 'm6_stocks_monthly',
'd': 'm6_stocks_daily'}
combined_params['description'] = descriptions[combined_params['interval']]
category = combined_params['description'] + '_p' + str(combined_params['n_dim']) + '_n' + str(combined_params['n_burn'])
xs = random_m6_returns(verbose=False, **combined_params)
return combined_params, category, xs
else:
raise ValueError('m6 is only topic, for now')
def skater_battle( params:dict ):
"""
Write results to a new queue
"""
n_per_battle = 3
atol = 1.0
try:
params, category, xs_test = params_category_and_data(params=params)
except Exception as e:
print(e)
pprint(params)
raise ValueError('Something is probably wrong with params for getting data, so this config will not fly')
print('Data retrieval test passed for category '+category)
pprint(params)
time.sleep(1)
print('Will test the following skaters')
pprint(ALL_D0_SKATERS)
qn = str(uuid4())+'.json'
queue_dir = os.path.join(SKATER_WIN_DATA, category)
queue = os.path.join(queue_dir,qn)
pathlib.Path(queue_dir).mkdir(parents=True, exist_ok=True)
print(queue)
battles = Counter()
timing = dict()
reliability = dict()
failures = dict()
worst_ll_seen = 10000000
lb = params['lb']
ub = params['ub']
while True:
n_obs = params['n_obs']
params, category, xs = params_category_and_data(params=params)
assert len(xs)==n_obs
xs = np.array(xs)
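        # Randomly choose n_per_battle skaters for this round's head-to-head comparison.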
np.random.shuffle(ALL_D0_SKATERS)
fs = ALL_D0_SKATERS[:n_per_battle]
stuff = list()
for f in fs:
try:
ll, metrics = cov_skater_loglikelihood(f=f, xs=xs, n_burn=params['n_burn'], with_metrics=True, lb=lb, ub=ub)
metrics['name']=f.__name__
metrics['traceback']=''
metrics['passing']=1
stuff.append( (ll,metrics) )
if ll<worst_ll_seen:
worst_ll_seen = ll
print({'worst_ll_seen':ll})
name = metrics['name']
if name not in timing:
timing[name] = {}
timing[name] = rvar(timing[name], x=metrics['time'], rho=0.05)
if name not in reliability:
reliability[name] = {}
reliability[name] = rvar(reliability[name], x=1.0, rho=0.05)
except Exception as e:
metrics = {'name':f.__name__,'passing':0,'traceback':traceback.format_exc(),'ll':-100000000}
if f.__name__ not in reliability:
reliability[f.__name__] = {}
reliability[f.__name__] = rvar(reliability[f.__name__], x=0.0, rho=0.05)
failures[f.__name__] = traceback.format_exc()
ll = worst_ll_seen
stuff.append( (ll,metrics))
valid = [ s for s in stuff if s[1]['passing']>0.5 ]
if len(valid)<=2:
print('urhg')
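        # Pairwise battles: skater i records a win over skater j ("i>j") when its
        # log-likelihood beats j's by more than atol.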
for i, mi in enumerate(valid):
for j, mj in enumerate(valid):
if j != i:
if mi[0] > mj[0]+atol:
i_name = mi[1]['name']
j_name = mj[1]['name']
cmp_name = i_name+'>'+j_name
battles.update({cmp_name:1.0})
reliabilties = dict([(nm, reliab['mean']) for nm,reliab in reliability.items() ] )
cpu_times = dict([(nm, tm['mean']) for nm, tm in timing.items()])
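        # Roughly one iteration in a hundred, persist the win counts to the queue file
        # and print reliability, timing and failure diagnostics.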
if np.random.rand()<0.01:
with open(queue,'wt') as fh:
json.dump(battles,fh)
print('---')
pprint(reliabilties)
print('---')
pprint(cpu_times)
print('---')
pprint(battles)
print(' ')
pprint(failures)
|
[
"traceback.format_exc",
"numpy.random.rand",
"pathlib.Path",
"json.dump",
"os.path.join",
"time.sleep",
"uuid.uuid4",
"collections.Counter",
"numpy.array",
"precise.skaters.covarianceutil.likelihood.cov_skater_loglikelihood",
"precise.skatertools.data.equity.random_m6_returns",
"pprint.pprint",
"momentum.functions.rvar",
"numpy.random.shuffle"
] |
[((1910, 1924), 'pprint.pprint', 'pprint', (['params'], {}), '(params)\n', (1916, 1924), False, 'from pprint import pprint\n'), ((1929, 1942), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1939, 1942), False, 'import time\n'), ((1992, 2014), 'pprint.pprint', 'pprint', (['ALL_D0_SKATERS'], {}), '(ALL_D0_SKATERS)\n', (1998, 2014), False, 'from pprint import pprint\n'), ((2062, 2101), 'os.path.join', 'os.path.join', (['SKATER_WIN_DATA', 'category'], {}), '(SKATER_WIN_DATA, category)\n', (2074, 2101), False, 'import os\n'), ((2114, 2141), 'os.path.join', 'os.path.join', (['queue_dir', 'qn'], {}), '(queue_dir, qn)\n', (2126, 2141), False, 'import os\n'), ((2236, 2245), 'collections.Counter', 'Counter', ([], {}), '()\n', (2243, 2245), False, 'from collections import Counter\n'), ((1290, 1341), 'precise.skatertools.data.equity.random_m6_returns', 'random_m6_returns', ([], {'verbose': '(False)'}), '(verbose=False, **combined_params)\n', (1307, 1341), False, 'from precise.skatertools.data.equity import random_m6_returns\n'), ((2550, 2562), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (2558, 2562), True, 'import numpy as np\n'), ((2571, 2604), 'numpy.random.shuffle', 'np.random.shuffle', (['ALL_D0_SKATERS'], {}), '(ALL_D0_SKATERS)\n', (2588, 2604), True, 'import numpy as np\n'), ((1713, 1727), 'pprint.pprint', 'pprint', (['params'], {}), '(params)\n', (1719, 1727), False, 'from pprint import pprint\n'), ((2029, 2036), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2034, 2036), False, 'from uuid import uuid4\n'), ((2145, 2168), 'pathlib.Path', 'pathlib.Path', (['queue_dir'], {}), '(queue_dir)\n', (2157, 2168), False, 'import pathlib\n'), ((4598, 4614), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4612, 4614), True, 'import numpy as np\n'), ((2740, 2839), 'precise.skaters.covarianceutil.likelihood.cov_skater_loglikelihood', 'cov_skater_loglikelihood', ([], {'f': 'f', 'xs': 'xs', 'n_burn': "params['n_burn']", 'with_metrics': '(True)', 'lb': 'lb', 'ub': 'ub'}), "(f=f, xs=xs, n_burn=params['n_burn'], with_metrics=\n True, lb=lb, ub=ub)\n", (2764, 2839), False, 'from precise.skaters.covarianceutil.likelihood import cov_skater_loglikelihood\n'), ((3271, 3318), 'momentum.functions.rvar', 'rvar', (['timing[name]'], {'x': "metrics['time']", 'rho': '(0.05)'}), "(timing[name], x=metrics['time'], rho=0.05)\n", (3275, 3318), False, 'from momentum.functions import rvar\n'), ((3442, 3482), 'momentum.functions.rvar', 'rvar', (['reliability[name]'], {'x': '(1.0)', 'rho': '(0.05)'}), '(reliability[name], x=1.0, rho=0.05)\n', (3446, 3482), False, 'from momentum.functions import rvar\n'), ((4678, 4700), 'json.dump', 'json.dump', (['battles', 'fh'], {}), '(battles, fh)\n', (4687, 4700), False, 'import json\n'), ((4745, 4765), 'pprint.pprint', 'pprint', (['reliabilties'], {}), '(reliabilties)\n', (4751, 4765), False, 'from pprint import pprint\n'), ((4811, 4828), 'pprint.pprint', 'pprint', (['cpu_times'], {}), '(cpu_times)\n', (4817, 4828), False, 'from pprint import pprint\n'), ((4874, 4889), 'pprint.pprint', 'pprint', (['battles'], {}), '(battles)\n', (4880, 4889), False, 'from pprint import pprint\n'), ((4933, 4949), 'pprint.pprint', 'pprint', (['failures'], {}), '(failures)\n', (4939, 4949), False, 'from pprint import pprint\n'), ((3768, 3814), 'momentum.functions.rvar', 'rvar', (['reliability[f.__name__]'], {'x': '(0.0)', 'rho': '(0.05)'}), '(reliability[f.__name__], x=0.0, rho=0.05)\n', (3772, 3814), False, 'from momentum.functions import rvar\n'), ((3854, 3876), 'traceback.format_exc', 
'traceback.format_exc', ([], {}), '()\n', (3874, 3876), False, 'import traceback\n'), ((3587, 3609), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3607, 3609), False, 'import traceback\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import dask
from dask_kubernetes import KubeCluster
import numpy as np
# In[ ]:
#tag::remote_lb_deploy[]
# In[2]:
# Specify a remote deployment using a load balancer, necessary for the notebook to communicate with the cluster
dask.config.set({"kubernetes.scheduler-service-type": "LoadBalancer"})
# In[4]:
cluster = KubeCluster.from_yaml('worker-spec.yaml', namespace='dask', deploy_mode='remote')
# In[ ]:
#end::remote_lb_deploy[]
# In[5]:
cluster.adapt(minimum=1, maximum=100)
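# Adaptive scaling: Dask will add or remove worker pods between 1 and 100 based on load.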
# In[6]:
# Example usage
from dask.distributed import Client
import dask.array as da
# Connect Dask to the cluster
client = Client(cluster)
# In[7]:
client.scheduler_comm.comm.handshake_info()
# In[8]:
# Create a large array and calculate the mean
array = da.ones((1000, 1000, 1000))
print(array.mean().compute()) # Should print 1.0
# In[9]:
print(array.mean().compute())
# In[10]:
print(array.sum().compute())
# In[13]:
dir(array)
# In[18]:
np.take(array, indices=[0, 10]).sum().compute()
# In[15]:
# In[ ]:
|
[
"dask.config.set",
"dask_kubernetes.KubeCluster.from_yaml",
"dask.distributed.Client",
"numpy.take",
"dask.array.ones"
] |
[((280, 350), 'dask.config.set', 'dask.config.set', (["{'kubernetes.scheduler-service-type': 'LoadBalancer'}"], {}), "({'kubernetes.scheduler-service-type': 'LoadBalancer'})\n", (295, 350), False, 'import dask\n'), ((374, 460), 'dask_kubernetes.KubeCluster.from_yaml', 'KubeCluster.from_yaml', (['"""worker-spec.yaml"""'], {'namespace': '"""dask"""', 'deploy_mode': '"""remote"""'}), "('worker-spec.yaml', namespace='dask', deploy_mode=\n 'remote')\n", (395, 460), False, 'from dask_kubernetes import KubeCluster\n'), ((674, 689), 'dask.distributed.Client', 'Client', (['cluster'], {}), '(cluster)\n', (680, 689), False, 'from dask.distributed import Client\n'), ((814, 841), 'dask.array.ones', 'da.ones', (['(1000, 1000, 1000)'], {}), '((1000, 1000, 1000))\n', (821, 841), True, 'import dask.array as da\n'), ((1018, 1049), 'numpy.take', 'np.take', (['array'], {'indices': '[0, 10]'}), '(array, indices=[0, 10])\n', (1025, 1049), True, 'import numpy as np\n')]
|
import collections
import csv
import os
import sys
from enum import Enum
from pathlib import Path
# adapt paths for jupyter
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import face_alignment
from yawn_train.src.blazeface_detector import BlazeFaceDetector
import cv2
import dlib
import numpy as np
from imutils import face_utils
from yawn_train.src.ssd_face_detector import SSDFaceDetector
# define one constant: the mouth aspect ratio threshold that indicates an open mouth
from yawn_train.src import download_utils, detect_utils, inference_utils
from yawn_train.src.model_config import MOUTH_AR_THRESH, MAX_IMAGE_WIDTH, MAX_IMAGE_HEIGHT
class ImageResult:
def __init__(self, is_processed, is_opened_image):
self.is_processed = is_processed
self.is_opened_image = is_opened_image
@staticmethod
def not_processed():
return ImageResult(False, False)
class VideoResult:
def __init__(self, total_frames, dlib_counter, caffe_counter, blazeface_counter, opened_counter, closed_counter):
self.total_frames = total_frames
self.dlib_counter = dlib_counter
self.caffe_counter = caffe_counter
self.blazeface_counter = blazeface_counter
self.opened_counter = opened_counter
self.closed_counter = closed_counter
@staticmethod
def empty():
return VideoResult(0, 0, 0, 0, 0, 0)
class FACE_TYPE(Enum):
BLAZEFACE = 0
DLIB = 1
CAFFE = 2
@classmethod
def has_value(cls, value):
return value in cls._value2member_map_
def get_next(self):
val = self.value
if self.has_value(val + 1):
return FACE_TYPE(val + 1)
return FACE_TYPE(0)
class LNDMR_TYPE(Enum):
DLIB = 0
FACEALIGN = 1
COLOR_IMG = False
MOUTH_FOLDER = "./mouth_state_new10" + ("_color" if COLOR_IMG else "")
MOUTH_OPENED_FOLDER = os.path.join(MOUTH_FOLDER, 'opened')
MOUTH_CLOSED_FOLDER = os.path.join(MOUTH_FOLDER, 'closed')
TEMP_FOLDER = "./temp"
# https://ieee-dataport.org/open-access/yawdd-yawning-detection-dataset#files
YAWDD_DATASET_FOLDER = "./YawDD dataset"
CSV_STATS = 'video_stat.csv'
read_mouth_open_counter = 0
read_mouth_close_counter = 0
saved_mouth_open_counter = 0
saved_mouth_close_counter = 0
SAMPLE_STEP_IMG_OPENED = 1
SAMPLE_STEP_IMG_CLOSED = 4
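# Keep every opened-mouth frame but only every 4th closed-mouth frame, presumably to
# reduce the class imbalance between (rare) opened and (common) closed samples.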
(mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
Path(MOUTH_FOLDER).mkdir(parents=True, exist_ok=True)
Path(MOUTH_OPENED_FOLDER).mkdir(parents=True, exist_ok=True)
Path(MOUTH_CLOSED_FOLDER).mkdir(parents=True, exist_ok=True)
dlib_landmarks_file = download_utils.download_and_unpack_dlib_68_landmarks(TEMP_FOLDER)
# dlib predictor for 68pts, mouth
predictor = dlib.shape_predictor(dlib_landmarks_file)
# initialize dlib's face detector (HOG-based)
detector = dlib.get_frontal_face_detector()
caffe_weights, caffe_config = download_utils.download_caffe(TEMP_FOLDER)
# Reads the network model stored in Caffe framework's format.
face_model = cv2.dnn.readNetFromCaffe(caffe_config, caffe_weights)
ssd_face_detector = SSDFaceDetector(face_model)
import tensorflow as tf
bf_model = download_utils.download_blazeface(TEMP_FOLDER)
blazeface_tf = tf.keras.models.load_model(bf_model, compile=False)
blazefaceDetector = BlazeFaceDetector(blazeface_tf)
# img = cv2.imread(
# '/Users/igla/Desktop/Screenshot 2021-01-14 at 12.29.25.png', cv2.IMREAD_GRAYSCALE)
# ultrafacedetector = UltraFaceDetector("/Users/igla/Downloads/version-RFB-320_simplified.onnx")
"""
Take mouth ratio only from dlib rect. Use dnn frame for output
"""
def should_process_video(video_name: str) -> bool:
is_video_sunglasses = video_name.rfind('SunGlasses') != -1
if is_video_sunglasses:
# inaccurate landmarks in sunglasses
print('Video contains sunglasses. Skip', video_name)
return False
return video_name.endswith('-Normal.avi') or \
video_name.endswith('-Talking.avi') or \
video_name.endswith('-Yawning.avi')
pred_type = collections.namedtuple('prediction_type', ['slice', 'color'])
pred_types = {'face': pred_type(slice(0, 17), (0.682, 0.780, 0.909, 0.5)),
'eyebrow1': pred_type(slice(17, 22), (1.0, 0.498, 0.055, 0.4)),
'eyebrow2': pred_type(slice(22, 27), (1.0, 0.498, 0.055, 0.4)),
'nose': pred_type(slice(27, 31), (0.345, 0.239, 0.443, 0.4)),
'nostril': pred_type(slice(31, 36), (0.345, 0.239, 0.443, 0.4)),
'eye1': pred_type(slice(36, 42), (0.596, 0.875, 0.541, 0.3)),
'eye2': pred_type(slice(42, 48), (0.596, 0.875, 0.541, 0.3)),
'lips': pred_type(slice(48, 60), (0.596, 0.875, 0.541, 0.3)),
'teeth': pred_type(slice(60, 68), (0.596, 0.875, 0.541, 0.4))
}
face_detector = 'sfd'
face_detector_kwargs = {
"filter_threshold": 0.8
}
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, flip_input=True, device='cpu',
face_detector=face_detector)
def get_mouth_opened(frame, start_x, start_y, end_x, end_y) -> tuple:
mouth_shape = predictor(frame, dlib.rectangle(start_x, start_y, end_x, end_y))
mouth_shape = face_utils.shape_to_np(mouth_shape)
mouth_arr = mouth_shape[mStart:mEnd]
mouth_mar_dlib = detect_utils.mouth_aspect_ratio(mouth_arr)
mouth_mar_dlib = round(mouth_mar_dlib, 2)
# print(mouth_mar_dlib)
face_roi_dlib = frame[start_y:end_y, start_x:end_x]
height_frame, width_frame = face_roi_dlib.shape[:2]
# swapping the read and green channels
# https://stackoverflow.com/a/56933474/1461625
detected_faces = []
detected_faces.append([0, 0, width_frame, height_frame])
preds = fa.get_landmarks_from_image(face_roi_dlib, detected_faces)[-1]
pred_type = pred_types['lips']
X = preds[pred_type.slice, 0]
Y = preds[pred_type.slice, 1]
mouth_shape_3ddfa = []
for x, y in zip(X, Y):
mouth_shape_3ddfa.append((x, y))
# shape = []
# for idx, pred_type in enumerate(pred_types.values()):
# X = preds[pred_type.slice, 0]
# Y = preds[pred_type.slice, 1]
# for x, y in zip(X, Y):
# shape.append((x, y))
mouth_mar_3ddfa = detect_utils.mouth_aspect_ratio(mouth_shape_3ddfa)
mouth_mar_3ddfa = round(mouth_mar_3ddfa, 2)
# print(mouth_mar_3ddfa)
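    # Cross-check the two landmark sources: when dlib and face_alignment agree on
    # opened vs. closed, return the dlib ratio; otherwise trust the face_alignment one.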
is_opened_mouth_3ddfa = mouth_mar_3ddfa >= 0.75
is_opened_mouth_dlib = mouth_mar_dlib >= MOUTH_AR_THRESH
if is_opened_mouth_3ddfa == is_opened_mouth_dlib:
return is_opened_mouth_3ddfa, mouth_mar_dlib, LNDMR_TYPE.DLIB # correct, same as dlib, return dlib ratio
else:
return is_opened_mouth_3ddfa, mouth_mar_3ddfa, LNDMR_TYPE.FACEALIGN # return 3ddfa, as it's more accurate
def recognize_image(video_id: int, video_path: str, frame, frame_id: int, face_type: FACE_TYPE, face_rect_dlib,
face_rect_dnn=None) -> ImageResult:
(start_x, start_y, end_x, end_y) = face_rect_dlib
start_x = max(start_x, 0)
start_y = max(start_y, 0)
if start_x >= end_x or start_y >= end_y:
print('Invalid detection. Skip', face_rect_dlib)
return ImageResult.not_processed()
face_roi_dlib = frame[start_y:end_y, start_x:end_x]
if face_roi_dlib is None:
print('Cropped face is None. Skip')
return ImageResult.not_processed()
height_frame, width_frame = face_roi_dlib.shape[:2]
if height_frame < 50 or width_frame < 50: # some images have invalid dlib face rect
print('Too small face. Skip')
return ImageResult.not_processed()
# https://pyimagesearch.com/wp-content/uploads/2017/04/facial_landmarks_68markup.jpg
is_mouth_opened, open_mouth_ratio, lndmk_type = get_mouth_opened(frame, start_x, start_y, end_x, end_y)
# skip frames in normal and talking, containing opened mouth (we detect only yawn)
video_name = os.path.basename(video_path)
is_video_no_yawn = video_name.endswith('-Normal.avi') or \
video_name.endswith('-Talking.avi')
if is_mouth_opened and is_video_no_yawn:
# some videos may contain opened mouth, skip these situations
return ImageResult.not_processed()
prefix = 'dlib'
target_face_roi = None
if face_rect_dnn is not None:
(start_x, start_y, end_x, end_y) = face_rect_dnn
start_x = max(start_x, 0)
start_y = max(start_y, 0)
if start_x < end_x and start_y < end_y:
face_roi_dnn = frame[start_y:end_y, start_x:end_x]
target_face_roi = face_roi_dnn
prefix = face_type.name.lower()
if target_face_roi is None:
target_face_roi = face_roi_dlib
if len(frame.shape) == 2 or COLOR_IMG: # single channel
gray_img = target_face_roi
else:
gray_img = cv2.cvtColor(target_face_roi, cv2.COLOR_BGR2GRAY)
gray_img = detect_utils.resize_img(gray_img, MAX_IMAGE_WIDTH, MAX_IMAGE_HEIGHT)
lndmk_type_name = lndmk_type.name.lower()
if is_mouth_opened:
global read_mouth_open_counter
read_mouth_open_counter = read_mouth_open_counter + 1
# reduce img count
if read_mouth_open_counter % SAMPLE_STEP_IMG_OPENED != 0:
return ImageResult.not_processed()
global saved_mouth_open_counter
saved_mouth_open_counter = saved_mouth_open_counter + 1
file_name = os.path.join(MOUTH_OPENED_FOLDER,
f'{read_mouth_open_counter}_{open_mouth_ratio}_{video_id}_{frame_id}_{prefix}_{lndmk_type_name}.jpg')
cv2.imwrite(file_name, gray_img)
return ImageResult(is_processed=True, is_opened_image=True)
else:
global read_mouth_close_counter
read_mouth_close_counter = read_mouth_close_counter + 1
# reduce img count
if read_mouth_close_counter % SAMPLE_STEP_IMG_CLOSED != 0:
return ImageResult.not_processed()
global saved_mouth_close_counter
saved_mouth_close_counter = saved_mouth_close_counter + 1
file_name = os.path.join(MOUTH_CLOSED_FOLDER,
f'{read_mouth_close_counter}_{open_mouth_ratio}_{video_id}_{frame_id}_{prefix}_{lndmk_type_name}.jpg')
cv2.imwrite(file_name, gray_img)
return ImageResult(is_processed=True, is_opened_image=False)
def detect_faces_complex(frame):
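    # Try detectors in order: dlib HOG on the grayscale frame first, then the Caffe SSD
    # model, and finally BlazeFace; the first non-empty detection wins.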
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face_list_dlib = inference_utils.detect_face_dlib(detector, gray_frame)
if len(face_list_dlib) > 0:
return face_list_dlib, FACE_TYPE.DLIB
face_list_dnn_cafe = ssd_face_detector.detect_face(frame)
if len(face_list_dnn_cafe) > 0:
return face_list_dnn_cafe, FACE_TYPE.CAFFE
face_list_dnn_blaze = blazefaceDetector.detect_face(frame)
if len(face_list_dnn_blaze) > 0:
return face_list_dnn_blaze, FACE_TYPE.BLAZEFACE
return [], None
def process_video(video_id, video_path) -> VideoResult:
video_name = os.path.basename(video_path)
if should_process_video(video_name) is False:
print('Video should not be processed', video_path)
return VideoResult.empty()
cap = cv2.VideoCapture(video_path)
if cap.isOpened() is False:
print('Video is not opened', video_path)
return VideoResult.empty()
face_dlib_counter = 0
face_caffe_counter = 0
face_blazeface_counter = 0
opened_img_counter = 0
closed_img_counter = 0
frame_id = 0
face_type = FACE_TYPE.DLIB
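    # The detector used for the saved crop rotates dlib -> caffe -> blazeface after each
    # saved frame (or when the current detector finds no face), so the dataset mixes
    # crops from all three detectors.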
while True:
ret, frame = cap.read()
if ret is False:
break
if frame is None:
print('No images left in', video_path)
break
if np.shape(frame) == ():
print('Empty image. Skip')
continue
frame_id = frame_id + 1
face_list, f_type = detect_faces_complex(frame)
if len(face_list) == 0:
# skip images not recognized by dlib or other detectors
continue
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
recognize_frame = frame if COLOR_IMG else gray_frame
if face_type == FACE_TYPE.DLIB:
image_result = recognize_image(video_id, video_path, recognize_frame, frame_id, face_type,
face_list[0])
is_processed = image_result.is_processed
if is_processed:
face_type = face_type.get_next()
face_dlib_counter = face_dlib_counter + 1
if image_result.is_opened_image:
opened_img_counter = opened_img_counter + 1
else:
closed_img_counter = closed_img_counter + 1
continue
if face_type == FACE_TYPE.CAFFE:
face_list_dnn = ssd_face_detector.detect_face(frame)
if len(face_list_dnn) == 0:
face_type = face_type.get_next()
print('Face not found with Caffe DNN')
continue
image_result = recognize_image(video_id, video_path, recognize_frame, frame_id, face_type,
face_list[0],
face_list_dnn[0])
is_processed = image_result.is_processed
if is_processed:
face_type = face_type.get_next()
face_caffe_counter = face_caffe_counter + 1
if image_result.is_opened_image:
opened_img_counter = opened_img_counter + 1
else:
closed_img_counter = closed_img_counter + 1
if face_type == FACE_TYPE.BLAZEFACE:
face_list_dnn = blazefaceDetector.detect_face(frame)
if len(face_list_dnn) == 0:
face_type = face_type.get_next()
print('Face not found with Blazeface')
continue
image_result = recognize_image(video_id, video_path, recognize_frame, frame_id, face_type,
face_list[0],
face_list_dnn[0])
is_processed = image_result.is_processed
if is_processed:
face_type = face_type.get_next()
face_blazeface_counter = face_blazeface_counter + 1
if image_result.is_opened_image:
opened_img_counter = opened_img_counter + 1
else:
closed_img_counter = closed_img_counter + 1
print(
f"Total images: {face_dlib_counter + face_caffe_counter + face_blazeface_counter}"
f', dlib: {face_dlib_counter} images'
f', blazeface: {face_blazeface_counter} images'
f', caffe: {face_caffe_counter} images in video {video_name}'
)
cap.release()
# The function is not implemented. Rebuild the library with Windows, GTK+ 2.x or Cocoa support. If you are on
# Ubuntu or Debian, install libgtk2.0-dev and pkg-config, then re-run cmake or configure script in function
# 'cvDestroyAllWindows'
try:
cv2.destroyAllWindows()
except:
print('No destroy windows')
return VideoResult(
frame_id,
face_dlib_counter,
        face_caffe_counter,
        face_blazeface_counter,
opened_img_counter,
closed_img_counter
)
def write_csv_stat(filename, video_count, video_result: VideoResult):
video_stat_dict_path = os.path.join(MOUTH_FOLDER, CSV_STATS)
if os.path.isfile(video_stat_dict_path) is False:
with open(video_stat_dict_path, 'w') as f:
w = csv.writer(f)
w.writerow(['Video id', 'File name', 'Total frames', 'Image saved', 'Opened img', 'Closed img'])
# mode 'a' append
with open(video_stat_dict_path, 'a') as f:
w = csv.writer(f)
img_counter = video_result.caffe_counter + video_result.dlib_counter + video_result.blazeface_counter
w.writerow((
video_count,
filename,
video_result.total_frames,
img_counter,
video_result.opened_counter,
video_result.closed_counter
))
def process_videos():
video_count = 0
total_frames = 0
for root, dirs, files in os.walk(YAWDD_DATASET_FOLDER):
for file in files:
if file.endswith(".avi"):
video_count = video_count + 1
file_name = os.path.join(root, file)
print('Current video', file_name)
video_result = process_video(video_count, file_name)
total_frames = total_frames + video_result.total_frames
write_csv_stat(file_name, video_count, video_result)
print(f'Videos processed: {video_count}')
print(f'Total read images: {total_frames}')
print(f'Total saved images: {saved_mouth_open_counter + saved_mouth_close_counter}')
print(f'Saved opened mouth images: {saved_mouth_open_counter}')
print(f'Saved closed mouth images: {saved_mouth_close_counter}')
if __name__ == '__main__':
process_videos()
|
[
"yawn_train.src.download_utils.download_blazeface",
"tensorflow.keras.models.load_model",
"cv2.destroyAllWindows",
"yawn_train.src.blazeface_detector.BlazeFaceDetector",
"yawn_train.src.download_utils.download_and_unpack_dlib_68_landmarks",
"sys.path.append",
"os.walk",
"yawn_train.src.ssd_face_detector.SSDFaceDetector",
"pathlib.Path",
"dlib.rectangle",
"dlib.shape_predictor",
"cv2.dnn.readNetFromCaffe",
"yawn_train.src.download_utils.download_caffe",
"dlib.get_frontal_face_detector",
"yawn_train.src.detect_utils.mouth_aspect_ratio",
"collections.namedtuple",
"csv.writer",
"os.path.isfile",
"imutils.face_utils.shape_to_np",
"cv2.cvtColor",
"numpy.shape",
"cv2.imwrite",
"os.path.join",
"face_alignment.FaceAlignment",
"yawn_train.src.detect_utils.resize_img",
"yawn_train.src.inference_utils.detect_face_dlib",
"os.path.basename",
"cv2.VideoCapture"
] |
[((1922, 1958), 'os.path.join', 'os.path.join', (['MOUTH_FOLDER', '"""opened"""'], {}), "(MOUTH_FOLDER, 'opened')\n", (1934, 1958), False, 'import os\n'),
((1981, 2017), 'os.path.join', 'os.path.join', (['MOUTH_FOLDER', '"""closed"""'], {}), "(MOUTH_FOLDER, 'closed')\n", (1993, 2017), False, 'import os\n'),
((2624, 2689), 'yawn_train.src.download_utils.download_and_unpack_dlib_68_landmarks', 'download_utils.download_and_unpack_dlib_68_landmarks', (['TEMP_FOLDER'], {}), '(TEMP_FOLDER)\n', (2676, 2689), False, 'from yawn_train.src import download_utils, detect_utils, inference_utils\n'),
((2736, 2777), 'dlib.shape_predictor', 'dlib.shape_predictor', (['dlib_landmarks_file'], {}), '(dlib_landmarks_file)\n', (2756, 2777), False, 'import dlib\n'),
((2835, 2867), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (2865, 2867), False, 'import dlib\n'),
((2899, 2941), 'yawn_train.src.download_utils.download_caffe', 'download_utils.download_caffe', (['TEMP_FOLDER'], {}), '(TEMP_FOLDER)\n', (2928, 2941), False, 'from yawn_train.src import download_utils, detect_utils, inference_utils\n'),
((3017, 3070), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['caffe_config', 'caffe_weights'], {}), '(caffe_config, caffe_weights)\n', (3041, 3070), False, 'import cv2\n'),
((3091, 3118), 'yawn_train.src.ssd_face_detector.SSDFaceDetector', 'SSDFaceDetector', (['face_model'], {}), '(face_model)\n', (3106, 3118), False, 'from yawn_train.src.ssd_face_detector import SSDFaceDetector\n'),
((3156, 3202), 'yawn_train.src.download_utils.download_blazeface', 'download_utils.download_blazeface', (['TEMP_FOLDER'], {}), '(TEMP_FOLDER)\n', (3189, 3202), False, 'from yawn_train.src import download_utils, detect_utils, inference_utils\n'),
((3218, 3269), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['bf_model'], {'compile': '(False)'}), '(bf_model, compile=False)\n', (3244, 3269), True, 'import tensorflow as tf\n'),
((3290, 3321), 'yawn_train.src.blazeface_detector.BlazeFaceDetector', 'BlazeFaceDetector', (['blazeface_tf'], {}), '(blazeface_tf)\n', (3307, 3321), False, 'from yawn_train.src.blazeface_detector import BlazeFaceDetector\n'),
((4037, 4098), 'collections.namedtuple', 'collections.namedtuple', (['"""prediction_type"""', "['slice', 'color']"], {}), "('prediction_type', ['slice', 'color'])\n", (4059, 4098), False, 'import collections\n'),
((4887, 5014), 'face_alignment.FaceAlignment', 'face_alignment.FaceAlignment', (['face_alignment.LandmarksType._3D'], {'flip_input': '(True)', 'device': '"""cpu"""', 'face_detector': 'face_detector'}), "(face_alignment.LandmarksType._3D, flip_input=\n True, device='cpu', face_detector=face_detector)\n", (4915, 5014), False, 'import face_alignment\n'),
((156, 174), 'os.path.join', 'os.path.join', (['""".."""'], {}), "('..')\n", (168, 174), False, 'import os\n'),
((212, 240), 'sys.path.append', 'sys.path.append', (['module_path'], {}), '(module_path)\n', (227, 240), False, 'import sys\n'),
((5217, 5252), 'imutils.face_utils.shape_to_np', 'face_utils.shape_to_np', (['mouth_shape'], {}), '(mouth_shape)\n', (5239, 5252), False, 'from imutils import face_utils\n'),
((5315, 5357), 'yawn_train.src.detect_utils.mouth_aspect_ratio', 'detect_utils.mouth_aspect_ratio', (['mouth_arr'], {}), '(mouth_arr)\n', (5346, 5357), False, 'from yawn_train.src import download_utils, detect_utils, inference_utils\n'),
((6247, 6297), 'yawn_train.src.detect_utils.mouth_aspect_ratio', 'detect_utils.mouth_aspect_ratio', (['mouth_shape_3ddfa'], {}), '(mouth_shape_3ddfa)\n', (6278, 6297), False, 'from yawn_train.src import download_utils, detect_utils, inference_utils\n'),
((7916, 7944), 'os.path.basename', 'os.path.basename', (['video_path'], {}), '(video_path)\n', (7932, 7944), False, 'import os\n'),
((8894, 8962), 'yawn_train.src.detect_utils.resize_img', 'detect_utils.resize_img', (['gray_img', 'MAX_IMAGE_WIDTH', 'MAX_IMAGE_HEIGHT'], {}), '(gray_img, MAX_IMAGE_WIDTH, MAX_IMAGE_HEIGHT)\n', (8917, 8962), False, 'from yawn_train.src import download_utils, detect_utils, inference_utils\n'),
((10393, 10432), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (10405, 10432), False, 'import cv2\n'),
((10454, 10508), 'yawn_train.src.inference_utils.detect_face_dlib', 'inference_utils.detect_face_dlib', (['detector', 'gray_frame'], {}), '(detector, gray_frame)\n', (10486, 10508), False, 'from yawn_train.src import download_utils, detect_utils, inference_utils\n'),
((10989, 11017), 'os.path.basename', 'os.path.basename', (['video_path'], {}), '(video_path)\n', (11005, 11017), False, 'import os\n'),
((11173, 11201), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (11189, 11201), False, 'import cv2\n'),
((15449, 15486), 'os.path.join', 'os.path.join', (['MOUTH_FOLDER', 'CSV_STATS'], {}), '(MOUTH_FOLDER, CSV_STATS)\n', (15461, 15486), False, 'import os\n'),
((16255, 16284), 'os.walk', 'os.walk', (['YAWDD_DATASET_FOLDER'], {}), '(YAWDD_DATASET_FOLDER)\n', (16262, 16284), False, 'import os\n'),
((2425, 2443), 'pathlib.Path', 'Path', (['MOUTH_FOLDER'], {}), '(MOUTH_FOLDER)\n', (2429, 2443), False, 'from pathlib import Path\n'),
((2479, 2504), 'pathlib.Path', 'Path', (['MOUTH_OPENED_FOLDER'], {}), '(MOUTH_OPENED_FOLDER)\n', (2483, 2504), False, 'from pathlib import Path\n'),
((2540, 2565), 'pathlib.Path', 'Path', (['MOUTH_CLOSED_FOLDER'], {}), '(MOUTH_CLOSED_FOLDER)\n', (2544, 2565), False, 'from pathlib import Path\n'),
((5151, 5197), 'dlib.rectangle', 'dlib.rectangle', (['start_x', 'start_y', 'end_x', 'end_y'], {}), '(start_x, start_y, end_x, end_y)\n', (5165, 5197), False, 'import dlib\n'),
((8829, 8878), 'cv2.cvtColor', 'cv2.cvtColor', (['target_face_roi', 'cv2.COLOR_BGR2GRAY'], {}), '(target_face_roi, cv2.COLOR_BGR2GRAY)\n', (8841, 8878), False, 'import cv2\n'),
((9400, 9544), 'os.path.join', 'os.path.join', (['MOUTH_OPENED_FOLDER', 'f"""{read_mouth_open_counter}_{open_mouth_ratio}_{video_id}_{frame_id}_{prefix}_{lndmk_type_name}.jpg"""'], {}), "(MOUTH_OPENED_FOLDER,\n f'{read_mouth_open_counter}_{open_mouth_ratio}_{video_id}_{frame_id}_{prefix}_{lndmk_type_name}.jpg'\n )\n", (9412, 9544), False, 'import os\n'),
((9577, 9609), 'cv2.imwrite', 'cv2.imwrite', (['file_name', 'gray_img'], {}), '(file_name, gray_img)\n', (9588, 9609), False, 'import cv2\n'),
((10061, 10206), 'os.path.join', 'os.path.join', (['MOUTH_CLOSED_FOLDER', 'f"""{read_mouth_close_counter}_{open_mouth_ratio}_{video_id}_{frame_id}_{prefix}_{lndmk_type_name}.jpg"""'], {}), "(MOUTH_CLOSED_FOLDER,\n f'{read_mouth_close_counter}_{open_mouth_ratio}_{video_id}_{frame_id}_{prefix}_{lndmk_type_name}.jpg'\n )\n", (10073, 10206), False, 'import os\n'),
((10239, 10271), 'cv2.imwrite', 'cv2.imwrite', (['file_name', 'gray_img'], {}), '(file_name, gray_img)\n', (10250, 10271), False, 'import cv2\n'),
((12019, 12058), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (12031, 12058), False, 'import cv2\n'),
((15087, 15110), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (15108, 15110), False, 'import cv2\n'),
((15494, 15530), 'os.path.isfile', 'os.path.isfile', (['video_stat_dict_path'], {}), '(video_stat_dict_path)\n', (15508, 15530), False, 'import os\n'),
((15813, 15826), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (15823, 15826), False, 'import csv\n'),
((11703, 11718), 'numpy.shape', 'np.shape', (['frame'], {}), '(frame)\n', (11711, 11718), True, 'import numpy as np\n'),
((15608, 15621), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (15618, 15621), False, 'import csv\n'),
((16425, 16449), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (16437, 16449), False, 'import os\n')]
|
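For orientation, each record in the extract_api cell above appears to be a Python-literal tuple of eight fields: the character span of the call, the fully qualified API name, the name as written at the call site, a pair of positional-argument and keyword-argument reprs, the argument source text of the call, the character span of the callable name, a boolean that seems to indicate an aliased import, and the import statement that brought the name into scope. Below is a minimal parsing sketch under that assumed layout; the function name and dictionary keys are illustrative and not part of the dataset.

import ast

def parse_extract_api(field_text):
    """Parse one extract_api cell into a list of dicts (assumed 8-field layout)."""
    records = []
    # The cell is a literal list of tuples, so ast.literal_eval can parse it safely.
    for entry in ast.literal_eval(field_text):
        (call_span, api_name, call_name, (args, kwargs),
         call_source, name_span, uses_alias, import_stmt) = entry
        records.append({
            'call_span': call_span,      # character offsets of the full call expression
            'api_name': api_name,        # fully qualified API name
            'call_name': call_name,      # name as written in the source
            'args': args,                # positional-argument reprs
            'kwargs': kwargs,            # keyword-argument reprs
            'call_source': call_source,  # argument text of the call
            'name_span': name_span,      # character offsets of the callable name
            'uses_alias': uses_alias,    # assumed: True when an import alias (e.g. np, tf) is used
            'import': import_stmt,       # originating import statement
        })
    return records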