code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
---|---|---|---|---|---|
"receive a message"
if len(args) == 1:
flags, = args
pointer = ctypes.c_void_p()
rtn = _nn_recv(socket, ctypes.byref(pointer), ctypes.c_size_t(-1),
flags)
if rtn < 0:
return rtn, None
else:
return rtn, _create_message(pointer.value, rtn)
elif len(args) == 2:
msg_buf, flags = args
mv_buf = memoryview(msg_buf)
if mv_buf.readonly:
raise TypeError('Writable buffer is required')
rtn = _nn_recv(socket, ctypes.addressof(msg_buf), len(mv_buf), flags)
return rtn, msg_buf | def nn_recv(socket, *args) | receive a message | 3.170739 | 3.163214 | 1.002379 |
rtn = wrapper.nn_allocmsg(size, type)
if rtn is None:
raise NanoMsgAPIError()
return rtn | def create_message_buffer(size, type) | Create a message buffer | 12.848817 | 14.428682 | 0.890505 |
sockets = {}
# reverse map fd => socket
fd_sockets = {}
for s in in_sockets:
sockets[s.fd] = POLLIN
fd_sockets[s.fd] = s
for s in out_sockets:
modes = sockets.get(s.fd, 0)
sockets[s.fd] = modes | POLLOUT
fd_sockets[s.fd] = s
# convert to milliseconds or -1
if timeout >= 0:
timeout_ms = int(timeout*1000)
else:
timeout_ms = -1
res, sockets = wrapper.nn_poll(sockets, timeout_ms)
_nn_check_positive_rtn(res)
read_list, write_list = [], []
for fd, result in sockets.items():
if (result & POLLIN) != 0:
read_list.append(fd_sockets[fd])
if (result & POLLOUT) != 0:
write_list.append(fd_sockets[fd])
return read_list, write_list | def poll(in_sockets, out_sockets, timeout=-1) | Poll a list of sockets
:param in_sockets: sockets for reading
:param out_sockets: sockets for writing
:param timeout: poll timeout in seconds, -1 is infinite wait
:return: tuple (read socket list, write socket list) | 3.00699 | 3.008682 | 0.999438 |
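A minimal usage sketch for the `poll` helper above. It assumes `Socket`, `PAIR`, and `poll` are all importable from the same nanomsg bindings shown in this table; adjust the import to your installation.

```python
# Illustrative only; the imported names are assumed to come from the nanomsg bindings above.
from nanomsg import Socket, PAIR, poll

s1, s2 = Socket(PAIR), Socket(PAIR)
s1.bind('inproc://example')
s2.connect('inproc://example')
s2.send(b'hello')

# Wait up to 1 second for s1 to become readable and s2 to become writable.
readable, writable = poll([s1], [s2], timeout=1.0)
if s1 in readable:
    print(s1.recv())  # b'hello'

s1.close()
s2.close()
```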
if self.uses_nanoconfig:
raise ValueError("Nanoconfig address must be sole endpoint")
endpoint_id = _nn_check_positive_rtn(
wrapper.nn_bind(self._fd, address)
)
ep = Socket.BindEndpoint(self, endpoint_id, address)
self._endpoints.append(ep)
return ep | def bind(self, address) | Add a local endpoint to the socket | 11.419463 | 10.698761 | 1.067363 |
if self.uses_nanoconfig:
raise ValueError("Nanoconfig address must be sole endpoint")
endpoint_id = _nn_check_positive_rtn(
wrapper.nn_connect(self.fd, address)
)
ep = Socket.ConnectEndpoint(self, endpoint_id, address)
self._endpoints.append(ep)
return ep | def connect(self, address) | Add a remote endpoint to the socket | 11.842909 | 10.981286 | 1.078463 |
global nanoconfig_started
if len(self._endpoints):
raise ValueError("Nanoconfig address must be sole endpoint")
endpoint_id = _nn_check_positive_rtn(
wrapper.nc_configure(self.fd, address)
)
if not nanoconfig_started:
nanoconfig_started = True
ep = Socket.NanoconfigEndpoint(self, endpoint_id, address)
self._endpoints.append(ep)
return ep | def configure(self, address) | Configure socket's addresses with nanoconfig | 9.215701 | 8.227778 | 1.120072 |
if self.is_open():
fd = self._fd
self._fd = -1
if self.uses_nanoconfig:
wrapper.nc_close(fd)
else:
_nn_check_positive_rtn(wrapper.nn_close(fd)) | def close(self) | Close the socket | 9.031254 | 9.139271 | 0.988181 |
if buf is None:
rtn, out_buf = wrapper.nn_recv(self.fd, flags)
else:
rtn, out_buf = wrapper.nn_recv(self.fd, buf, flags)
_nn_check_positive_rtn(rtn)
return bytes(buffer(out_buf))[:rtn] | def recv(self, buf=None, flags=0) | Receive a message. | 5.278502 | 5.247223 | 1.005961 |
_nn_check_positive_rtn(wrapper.nn_send(self.fd, msg, flags)) | def send(self, msg, flags=0) | Send a message | 26.676855 | 29.235176 | 0.912492 |
# ax.figure.canvas.draw() # need to draw before the transforms are set.
transform = sc.get_transform()
transOffset = sc.get_offset_transform()
offsets = sc._offsets
paths = sc.get_paths()
transforms = sc.get_transforms()
if not transform.is_affine:
paths = [transform.transform_path_non_affine(p) for p in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
if isinstance(offsets, np.ma.MaskedArray):
offsets = offsets.filled(np.nan)
bboxes = []
if len(paths) and len(offsets):
if len(paths) < len(offsets):
# for usual scatters you have one path, but several offsets
paths = [paths[0]]*len(offsets)
if len(transforms) < len(offsets):
# often you may have a single scatter size, but several offsets
transforms = [transforms[0]]*len(offsets)
for p, o, t in zip(paths, offsets, transforms):
result = get_path_collection_extents(
transform.frozen(), [p], [t],
[o], transOffset.frozen())
bboxes.append(result.inverse_transformed(ax.transData))
return bboxes | def get_bboxes_pathcollection(sc, ax) | Function to return a list of bounding boxes in data coordinates
for a scatter plot
Thank you to ImportanceOfBeingErnest
https://stackoverflow.com/a/55007838/1304161 | 3.388869 | 3.38445 | 1.001305 |
cx, cy = get_midpoint(bbox)
dir_x = np.sign(cx-xp)
dir_y = np.sign(cy-yp)
if dir_x == -1:
dx = xp - bbox.xmax
elif dir_x == 1:
dx = xp - bbox.xmin
else:
dx = 0
if dir_y == -1:
dy = yp - bbox.ymax
elif dir_y == 1:
dy = yp - bbox.ymin
else:
dy = 0
return dx, dy | def overlap_bbox_and_point(bbox, xp, yp) | Given a bbox that contains a given point, return the (x, y) displacement
necessary to make the bbox not overlap the point. | 2.001333 | 1.925286 | 1.039499 |
if ax is None:
ax = plt.gca()
if renderer is None:
r = get_renderer(ax.get_figure())
else:
r = renderer
xmin, xmax = sorted(ax.get_xlim())
ymin, ymax = sorted(ax.get_ylim())
bboxes = get_bboxes(texts, r, expand, ax=ax)
if 'x' not in direction:
ha = ['']
else:
ha = ['left', 'right', 'center']
if 'y' not in direction:
va = ['']
else:
va = ['bottom', 'top', 'center']
alignment = list(product(ha, va))
# coords = np.array(zip(x, y))
for i, text in enumerate(texts):
# tcoords = np.array(text.get_position()).T
# nonself_coords = coords[~np.all(coords==tcoords, axis=1)]
# nonself_x, nonself_y = np.split(nonself_coords, 2, axis=1)
counts = []
for h, v in alignment:
if h:
text.set_ha(h)
if v:
text.set_va(v)
bbox = text.get_window_extent(r).expanded(*expand).\
transformed(ax.transData.inverted())
c = len(get_points_inside_bbox(x, y, bbox))
intersections = [bbox.intersection(bbox, bbox2) if i!=j else None
for j, bbox2 in enumerate(bboxes+add_bboxes) ]
intersections = sum([abs(b.width*b.height) if b is not None else 0
for b in intersections])
# Check for out-of-axes position
bbox = text.get_window_extent(r).transformed(ax.transData.inverted())
x1, y1, x2, y2 = bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax
if x1 < xmin or x2 > xmax or y1 < ymin or y2 > ymax:
axout = 1
else:
axout = 0
counts.append((axout, c, intersections))
# Most important: prefer alignments that keep the text inside the axes.
# If tied, take the alignments that minimize the number of x, y points
# contained inside the text.
# Break any remaining ties by minimizing the total area of intersections
# with all text bboxes and other objects to avoid.
a, value = min(enumerate(counts), key=itemgetter(1))
if 'x' in direction:
text.set_ha(alignment[a][0])
if 'y' in direction:
text.set_va(alignment[a][1])
bboxes[i] = text.get_window_extent(r).expanded(*expand).\
transformed(ax.transData.inverted())
return texts | def optimally_align_text(x, y, texts, expand=(1., 1.), add_bboxes=[],
renderer=None, ax=None,
direction='xy') | For all text objects find alignment that causes the least overlap with
points and other texts and apply it | 3.158447 | 3.108475 | 1.016076 |
if ax is None:
ax = plt.gca()
if renderer is None:
r = get_renderer(ax.get_figure())
else:
r = renderer
bboxes = get_bboxes(texts, r, expand, ax=ax)
xmins = [bbox.xmin for bbox in bboxes]
xmaxs = [bbox.xmax for bbox in bboxes]
ymaxs = [bbox.ymax for bbox in bboxes]
ymins = [bbox.ymin for bbox in bboxes]
overlaps_x = np.zeros((len(bboxes), len(bboxes)))
overlaps_y = np.zeros_like(overlaps_x)
overlap_directions_x = np.zeros_like(overlaps_x)
overlap_directions_y = np.zeros_like(overlaps_y)
for i, bbox1 in enumerate(bboxes):
overlaps = get_points_inside_bbox(xmins*2+xmaxs*2, (ymins+ymaxs)*2,
bbox1) % len(bboxes)
overlaps = np.unique(overlaps)
for j in overlaps:
bbox2 = bboxes[j]
x, y = bbox1.intersection(bbox1, bbox2).size
overlaps_x[i, j] = x
overlaps_y[i, j] = y
direction = np.sign(bbox1.extents - bbox2.extents)[:2]
overlap_directions_x[i, j] = direction[0]
overlap_directions_y[i, j] = direction[1]
move_x = overlaps_x*overlap_directions_x
move_y = overlaps_y*overlap_directions_y
delta_x = move_x.sum(axis=1)
delta_y = move_y.sum(axis=1)
q = np.sum(overlaps_x), np.sum(overlaps_y)
if move:
move_texts(texts, delta_x, delta_y, bboxes, ax=ax)
return delta_x, delta_y, q | def repel_text(texts, renderer=None, ax=None, expand=(1.2, 1.2),
only_use_max_min=False, move=False) | Repel texts from each other while expanding their bounding boxes by expand
(x, y), e.g. (1.2, 1.2) would multiply width and height by 1.2.
Requires a renderer to get the actual sizes of the text, and to that end
either one needs to be directly provided, or the axes have to be specified,
and the renderer is then got from the axes object. | 2.170751 | 2.150635 | 1.009354 |
if ax is None:
ax = plt.gca()
if renderer is None:
r = get_renderer(ax.get_figure())
else:
r = renderer
bboxes = get_bboxes(texts, r, expand, ax=ax)
overlaps_x = np.zeros((len(bboxes), len(add_bboxes)))
overlaps_y = np.zeros_like(overlaps_x)
overlap_directions_x = np.zeros_like(overlaps_x)
overlap_directions_y = np.zeros_like(overlaps_y)
for i, bbox1 in enumerate(bboxes):
for j, bbox2 in enumerate(add_bboxes):
try:
x, y = bbox1.intersection(bbox1, bbox2).size
direction = np.sign(bbox1.extents - bbox2.extents)[:2]
overlaps_x[i, j] = x
overlaps_y[i, j] = y
overlap_directions_x[i, j] = direction[0]
overlap_directions_y[i, j] = direction[1]
except AttributeError:
pass
move_x = overlaps_x*overlap_directions_x
move_y = overlaps_y*overlap_directions_y
delta_x = move_x.sum(axis=1)
delta_y = move_y.sum(axis=1)
q = np.sum(overlaps_x), np.sum(overlaps_y)
if move:
move_texts(texts, delta_x, delta_y, bboxes, ax=ax)
return delta_x, delta_y, q | def repel_text_from_bboxes(add_bboxes, texts, renderer=None, ax=None,
expand=(1.2, 1.2), only_use_max_min=False,
move=False) | Repel texts from other objects' bboxes while expanding their (texts')
bounding boxes by expand (x, y), e.g. (1.2, 1.2) would multiply width and
height by 1.2.
Requires a renderer to get the actual sizes of the text, and to that end
either one needs to be directly provided, or the axes have to be specified,
and the renderer is then got from the axes object. | 2.046718 | 2.028652 | 1.008905 |
assert len(x) == len(y)
if ax is None:
ax = plt.gca()
if renderer is None:
r = get_renderer(ax.get_figure())
else:
r = renderer
bboxes = get_bboxes(texts, r, expand, ax=ax)
# move_x[i,j] is the x displacement of the i'th text caused by the j'th point
move_x = np.zeros((len(bboxes), len(x)))
move_y = np.zeros((len(bboxes), len(x)))
for i, bbox in enumerate(bboxes):
xy_in = get_points_inside_bbox(x, y, bbox)
for j in xy_in:
xp, yp = x[j], y[j]
dx, dy = overlap_bbox_and_point(bbox, xp, yp)
move_x[i, j] = dx
move_y[i, j] = dy
delta_x = move_x.sum(axis=1)
delta_y = move_y.sum(axis=1)
q = np.sum(np.abs(move_x)), np.sum(np.abs(move_y))
if move:
move_texts(texts, delta_x, delta_y, bboxes, ax=ax)
return delta_x, delta_y, q | def repel_text_from_points(x, y, texts, renderer=None, ax=None,
expand=(1.2, 1.2), move=False) | Repel texts from all points specified by x and y while expanding their
(texts'!) bounding boxes by expand (x, y), e.g. (1.2, 1.2)
would multiply both width and height by 1.2.
Requires a renderer to get the actual sizes of the text, and to that end
either one needs to be directly provided, or the axes have to be specified,
and the renderer is then got from the axes object. | 2.343381 | 2.345963 | 0.998899 |
if not need_counts:
return CardinalityEstimator()
if size_mb is None:
raise ValueError("Max size in MB must be provided.")
if need_iteration:
if log_counting:
raise ValueError("Log counting is only supported with CMS implementation (need_iteration=False).")
return HashTable(size_mb=size_mb)
else:
return CountMinSketch(size_mb=size_mb, log_counting=log_counting) | def bounter(size_mb=None, need_iteration=True, need_counts=True, log_counting=None) | Factory method for bounter implementation.
Args:
size_mb (int): Desired memory footprint of the counter.
need_iteration (Bool): With `True`, create a `HashTable` implementation which can
iterate over inserted key/value pairs.
With `False`, create a `CountMinSketch` implementation which performs better in limited-memory scenarios,
but does not support iteration over elements.
need_counts (Bool): With `True`, construct the structure normally. With `False`, ignore all remaining
parameters and create a minimalistic cardinality counter based on hyperloglog which only takes 64KB memory.
log_counting (int): Counting to use with `CountMinSketch` implementation. Accepted values are
`None` (default counting with 32-bit integers), 1024 (16-bit), 8 (8-bit).
See `CountMinSketch` documentation for details.
Raises ValueError if not `None` and `need_iteration` is `True`. | 4.262643 | 3.292398 | 1.294692 |
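A short usage sketch for the `bounter` factory above. The `update()`, item-lookup, and `items()` calls are assumed from bounter's usual counter interface and are not shown in this table.

```python
from bounter import bounter

# Exact, iterable counts backed by the HashTable implementation.
counts = bounter(size_mb=128)
counts.update(['alice', 'bob', 'alice'])
print(counts['alice'])                 # 2
for key, count in counts.items():      # iteration works for the HashTable variant
    print(key, count)

# Approximate counts backed by CountMinSketch with 8-bit log counting.
cms = bounter(size_mb=64, need_iteration=False, log_counting=8)
cms.update(['alice', 'bob', 'alice'])
print(cms['alice'])                    # ~2 (approximate)
```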
self.authorize_url = self.authorize_url[:self.authorize_url.find('?')] \
if '?' in self.authorize_url else self.authorize_url
qb_service = OAuth1Service(
consumer_key=self.consumer_key,
consumer_secret=self.consumer_secret,
request_token_url=self.request_token_url,
access_token_url=self.access_token_url,
authorize_url=self.authorize_url,
)
response = qb_service.get_raw_request_token(
params={'oauth_callback': callback_url})
oauth_resp = dict(parse_qsl(response.text))
self.request_token = oauth_resp['oauth_token']
self.request_token_secret = oauth_resp['oauth_token_secret']
return qb_service.get_authorize_url(self.request_token) | def get_authorize_url(self, callback_url) | Returns the Authorize URL as returned by QB, and specified by OAuth 1.0a.
:return URI: | 1.898525 | 1.808194 | 1.049956 |
qb_service = OAuth1Service(
consumer_key=self.consumer_key,
consumer_secret=self.consumer_secret,
request_token_url=self.request_token_url,
access_token_url=self.access_token_url,
authorize_url=self.authorize_url,
)
session = qb_service.get_auth_session(
self.request_token,
self.request_token_secret,
data={'oauth_verifier': oauth_verifier})
self.access_token = session.access_token
self.access_token_secret = session.access_token_secret
return session | def get_access_tokens(self, oauth_verifier) | Wrapper around get_auth_session, returns session, and sets access_token and
access_token_secret on the QB Object.
:param oauth_verifier: the oauth_verifier as specified by OAuth 1.0a | 1.774562 | 1.668536 | 1.063544 |
auth_service = OAuth2Service(
name='quickbooks',
client_id=self.client_id,
client_secret=self.client_secret,
authorize_url=self.authorize_url,
access_token_url=self.access_token_url,
base_url=self.base_url,
)
params = {
'client_id': self.client_id,
'response_type': 'code',
'scope': 'com.intuit.quickbooks.accounting',
'redirect_uri': callback_url,
'state': state,
}
url = auth_service.get_authorize_url(**params)
return url | def get_authorize_url(self, callback_url, state=None) | Returns the Authorize URL as returned by QB, and specified by OAuth 2.0a.
:return URI: | 1.640785 | 1.666757 | 0.984418 |
if isinstance(obj, dict):
data = {}
for (k, v) in obj.items():
data[k] = to_dict(v, classkey)
return data
elif hasattr(obj, "_ast"):
return to_dict(obj._ast())
elif hasattr(obj, "__iter__") and not isinstance(obj, str):
return [to_dict(v, classkey) for v in obj]
elif hasattr(obj, "__dict__"):
if six.PY2:
data = dict([(key, to_dict(value, classkey))
for key, value in obj.__dict__.iteritems()
if not callable(value) and not key.startswith('_')])
else:
data = dict([(key, to_dict(value, classkey))
for key, value in obj.__dict__.items()
if not callable(value) and not key.startswith('_')])
if classkey is not None and hasattr(obj, "__class__"):
data[classkey] = obj.__class__.__name__
return data
else:
return obj | def to_dict(obj, classkey=None) | Recursively converts Python object into a dictionary | 1.290948 | 1.281217 | 1.007595 |
return lambda obj: dict((k, v) for k, v in obj.__dict__.items()
if not k.startswith('_') and getattr(obj, k) is not None) | def json_filter(self) | filter out properties that have names starting with _
or properties that have a value of None | 3.790205 | 3.037737 | 1.247707 |
return cls.where("", start_position=start_position, max_results=max_results, qb=qb) | def all(cls, start_position="", max_results=100, qb=None) | :param start_position:
:param max_results: The max number of entities that can be returned in a response is 1000.
:param qb:
:return: Returns list | 3.107196 | 4.760056 | 0.652765 |
return cls.where(build_where_clause(**kwargs),
start_position=start_position, max_results=max_results, qb=qb) | def filter(cls, start_position="", max_results="", qb=None, **kwargs) | :param start_position:
:param max_results:
:param qb:
:param kwargs: field names and values to filter the query
:return: Filtered list | 4.216359 | 4.948277 | 0.852086 |
return cls.where(build_choose_clause(choices, field), qb=qb) | def choose(cls, choices, field="Id", qb=None) | :param choices:
:param field:
:param qb:
:return: Filtered list | 8.865937 | 9.400902 | 0.943094 |
if where_clause:
where_clause = "WHERE " + where_clause
if start_position:
start_position = " STARTPOSITION " + str(start_position)
if max_results:
max_results = " MAXRESULTS " + str(max_results)
select = "SELECT * FROM {0} {1}{2}{3}".format(
cls.qbo_object_name, where_clause, start_position, max_results)
return cls.query(select, qb=qb) | def where(cls, where_clause="", start_position="", max_results="", qb=None) | :param where_clause: QBO SQL where clause (DO NOT include 'WHERE')
:param start_position:
:param max_results:
:param qb:
:return: Returns list filtered by input where_clause | 2.090281 | 2.00917 | 1.040371 |
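An illustrative call pattern for the query classmethods above, assuming a `Customer` entity class and an already-authenticated `QuickBooks` client object (both follow python-quickbooks conventions but are placeholders here).

```python
# `client` stands in for an already-configured QuickBooks session object.
from quickbooks.objects.customer import Customer

# Raw where clause (no leading 'WHERE'), with pagination.
customers = Customer.where("Active = True", start_position=1, max_results=25, qb=client)

# Keyword form; build_where_clause() assembles the clause from the kwargs.
active = Customer.filter(Active=True, max_results=25, qb=client)

# Record count for the same condition.
n = Customer.count("Active = True", qb=client)
```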
if not qb:
qb = QuickBooks()
json_data = qb.query(select)
obj_list = []
if cls.qbo_object_name in json_data["QueryResponse"]:
for item_json in json_data["QueryResponse"][cls.qbo_object_name]:
obj_list.append(cls.from_json(item_json))
return obj_list | def query(cls, select, qb=None) | :param select: QBO SQL query select statement
:param qb:
:return: Returns list | 2.959443 | 3.038697 | 0.973918 |
if not qb:
qb = QuickBooks()
if where_clause:
where_clause = "WHERE " + where_clause
select = "SELECT COUNT(*) FROM {0} {1}".format(
cls.qbo_object_name, where_clause)
json_data = qb.query(select)
if "totalCount" in json_data["QueryResponse"]:
return json_data["QueryResponse"]["totalCount"]
else:
return None | def count(cls, where_clause="", qb=None) | :param where_clause: QBO SQL where clause (DO NOT include 'WHERE')
:param qb:
:return: Returns database record count | 2.955835 | 2.858073 | 1.034206 |
url = self.current_user_url
result = self.get(url)
return result | def get_current_user(self) | Get data from the current user endpoint | 5.823729 | 4.132307 | 1.409317 |
if qs is None:
qs = {}
url = self.api_url + "/company/{0}/reports/{1}".format(self.company_id, report_type)
result = self.get(url, params=qs)
return result | def get_report(self, report_type, qs=None) | Get data from the report endpoint | 3.155966 | 2.757015 | 1.144704 |
url = self.disconnect_url
result = self.get(url)
return result | def disconnect_account(self) | Disconnect current account from the application
:return: | 8.379554 | 9.561772 | 0.87636 |
url = self.reconnect_url
result = self.get(url)
return result | def reconnect_account(self) | Reconnect current account by refreshing OAuth access tokens
:return: | 8.830964 | 10.882234 | 0.811503 |
return (math.cos(lat * math.pi / 180.0) * 2 * math.pi * 6378137) / (
tilesize * 2 ** zoom
) | def _meters_per_pixel(zoom, lat=0.0, tilesize=256) | Return the pixel resolution for a given mercator tile zoom and latitude.
Parameters
----------
zoom: int
Mercator zoom level
lat: float, optional
Latitude in decimal degree (default: 0)
tilesize: int, optional
Mercator tile size (default: 256).
Returns
-------
Pixel resolution in meters | 2.57104 | 3.236562 | 0.794374 |
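As a quick sanity check of the formula above: at zoom 0 on the equator one 256 px tile covers the full Web Mercator circumference, and the resolution halves with every zoom level.

```python
# Uses the _meters_per_pixel helper defined above.
# 2 * pi * 6378137 / 256 ≈ 156543 m/px at zoom 0 on the equator.
print(round(_meters_per_pixel(0)))        # 156543
print(round(_meters_per_pixel(10)))       # 153
print(round(_meters_per_pixel(10, 60)))   # 76  (cos 60° = 0.5 shrinks the ground distance)
```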
for z in range(max_z):
if pixel_size > _meters_per_pixel(z, 0, tilesize=tilesize):
return max(0, z - 1) # We don't want to scale up
return max_z - 1 | def zoom_for_pixelsize(pixel_size, max_z=24, tilesize=256) | Get mercator zoom level corresponding to a pixel resolution.
Freely adapted from
https://github.com/OSGeo/gdal/blob/b0dfc591929ebdbccd8a0557510c5efdb893b852/gdal/swig/python/scripts/gdal2tiles.py#L294
Parameters
----------
pixel_size: float
Pixel size
max_z: int, optional (default: 24)
Max mercator zoom level allowed
tilesize: int, optional
Mercator tile size (default: 256).
Returns
-------
Mercator zoom level corresponding to the pixel resolution | 4.350992 | 5.235334 | 0.831082 |
bounds = transform_bounds(
*[src_dst.crs, "epsg:4326"] + list(src_dst.bounds), densify_pts=21
)
center = [(bounds[0] + bounds[2]) / 2, (bounds[1] + bounds[3]) / 2]
lat = center[1] if ensure_global_max_zoom else 0
dst_affine, w, h = calculate_default_transform(
src_dst.crs, "epsg:3857", src_dst.width, src_dst.height, *src_dst.bounds
)
mercator_resolution = max(abs(dst_affine[0]), abs(dst_affine[4]))
# Correction factor for web-mercator projection latitude scale change
latitude_correction_factor = math.cos(math.radians(lat))
adjusted_resolution = mercator_resolution * latitude_correction_factor
max_zoom = zoom_for_pixelsize(adjusted_resolution, tilesize=tilesize)
ovr_resolution = adjusted_resolution * max(h, w) / tilesize
min_zoom = zoom_for_pixelsize(ovr_resolution, tilesize=tilesize)
return (min_zoom, max_zoom) | def get_zooms(src_dst, ensure_global_max_zoom=False, tilesize=256) | Calculate raster min/max mercator zoom level.
Parameters
----------
src_dst: rasterio.io.DatasetReader
Rasterio io.DatasetReader object
ensure_global_max_zoom: bool, optional
Apply latitude correction factor to ensure max_zoom equality for global
datasets covering different latitudes (default: False).
tilesize: int, optional
Mercator tile size (default: 256).
Returns
-------
min_zoom, max_zoom: Tuple
Min/Max Mercator zoom levels. | 2.699791 | 2.684202 | 1.005808 |
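A usage sketch for `get_zooms`; the GeoTIFF path is a placeholder and the dataset is opened with rasterio, as the function expects.

```python
import rasterio

# "example.tif" is a placeholder path to any raster readable by rasterio.
with rasterio.open("example.tif") as src_dst:
    min_zoom, max_zoom = get_zooms(src_dst, ensure_global_max_zoom=True)
print(min_zoom, max_zoom)
```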
with rasterio.open(address) as src:
wgs_bounds = transform_bounds(
*[src.crs, "epsg:4326"] + list(src.bounds), densify_pts=21
)
return {"url": address, "bounds": list(wgs_bounds)} | def bounds(address) | Retrieve image bounds.
Attributes
----------
address : str
file url.
Returns
-------
out : dict
dictionary with image bounds. | 2.971122 | 3.923895 | 0.757187 |
info = {"address": address}
info.update(utils.raster_get_stats(address, percentiles=(pmin, pmax), **kwargs))
return info | def metadata(address, pmin=2, pmax=98, **kwargs) | Return image bounds and band statistics.
Attributes
----------
address : str or PathLike object
A dataset path or URL. Will be opened in "r" mode.
pmin : int, optional, (default: 2)
Histogram minimum cut.
pmax : int, optional, (default: 98)
Histogram maximum cut.
kwargs : optional
These are passed to 'rio_tiler.utils.raster_get_stats'
e.g: overview_level=2, dst_crs='epsg:4326'
Returns
-------
out : dict
Dictionary with image bounds and bands statistics. | 6.166144 | 6.077109 | 1.014651 |
with rasterio.open(address) as src:
wgs_bounds = transform_bounds(
*[src.crs, "epsg:4326"] + list(src.bounds), densify_pts=21
)
if not utils.tile_exists(wgs_bounds, tile_z, tile_x, tile_y):
raise TileOutsideBounds(
"Tile {}/{}/{} is outside image bounds".format(tile_z, tile_x, tile_y)
)
mercator_tile = mercantile.Tile(x=tile_x, y=tile_y, z=tile_z)
tile_bounds = mercantile.xy_bounds(mercator_tile)
return utils.tile_read(src, tile_bounds, tilesize, **kwargs) | def tile(address, tile_x, tile_y, tile_z, tilesize=256, **kwargs) | Create mercator tile from any images.
Attributes
----------
address : str
file url.
tile_x : int
Mercator tile X index.
tile_y : int
Mercator tile Y index.
tile_z : int
Mercator tile ZOOM level.
tilesize : int, optional (default: 256)
Output image size.
kwargs: dict, optional
These will be passed to the 'rio_tiler.utils._tile_read' function.
Returns
-------
data : numpy ndarray
mask: numpy array | 2.148255 | 2.118707 | 1.013946 |
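Putting `bounds` and `tile` together, a minimal sketch (the COG path is a placeholder; `mercantile` picks a tile index over the dataset centre):

```python
import mercantile
from rio_tiler.main import bounds, tile

src = "example-cog.tif"                      # placeholder path
w, s, e, n = bounds(src)["bounds"]

# Tile covering the dataset centre at zoom 10.
t = mercantile.tile((w + e) / 2, (s + n) / 2, 10)
data, mask = tile(src, t.x, t.y, t.z, tilesize=256)
print(data.shape, mask.shape)                # (bands, 256, 256) (256, 256)
```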
sample, edges = np.histogram(arr[~arr.mask], **kwargs)
return {
"pc": np.percentile(arr[~arr.mask], percentiles).astype(arr.dtype).tolist(),
"min": arr.min().item(),
"max": arr.max().item(),
"std": arr.std().item(),
"histogram": [sample.tolist(), edges.tolist()],
} | def _stats(arr, percentiles=(2, 98), **kwargs) | Calculate array statistics.
Attributes
----------
arr: numpy ndarray
Input array data to get the stats from.
percentiles: tuple, optional
Tuple of Min/Max percentiles to compute.
kwargs: dict, optional
These will be passed to the numpy.histogram function.
Returns
-------
dict
numpy array statistics: percentiles, min, max, stdev, histogram
e.g.
{
'pc': [38, 147],
'min': 20,
'max': 180,
'std': 28.123562304138662,
'histogram': [
[1625, 219241, 28344, 15808, 12325, 10687, 8535, 7348, 4656, 1208],
[20.0, 36.0, 52.0, 68.0, 84.0, 100.0, 116.0, 132.0, 148.0, 164.0, 180.0]
]
} | 3.126159 | 2.752429 | 1.135782 |
dst_transform, _, _ = calculate_default_transform(
src_dst.crs, bounds_crs, src_dst.width, src_dst.height, *src_dst.bounds
)
w, s, e, n = bounds
vrt_width = math.ceil((e - w) / dst_transform.a)
vrt_height = math.ceil((s - n) / dst_transform.e)
vrt_transform = transform.from_bounds(w, s, e, n, vrt_width, vrt_height)
return vrt_transform, vrt_width, vrt_height | def get_vrt_transform(src_dst, bounds, bounds_crs="epsg:3857") | Calculate VRT transform.
Attributes
----------
src_dst : rasterio.io.DatasetReader
Rasterio io.DatasetReader object
bounds : list
Bounds (left, bottom, right, top)
bounds_crs : str
Coordinate reference system string (default "epsg:3857")
Returns
-------
vrt_transform: Affine
Output affine transformation matrix
vrt_width, vrt_height: int
Output dimensions | 1.94764 | 2.056514 | 0.947059 |
if (
any([MaskFlags.alpha in flags for flags in src_dst.mask_flag_enums])
or ColorInterp.alpha in src_dst.colorinterp
):
return True
return False | def has_alpha_band(src_dst) | Check for alpha band or mask in source. | 6.084464 | 5.749256 | 1.058305 |
if isinstance(indexes, int):
indexes = [indexes]
elif isinstance(indexes, tuple):
indexes = list(indexes)
vrt_params = dict(
add_alpha=True, crs="epsg:3857", resampling=Resampling[resampling_method]
)
vrt_transform, vrt_width, vrt_height = get_vrt_transform(src_dst, bounds)
vrt_params.update(dict(transform=vrt_transform, width=vrt_width, height=vrt_height))
indexes = indexes if indexes is not None else src_dst.indexes
out_shape = (len(indexes), tilesize, tilesize)
nodata = nodata if nodata is not None else src_dst.nodata
if nodata is not None:
vrt_params.update(dict(nodata=nodata, add_alpha=False, src_nodata=nodata))
if has_alpha_band(src_dst):
vrt_params.update(dict(add_alpha=False))
with WarpedVRT(src_dst, **vrt_params) as vrt:
data = vrt.read(
out_shape=out_shape,
indexes=indexes,
resampling=Resampling[resampling_method],
)
mask = vrt.dataset_mask(out_shape=(tilesize, tilesize))
return data, mask | def _tile_read(
src_dst, bounds, tilesize, indexes=None, nodata=None, resampling_method="bilinear"
) | Read data and mask.
Attributes
----------
src_dst : rasterio.io.DatasetReader
rasterio.io.DatasetReader object
bounds : list
Mercator tile bounds (left, bottom, right, top)
tilesize : int
Output image size
indexes : list of ints or a single int, optional, (defaults: None)
If `indexes` is a list, the result is a 3D array, but is
a 2D array if it is a band index number.
nodata: int or float, optional (defaults: None)
resampling_method : str, optional (default: "bilinear")
Resampling algorithm
Returns
-------
out : array, int
returns pixel value. | 2.089906 | 2.138086 | 0.977466 |
if isinstance(source, DatasetReader):
return _tile_read(source, bounds, tilesize, **kwargs)
else:
with rasterio.open(source) as src_dst:
return _tile_read(src_dst, bounds, tilesize, **kwargs) | def tile_read(source, bounds, tilesize, **kwargs) | Read data and mask.
Attributes
----------
source : str or rasterio.io.DatasetReader
input file path or rasterio.io.DatasetReader object
bounds : list
Mercator tile bounds (left, bottom, right, top)
tilesize : int
Output image size
kwargs: dict, optional
These will be passed to the _tile_read function.
Returns
-------
out : array, int
returns pixel value. | 2.209708 | 2.221386 | 0.994743 |
imin, imax = in_range
omin, omax = out_range
image = np.clip(image, imin, imax) - imin
image = image / np.float(imax - imin)
return image * (omax - omin) + omin | def linear_rescale(image, in_range=(0, 1), out_range=(1, 255)) | Linear rescaling.
Attributes
----------
image : numpy ndarray
Image array to rescale.
in_range : list, int, optional, (default: [0,1])
Image min/max value to rescale.
out_range : list, int, optional, (default: [1,255])
output min/max bounds to rescale to.
Returns
-------
out : numpy ndarray
returns rescaled image array. | 2.162869 | 2.758472 | 0.784082 |
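For example, a 2–98 percentile stretch of a 16-bit band down to the 0–255 display range (note that the helper above calls `np.float`, which requires NumPy older than 1.24):

```python
import numpy as np

band = np.random.randint(0, 10000, (256, 256)).astype("uint16")
p2, p98 = np.percentile(band, (2, 98))

scaled = linear_rescale(band, in_range=(p2, p98), out_range=(0, 255)).astype("uint8")
print(scaled.min(), scaled.max())  # 0 255
```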
mintile = mercantile.tile(bounds[0], bounds[3], tile_z)
maxtile = mercantile.tile(bounds[2], bounds[1], tile_z)
return (
(tile_x <= maxtile.x + 1)
and (tile_x >= mintile.x)
and (tile_y <= maxtile.y + 1)
and (tile_y >= mintile.y)
) | def tile_exists(bounds, tile_z, tile_x, tile_y) | Check if a mercator tile is inside the given bounds.
Attributes
----------
bounds : list
WGS84 bounds (left, bottom, right, top).
x : int
Mercator tile X index.
y : int
Mercator tile Y index.
z : int
Mercator tile ZOOM level.
Returns
-------
out : boolean
if True, the z-x-y mercator tile is inside the bounds. | 1.78606 | 2.376371 | 0.751591 |
res = np.zeros((arr.shape[1], arr.shape[2], 3), dtype=np.uint8)
for k, v in cmap.items():
res[arr[0] == k] = v
return np.transpose(res, [2, 0, 1]) | def _apply_discrete_colormap(arr, cmap) | Apply discrete colormap.
Attributes
----------
arr : numpy.ndarray
1D image array to convert.
color_map: dict
Discrete ColorMap dictionary
e.g:
{
1: [255, 255, 255],
2: [255, 0, 0]
}
Returns
-------
arr: numpy.ndarray | 2.329903 | 3.009926 | 0.774073 |
img_format = img_format.lower()
if len(arr.shape) < 3:
arr = np.expand_dims(arr, axis=0)
if color_map is not None and isinstance(color_map, dict):
arr = _apply_discrete_colormap(arr, color_map)
elif color_map is not None:
arr = np.transpose(color_map[arr][0], [2, 0, 1]).astype(np.uint8)
# WEBP doesn't support 1-band datasets so we must hack to create an RGB dataset
if img_format == "webp" and arr.shape[0] == 1:
arr = np.repeat(arr, 3, axis=0)
if mask is not None and img_format != "jpeg":
nbands = arr.shape[0] + 1
else:
nbands = arr.shape[0]
output_profile = dict(
driver=img_format,
dtype=arr.dtype,
count=nbands,
height=arr.shape[1],
width=arr.shape[2],
)
output_profile.update(creation_options)
with MemoryFile() as memfile:
with memfile.open(**output_profile) as dst:
dst.write(arr, indexes=list(range(1, arr.shape[0] + 1)))
# Use Mask as an alpha band
if mask is not None and img_format != "jpeg":
dst.write(mask.astype(arr.dtype), indexes=nbands)
return memfile.read() | def array_to_image(
arr, mask=None, img_format="png", color_map=None, **creation_options
) | Translate numpy ndarray to image buffer using GDAL.
Usage
-----
tile, mask = rio_tiler.utils.tile_read(......)
with open('test.jpg', 'wb') as f:
f.write(array_to_image(tile, mask, img_format="jpeg"))
Attributes
----------
arr : numpy ndarray
Image array to encode.
mask: numpy ndarray, optional
Mask array
img_format: str, optional
Image format to return (default: 'png').
List of supported format by GDAL: https://www.gdal.org/formats_list.html
color_map: numpy.ndarray or dict, optional
color_map can be either a (256, 3) array or RGB triplet
(e.g. [[255, 255, 255],...]) mapping each 1D pixel value rescaled
from 0 to 255
OR
it can be a dictionary of discrete values
(e.g. { 1.3: [255, 255, 255], 2.5: [255, 0, 0]}) mapping any pixel value to a triplet
creation_options: dict, optional
Image driver creation options to pass to GDAL
Returns
-------
bytes | 2.521153 | 2.484464 | 1.014767 |
cmap_file = os.path.join(os.path.dirname(__file__), "cmap", "{0}.txt".format(name))
with open(cmap_file) as cmap:
lines = cmap.read().splitlines()
colormap = [
list(map(int, line.split())) for line in lines if not line.startswith("#")
][1:]
cmap = list(np.array(colormap).flatten())
if format.lower() == "pil":
return cmap
elif format.lower() == "gdal":
return np.array(list(_chunks(cmap, 3)))
else:
raise Exception("Unsupported {} colormap format".format(format)) | def get_colormap(name="cfastie", format="pil") | Return Pillow or GDAL compatible colormap array.
Attributes
----------
name : str, optional
Colormap name (default: cfastie)
format: str, optional
Compatibility library, should be "pil" or "gdal" (default: pil).
Returns
-------
colormap : list or numpy.array
Color map list in a Pillow friendly format
more info: http://pillow.readthedocs.io/en/3.4.x/reference/Image.html#PIL.Image.Image.putpalette
or
Color map array in GDAL friendly format | 2.676276 | 2.855092 | 0.93737 |
arr = np.clip(arr + 32768.0, 0.0, 65535.0)
r = arr / 256
g = arr % 256
b = (arr * 256) % 256
return np.stack([r, g, b]).astype(np.uint8) | def mapzen_elevation_rgb(arr) | Encode elevation value to RGB values compatible with Mapzen tangram.
Attributes
----------
arr : numpy ndarray
Image array to encode.
Returns
-------
out : numpy ndarray
RGB array (3, h, w) | 2.189802 | 2.770291 | 0.790459 |
if not expr:
raise Exception("Missing expression")
bands_names = tuple(set(re.findall(r"b(?P<bands>[0-9A]{1,2})", expr)))
rgb = expr.split(",")
if sceneid.startswith("L"):
from rio_tiler.landsat8 import tile as l8_tile
arr, mask = l8_tile(
sceneid, tile_x, tile_y, tile_z, bands=bands_names, **kwargs
)
elif sceneid.startswith("S2"):
from rio_tiler.sentinel2 import tile as s2_tile
arr, mask = s2_tile(
sceneid, tile_x, tile_y, tile_z, bands=bands_names, **kwargs
)
elif sceneid.startswith("CBERS"):
from rio_tiler.cbers import tile as cbers_tile
arr, mask = cbers_tile(
sceneid, tile_x, tile_y, tile_z, bands=bands_names, **kwargs
)
else:
from rio_tiler.main import tile as main_tile
bands = tuple(map(int, bands_names))
arr, mask = main_tile(sceneid, tile_x, tile_y, tile_z, indexes=bands, **kwargs)
ctx = {}
for bdx, b in enumerate(bands_names):
ctx["b{}".format(b)] = arr[bdx]
return (
np.array(
[np.nan_to_num(ne.evaluate(bloc.strip(), local_dict=ctx)) for bloc in rgb]
),
mask,
) | def expression(sceneid, tile_x, tile_y, tile_z, expr=None, **kwargs) | Apply expression on data.
Attributes
----------
sceneid : str
Landsat id, Sentinel id, CBERS ids or file url.
tile_x : int
Mercator tile X index.
tile_y : int
Mercator tile Y index.
tile_z : int
Mercator tile ZOOM level.
expr : str, required
Expression to apply (e.g '(B5+B4)/(B5-B4)')
Band name should start with 'B'.
Returns
-------
out : ndarray
Returns processed pixel value. | 2.343235 | 2.365045 | 0.990778 |
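An illustrative NDVI-style call for `expression` on a plain raster (placeholder path and tile indexes; note that the regex above matches lowercase `b` band references):

```python
# The tile indexes must fall inside the file's bounds; 486/332/10 are placeholders.
ndvi, mask = expression(
    "example.tif", 486, 332, 10,
    expr="(b4 - b3) / (b4 + b3)",
)
print(ndvi.shape)  # (1, 256, 256): one output band per comma-separated sub-expression
```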
def _calculateRatio(rgb, pan, weight):
return pan / ((rgb[0] + rgb[1] + rgb[2] * weight) / (2 + weight))
with np.errstate(invalid="ignore", divide="ignore"):
ratio = _calculateRatio(rgb, pan, weight)
return np.clip(ratio * rgb, 0, np.iinfo(pan_dtype).max).astype(pan_dtype) | def pansharpening_brovey(rgb, pan, weight, pan_dtype) | Brovey Method: Each resampled, multispectral pixel is
multiplied by the ratio of the corresponding
panchromatic pixel intensity to the sum of all the
multispectral intensities.
Original code from https://github.com/mapbox/rio-pansharpen | 3.447315 | 3.090781 | 1.115354 |
if not re.match("^S2[AB]_tile_[0-9]{8}_[0-9]{2}[A-Z]{3}_[0-9]$", sceneid):
raise InvalidSentinelSceneId("Could not match {}".format(sceneid))
sentinel_pattern = (
r"^S"
r"(?P<sensor>\w{1})"
r"(?P<satellite>[AB]{1})"
r"_tile_"
r"(?P<acquisitionYear>[0-9]{4})"
r"(?P<acquisitionMonth>[0-9]{2})"
r"(?P<acquisitionDay>[0-9]{2})"
r"_"
r"(?P<utm>[0-9]{2})"
r"(?P<lat>\w{1})"
r"(?P<sq>\w{2})"
r"_"
r"(?P<num>[0-9]{1})$"
)
meta = None
match = re.match(sentinel_pattern, sceneid, re.IGNORECASE)
if match:
meta = match.groupdict()
utm_zone = meta["utm"].lstrip("0")
grid_square = meta["sq"]
latitude_band = meta["lat"]
year = meta["acquisitionYear"]
month = meta["acquisitionMonth"].lstrip("0")
day = meta["acquisitionDay"].lstrip("0")
img_num = meta["num"]
meta["key"] = "tiles/{}/{}/{}/{}/{}/{}/{}".format(
utm_zone, latitude_band, grid_square, year, month, day, img_num
)
meta["scene"] = sceneid
return meta | def _sentinel_parse_scene_id(sceneid) | Parse Sentinel-2 scene id.
Attributes
----------
sceneid : str
Sentinel-2 sceneid.
Returns
-------
out : dict
dictionary with metadata constructed from the sceneid.
e.g:
_sentinel_parse_scene_id('S2A_tile_20170323_07SNC_0')
{
"acquisitionDay": "23",
"acquisitionMonth": "03",
"acquisitionYear": "2017",
"key": "tiles/7/S/NC/2017/3/23/0",
"lat": "S",
"num": "0",
"satellite": "A",
"scene": "S2A_tile_20170323_07SNC_0",
"sensor": "2",
"sq": "NC",
"utm": "07",
} | 2.572741 | 1.992932 | 1.290933 |
scene_params = _sentinel_parse_scene_id(sceneid)
sentinel_address = "{}/{}".format(SENTINEL_BUCKET, scene_params["key"])
with rasterio.open("{}/preview.jp2".format(sentinel_address)) as src:
wgs_bounds = transform_bounds(
*[src.crs, "epsg:4326"] + list(src.bounds), densify_pts=21
)
info = {"sceneid": sceneid}
info["bounds"] = list(wgs_bounds)
return info | def bounds(sceneid) | Retrieve image bounds.
Attributes
----------
sceneid : str
Sentinel-2 sceneid.
Returns
-------
out : dict
dictionary with image bounds. | 3.569127 | 4.051224 | 0.881 |
with rasterio.open(src_path) as src:
arr = src.read(indexes=[1], masked=True)
arr[arr == 0] = np.ma.masked
params = {}
if histogram_bins:
params.update(dict(bins=histogram_bins))
if histogram_range:
params.update(dict(range=histogram_range))
return {1: utils._stats(arr, percentiles=percentiles, **params)} | def _sentinel_stats(
src_path, percentiles=(2, 98), histogram_bins=10, histogram_range=None
) | src_path : str or PathLike object
A dataset path or URL. Will be opened in "r" mode. | 2.963412 | 2.93658 | 1.009137 |
scene_params = _sentinel_parse_scene_id(sceneid)
sentinel_address = "{}/{}".format(SENTINEL_BUCKET, scene_params["key"])
dst_crs = CRS({"init": "EPSG:4326"})
with rasterio.open("{}/preview.jp2".format(sentinel_address)) as src:
bounds = transform_bounds(
*[src.crs, dst_crs] + list(src.bounds), densify_pts=21
)
info = {"sceneid": sceneid}
info["bounds"] = {"value": bounds, "crs": dst_crs.to_string()}
addresses = [
"{}/preview/B{}.jp2".format(sentinel_address, band) for band in SENTINEL_BANDS
]
_stats_worker = partial(_sentinel_stats, percentiles=(pmin, pmax), **kwargs)
with futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
responses = executor.map(_stats_worker, addresses)
info["statistics"] = {
b: v for b, d in zip(SENTINEL_BANDS, responses) for k, v in d.items()
}
return info | def metadata(sceneid, pmin=2, pmax=98, **kwargs) | Retrieve image bounds and band statistics.
Attributes
----------
sceneid : str
Sentinel-2 sceneid.
pmin : int, optional, (default: 2)
Histogram minimum cut.
pmax : int, optional, (default: 98)
Histogram maximum cut.
kwargs : optional
These are passed to 'rio_tiler.sentinel2._sentinel_stats'
e.g: histogram_bins=20'
Returns
-------
out : dict
Dictionary with image bounds and bands statistics. | 3.164217 | 3.203086 | 0.987865 |
if not isinstance(bands, tuple):
bands = tuple((bands,))
for band in bands:
if band not in SENTINEL_BANDS:
raise InvalidBandName("{} is not a valid Sentinel band name".format(band))
scene_params = _sentinel_parse_scene_id(sceneid)
sentinel_address = "{}/{}".format(SENTINEL_BUCKET, scene_params["key"])
sentinel_preview = "{}/preview.jp2".format(sentinel_address)
with rasterio.open(sentinel_preview) as src:
wgs_bounds = transform_bounds(
*[src.crs, "epsg:4326"] + list(src.bounds), densify_pts=21
)
if not utils.tile_exists(wgs_bounds, tile_z, tile_x, tile_y):
raise TileOutsideBounds(
"Tile {}/{}/{} is outside image bounds".format(tile_z, tile_x, tile_y)
)
mercator_tile = mercantile.Tile(x=tile_x, y=tile_y, z=tile_z)
tile_bounds = mercantile.xy_bounds(mercator_tile)
addresses = ["{}/B{}.jp2".format(sentinel_address, band) for band in bands]
_tiler = partial(utils.tile_read, bounds=tile_bounds, tilesize=tilesize, nodata=0)
with futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
data, masks = zip(*list(executor.map(_tiler, addresses)))
mask = np.all(masks, axis=0).astype(np.uint8) * 255
return np.concatenate(data), mask | def tile(sceneid, tile_x, tile_y, tile_z, bands=("04", "03", "02"), tilesize=256) | Create mercator tile from Sentinel-2 data.
Attributes
----------
sceneid : str
Sentinel-2 sceneid.
tile_x : int
Mercator tile X index.
tile_y : int
Mercator tile Y index.
tile_z : int
Mercator tile ZOOM level.
bands : tuple, str, optional (default: ('04', '03', '02'))
Bands index for the RGB combination.
tilesize : int, optional (default: 256)
Output image size.
Returns
-------
data : numpy ndarray
mask: numpy array | 2.415556 | 2.382569 | 1.013845 |
scene_params = _landsat_parse_scene_id(sceneid)
meta_file = "http://landsat-pds.s3.amazonaws.com/{}_MTL.txt".format(
scene_params["key"]
)
metadata = str(urlopen(meta_file).read().decode())
return toa_utils._parse_mtl_txt(metadata) | def _landsat_get_mtl(sceneid) | Get Landsat-8 MTL metadata.
Attributes
----------
sceneid : str
Landsat sceneid. For scenes after May 2017,
sceneid have to be LANDSAT_PRODUCT_ID.
Returns
-------
out : dict
returns a JSON like object with the metadata. | 3.842116 | 4.811244 | 0.79857 |
pre_collection = r"(L[COTEM]8\d{6}\d{7}[A-Z]{3}\d{2})"
collection_1 = r"(L[COTEM]08_L\d{1}[A-Z]{2}_\d{6}_\d{8}_\d{8}_\d{2}_(T1|T2|RT))"
if not re.match("^{}|{}$".format(pre_collection, collection_1), sceneid):
raise InvalidLandsatSceneId("Could not match {}".format(sceneid))
precollection_pattern = (
r"^L"
r"(?P<sensor>\w{1})"
r"(?P<satellite>\w{1})"
r"(?P<path>[0-9]{3})"
r"(?P<row>[0-9]{3})"
r"(?P<acquisitionYear>[0-9]{4})"
r"(?P<acquisitionJulianDay>[0-9]{3})"
r"(?P<groundStationIdentifier>\w{3})"
r"(?P<archiveVersion>[0-9]{2})$"
)
collection_pattern = (
r"^L"
r"(?P<sensor>\w{1})"
r"(?P<satellite>\w{2})"
r"_"
r"(?P<processingCorrectionLevel>\w{4})"
r"_"
r"(?P<path>[0-9]{3})"
r"(?P<row>[0-9]{3})"
r"_"
r"(?P<acquisitionYear>[0-9]{4})"
r"(?P<acquisitionMonth>[0-9]{2})"
r"(?P<acquisitionDay>[0-9]{2})"
r"_"
r"(?P<processingYear>[0-9]{4})"
r"(?P<processingMonth>[0-9]{2})"
r"(?P<processingDay>[0-9]{2})"
r"_"
r"(?P<collectionNumber>\w{2})"
r"_"
r"(?P<collectionCategory>\w{2})$"
)
meta = None
for pattern in [collection_pattern, precollection_pattern]:
match = re.match(pattern, sceneid, re.IGNORECASE)
if match:
meta = match.groupdict()
break
if meta.get("acquisitionJulianDay"):
date = datetime.datetime(
int(meta["acquisitionYear"]), 1, 1
) + datetime.timedelta(int(meta["acquisitionJulianDay"]) - 1)
meta["date"] = date.strftime("%Y-%m-%d")
else:
meta["date"] = "{}-{}-{}".format(
meta["acquisitionYear"], meta["acquisitionMonth"], meta["acquisitionDay"]
)
collection = meta.get("collectionNumber", "")
if collection != "":
collection = "c{}".format(int(collection))
meta["key"] = os.path.join(
collection, "L8", meta["path"], meta["row"], sceneid, sceneid
)
meta["scene"] = sceneid
return meta | def _landsat_parse_scene_id(sceneid) | Parse Landsat-8 scene id.
Author @perrygeo - http://www.perrygeo.com | 2.091744 | 2.080871 | 1.005225 |
meta_data = _landsat_get_mtl(sceneid).get("L1_METADATA_FILE")
info = {"sceneid": sceneid}
info["bounds"] = toa_utils._get_bounds_from_metadata(meta_data["PRODUCT_METADATA"])
return info | def bounds(sceneid) | Retrieve image bounds.
Attributes
----------
sceneid : str
Landsat sceneid. For scenes after May 2017,
sceneid have to be LANDSAT_PRODUCT_ID.
Returns
-------
out : dict
dictionary with image bounds. | 6.580517 | 7.303561 | 0.901001 |
scene_params = _landsat_parse_scene_id(sceneid)
meta_data = _landsat_get_mtl(sceneid).get("L1_METADATA_FILE")
path_prefix = "{}/{}".format(LANDSAT_BUCKET, scene_params["key"])
info = {"sceneid": sceneid}
_stats_worker = partial(
_landsat_stats,
address_prefix=path_prefix,
metadata=meta_data,
overview_level=1,
percentiles=(pmin, pmax),
**kwargs
)
with futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
responses = list(executor.map(_stats_worker, LANDSAT_BANDS))
info["bounds"] = [
r["bounds"] for b, r in zip(LANDSAT_BANDS, responses) if b == "8"
][0]
info["statistics"] = {
b: v
for b, d in zip(LANDSAT_BANDS, responses)
for k, v in d["statistics"].items()
}
return info | def metadata(sceneid, pmin=2, pmax=98, **kwargs) | Retrieve image bounds and band statistics.
Attributes
----------
sceneid : str
Landsat sceneid. For scenes after May 2017,
sceneid have to be LANDSAT_PRODUCT_ID.
pmin : int, optional, (default: 2)
Histogram minimum cut.
pmax : int, optional, (default: 98)
Histogram maximum cut.
kwargs : optional
These are passed to 'rio_tiler.landsat8._landsat_stats'
e.g: histogram_bins=20, dst_crs='epsg:4326'
Returns
-------
out : dict
Dictionary with bounds and bands statistics. | 3.534616 | 3.597397 | 0.982548 |
if not re.match(r"^CBERS_4_\w+_[0-9]{8}_[0-9]{3}_[0-9]{3}_L[0-9]$", sceneid):
raise InvalidCBERSSceneId("Could not match {}".format(sceneid))
cbers_pattern = (
r"(?P<satellite>\w+)_"
r"(?P<mission>[0-9]{1})"
r"_"
r"(?P<instrument>\w+)"
r"_"
r"(?P<acquisitionYear>[0-9]{4})"
r"(?P<acquisitionMonth>[0-9]{2})"
r"(?P<acquisitionDay>[0-9]{2})"
r"_"
r"(?P<path>[0-9]{3})"
r"_"
r"(?P<row>[0-9]{3})"
r"_"
r"(?P<processingCorrectionLevel>L[0-9]{1})$"
)
meta = None
match = re.match(cbers_pattern, sceneid, re.IGNORECASE)
if match:
meta = match.groupdict()
path = meta["path"]
row = meta["row"]
instrument = meta["instrument"]
meta["key"] = "CBERS4/{}/{}/{}/{}".format(instrument, path, row, sceneid)
meta["scene"] = sceneid
instrument_params = {
"MUX": {
"reference_band": "6",
"bands": ["5", "6", "7", "8"],
"rgb": ("7", "6", "5"),
},
"AWFI": {
"reference_band": "14",
"bands": ["13", "14", "15", "16"],
"rgb": ("15", "14", "13"),
},
"PAN10M": {
"reference_band": "4",
"bands": ["2", "3", "4"],
"rgb": ("3", "4", "2"),
},
"PAN5M": {"reference_band": "1", "bands": ["1"], "rgb": ("1", "1", "1")},
}
meta["reference_band"] = instrument_params[instrument]["reference_band"]
meta["bands"] = instrument_params[instrument]["bands"]
meta["rgb"] = instrument_params[instrument]["rgb"]
return meta | def _cbers_parse_scene_id(sceneid) | Parse CBERS scene id.
Attributes
----------
sceneid : str
CBERS sceneid.
Returns
-------
out : dict
dictionary with metadata constructed from the sceneid.
e.g:
_cbers_parse_scene_id('CBERS_4_PAN5M_20171121_057_094_L2')
{
"acquisitionDay": "21",
"acquisitionMonth": "11",
"acquisitionYear": "2017",
"instrument": "PAN5M",
"key": "CBERS4/PAN5M/057/094/CBERS_4_PAN5M_20171121_057_094_L2",
"path": "057",
"processingCorrectionLevel": "L2",
"row": "094",
"mission": "4",
"scene": "CBERS_4_PAN5M_20171121_057_094_L2",
"reference_band": "1",
"bands": ["1"],
"rgb": ("1", "1", "1"),
"satellite": "CBERS",
} | 2.015836 | 1.779714 | 1.132674 |
scene_params = _cbers_parse_scene_id(sceneid)
cbers_address = "{}/{}".format(CBERS_BUCKET, scene_params["key"])
with rasterio.open(
"{}/{}_BAND{}.tif".format(
cbers_address, sceneid, scene_params["reference_band"]
)
) as src:
wgs_bounds = transform_bounds(
*[src.crs, "epsg:4326"] + list(src.bounds), densify_pts=21
)
info = {"sceneid": sceneid}
info["bounds"] = list(wgs_bounds)
return info | def bounds(sceneid) | Retrieve image bounds.
Attributes
----------
sceneid : str
CBERS sceneid.
Returns
-------
out : dict
dictionary with image bounds. | 3.400771 | 3.726225 | 0.912659 |
scene_params = _cbers_parse_scene_id(sceneid)
cbers_address = "{}/{}".format(CBERS_BUCKET, scene_params["key"])
bands = scene_params["bands"]
ref_band = scene_params["reference_band"]
info = {"sceneid": sceneid}
addresses = [
"{}/{}_BAND{}.tif".format(cbers_address, sceneid, band) for band in bands
]
_stats_worker = partial(
utils.raster_get_stats,
indexes=[1],
nodata=0,
overview_level=2,
percentiles=(pmin, pmax),
**kwargs
)
with futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
responses = list(executor.map(_stats_worker, addresses))
info["bounds"] = [r["bounds"] for b, r in zip(bands, responses) if b == ref_band][0]
info["statistics"] = {
b: v for b, d in zip(bands, responses) for k, v in d["statistics"].items()
}
return info | def metadata(sceneid, pmin=2, pmax=98, **kwargs) | Return band bounds and statistics.
Attributes
----------
sceneid : str
CBERS sceneid.
pmin : int, optional, (default: 2)
Histogram minimum cut.
pmax : int, optional, (default: 98)
Histogram maximum cut.
kwargs : optional
These are passed to 'rio_tiler.utils.raster_get_stats'
e.g: histogram_bins=20, dst_crs='epsg:4326'
Returns
-------
out : dict
Dictionary with bounds and bands statistics. | 3.610513 | 3.541292 | 1.019547 |
datasets = __get_data_folder_path() + 'datasets.csv'
df = pd.read_csv(datasets)
df = df[['Item', 'Title']]
df.columns = ['dataset_id', 'title']
# print('a list of the available datasets:')
return df | def __datasets_desc() | return a df of the available datasets with description | 5.751454 | 4.866846 | 1.181762 |
return dict([(x.strip(), y.strip()) for x, y in [z.split(':', 1) for z in style.split(';') if ':' in z]]); | def dumb_property_dict(style) | returns a hash of css attributes | 3.524035 | 3.220108 | 1.094384 |
# remove @import sentences
data += ';'
importIndex = data.find('@import')
while importIndex != -1:
data = data[0:importIndex] + data[data.find(';', importIndex) + 1:]
importIndex = data.find('@import')
# parse the css. reverted from dictionary comprehension in order to support older pythons
elements = [x.split('{') for x in data.split('}') if '{' in x.strip()]
try:
elements = dict([(a.strip(), dumb_property_dict(b)) for a, b in elements])
except ValueError:
elements = {} # not that important
return elements | def dumb_css_parser(data) | returns a hash of css selectors, each of which contains a hash of css attributes | 5.950353 | 5.857003 | 1.015938 |
style = parent_style.copy()
if 'class' in attrs:
for css_class in attrs['class'].split():
css_style = style_def['.' + css_class]
style.update(css_style)
if 'style' in attrs:
immediate_style = dumb_property_dict(attrs['style'])
style.update(immediate_style)
return style | def element_style(attrs, style_def, parent_style) | returns a hash of the 'final' style attributes of the element | 3.254458 | 3.176614 | 1.024505 |
emphasis = []
if 'text-decoration' in style:
emphasis.append(style['text-decoration'])
if 'font-style' in style:
emphasis.append(style['font-style'])
if 'font-weight' in style:
emphasis.append(style['font-weight'])
return emphasis | def google_text_emphasis(style) | return a list of all emphasis modifiers of the element | 1.824933 | 1.82039 | 1.002496 |
font_family = ''
if 'font-family' in style:
font_family = style['font-family']
if 'Courier New' == font_family or 'Consolas' == font_family:
return True
return False | def google_fixed_width_font(style) | check if the css of the current element defines a fixed width font | 2.989172 | 2.641324 | 1.131695 |
text = md_backslash_matcher.sub(r"\\\1", text)
if snob:
text = md_chars_matcher_all.sub(r"\\\1", text)
text = md_dot_matcher.sub(r"\1\\\2", text)
text = md_plus_matcher.sub(r"\1\\\2", text)
text = md_dash_matcher.sub(r"\1\\\2", text)
return text | def escape_md_section(text, snob=False) | Escapes markdown-sensitive characters across whole document sections. | 2.594327 | 2.582629 | 1.004529 |
if not has_key(attrs, 'href'): return None
i = -1
for a in self.a:
i += 1
match = 0
if has_key(a, 'href') and a['href'] == attrs['href']:
if has_key(a, 'title') or has_key(attrs, 'title'):
if (has_key(a, 'title') and has_key(attrs, 'title') and
a['title'] == attrs['title']):
match = True
else:
match = True
if match: return i | def previousIndex(self, attrs) | returns the index of certain set of attributes (of a link) in the
self.a list
If the set of attributes is not found, returns None | 2.744383 | 2.431109 | 1.12886 |
tag_emphasis = google_text_emphasis(tag_style)
parent_emphasis = google_text_emphasis(parent_style)
# handle Google's text emphasis
strikethrough = 'line-through' in tag_emphasis and self.hide_strikethrough
bold = 'bold' in tag_emphasis and not 'bold' in parent_emphasis
italic = 'italic' in tag_emphasis and not 'italic' in parent_emphasis
fixed = google_fixed_width_font(tag_style) and not \
google_fixed_width_font(parent_style) and not self.pre
if start:
# crossed-out text must be handled before other attributes
# in order not to output qualifiers unnecessarily
if bold or italic or fixed:
self.emphasis += 1
if strikethrough:
self.quiet += 1
if italic:
self.o(self.emphasis_mark)
self.drop_white_space += 1
if bold:
self.o(self.strong_mark)
self.drop_white_space += 1
if fixed:
self.o('`')
self.drop_white_space += 1
self.code = True
else:
if bold or italic or fixed:
# there must not be whitespace before closing emphasis mark
self.emphasis -= 1
self.space = 0
self.outtext = self.outtext.rstrip()
if fixed:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o('`')
self.code = False
if bold:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(2)
self.drop_white_space -= 1
else:
self.o(self.strong_mark)
if italic:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o(self.emphasis_mark)
# space is only allowed after *all* emphasis marks
if (bold or italic) and not self.emphasis:
self.o(" ")
if strikethrough:
self.quiet -= 1 | def handle_emphasis(self, start, tag_style, parent_style) | handles various text emphases | 3.123207 | 3.087937 | 1.011422 |
nest_count = 0
if 'margin-left' in style:
nest_count = int(style['margin-left'][:-2]) / self.google_list_indent
return nest_count | def google_nest_count(self, style) | calculate the nesting count of google doc lists | 4.68468 | 3.820847 | 1.226084 |
if not self.body_width:
return text
assert wrap, "Requires Python 2.3."
result = ''
newlines = 0
for para in text.split("\n"):
if len(para) > 0:
if not skipwrap(para):
result += "\n".join(wrap(para, self.body_width))
if para.endswith(' '):
result += " \n"
newlines = 1
else:
result += "\n\n"
newlines = 2
else:
if not onlywhite(para):
result += para + "\n"
newlines = 1
else:
if newlines < 2:
result += "\n"
newlines += 1
return result | def optwrap(self, text) | Wrap all paragraphs in the provided text. | 3.652904 | 3.534674 | 1.033449 |
ratio = SM(None, str(w1).lower(), str(w2).lower()).ratio()
return ratio if ratio > threshold else 0 | def similarity(w1, w2, threshold=0.5) | compare two strings ('words') and
return their similarity ratio if it is larger than the threshold,
or 0 otherwise.
NOTE: if the results look more like junk, increase the threshold value. | 7.087263 | 8.376092 | 0.84613 |
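A quick illustration of the threshold behaviour, assuming `SM` is `difflib.SequenceMatcher` (which the `.ratio()` call suggests); the function body is repeated here only to make the snippet self-contained.

```python
from difflib import SequenceMatcher as SM

def similarity(w1, w2, threshold=0.5):
    ratio = SM(None, str(w1).lower(), str(w2).lower()).ratio()
    return ratio if ratio > threshold else 0

print(similarity("titanic", "titanicc"))  # ~0.93
print(similarity("iris", "mtcars"))       # 0 (ratio 0.4 falls below the 0.5 threshold)
```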
similars = {s2: similarity(s1, s2)
for s2 in dlist
if similarity(s1, s2)}
# a list of tuples [(similar_word, ratio) .. ]
top_match = Counter(similars).most_common(MAX_SIMILARS+1)
return top_match | def search_similar(s1, dlist=DATASET_IDS, MAX_SIMILARS=10) | Returns the top MAX_SIMILARS [(dataset_id : similarity_ratio)] to s1 | 4.386684 | 4.333453 | 1.012284 |
if item:
try:
if show_doc:
__print_item_docs(item)
return
df = __read_csv(item)
return df
except KeyError:
find_similar(item)
else:
return __datasets_desc() | def data(item=None, show_doc=False) | loads a dataset (from the in-module datasets) into a dataframe data structure.
Args:
item (str) : name of the dataset to load.
show_doc (bool) : to show the dataset's documentation.
Examples:
>>> iris = data('iris')
>>> data('titanic', show_doc=True)
: returns the dataset's documentation.
>>> data()
: like help(), returns a dataframe [Item, Title]
for a list of the available datasets. | 6.397597 | 7.241722 | 0.883436 |
if p2.ndim < 2:
p2 = p2[np.newaxis, :]
'''p2 can be a vector'''
area = 0.5 * np.abs(p0[0] * p1[1] - p0[0] * p2[:,1] +
p1[0] * p2[:,1] - p1[0] * p0[1] +
p2[:,0] * p0[1] - p2[:,0] * p1[1])
return area | def triangle_area(p0, p1, p2) | p2 can be a vector | 2.875875 | 2.449317 | 1.174154 |
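A small check of the vectorized branch above: passing `p2` as an (N, 2) array returns one triangle area per row.

```python
import numpy as np

p0 = np.array([0.0, 0.0])
p1 = np.array([1.0, 0.0])
p2 = np.array([[0.0, 1.0],
               [0.0, 2.0]])   # two candidate third vertices

# Shoelace formula per row: 0.5 * |x0(y1 - y2) + x1(y2 - y0) + x2(y0 - y1)|
print(triangle_area(p0, p1, p2))  # [0.5 1. ]
```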
'''which points are inside ROI'''
if Y.ndim > 1:
area = np.zeros((Y.shape[0],4))
else:
area = np.zeros((1,4))
pts = np.zeros((0,), int)
pdist = np.zeros((0,), int)
dist0 = 0
for k in range(len(self.prect)):
self.square_area = (triangle_area(self.prect[k][0,:], self.prect[k][1,:], self.prect[k][2,:]) +
triangle_area(self.prect[k][2,:], self.prect[k][3,:], self.prect[k][4,:]))
for n in range(4):
area[:,n] = triangle_area(self.prect[k][0+n,:], self.prect[k][1+n,:], Y)
# points inside prect
newpts = np.array((area.sum(axis=1) <= self.square_area+1e-5).nonzero()).flatten().astype(int)
if newpts.size > 0:
pts = np.concatenate((pts, newpts))
newdists = self.orthproj(Y[newpts, :], k) + dist0
pdist = np.concatenate((pdist, newdists))
dist0 += (np.diff(self.pos[k], axis=0)[0,:]**2).sum()
# check if in radius of circle
if k < len(self.prect)-1:
pcent = self.pos[k][1,:]
dist = ((Y - pcent[np.newaxis,:])**2).sum(axis=1)**0.5
newpts = np.array((dist<=self.d).nonzero()[0].astype(int))
if newpts.size > 0:
pts = np.concatenate((pts, newpts))
newdists = dist0 * np.ones(newpts.shape)
pdist = np.concatenate((pdist, newdists))
pts, inds = np.unique(pts, return_index=True)
pdist = pdist[inds]
return pts, pdist | def inROI(self, Y) | which points are inside ROI | 2.921597 | 2.916943 | 1.001595 |
'''remove ROI'''
parent.p0.removeItem(self.ROIplot)
parent.p0.removeItem(self.dotplot) | def remove(self, parent) | remove ROI | 13.800606 | 11.946772 | 1.155174 |
'''compute a wrapped distance'''
q1 = np.mod(kx, nc)
q2 = np.minimum(q1, nc-q1)
return q2 | def dwrap(kx,nc) | compute a wrapped distance | 6.245943 | 6.273131 | 0.995666 |
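A worked example of the wrapped distance above: on a ring of nc bins the distance never exceeds nc/2, because going around the other way is counted too.

import numpy as np

def dwrap(kx, nc):
    q1 = np.mod(kx, nc)
    return np.minimum(q1, nc - q1)

print(dwrap(7, 10))                    # 3 -- wrapping backwards is shorter than 7
print(dwrap(np.array([1, 5, 9]), 10))  # [1 5 1]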
self.fit(X, u, sv, v)
return self.embedding | def fit_transform(self, X, u=None, sv=None, v=None) | Fit X into an embedded space and return that transformed
output.
Inputs
----------
X : array, shape (n_samples, n_features). X contains a sample per row.
Returns
-------
embedding : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space. | 4.615491 | 8.372327 | 0.551279 |
iclustup = []
dims = self.n_components
if hasattr(self, 'isort1'):
if X.shape[1] == self.v.shape[0]:
# reduce dimensionality of X
X = X @ self.v
nclust = self.n_X
AtS = self.A.T @ self.S
vnorm = np.sum(self.S * (self.A @ AtS), axis=0)[np.newaxis,:]
cv = X @ AtS
cmap = np.maximum(0., cv)**2 / vnorm
iclustup, cmax = upsample(np.sqrt(cmap), dims, nclust, 10)
else:
print('ERROR: new points do not have as many features as original data')
else:
print('ERROR: need to fit model first before you can embed new points')
if iclustup.ndim > 1:
iclustup = iclustup.T
else:
iclustup = iclustup.flatten()
return iclustup | def transform(self, X) | if already fit, can add new points and see where they fall | 5.954863 | 5.784652 | 1.029425 |
X = X.copy()
X -= X.mean(axis=0)
if self.mode == 'parallel':
Xall = X.copy()
X = np.reshape(Xall.copy(), (-1, Xall.shape[-1]))
#X -= X.mean(axis=-1)[:,np.newaxis]
if ((u is None)):
# compute svd and keep iPC's of data
nmin = min([X.shape[0], X.shape[1]])
nmin = np.minimum(nmin-1, self.nPC)
u,sv,v = svdecon(np.float64(X), k=nmin)
u = u * sv
NN, self.nPC = u.shape
# first smooth in Y (if n_Y > 0)
self.u = u
if self.mode == 'parallel':
NN = Xall.shape[1]
X = np.zeros((2, NN, u.shape[1]), 'float64')
for j in range(2):
Xall[j] -= Xall[j].mean(axis=-1)[:, np.newaxis]
X[j] = Xall[j] @ self.v
nclust = self.n_X
if self.n_components==1 and init_sort.ndim==1:
init_sort = init_sort[:,np.newaxis]
# now sort in X
Y = self._map(u.copy(), self.n_components)
return self | def fit(self, X=None, u=None, s = None) | Fit X into an embedded space.
Inputs
----------
X : array, shape (n_samples, n_features)
u,s,v : svd decomposition of X (optional)
Assigns
----------
embedding : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
u,sv,v : singular value decomposition of data S, potentially with smoothing
isort1 : sorting along first dimension of matrix
isort2 : sorting along second dimension of matrix (if n_Y > 0)
cmap: correlation of each item with all locations in the embedding map (before upsampling)
A: PC coefficients of each Fourier mode | 6.047341 | 5.792677 | 1.043963 |
X = X.copy()
if self.mode == 'parallel':
Xall = X.copy()
X = np.reshape(Xall.copy(), (-1, Xall.shape[-1]))
#X -= X.mean(axis=-1)[:,np.newaxis]
if ((u is None)):
nmin = min([X.shape[0], X.shape[1]])
nmin = np.minimum(nmin-1, self.nPC)
u,sv,v = svdecon(np.float64(X), k=nmin)
u = u * sv
NN, self.nPC = u.shape
self.u = u
# now sort in X
U = self._map(u.copy(), self.n_components, self.n_X, u.copy())
return self | def fit(self, X=None, u=None) | Fit X into an embedded space.
Inputs
----------
X : array, shape (n_samples, n_features)
u,s,v : svd decomposition of X (optional)
Assigns
----------
embedding : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
u,sv,v : singular value decomposition of data S, potentially with smoothing
isort1 : sorting along first dimension of matrix
isort2 : sorting along second dimension of matrix (if n_Y > 0)
cmap: correlation of each item with all locations in the embedding map (before upsampling)
A: PC coefficients of each Fourier mode | 6.873699 | 6.419642 | 1.070729 |
if self.mode == 'parallel':
Xall = X.copy()
X = np.reshape(Xall.copy(), (-1, Xall.shape[-1]))
#X -= X.mean(axis=-1)[:,np.newaxis]
if (u is None) or (sv is None) or (v is None):
# compute svd and keep iPC's of data
nmin = min([X.shape[0],X.shape[1]])
nmin = np.minimum(nmin-1, self.nPC)
u,sv,v = svdecon(np.float64(X), k=nmin)
#u, sv, v = np.float32(u), np.float32(sv), np.float32(v)
self.nPC = sv.size
# first smooth in Y (if n_Y > 0)
# this will be a 1-D fit
isort2 = []
if self.n_Y > 0:
vsort = np.argsort(v[:,0])[:,np.newaxis]
isort2, iclustup = self._map(v * sv, 1, self.n_Y, vsort)
#X = gaussian_filter1d(X[:, isort2], self.sig_Y, axis=1)
#u,sv,v = svdecon(np.float64(X), k=nmin)
self.u = u
self.sv = sv
self.v = v
if self.mode == 'parallel':
NN = Xall.shape[1]
X = np.zeros((2, NN, u.shape[1]), 'float64')
for j in range(2):
Xall[j] -= Xall[j].mean(axis=-1)[:, np.newaxis]
X[j] = Xall[j] @ self.v
else:
NN = X.shape[0]
X = X @ self.v
if self.init == 'pca':
u = u * np.sign(skew(u, axis=0))
init_sort = np.argsort(u[:NN, :self.n_components], axis=0)
#init_sort = u[:NN,:self.n_components]
if False:
ix = init_sort > 0
iy = init_sort < 0
init_sort[ix] = init_sort[ix] - 100.
init_sort[iy] = init_sort[iy] + 100.
elif self.init == 'random':
init_sort = np.random.permutation(NN)[:,np.newaxis]
for j in range(1,self.n_components):
init_sort = np.concatenate((init_sort, np.random.permutation(NN)[:,np.newaxis]), axis=-1)
else:
init_sort = self.init
if self.n_components==1 and init_sort.ndim==1:
init_sort = init_sort[:,np.newaxis]
# now sort in X
isort1, iclustup = self._map(X, self.n_components, self.n_X, init_sort)
self.isort2 = isort2
self.isort1 = isort1
self.embedding = iclustup
return self | def fit(self, X, u=None, sv=None, v=None) | Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features)
y : Ignored | 3.386726 | 3.453763 | 0.98059 |
self.fit(X, u)
return self.embedding | def fit_transform(self, X, u=None) | Fit X into an embedded space and return that transformed
output.
Inputs
----------
X : array, shape (n_samples, n_features). X contains a sample per row.
Returns
-------
embedding : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space. | 7.463374 | 15.130131 | 0.493279 |
if self.mode == 'parallel':
Xall = X.copy()
X = np.reshape(Xall.copy(), (-1, Xall.shape[-1]))
#X -= X.mean(axis=-1)[:,np.newaxis]
if ((u is None)):
# compute svd and keep iPC's of data
nmin = min([X.shape[0], X.shape[1]])
nmin = np.minimum(nmin-1, self.nPC)
u,sv,v = svdecon(np.float64(X), k=nmin)
u = u * sv
NN, self.nPC = u.shape
if self.constraints==3:
plaw = 1/(1+np.arange(1000))**(self.alpha/2)
self.vscale = np.sum(u**2,axis=0)**.5
tail = self.vscale[-1] * plaw[u.shape[1]:]/plaw[u.shape[1]]
self.vscale = np.hstack((self.vscale, tail))
# first smooth in Y (if n_Y > 0)
self.u = u
self.v = v
if self.mode == 'parallel':
NN = Xall.shape[1]
X = np.zeros((2, NN, u.shape[1]), 'float64')
for j in range(2):
Xall[j] -= Xall[j].mean(axis=-1)[:, np.newaxis]
X[j] = Xall[j] @ self.v
if self.init == 'pca':
usort = u * np.sign(skew(u, axis=0))
init_sort = np.argsort(usort[:NN, :self.n_components], axis=0)
#init_sort = u[:NN,:self.n_components]
if False:
ix = init_sort > 0
iy = init_sort < 0
init_sort[ix] = init_sort[ix] - 100.
init_sort[iy] = init_sort[iy] + 100.
elif self.init == 'random':
init_sort = np.random.permutation(NN)[:,np.newaxis]
for j in range(1,self.n_components):
init_sort = np.concatenate((init_sort, np.random.permutation(NN)[:,np.newaxis]), axis=-1)
else:
init_sort = self.init
if self.n_components==1 and init_sort.ndim==1:
init_sort = init_sort[:,np.newaxis]
# now sort in X
isort1, iclustup = self._map(u.copy(), self.n_components, self.n_X, init_sort)
self.isort = isort1
self.embedding = iclustup
return self | def fit(self, X=None, u=None, sv=None, v=None) | Fit X into an embedded space.
Inputs
----------
X : array, shape (n_samples, n_features)
u,s,v : svd decomposition of X (optional)
Assigns
----------
embedding : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
u,sv,v : singular value decomposition of data S, potentially with smoothing
isort1 : sorting along first dimension of matrix
isort2 : sorting along second dimension of matrix (if n_Y > 0)
cmap: correlation of each item with all locations in the embedding map (before upsampling)
A: PC coefficients of each Fourier mode | 4.177786 | 4.005119 | 1.043112 |
X = X.copy()
if self.mode == 'parallel':
Xall = X.copy()
X = np.reshape(Xall.copy(), (-1, Xall.shape[-1]))
#X -= X.mean(axis=-1)[:,np.newaxis]
if ((u is None)):
# compute svd and keep iPC's of data
nmin = min([X.shape[0], X.shape[1]])
nmin = np.minimum(nmin-1, self.nPC)
u,sv,v = svdecon(np.float64(X), k=nmin)
u = u * sv
NN, self.nPC = u.shape
# first smooth in Y (if n_Y > 0)
self.u = u
if self.mode == 'parallel':
NN = Xall.shape[1]
X = np.zeros((2, NN, u.shape[1]), 'float64')
for j in range(2):
Xall[j] -= Xall[j].mean(axis=-1)[:, np.newaxis]
X[j] = Xall[j] @ self.v
utu = np.sum(u**2, axis=1)
ikeep = np.argmax(utu)
#ikeep = int(NN/2)
#ikeep = np.random.randint(0, NN)
ccu = u @ u[ikeep,:]
cmax = np.maximum(0, ccu)**2/utu
ikeep = np.argsort(cmax)[::-1]
ikeep = ikeep[:int(NN/10)]
ikeep = np.sort(ikeep)
if self.init == 'pca':
U = svdecon(u[ikeep,:], k=2)[0]
#U = u[ikeep, :2]
usort = U * np.sign(skew(U, axis=0))
init_sort = np.argsort(usort[:, :self.n_components], axis=0)
elif self.init == 'random':
init_sort = np.random.permutation(len(ikeep))[:,np.newaxis]
for j in range(1,self.n_components):
init_sort = np.concatenate((init_sort, np.random.permutation(len(ikeep))[:,np.newaxis]), axis=-1)
else:
init_sort = self.init
if self.n_components==1 and init_sort.ndim==1:
init_sort = init_sort[:,np.newaxis]
# now sort in X
isort1, iclustup = self._map(u.copy(), self.n_components, self.n_X, init_sort, ikeep, s)
self.isort = isort1
self.embedding = iclustup
return self | def fit(self, X=None, u=None, s = None) | Fit X into an embedded space.
Inputs
----------
X : array, shape (n_samples, n_features)
u,s,v : svd decomposition of X (optional)
Assigns
----------
embedding : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
u,sv,v : singular value decomposition of data S, potentially with smoothing
isort1 : sorting along first dimension of matrix
isort2 : sorting along second dimension of matrix (if n_Y > 0)
cmap: correlation of each item with all locations in the embedding map (before upsampling)
A: PC coefficients of each Fourier mode | 4.29995 | 4.099957 | 1.048779 |
flip = False
choose = False
zoom = False
replot = False
items = self.win.scene().items(event.scenePos())
posx = 0
posy = 0
iplot = 0
if self.loaded:
# print(event.modifiers() == QtCore.Qt.ControlModifier)
for x in items:
if x == self.p0:
if self.embedded:
iplot = 0
vb = self.p0.vb
pos = vb.mapSceneToView(event.scenePos())
x = pos.x()
y = pos.y()
if event.double():
self.zoom_plot(iplot)
elif event.button() == 2:
# do nothing
nothing = True
elif event.modifiers() == QtCore.Qt.ShiftModifier:
if not self.startROI:
self.startROI = True
self.endROI = False
self.posROI[0,:] = [x,y]
else:
# plotting
self.startROI = True
self.endROI = False
self.posROI[1,:] = [x,y]
#print(self.)
self.posAll.append(self.posROI[:2,:].copy())
pos = self.posAll[-1]
self.lp.append(pg.PlotDataItem(pos[:, 0], pos[:, 1]))
self.posROI[0,:] = [x,y]
self.p0.addItem(self.lp[-1])
self.p0.show()
elif self.startROI:
self.posROI[1,:] = [x,y]
self.posAll.append(self.posROI[:2,:].copy())
self.p0.removeItem(self.l0)
pos = self.posAll[-1]
self.lp.append(pg.PlotDataItem(pos[:, 0], pos[:, 1]))
self.p0.addItem(self.lp[-1])
self.p0.show()
self.endROI = True
self.startROI = False
elif self.endROI:
self.posROI[2,:] = [x,y]
self.endROI = False
for lp in self.lp:
self.p0.removeItem(lp)
self.ROI_add(self.posAll, self.prect)
self.posAll = []
self.lp = []
elif event.modifiers() == QtCore.Qt.AltModifier:
self.ROI_remove([x,y])
elif x == self.p1:
iplot = 1
y = self.p1.vb.mapSceneToView(event.scenePos()).y()
ineur = min(self.colormat.shape[0]-1, max(0, int(np.floor(y))))
ineur = ineur + self.yrange[0]
if event.double():
self.zoom_plot(iplot)
elif event.modifiers() == QtCore.Qt.AltModifier:
self.ROI_remove([ineur])
elif x == self.p3:
iplot = 2
y = self.p3.vb.mapSceneToView(event.scenePos()).y()
ineur = min(self.colormat.shape[0]-1, max(0, int(np.floor(y))))
if event.modifiers() == QtCore.Qt.AltModifier:
self.ROI_remove([ineur]) | def plot_clicked(self, event) | left-click chooses a cell, right-click flips cell to other view | 2.641902 | 2.631464 | 1.003967 |
if socks_endpoint is None:
raise ValueError(
"Must provide socks_endpoint as Deferred or IStreamClientEndpoint"
)
if circuit is not None:
factory = _AgentEndpointFactoryForCircuit(reactor, socks_endpoint, circuit)
else:
factory = _AgentEndpointFactoryUsingTor(reactor, socks_endpoint)
return Agent.usingEndpointFactory(reactor, factory, pool=pool) | def tor_agent(reactor, socks_endpoint, circuit=None, pool=None) | This is the low-level method used by
:meth:`txtorcon.Tor.web_agent` and
:meth:`txtorcon.Circuit.web_agent` -- probably you should call one
of those instead.
:returns: a Deferred that fires with an object that implements
:class:`twisted.web.iweb.IAgent` and is thus suitable for passing
to ``treq`` as the ``agent=`` kwarg. Of course can be used
directly; see `using Twisted web client
<http://twistedmatrix.com/documents/current/web/howto/client.html>`_.
:param reactor: the reactor to use
:param circuit: If supplied, a particular circuit to use
:param socks_endpoint: Deferred that fires w/
IStreamClientEndpoint (or IStreamClientEndpoint instance)
which points at a SOCKS5 port of our Tor
:param pool: passed on to the Agent (as ``pool=``) | 4.245628 | 3.666223 | 1.158039 |
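A hedged usage sketch for the row above, not taken from the dataset: it assumes a local Tor SOCKS listener on 127.0.0.1:9050 and that tor_agent is importable; per the code above, the return value is used directly as a Twisted IAgent.

from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint

socks_ep = TCP4ClientEndpoint(reactor, '127.0.0.1', 9050)  # assumed SOCKS port
agent = tor_agent(reactor, socks_ep)                       # requests routed via Tor
d = agent.request(b'GET', b'https://check.torproject.org/')
d.addCallback(lambda response: print(response.code))
d.addBoth(lambda _: reactor.stop())
reactor.run()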
# :param tls: True (the default) will use Twisted's default options
# with the hostname in the URI -- that is, TLS verification
# similar to a Browser. Otherwise, you can pass whatever Twisted
# returns for `optionsForClientTLS
# <https://twistedmatrix.com/documents/current/api/twisted.internet.ssl.optionsForClientTLS.html>`_
socks_config = str(socks_config) # sadly, all lists are lists-of-strings to Tor :/
if socks_config not in torconfig.SocksPort:
txtorlog.msg("Adding SOCKS port '{}' to Tor".format(socks_config))
torconfig.SocksPort.append(socks_config)
try:
yield torconfig.save()
except Exception as e:
raise RuntimeError(
"Failed to reconfigure Tor with SOCKS port '{}': {}".format(
socks_config, str(e)
)
)
if socks_config.startswith('unix:'):
socks_ep = UNIXClientEndpoint(reactor, socks_config[5:])
else:
if ':' in socks_config:
host, port = socks_config.split(':', 1)
else:
host = '127.0.0.1'
port = int(socks_config)
socks_ep = TCP4ClientEndpoint(reactor, host, port)
returnValue(
Agent.usingEndpointFactory(
reactor,
_AgentEndpointFactoryUsingTor(reactor, socks_ep),
pool=pool,
)
) | def agent_for_socks_port(reactor, torconfig, socks_config, pool=None) | This returns a Deferred that fires with an object that implements
:class:`twisted.web.iweb.IAgent` and is thus suitable for passing
to ``treq`` as the ``agent=`` kwarg. Of course can be used
directly; see `using Twisted web client
<http://twistedmatrix.com/documents/current/web/howto/client.html>`_. If
you have a :class:`txtorcon.Tor` instance already, the preferred
API is to call :meth:`txtorcon.Tor.web_agent` on it.
:param torconfig: a :class:`txtorcon.TorConfig` instance.
:param socks_config: anything valid for Tor's ``SocksPort``
option. This is generally just a TCP port (e.g. ``9050``), but
can also be a unix path like so ``unix:/path/to/socket`` (Tor
has restrictions on the ownership/permissions of the directory
containing ``socket``). If the given SOCKS option is not
already available in the underlying Tor instance, it is
re-configured to add the SOCKS option. | 4.327927 | 4.050781 | 1.068418 |
'''
Overrides nevow method; not really safe to just save ctx,
client in self for multiple clients, but nice and simple.
'''
self.ctx = ctx
self.client = client | def goingLive(self, ctx, client) | Overrides nevow method; not really safe to just save ctx,
client in self for multiple clients, but nice and simple. | 28.927393 | 2.597786 | 11.135401 |
if IStreamClientEndpoint.providedBy(connection):
endpoint = connection
elif isinstance(connection, tuple):
if len(connection) == 2:
reactor, socket = connection
if (os.path.exists(socket) and
os.stat(socket).st_mode & (stat.S_IRGRP | stat.S_IRUSR |
stat.S_IROTH)):
endpoint = UNIXClientEndpoint(reactor, socket)
else:
raise ValueError('Can\'t use "%s" as a socket' % (socket, ))
elif len(connection) == 3:
endpoint = TCP4ClientEndpoint(*connection)
else:
raise TypeError('Expected either a (reactor, socket)- or a '
'(reactor, host, port)-tuple for argument '
'"connection", got %s' % (connection, ))
else:
raise TypeError('Expected a (reactor, socket)- or a (reactor, host, '
'port)-tuple or an object implementing IStreamClient'
'Endpoint for argument "connection", got %s' %
(connection, ))
d = endpoint.connect(
TorProtocolFactory(
password_function=password_function
)
)
if build_state:
d.addCallback(build_state
if callable(build_state)
else _build_state)
elif wait_for_proto:
d.addCallback(wait_for_proto
if callable(wait_for_proto)
else _wait_for_proto)
return d | def build_tor_connection(connection, build_state=True, wait_for_proto=True,
password_function=lambda: None) | This is used to build a valid TorState (which has .protocol for
the TorControlProtocol). For example::
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
import txtorcon
def example(state):
print "Fully bootstrapped state:",state
print " with bootstrapped protocol:",state.protocol
d = txtorcon.build_tor_connection(TCP4ClientEndpoint(reactor,
"localhost",
9051))
d.addCallback(example)
reactor.run()
:param password_function:
See :class:`txtorcon.TorControlProtocol`
:param build_state:
If True (the default) a TorState object will be
built as well. If False, just a TorControlProtocol will be
returned via the Deferred.
:return:
a Deferred that fires with a TorControlProtocol or, if you
specified build_state=True, a TorState. In both cases, the
object has finished bootstrapping
(i.e. TorControlProtocol.post_bootstrap or
TorState.post_bootstrap has fired, as needed) | 2.439011 | 2.493947 | 0.977972 |
try:
return build_tor_connection((reactor, socket), *args, **kwargs)
except Exception:
return build_tor_connection((reactor, host, port), *args, **kwargs) | def build_local_tor_connection(reactor, host='127.0.0.1', port=9051,
socket='/var/run/tor/control', *args, **kwargs) | This builds a connection to a local Tor, either via 127.0.0.1:9051
or /var/run/tor/control (by default; the latter is tried
first). See also :meth:`build_tor_connection
<txtorcon.torstate.build_tor_connection>` for other key-word
arguments that are accepted here also.
**Note**: new code should use :meth:`txtorcon.connect` instead.
:param host:
An IP address to find Tor at. Corresponds to the
ControlListenAddress torrc option.
:param port:
The port to use with the address when trying to contact
Tor. This corresponds to the ControlPort option in torrc
(default is 9051). | 2.964689 | 3.833253 | 0.773413 |
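A hedged usage sketch for build_local_tor_connection, modelled on the example embedded in the build_tor_connection docstring above; it assumes a local Tor with its control interface at one of the default locations.

from twisted.internet import reactor

def example(state):
    print("Fully bootstrapped state:", state)
    reactor.stop()

d = build_local_tor_connection(reactor)  # tries /var/run/tor/control, then 127.0.0.1:9051
d.addCallback(example)
d.addErrback(lambda f: (print(f), reactor.stop()))
reactor.run()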
if len(kw) == 0:
return ''
flags = ''
for (k, v) in kw.items():
if v:
flags += ' ' + str(k)
# note that we want the leading space if there's at least one
# flag.
return flags | def flags_from_dict(kw) | This turns a dict with keys that are flags (e.g. for CLOSECIRCUIT,
CLOSESTREAM) only if the values are true. | 4.253462 | 3.872841 | 1.09828 |
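A worked example of the flag formatting above; the function is restated verbatim so the snippet runs on its own, and the flag names are only illustrative.

def flags_from_dict(kw):
    if len(kw) == 0:
        return ''
    flags = ''
    for (k, v) in kw.items():
        if v:
            flags += ' ' + str(k)
    return flags

print(repr(flags_from_dict({'IfUnused': True, 'Verbose': False})))  # ' IfUnused'
print(repr(flags_from_dict({})))                                    # ''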
rtn = {}
key = None
value = ''
# FIXME could use some refactoring to reduce code duplication!
for line in lines.split('\n'):
if line.strip() == 'OK':
continue
sp = line.split('=', 1)
found_key = ('=' in line and ' ' not in sp[0])
if found_key and key_hints and sp[0] not in key_hints:
found_key = False
if found_key:
if key:
if key in rtn:
if isinstance(rtn[key], list):
rtn[key].append(unquote(value))
else:
rtn[key] = [rtn[key], unquote(value)]
else:
rtn[key] = unquote(value)
(key, value) = line.split('=', 1)
else:
if key is None:
rtn[line.strip()] = DEFAULT_VALUE
elif multiline_values is False:
rtn[key] = value
rtn[line.strip()] = DEFAULT_VALUE
key = None
value = ''
else:
value = value + '\n' + line
if key:
if key in rtn:
if isinstance(rtn[key], list):
rtn[key].append(unquote(value))
else:
rtn[key] = [rtn[key], unquote(value)]
else:
rtn[key] = unquote(value)
return rtn | def parse_keywords(lines, multiline_values=True, key_hints=None) | Utility method to parse name=value pairs (GETINFO etc). Takes a
string with newline-separated lines and expects at most one = sign
per line. Accumulates multi-line values.
:param multiline_values:
The default is True which allows for multi-line values until a
line with the next = sign on it. So: '''Foo=bar\nBar'''
produces one key, 'Foo', with value 'bar\nBar' -- set to
False, there would be two keys: 'Foo' with value 'bar' and
'Bar' with value DEFAULT_VALUE. | 2.296232 | 2.215404 | 1.036485 |
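A usage sketch matching the docstring above, assuming parse_keywords (and its DEFAULT_VALUE sentinel) from the snippet is importable; the expected outputs are taken from the docstring itself.

print(parse_keywords('Foo=bar\nBar'))
# {'Foo': 'bar\nBar'}   -- multi-line value accumulated under the one key
print(parse_keywords('Foo=bar\nBar', multiline_values=False))
# {'Foo': 'bar', 'Bar': DEFAULT_VALUE}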
args = data.split()[1:]
try:
self._relay_attrs['ip_v6'].extend(args)
except KeyError:
self._relay_attrs['ip_v6'] = list(args) | def _router_address(self, data) | only for IPv6 addresses | 5.78865 | 4.819919 | 1.200985 |
listener = IStreamListener(listen)
if listener not in self.listeners:
self.listeners.append(listener) | def listen(self, listen) | Attach an :class:`txtorcon.interface.IStreamListener` to this stream.
See also :meth:`txtorcon.TorState.add_stream_listener` to
listen to all streams.
:param listen: something that knows
:class:`txtorcon.interface.IStreamListener` | 6.056939 | 7.154801 | 0.846556 |
self._closing_deferred = defer.Deferred()
def close_command_is_queued(*args):
return self._closing_deferred
d = self.circuit_container.close_stream(self, **kw)
d.addCallback(close_command_is_queued)
return self._closing_deferred | def close(self, **kw) | This asks Tor to close the underlying stream object. See
:meth:`txtorcon.interface.ITorControlProtocol.close_stream`
for details.
Although Tor currently takes no flags, it allows you to; any
keyword arguments are passed through as flags.
NOTE that the callback delivered from this method only
callbacks after the underlying stream is really destroyed
(*not* just when the CLOSESTREAM command has successfully
completed). | 5.710527 | 5.206823 | 1.096739 |
flags = {}
for k in kw.keys():
flags[k] = kw[k]
flags[k.lower()] = flags[k]
return flags | def _create_flags(self, kw) | this clones the kw dict, adding a lower-case version of every key
(duplicated in circuit.py; consider putting in util?) | 3.756928 | 2.49141 | 1.507953 |
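An illustration of the clone-with-lowercase-keys helper above; the function is restated so the example runs on its own.

def _create_flags(kw):
    flags = {}
    for k in kw.keys():
        flags[k] = kw[k]
        flags[k.lower()] = flags[k]
    return flags

print(_create_flags({'IfUnused': True}))  # {'IfUnused': True, 'ifunused': True}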