Column schema (name, dtype, value range min – max):

    Unnamed: 0   int64           0 – 2.93k
    code         stringlengths   101 – 62.2k
    docs         stringlengths   51 – 10.7k
    doc_len      int64           4 – 1.74k
    words        int64           4 – 4.82k
    lang         stringclasses   1 value
    prompt       stringlengths   320 – 71.2k

Each row below is listed in this column order: row index, code, docs, doc_len, words, lang, prompt.
Row 2,300
def probiou_loss(pred, target, eps=1e-3, mode='l1'): gbboxes1 = gbb_form(pred) gbboxes2 = gbb_form(target) x1, y1, a1_, b1_, c1_ = gbboxes1[:, 0], gbboxes1[:, 1], gbboxes1[:, 2], gbboxes1[:, 3], gbboxes1[:, 4] x2, y2, a2_, b2_, c2_ = gbboxes2[:, 0], gbboxes2[:, 1], gbboxes2[:, 2], gbboxes2[:, 3], gbboxes2[:, 4] a1, b1, c1 = rotated_form(a1_, b1_, c1_) a2, b2, c2 = rotated_form(a2_, b2_, c2_) t1 = 0.25 * ((a1 + a2) * (paddle.pow(y1 - y2, 2)) + (b1 + b2) * (paddle.pow(x1 - x2, 2))) + \ 0.5 * ((c1+c2)*(x2-x1)*(y1-y2)) t2 = (a1 + a2) * (b1 + b2) - paddle.pow(c1 + c2, 2) t3_ = (a1 * b1 - c1 * c1) * (a2 * b2 - c2 * c2) t3 = 0.5 * paddle.log(t2 / (4 * paddle.sqrt(F.relu(t3_)) + eps)) B_d = (t1 / t2) + t3 # B_d = t1 + t2 + t3 B_d = paddle.clip(B_d, min=eps, max=100.0) l1 = paddle.sqrt(1.0 - paddle.exp(-B_d) + eps) l_i = paddle.pow(l1, 2.0) l2 = -paddle.log(1.0 - l_i + eps) if mode == 'l1': probiou = l1 if mode == 'l2': probiou = l2 return probiou @serializable @register
pred -> a matrix [N,5](x,y,w,h,angle - in radians) containing ours predicted box ;in case of HBB angle == 0 target -> a matrix [N,5](x,y,w,h,angle - in radians) containing ours target box ;in case of HBB angle == 0 eps -> threshold to avoid infinite values mode -> ('l1' in [0,1] or 'l2' in [0,inf]) metrics according our paper
doc_len: 58, words: 181, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def probiou_loss(pred, target, eps=1e-3, mode='l1'): gbboxes1 = gbb_form(pred) gbboxes2 = gbb_form(target) x1, y1, a1_, b1_, c1_ = gbboxes1[:, 0], gbboxes1[:, 1], gbboxes1[:, 2], gbboxes1[:, 3], gbboxes1[:, 4] x2, y2, a2_, b2_, c2_ = gbboxes2[:, 0], gbboxes2[:, 1], gbboxes2[:, 2], gbboxes2[:, 3], gbboxes2[:, 4] a1, b1, c1 = rotated_form(a1_, b1_, c1_) a2, b2, c2 = rotated_form(a2_, b2_, c2_) t1 = 0.25 * ((a1 + a2) * (paddle.pow(y1 - y2, 2)) + (b1 + b2) * (paddle.pow(x1 - x2, 2))) + \ 0.5 * ((c1+c2)*(x2-x1)*(y1-y2)) t2 = (a1 + a2) * (b1 + b2) - paddle.pow(c1 + c2, 2) t3_ = (a1 * b1 - c1 * c1) * (a2 * b2 - c2 * c2) t3 = 0.5 * paddle.log(t2 / (4 * paddle.sqrt(F.relu(t3_)) + eps)) B_d = (t1 / t2) + t3 # B_d = t1 + t2 + t3 B_d = paddle.clip(B_d, min=eps, max=100.0) l1 = paddle.sqrt(1.0 - paddle.exp(-B_d) + eps) l_i = paddle.pow(l1, 2.0) l2 = -paddle.log(1.0 - l_i + eps) if mode == 'l1': probiou = l1 if mode == 'l2': probiou = l2 return probiou @serializable @register ``` ###Assistant : pred -> a matrix [N,5](x,y,w,h,angle - in radians) containing ours predicted box ;in case of HBB angle == 0 target -> a matrix [N,5](x,y,w,h,angle - in radians) containing ours target box ;in case of HBB angle == 0 eps -> threshold to avoid infinite values mode -> ('l1' in [0,1] or 'l2' in [0,inf]) metrics according our paper
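Note: the documentation above fixes the input contract (two [N, 5] tensors of x, y, w, h, angle in radians) but the record shows no call site. Below is a minimal usage sketch, assuming probiou_loss and its helpers (gbb_form, rotated_form) from the snippet are already defined in the current module; the box values are illustrative.

```python
import paddle

# Assumes probiou_loss (with its helpers gbb_form / rotated_form) from the
# record above is defined in this module; the tensors below are made-up boxes.
pred = paddle.to_tensor([[10.0, 10.0, 4.0, 2.0, 0.0]])    # x, y, w, h, angle (radians)
target = paddle.to_tensor([[10.5, 10.0, 4.0, 2.0, 0.1]])
loss = probiou_loss(pred, target, mode='l1')               # shape [1]; 'l1' values lie in [0, 1]
```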
Row 2,301
def mayDisableConsoleWindow(): # TODO: What about MSYS2? return isWin32Windows() or isMacOS()
:returns: bool derived from platform support of disabling the console,
doc_len: 10, words: 11, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mayDisableConsoleWindow(): # TODO: What about MSYS2? return isWin32Windows() or isMacOS() ``` ###Assistant : :returns: bool derived from platform support of disabling the console,
Row 2,302
def compat_cfg(cfg): cfg = copy.deepcopy(cfg) cfg = compat_imgs_per_gpu(cfg) cfg = compat_loader_args(cfg) cfg = compat_runner_args(cfg) return cfg
This function would modify some filed to keep the compatibility of config. For example, it will move some args which will be deprecated to the correct fields.
doc_len: 27, words: 16, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def compat_cfg(cfg): cfg = copy.deepcopy(cfg) cfg = compat_imgs_per_gpu(cfg) cfg = compat_loader_args(cfg) cfg = compat_runner_args(cfg) return cfg ``` ###Assistant : This function would modify some filed to keep the compatibility of config. For example, it will move some args which will be deprecated to the correct fields.
Row 2,303
def getsourcelines(object): object = unwrap(object) lines, lnum = findsource(object) if istraceback(object): object = object.tb_frame # for module or frame that corresponds to module, return all source lines if (ismodule(object) or (isframe(object) and object.f_code.co_name == "<module>")): return lines, 0 else: return getblock(lines[lnum:]), lnum + 1
Return a list of source lines and starting line number for an object. The argument may be a module, class, method, function, traceback, frame, or code object. The source code is returned as a list of the lines corresponding to the object and the line number indicates where in the original source file the first line of code was found. An OSError is raised if the source code cannot be retrieved.
doc_len: 71, words: 44, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def getsourcelines(object): object = unwrap(object) lines, lnum = findsource(object) if istraceback(object): object = object.tb_frame # for module or frame that corresponds to module, return all source lines if (ismodule(object) or (isframe(object) and object.f_code.co_name == "<module>")): return lines, 0 else: return getblock(lines[lnum:]), lnum + 1 ``` ###Assistant : Return a list of source lines and starting line number for an object. The argument may be a module, class, method, function, traceback, frame, or code object. The source code is returned as a list of the lines corresponding to the object and the line number indicates where in the original source file the first line of code was found. An OSError is raised if the source code cannot be retrieved.
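Note: this record appears to be the standard-library implementation behind inspect.getsourcelines; a quick usage sketch of the public function:

```python
import inspect

def greet(name):
    return f"Hello, {name}!"

lines, start = inspect.getsourcelines(greet)
print(start)            # first line number of `def greet` in this file
print("".join(lines))   # the function's source as a single string
```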
Row 2,304
def get_del_batches(self, objs, fields): field_names = [field.name for field in fields] conn_batch_size = max( connections[self.using].ops.bulk_batch_size(field_names, objs), 1 ) if len(objs) > conn_batch_size: return [ objs[i : i + conn_batch_size] for i in range(0, len(objs), conn_batch_size) ] else: return [objs]
Return the objs in suitably sized batches for the used connection.
doc_len: 11, words: 39, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_del_batches(self, objs, fields): field_names = [field.name for field in fields] conn_batch_size = max( connections[self.using].ops.bulk_batch_size(field_names, objs), 1 ) if len(objs) > conn_batch_size: return [ objs[i : i + conn_batch_size] for i in range(0, len(objs), conn_batch_size) ] else: return [objs] ``` ###Assistant : Return the objs in suitably sized batches for the used connection.
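Note: the method above is Django-internal, but the batching it performs is plain list slicing; a standalone sketch of that logic (not the Django helper itself):

```python
def batches(objs, batch_size):
    # One batch if everything fits, otherwise fixed-size slices.
    if len(objs) > batch_size:
        return [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]
    return [objs]

print(batches(list(range(7)), 3))  # [[0, 1, 2], [3, 4, 5], [6]]
```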
Row 2,305
def save(self, global_step): save_path = osp.join(self.directory, f"{global_step:09d}.ckpt") self.checkpoint.save(save_path) self.latest_checkpoint = save_path self.queue.put(True)
Create a new checkpoint. Args: global_step (int): The iteration number which will be used to name the checkpoint.
doc_len: 18, words: 12, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save(self, global_step): save_path = osp.join(self.directory, f"{global_step:09d}.ckpt") self.checkpoint.save(save_path) self.latest_checkpoint = save_path self.queue.put(True) ``` ###Assistant : Create a new checkpoint. Args: global_step (int): The iteration number which will be used to name the checkpoint.
Row 2,306
def versions_from_file(filename): try: with open(filename) as f: contents = f.read() except OSError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = # END VERSION_JSON", contents, re.M | re.S) if not mo: mo = re.search(r"version_json = # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1))
Try to determine the version from _version.py if present.\n(.*)\r\n(.*)
doc_len: 9, words: 52, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def versions_from_file(filename): try: with open(filename) as f: contents = f.read() except OSError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = # END VERSION_JSON", contents, re.M | re.S) if not mo: mo = re.search(r"version_json = # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) ``` ###Assistant : Try to determine the version from _version.py if present.\n(.*)\r\n(.*)
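Note: the code in this record looks damaged — both re.search calls have lost their triple-quoted capture pattern, and its remnants (\n(.*) and \r\n(.*)) ended up appended to the docs field. A reconstruction based on upstream Versioneer is sketched below; treat it as an assumption, not a verbatim quote of this dataset's source.

```python
import re

def versions_from_file_reconstructed(contents):
    # Reconstructed search patterns (assumed from upstream Versioneer).
    mo = re.search(r"version_json = '''\n(.*)'''  # END VERSION_JSON",
                   contents, re.M | re.S)
    if not mo:
        mo = re.search(r"version_json = '''\r\n(.*)'''  # END VERSION_JSON",
                       contents, re.M | re.S)
    return mo.group(1) if mo else None
```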
Row 2,307
def get_total_allocated_amount(payment_entry): return frappe.db.sql( , (payment_entry.payment_document, payment_entry.payment_entry), as_dict=True, )
SELECT SUM(btp.allocated_amount) as allocated_amount, bt.name FROM `tabBank Transaction Payments` as btp LEFT JOIN `tabBank Transaction` bt ON bt.name=btp.parent WHERE btp.payment_document = %s AND btp.payment_entry = %s AND bt.docstatus = 1
doc_len: 30, words: 9, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_total_allocated_amount(payment_entry): return frappe.db.sql( , (payment_entry.payment_document, payment_entry.payment_entry), as_dict=True, ) ``` ###Assistant : SELECT SUM(btp.allocated_amount) as allocated_amount, bt.name FROM `tabBank Transaction Payments` as btp LEFT JOIN `tabBank Transaction` bt ON bt.name=btp.parent WHERE btp.payment_document = %s AND btp.payment_entry = %s AND bt.docstatus = 1
Row 2,308
def min_weight_matching(G, maxcardinality=False, weight="weight"): if len(G.edges) == 0: return max_weight_matching(G, maxcardinality, weight) G_edges = G.edges(data=weight, default=1) min_weight = min(w for _, _, w in G_edges) InvG = nx.Graph() edges = ((u, v, 1 / (1 + w - min_weight)) for u, v, w in G_edges) InvG.add_weighted_edges_from(edges, weight=weight) return max_weight_matching(InvG, maxcardinality, weight) @not_implemented_for("multigraph") @not_implemented_for("directed")
Computing a minimum-weight maximal matching of G. Use reciprocal edge weights with the maximum-weight algorithm. A matching is a subset of edges in which no node occurs more than once. The weight of a matching is the sum of the weights of its edges. A maximal matching cannot add more edges and still be a matching. The cardinality of a matching is the number of matched edges. This method replaces the weights with their reciprocal and then runs :func:`max_weight_matching`. Read the documentation of max_weight_matching for more information. Parameters ---------- G : NetworkX graph Undirected graph maxcardinality: bool, optional (default=False) If maxcardinality is True, compute the maximum-cardinality matching with minimum weight among all maximum-cardinality matchings. weight: string, optional (default='weight') Edge data key corresponding to the edge weight. If key not found, uses 1 as weight. Returns ------- matching : set A minimal weight matching of the graph.
doc_len: 146, words: 53, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def min_weight_matching(G, maxcardinality=False, weight="weight"): if len(G.edges) == 0: return max_weight_matching(G, maxcardinality, weight) G_edges = G.edges(data=weight, default=1) min_weight = min(w for _, _, w in G_edges) InvG = nx.Graph() edges = ((u, v, 1 / (1 + w - min_weight)) for u, v, w in G_edges) InvG.add_weighted_edges_from(edges, weight=weight) return max_weight_matching(InvG, maxcardinality, weight) @not_implemented_for("multigraph") @not_implemented_for("directed") ``` ###Assistant : Computing a minimum-weight maximal matching of G. Use reciprocal edge weights with the maximum-weight algorithm. A matching is a subset of edges in which no node occurs more than once. The weight of a matching is the sum of the weights of its edges. A maximal matching cannot add more edges and still be a matching. The cardinality of a matching is the number of matched edges. This method replaces the weights with their reciprocal and then runs :func:`max_weight_matching`. Read the documentation of max_weight_matching for more information. Parameters ---------- G : NetworkX graph Undirected graph maxcardinality: bool, optional (default=False) If maxcardinality is True, compute the maximum-cardinality matching with minimum weight among all maximum-cardinality matchings. weight: string, optional (default='weight') Edge data key corresponding to the edge weight. If key not found, uses 1 as weight. Returns ------- matching : set A minimal weight matching of the graph.
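Note: a small usage sketch of the NetworkX function documented above (edge weights are illustrative; recent NetworkX releases dropped the maxcardinality argument, so it is not passed here):

```python
import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(1, 2, 5), (2, 3, 1), (3, 4, 5), (4, 1, 1)])
# Maximum-cardinality matching of minimum total weight: edges (2, 3) and (4, 1).
print(nx.min_weight_matching(G))  # e.g. {(2, 3), (1, 4)}; pair ordering may vary
```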
Row 2,309
def site_config_dir(self) -> str: return self._append_app_name_and_version("/Library/Preferences")
:return: config directory shared by the users, e.g. ``/Library/Preferences/$appname``
doc_len: 9, words: 6, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def site_config_dir(self) -> str: return self._append_app_name_and_version("/Library/Preferences") ``` ###Assistant : :return: config directory shared by the users, e.g. ``/Library/Preferences/$appname``
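Note: this property reads like the macOS branch of a platform-dirs style library; a usage sketch through what is assumed to be the platformdirs public API (the app name is illustrative):

```python
from platformdirs import PlatformDirs  # assumption: public package exposing this property

dirs = PlatformDirs("myapp")
print(dirs.site_config_dir)  # on macOS, something like /Library/Preferences/myapp
```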
Row 2,310
def _cmp_op(self, other, op_name): lhs_dtype_class = self._get_dtype_cmp_class(self._dtype) rhs_dtype_class = self._get_dtype_cmp_class(other._dtype) res_dtype = get_dtype(bool) # In HDK comparison with NULL always results in NULL, # but in pandas it is True for 'ne' comparison and False # for others. # Also pandas allows 'eq' and 'ne' comparison for values # of incompatible types which doesn't work in HDK. if lhs_dtype_class != rhs_dtype_class: if op_name == "eq" or op_name == "ne": return LiteralExpr(op_name == "ne") else: raise TypeError( f"Invalid comparison between {self._dtype} and {other._dtype}" ) else: cmp = OpExpr(self.binary_operations[op_name], [self, other], res_dtype) return build_if_then_else( self.is_null(), LiteralExpr(op_name == "ne"), cmp, res_dtype )
Build a comparison expression. Parameters ---------- other : BaseExpr A value to compare with. op_name : str The comparison operation name. Returns ------- BaseExpr The resulting comparison expression.
doc_len: 28, words: 99, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _cmp_op(self, other, op_name): lhs_dtype_class = self._get_dtype_cmp_class(self._dtype) rhs_dtype_class = self._get_dtype_cmp_class(other._dtype) res_dtype = get_dtype(bool) # In HDK comparison with NULL always results in NULL, # but in pandas it is True for 'ne' comparison and False # for others. # Also pandas allows 'eq' and 'ne' comparison for values # of incompatible types which doesn't work in HDK. if lhs_dtype_class != rhs_dtype_class: if op_name == "eq" or op_name == "ne": return LiteralExpr(op_name == "ne") else: raise TypeError( f"Invalid comparison between {self._dtype} and {other._dtype}" ) else: cmp = OpExpr(self.binary_operations[op_name], [self, other], res_dtype) return build_if_then_else( self.is_null(), LiteralExpr(op_name == "ne"), cmp, res_dtype ) ``` ###Assistant : Build a comparison expression. Parameters ---------- other : BaseExpr A value to compare with. op_name : str The comparison operation name. Returns ------- BaseExpr The resulting comparison expression.
Row 2,311
def set_active(self, index): if index not in range(len(self.labels)): raise ValueError(f'Invalid CheckButton index: {index}') if colors.same_color( self._crosses.get_facecolor()[index], colors.to_rgba("none") ): self._crosses.get_facecolor()[index] = colors.to_rgba("k") else: self._crosses.get_facecolor()[index] = colors.to_rgba("none") if hasattr(self, "_rectangles"): for i, p in enumerate(self._rectangles): p.set_facecolor("k" if colors.same_color( p.get_facecolor(), colors.to_rgba("none")) else "none") if self.drawon: self.ax.figure.canvas.draw() if self.eventson: self._observers.process('clicked', self.labels[index].get_text())
Toggle (activate or deactivate) a check button by index. Callbacks will be triggered if :attr:`eventson` is True. Parameters ---------- index : int Index of the check button to toggle. Raises ------ ValueError If *index* is invalid.
doc_len: 36, words: 47, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_active(self, index): if index not in range(len(self.labels)): raise ValueError(f'Invalid CheckButton index: {index}') if colors.same_color( self._crosses.get_facecolor()[index], colors.to_rgba("none") ): self._crosses.get_facecolor()[index] = colors.to_rgba("k") else: self._crosses.get_facecolor()[index] = colors.to_rgba("none") if hasattr(self, "_rectangles"): for i, p in enumerate(self._rectangles): p.set_facecolor("k" if colors.same_color( p.get_facecolor(), colors.to_rgba("none")) else "none") if self.drawon: self.ax.figure.canvas.draw() if self.eventson: self._observers.process('clicked', self.labels[index].get_text()) ``` ###Assistant : Toggle (activate or deactivate) a check button by index. Callbacks will be triggered if :attr:`eventson` is True. Parameters ---------- index : int Index of the check button to toggle. Raises ------ ValueError If *index* is invalid.
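Note: a minimal Matplotlib sketch exercising the method documented above (labels and initial states are illustrative):

```python
import matplotlib.pyplot as plt
from matplotlib.widgets import CheckButtons

fig, ax = plt.subplots()
check = CheckButtons(ax, labels=["grid", "legend"], actives=[True, False])
check.set_active(1)   # programmatically toggle the second box; fires 'clicked' callbacks
plt.show()
```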
Row 2,312
async def async_update(self) -> None: await self.ebox_data.async_update() if self.entity_description.key in self.ebox_data.data: self._attr_native_value = round( self.ebox_data.data[self.entity_description.key], 2 )
Get the latest data from EBox and update the state.
doc_len: 10, words: 17, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_update(self) -> None: await self.ebox_data.async_update() if self.entity_description.key in self.ebox_data.data: self._attr_native_value = round( self.ebox_data.data[self.entity_description.key], 2 ) ``` ###Assistant : Get the latest data from EBox and update the state.
Row 2,313
def panoptic_evaluate(self, dataset, results, topk=20): # image to annotations gt_json = dataset.coco.img_ann_map result_files, tmp_dir = dataset.format_results(results) pred_json = mmcv.load(result_files['panoptic'])['annotations'] pred_folder = osp.join(tmp_dir.name, 'panoptic') gt_folder = dataset.seg_prefix pqs = {} prog_bar = mmcv.ProgressBar(len(results)) for i in range(len(results)): data_info = dataset.prepare_train_img(i) image_id = data_info['img_info']['id'] gt_ann = { 'image_id': image_id, 'segments_info': gt_json[image_id], 'file_name': data_info['img_info']['segm_file'] } pred_ann = pred_json[i] pq_stat = pq_compute_single_core( i, [(gt_ann, pred_ann)], gt_folder, pred_folder, dataset.categories, dataset.file_client, print_log=False) pq_results, classwise_results = pq_stat.pq_average( dataset.categories, isthing=None) pqs[i] = pq_results['pq'] prog_bar.update() if tmp_dir is not None: tmp_dir.cleanup() # descending select topk image pqs = list(sorted(pqs.items(), key=lambda kv: kv[1])) good_pqs = pqs[-topk:] bad_pqs = pqs[:topk] return good_pqs, bad_pqs
Evaluation for panoptic segmentation. Args: dataset (Dataset): A PyTorch dataset. results (list): Panoptic segmentation results from test results pkl file. topk (int): Number of the highest topk and lowest topk after evaluation index sorting. Default: 20. Returns: tuple: A tuple contains good samples and bad samples. good_pqs (dict[int, float]): A dict contains good samples's indices in dataset and model's performance on them. bad_pqs (dict[int, float]): A dict contains bad samples's indices in dataset and model's performance on them.
doc_len: 78, words: 102, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def panoptic_evaluate(self, dataset, results, topk=20): # image to annotations gt_json = dataset.coco.img_ann_map result_files, tmp_dir = dataset.format_results(results) pred_json = mmcv.load(result_files['panoptic'])['annotations'] pred_folder = osp.join(tmp_dir.name, 'panoptic') gt_folder = dataset.seg_prefix pqs = {} prog_bar = mmcv.ProgressBar(len(results)) for i in range(len(results)): data_info = dataset.prepare_train_img(i) image_id = data_info['img_info']['id'] gt_ann = { 'image_id': image_id, 'segments_info': gt_json[image_id], 'file_name': data_info['img_info']['segm_file'] } pred_ann = pred_json[i] pq_stat = pq_compute_single_core( i, [(gt_ann, pred_ann)], gt_folder, pred_folder, dataset.categories, dataset.file_client, print_log=False) pq_results, classwise_results = pq_stat.pq_average( dataset.categories, isthing=None) pqs[i] = pq_results['pq'] prog_bar.update() if tmp_dir is not None: tmp_dir.cleanup() # descending select topk image pqs = list(sorted(pqs.items(), key=lambda kv: kv[1])) good_pqs = pqs[-topk:] bad_pqs = pqs[:topk] return good_pqs, bad_pqs ``` ###Assistant : Evaluation for panoptic segmentation. Args: dataset (Dataset): A PyTorch dataset. results (list): Panoptic segmentation results from test results pkl file. topk (int): Number of the highest topk and lowest topk after evaluation index sorting. Default: 20. Returns: tuple: A tuple contains good samples and bad samples. good_pqs (dict[int, float]): A dict contains good samples's indices in dataset and model's performance on them. bad_pqs (dict[int, float]): A dict contains bad samples's indices in dataset and model's performance on them.
Row 2,314
def _compute_mi_cd(c, d, n_neighbors): n_samples = c.shape[0] c = c.reshape((-1, 1)) radius = np.empty(n_samples) label_counts = np.empty(n_samples) k_all = np.empty(n_samples) nn = NearestNeighbors() for label in np.unique(d): mask = d == label count = np.sum(mask) if count > 1: k = min(n_neighbors, count - 1) nn.set_params(n_neighbors=k) nn.fit(c[mask]) r = nn.kneighbors()[0] radius[mask] = np.nextafter(r[:, -1], 0) k_all[mask] = k label_counts[mask] = count # Ignore points with unique labels. mask = label_counts > 1 n_samples = np.sum(mask) label_counts = label_counts[mask] k_all = k_all[mask] c = c[mask] radius = radius[mask] kd = KDTree(c) m_all = kd.query_radius(c, radius, count_only=True, return_distance=False) m_all = np.array(m_all) mi = ( digamma(n_samples) + np.mean(digamma(k_all)) - np.mean(digamma(label_counts)) - np.mean(digamma(m_all)) ) return max(0, mi)
Compute mutual information between continuous and discrete variables. Parameters ---------- c : ndarray, shape (n_samples,) Samples of a continuous random variable. d : ndarray, shape (n_samples,) Samples of a discrete random variable. n_neighbors : int Number of nearest neighbors to search for each point, see [1]_. Returns ------- mi : float Estimated mutual information. If it turned out to be negative it is replace by 0. Notes ----- True mutual information can't be negative. If its estimate by a numerical method is negative, it means (providing the method is adequate) that the mutual information is close to 0 and replacing it by 0 is a reasonable strategy. References ---------- .. [1] B. C. Ross "Mutual Information between Discrete and Continuous Data Sets". PLoS ONE 9(2), 2014.
doc_len: 126, words: 113, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _compute_mi_cd(c, d, n_neighbors): n_samples = c.shape[0] c = c.reshape((-1, 1)) radius = np.empty(n_samples) label_counts = np.empty(n_samples) k_all = np.empty(n_samples) nn = NearestNeighbors() for label in np.unique(d): mask = d == label count = np.sum(mask) if count > 1: k = min(n_neighbors, count - 1) nn.set_params(n_neighbors=k) nn.fit(c[mask]) r = nn.kneighbors()[0] radius[mask] = np.nextafter(r[:, -1], 0) k_all[mask] = k label_counts[mask] = count # Ignore points with unique labels. mask = label_counts > 1 n_samples = np.sum(mask) label_counts = label_counts[mask] k_all = k_all[mask] c = c[mask] radius = radius[mask] kd = KDTree(c) m_all = kd.query_radius(c, radius, count_only=True, return_distance=False) m_all = np.array(m_all) mi = ( digamma(n_samples) + np.mean(digamma(k_all)) - np.mean(digamma(label_counts)) - np.mean(digamma(m_all)) ) return max(0, mi) ``` ###Assistant : Compute mutual information between continuous and discrete variables. Parameters ---------- c : ndarray, shape (n_samples,) Samples of a continuous random variable. d : ndarray, shape (n_samples,) Samples of a discrete random variable. n_neighbors : int Number of nearest neighbors to search for each point, see [1]_. Returns ------- mi : float Estimated mutual information. If it turned out to be negative it is replace by 0. Notes ----- True mutual information can't be negative. If its estimate by a numerical method is negative, it means (providing the method is adequate) that the mutual information is close to 0 and replacing it by 0 is a reasonable strategy. References ---------- .. [1] B. C. Ross "Mutual Information between Discrete and Continuous Data Sets". PLoS ONE 9(2), 2014.
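Note: the helper above appears to back scikit-learn's public mutual-information estimators; a usage sketch through that public API with synthetic data:

```python
import numpy as np
from sklearn.feature_selection import mutual_info_classif

rng = np.random.RandomState(0)
X = rng.rand(200, 1)                 # one continuous feature
y = (X[:, 0] > 0.5).astype(int)      # a discrete label derived from it
print(mutual_info_classif(X, y, n_neighbors=3, random_state=0))  # clearly positive MI
```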
Row 2,315
def handle_app_config(self, app_config, **options): raise NotImplementedError( "Subclasses of AppCommand must provide a handle_app_config() method." )
Perform the command's actions for app_config, an AppConfig instance corresponding to an application label given on the command line.
doc_len: 19, words: 15, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def handle_app_config(self, app_config, **options): raise NotImplementedError( "Subclasses of AppCommand must provide a handle_app_config() method." ) ``` ###Assistant : Perform the command's actions for app_config, an AppConfig instance corresponding to an application label given on the command line.
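Note: per the docstring above, subclasses of AppCommand must override this hook; a minimal sketch of such a Django management command (the command body is illustrative):

```python
from django.core.management.base import AppCommand


class Command(AppCommand):
    help = "Print the filesystem path of each app label given on the command line."

    def handle_app_config(self, app_config, **options):
        # Called once per app label passed to the command.
        return f"{app_config.label}: {app_config.path}"
```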
Row 2,316
def test_inferred_max_features_integer(max_features): clf = RandomForestClassifier(n_estimators=5, random_state=0) transformer = SelectFromModel( estimator=clf, max_features=max_features, threshold=-np.inf ) X_trans = transformer.fit_transform(data, y) assert transformer.max_features_ == max_features assert X_trans.shape[1] == transformer.max_features_ @pytest.mark.parametrize( "max_features", [lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)], )
Check max_features_ and output shape for integer max_features.
doc_len: 8, words: 38, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_inferred_max_features_integer(max_features): clf = RandomForestClassifier(n_estimators=5, random_state=0) transformer = SelectFromModel( estimator=clf, max_features=max_features, threshold=-np.inf ) X_trans = transformer.fit_transform(data, y) assert transformer.max_features_ == max_features assert X_trans.shape[1] == transformer.max_features_ @pytest.mark.parametrize( "max_features", [lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)], ) ``` ###Assistant : Check max_features_ and output shape for integer max_features.
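Note: the test above pins down the integer max_features behaviour; the equivalent user-facing call looks roughly like this (synthetic data, illustrative values):

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel

X, y = make_classification(n_samples=100, n_features=10, random_state=0)
selector = SelectFromModel(
    RandomForestClassifier(n_estimators=5, random_state=0),
    max_features=3,       # keep exactly three features...
    threshold=-np.inf,    # ...by disabling the importance threshold
)
print(selector.fit_transform(X, y).shape)  # (100, 3)
```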
Row 2,317
def test_naive_all_pairs_lowest_common_ancestor3(self): all_pairs = product(self.DG.nodes(), self.DG.nodes()) ans = naive_all_pairs_lca(self.DG, pairs=all_pairs) self.assert_lca_dicts_same(dict(ans), self.gold)
Produces the correct results when all pairs given as a generator.
doc_len: 11, words: 12, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_naive_all_pairs_lowest_common_ancestor3(self): all_pairs = product(self.DG.nodes(), self.DG.nodes()) ans = naive_all_pairs_lca(self.DG, pairs=all_pairs) self.assert_lca_dicts_same(dict(ans), self.gold) ``` ###Assistant : Produces the correct results when all pairs given as a generator.
Row 2,318
def _resolve_dependency(dependency): if dependency[0] != "__setting__": return dependency, False resolved_app_label, resolved_object_name = getattr( settings, dependency[1] ).split(".") return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True
Return the resolved dependency and a boolean denoting whether or not it was swappable.
doc_len: 14, words: 22, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _resolve_dependency(dependency): if dependency[0] != "__setting__": return dependency, False resolved_app_label, resolved_object_name = getattr( settings, dependency[1] ).split(".") return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True ``` ###Assistant : Return the resolved dependency and a boolean denoting whether or not it was swappable.
Row 2,319
def lowest_common_ancestor(G, node1, node2, default=None): ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)])) if ans: assert len(ans) == 1 return ans[0][1] else: return default @not_implemented_for("undirected") @not_implemented_for("multigraph")
Compute the lowest common ancestor of the given pair of nodes. Parameters ---------- G : NetworkX directed graph node1, node2 : nodes in the graph. default : object Returned if no common ancestor between `node1` and `node2` Returns ------- The lowest common ancestor of node1 and node2, or default if they have no common ancestors. Examples -------- >>> G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (2, 4), (1, 6), (4, 5)]) >>> nx.lowest_common_ancestor(G, 3, 5) 2 We can also set `default` argument as below. The value of default is returned if there are no common ancestors of given two nodes. >>> G = nx.DiGraph([(4, 5), (12, 13)]) >>> nx.lowest_common_ancestor(G, 12, 5, default="No common ancestors!") 'No common ancestors!' Notes ----- Only defined on non-null directed acyclic graphs. Takes n log(n) time in the size of the graph. See `all_pairs_lowest_common_ancestor` when you have more than one pair of nodes of interest. See Also -------- tree_all_pairs_lowest_common_ancestor all_pairs_lowest_common_ancestor
doc_len: 155, words: 23, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def lowest_common_ancestor(G, node1, node2, default=None): ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)])) if ans: assert len(ans) == 1 return ans[0][1] else: return default @not_implemented_for("undirected") @not_implemented_for("multigraph") ``` ###Assistant : Compute the lowest common ancestor of the given pair of nodes. Parameters ---------- G : NetworkX directed graph node1, node2 : nodes in the graph. default : object Returned if no common ancestor between `node1` and `node2` Returns ------- The lowest common ancestor of node1 and node2, or default if they have no common ancestors. Examples -------- >>> G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (2, 4), (1, 6), (4, 5)]) >>> nx.lowest_common_ancestor(G, 3, 5) 2 We can also set `default` argument as below. The value of default is returned if there are no common ancestors of given two nodes. >>> G = nx.DiGraph([(4, 5), (12, 13)]) >>> nx.lowest_common_ancestor(G, 12, 5, default="No common ancestors!") 'No common ancestors!' Notes ----- Only defined on non-null directed acyclic graphs. Takes n log(n) time in the size of the graph. See `all_pairs_lowest_common_ancestor` when you have more than one pair of nodes of interest. See Also -------- tree_all_pairs_lowest_common_ancestor all_pairs_lowest_common_ancestor
Row 2,320
def get_data(filters=None): data = [] conditions = get_filter_conditions(filters) fee_details = frappe.db.sql( % (conditions), as_dict=1, ) for entry in fee_details: data.append( { "program": entry.program, "fees_collected": entry.paid_amount, "outstanding_amount": entry.outstanding_amount, "grand_total": entry.grand_total, } ) return data
SELECT FeesCollected.program, FeesCollected.paid_amount, FeesCollected.outstanding_amount, FeesCollected.grand_total FROM ( SELECT sum(grand_total) - sum(outstanding_amount) AS paid_amount, program, sum(outstanding_amount) AS outstanding_amount, sum(grand_total) AS grand_total FROM `tabFees` WHERE docstatus = 1 and program IS NOT NULL %s GROUP BY program ) AS FeesCollected ORDER BY FeesCollected.paid_amount DESC
doc_len: 42, words: 33, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_data(filters=None): data = [] conditions = get_filter_conditions(filters) fee_details = frappe.db.sql( % (conditions), as_dict=1, ) for entry in fee_details: data.append( { "program": entry.program, "fees_collected": entry.paid_amount, "outstanding_amount": entry.outstanding_amount, "grand_total": entry.grand_total, } ) return data ``` ###Assistant : SELECT FeesCollected.program, FeesCollected.paid_amount, FeesCollected.outstanding_amount, FeesCollected.grand_total FROM ( SELECT sum(grand_total) - sum(outstanding_amount) AS paid_amount, program, sum(outstanding_amount) AS outstanding_amount, sum(grand_total) AS grand_total FROM `tabFees` WHERE docstatus = 1 and program IS NOT NULL %s GROUP BY program ) AS FeesCollected ORDER BY FeesCollected.paid_amount DESC
Row 2,321
def get_dependencies_from_json(ireq): if ireq.editable or not is_pinned_requirement(ireq): return # It is technically possible to parse extras out of the JSON API's # requirement format, but it is such a chore let's just use the simple API. if ireq.extras: return session = requests.session() atexit.register(session.close) version = str(ireq.req.specifier).lstrip("=")
Retrieves dependencies for the given install requirement from the json api. :param ireq: A single InstallRequirement :type ireq: :class:`~pipenv.patched.pip._internal.req.req_install.InstallRequirement` :return: A set of dependency lines for generating new InstallRequirements. :rtype: set(str) or None
doc_len: 33, words: 46, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_dependencies_from_json(ireq): if ireq.editable or not is_pinned_requirement(ireq): return # It is technically possible to parse extras out of the JSON API's # requirement format, but it is such a chore let's just use the simple API. if ireq.extras: return session = requests.session() atexit.register(session.close) version = str(ireq.req.specifier).lstrip("=") ``` ###Assistant : Retrieves dependencies for the given install requirement from the json api. :param ireq: A single InstallRequirement :type ireq: :class:`~pipenv.patched.pip._internal.req.req_install.InstallRequirement` :return: A set of dependency lines for generating new InstallRequirements. :rtype: set(str) or None
Row 2,322
def assign(self, **kwargs) -> DataFrame: r data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data
Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15
doc_len: 268, words: 20, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def assign(self, **kwargs) -> DataFrame: r data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data ``` ###Assistant : Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15
Row 2,323
def resize_feats(self, feats): out = [] for i in range(len(feats)): if i == 0: out.append( F.interpolate( feats[0], size=feats[i + 1].shape[-2:], mode='bilinear', align_corners=False)) elif i == len(feats) - 1: out.append( F.interpolate( feats[i], size=feats[i - 1].shape[-2:], mode='bilinear', align_corners=False)) else: out.append(feats[i]) return out
Downsample the first feat and upsample last feat in feats.
doc_len: 10, words: 40, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def resize_feats(self, feats): out = [] for i in range(len(feats)): if i == 0: out.append( F.interpolate( feats[0], size=feats[i + 1].shape[-2:], mode='bilinear', align_corners=False)) elif i == len(feats) - 1: out.append( F.interpolate( feats[i], size=feats[i - 1].shape[-2:], mode='bilinear', align_corners=False)) else: out.append(feats[i]) return out ``` ###Assistant : Downsample the first feat and upsample last feat in feats.
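Note: the method above boils down to bilinear interpolation that aligns neighbouring feature-map resolutions; a standalone PyTorch sketch of that core call with made-up shapes:

```python
import torch
import torch.nn.functional as F

feat_hi = torch.randn(1, 256, 64, 64)   # higher-resolution level
feat_lo = torch.randn(1, 256, 32, 32)   # next, lower-resolution level

# Downsample the first level to match the second, as resize_feats does for i == 0.
aligned = F.interpolate(feat_hi, size=feat_lo.shape[-2:],
                        mode='bilinear', align_corners=False)
print(aligned.shape)  # torch.Size([1, 256, 32, 32])
```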
Row 2,324
def get_bin_list(filters): conditions = [] if filters.item_code: conditions.append("item_code = '%s' " % filters.item_code) if filters.warehouse: warehouse_details = frappe.db.get_value( "Warehouse", filters.warehouse, ["lft", "rgt"], as_dict=1 ) if warehouse_details: conditions.append( " exists (select name from `tabWarehouse` wh \ where wh.lft >= %s and wh.rgt <= %s and bin.warehouse = wh.name)" % (warehouse_details.lft, warehouse_details.rgt) ) bin_list = frappe.db.sql( .format( conditions=" where " + " and ".join(conditions) if conditions else "" ), as_dict=1, ) return bin_list
select item_code, warehouse, actual_qty, planned_qty, indented_qty, ordered_qty, reserved_qty, reserved_qty_for_production, reserved_qty_for_sub_contract, projected_qty from tabBin bin {conditions} order by item_code, warehouse
doc_len: 19, words: 71, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_bin_list(filters): conditions = [] if filters.item_code: conditions.append("item_code = '%s' " % filters.item_code) if filters.warehouse: warehouse_details = frappe.db.get_value( "Warehouse", filters.warehouse, ["lft", "rgt"], as_dict=1 ) if warehouse_details: conditions.append( " exists (select name from `tabWarehouse` wh \ where wh.lft >= %s and wh.rgt <= %s and bin.warehouse = wh.name)" % (warehouse_details.lft, warehouse_details.rgt) ) bin_list = frappe.db.sql( .format( conditions=" where " + " and ".join(conditions) if conditions else "" ), as_dict=1, ) return bin_list ``` ###Assistant : select item_code, warehouse, actual_qty, planned_qty, indented_qty, ordered_qty, reserved_qty, reserved_qty_for_production, reserved_qty_for_sub_contract, projected_qty from tabBin bin {conditions} order by item_code, warehouse
Row 2,325
def run_eagerly(self): if ( self.dynamic and self._run_eagerly is False ): # pylint:disable=g-bool-id-comparison # TODO(fchollet): consider using py_func to enable this. raise ValueError( "Your model contains layers that can only be " "successfully run in eager execution (layers " "constructed with `dynamic=True`). " "You cannot set `run_eagerly=False`." ) if self._cluster_coordinator and self._run_eagerly: raise ValueError( "When using `Model` with `ParameterServerStrategy`, " "`run_eagerly` is not supported." ) # Run eagerly logic, by priority: # (1) Dynamic models must be run eagerly. # (2) Explicitly setting run_eagerly causes a Model to be run eagerly. # (3) Not explicitly setting run_eagerly defaults to TF's global setting. return ( self.dynamic or self._run_eagerly or (tf.config.functions_run_eagerly() and self._run_eagerly is None) )
Settable attribute indicating whether the model should run eagerly. Running eagerly means that your model will be run step by step, like Python code. Your model might run slower, but it should become easier for you to debug it by stepping into individual layer calls. By default, we will attempt to compile your model to a static graph to deliver the best execution performance. Returns: Boolean, whether the model should run eagerly.
doc_len: 72, words: 113, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def run_eagerly(self): if ( self.dynamic and self._run_eagerly is False ): # pylint:disable=g-bool-id-comparison # TODO(fchollet): consider using py_func to enable this. raise ValueError( "Your model contains layers that can only be " "successfully run in eager execution (layers " "constructed with `dynamic=True`). " "You cannot set `run_eagerly=False`." ) if self._cluster_coordinator and self._run_eagerly: raise ValueError( "When using `Model` with `ParameterServerStrategy`, " "`run_eagerly` is not supported." ) # Run eagerly logic, by priority: # (1) Dynamic models must be run eagerly. # (2) Explicitly setting run_eagerly causes a Model to be run eagerly. # (3) Not explicitly setting run_eagerly defaults to TF's global setting. return ( self.dynamic or self._run_eagerly or (tf.config.functions_run_eagerly() and self._run_eagerly is None) ) ``` ###Assistant : Settable attribute indicating whether the model should run eagerly. Running eagerly means that your model will be run step by step, like Python code. Your model might run slower, but it should become easier for you to debug it by stepping into individual layer calls. By default, we will attempt to compile your model to a static graph to deliver the best execution performance. Returns: Boolean, whether the model should run eagerly.
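Note: a short sketch of how the setting documented above is typically toggled (via compile) and read back:

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer="sgd", loss="mse", run_eagerly=True)
print(model.run_eagerly)  # True: each step runs as plain Python, which eases debugging
```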
Row 2,326
def get_preview_context(self, request, *args, **kwargs): return {"object": self, "request": request}
Returns a context dictionary for use in templates for previewing this object.
doc_len: 12, words: 10, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_preview_context(self, request, *args, **kwargs): return {"object": self, "request": request} ``` ###Assistant : Returns a context dictionary for use in templates for previewing this object.
Row 2,327
async def test_default_disabling_entity(hass, create_registrations, webhook_client): webhook_id = create_registrations[1]["webhook_id"] webhook_url = f"/api/webhook/{webhook_id}" reg_resp = await webhook_client.post( webhook_url, json={ "type": "register_sensor", "data": { "name": "Battery State", "type": "sensor", "unique_id": "battery_state", "default_disabled": True, }, }, ) assert reg_resp.status == HTTPStatus.CREATED json = await reg_resp.json() assert json == {"success": True} await hass.async_block_till_done() entity = hass.states.get("sensor.test_1_battery_state") assert entity is None assert ( er.async_get(hass).async_get("sensor.test_1_battery_state").disabled_by == er.RegistryEntryDisabler.INTEGRATION )
Test that sensors can be disabled by default upon registration.
doc_len: 10, words: 61, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_default_disabling_entity(hass, create_registrations, webhook_client): webhook_id = create_registrations[1]["webhook_id"] webhook_url = f"/api/webhook/{webhook_id}" reg_resp = await webhook_client.post( webhook_url, json={ "type": "register_sensor", "data": { "name": "Battery State", "type": "sensor", "unique_id": "battery_state", "default_disabled": True, }, }, ) assert reg_resp.status == HTTPStatus.CREATED json = await reg_resp.json() assert json == {"success": True} await hass.async_block_till_done() entity = hass.states.get("sensor.test_1_battery_state") assert entity is None assert ( er.async_get(hass).async_get("sensor.test_1_battery_state").disabled_by == er.RegistryEntryDisabler.INTEGRATION ) ``` ###Assistant : Test that sensors can be disabled by default upon registration.
Row 2,328
def _create_dd_meta(cls, dataset_info): # Collect necessary information from dataset_info schema = dataset_info["schema"] index = dataset_info["index"] categories = dataset_info["categories"] partition_obj = dataset_info["partitions"] partitions = dataset_info["partition_names"] physical_column_names = dataset_info.get("physical_schema", schema).names columns = None # Set index and column names using # pandas metadata (when available) pandas_metadata = _get_pandas_metadata(schema) if pandas_metadata: ( index_names, column_names, storage_name_mapping, column_index_names, ) = _parse_pandas_metadata(pandas_metadata) if categories is None: categories = [] for col in pandas_metadata["columns"]: if (col["pandas_type"] == "categorical") and ( col["name"] not in categories ): categories.append(col["name"]) else: # No pandas metadata implies no index, unless selected by the user index_names = [] column_names = physical_column_names storage_name_mapping = {k: k for k in column_names} column_index_names = [None] if index is None and index_names: # Pandas metadata has provided the index name for us index = index_names # Ensure that there is no overlap between partition columns # and explicit column storage if partitions: _partitions = [p for p in partitions if p not in physical_column_names] if not _partitions: partitions = [] dataset_info["partitions"] = None dataset_info["partition_keys"] = {} dataset_info["partition_names"] = partitions elif len(_partitions) != len(partitions): raise ValueError( "No partition-columns should be written in the \n" "file unless they are ALL written in the file.\n" "physical columns: {} | partitions: {}".format( physical_column_names, partitions ) ) column_names, index_names = _normalize_index_columns( columns, column_names + partitions, index, index_names ) all_columns = index_names + column_names # Check that categories are included in columns if categories and not set(categories).intersection(all_columns): raise ValueError( "categories not in available columns.\n" "categories: {} | columns: {}".format(categories, list(all_columns)) ) dtypes = _get_pyarrow_dtypes(schema, categories) dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()} index_cols = index or () meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names) if categories: # Make sure all categories are set to "unknown". # Cannot include index names in the `cols` argument. meta = clear_known_categories( meta, cols=[c for c in categories if c not in meta.index.names] ) if partition_obj: for partition in partition_obj: if isinstance(index, list) and partition.name == index[0]: # Index from directory structure meta.index = pd.CategoricalIndex( [], categories=partition.keys, name=index[0] ) elif partition.name == meta.index.name: # Index created from a categorical column meta.index = pd.CategoricalIndex( [], categories=partition.keys, name=meta.index.name ) elif partition.name in meta.columns: meta[partition.name] = pd.Series( pd.Categorical(categories=partition.keys, values=[]), index=meta.index, ) # Update `dataset_info` and return `meta` dataset_info["index"] = index dataset_info["index_cols"] = index_cols dataset_info["categories"] = categories return meta
Use parquet schema and hive-partition information (stored in dataset_info) to construct DataFrame metadata. This method is used by both arrow engines.
doc_len: 21, words: 379, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_dd_meta(cls, dataset_info): # Collect necessary information from dataset_info schema = dataset_info["schema"] index = dataset_info["index"] categories = dataset_info["categories"] partition_obj = dataset_info["partitions"] partitions = dataset_info["partition_names"] physical_column_names = dataset_info.get("physical_schema", schema).names columns = None # Set index and column names using # pandas metadata (when available) pandas_metadata = _get_pandas_metadata(schema) if pandas_metadata: ( index_names, column_names, storage_name_mapping, column_index_names, ) = _parse_pandas_metadata(pandas_metadata) if categories is None: categories = [] for col in pandas_metadata["columns"]: if (col["pandas_type"] == "categorical") and ( col["name"] not in categories ): categories.append(col["name"]) else: # No pandas metadata implies no index, unless selected by the user index_names = [] column_names = physical_column_names storage_name_mapping = {k: k for k in column_names} column_index_names = [None] if index is None and index_names: # Pandas metadata has provided the index name for us index = index_names # Ensure that there is no overlap between partition columns # and explicit column storage if partitions: _partitions = [p for p in partitions if p not in physical_column_names] if not _partitions: partitions = [] dataset_info["partitions"] = None dataset_info["partition_keys"] = {} dataset_info["partition_names"] = partitions elif len(_partitions) != len(partitions): raise ValueError( "No partition-columns should be written in the \n" "file unless they are ALL written in the file.\n" "physical columns: {} | partitions: {}".format( physical_column_names, partitions ) ) column_names, index_names = _normalize_index_columns( columns, column_names + partitions, index, index_names ) all_columns = index_names + column_names # Check that categories are included in columns if categories and not set(categories).intersection(all_columns): raise ValueError( "categories not in available columns.\n" "categories: {} | columns: {}".format(categories, list(all_columns)) ) dtypes = _get_pyarrow_dtypes(schema, categories) dtypes = {storage_name_mapping.get(k, k): v for k, v in dtypes.items()} index_cols = index or () meta = _meta_from_dtypes(all_columns, dtypes, index_cols, column_index_names) if categories: # Make sure all categories are set to "unknown". # Cannot include index names in the `cols` argument. meta = clear_known_categories( meta, cols=[c for c in categories if c not in meta.index.names] ) if partition_obj: for partition in partition_obj: if isinstance(index, list) and partition.name == index[0]: # Index from directory structure meta.index = pd.CategoricalIndex( [], categories=partition.keys, name=index[0] ) elif partition.name == meta.index.name: # Index created from a categorical column meta.index = pd.CategoricalIndex( [], categories=partition.keys, name=meta.index.name ) elif partition.name in meta.columns: meta[partition.name] = pd.Series( pd.Categorical(categories=partition.keys, values=[]), index=meta.index, ) # Update `dataset_info` and return `meta` dataset_info["index"] = index dataset_info["index_cols"] = index_cols dataset_info["categories"] = categories return meta ``` ###Assistant : Use parquet schema and hive-partition information (stored in dataset_info) to construct DataFrame metadata. This method is used by both arrow engines.
Row 2,329
def convert_type(self, value, schema_type, stringify_dict=True): if isinstance(value, datetime.datetime): iso_format_value = value.isoformat() if value.tzinfo is None: return iso_format_value return pendulum.parse(iso_format_value).float_timestamp if isinstance(value, datetime.date): return value.isoformat() if isinstance(value, datetime.time): formatted_time = time.strptime(str(value), "%H:%M:%S") time_delta = datetime.timedelta( hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec ) return str(time_delta) if stringify_dict and isinstance(value, dict): return json.dumps(value) if isinstance(value, Decimal): return float(value) return value
Takes a value from Postgres, and converts it to a value that's safe for JSON/Google Cloud Storage/BigQuery. Timezone aware Datetime are converted to UTC seconds. Unaware Datetime, Date and Time are converted to ISO formatted strings. Decimals are converted to floats. :param value: Postgres column value. :param schema_type: BigQuery data type. :param stringify_dict: Specify whether to convert dict to string.
doc_len: 60, words: 54, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def convert_type(self, value, schema_type, stringify_dict=True): if isinstance(value, datetime.datetime): iso_format_value = value.isoformat() if value.tzinfo is None: return iso_format_value return pendulum.parse(iso_format_value).float_timestamp if isinstance(value, datetime.date): return value.isoformat() if isinstance(value, datetime.time): formatted_time = time.strptime(str(value), "%H:%M:%S") time_delta = datetime.timedelta( hours=formatted_time.tm_hour, minutes=formatted_time.tm_min, seconds=formatted_time.tm_sec ) return str(time_delta) if stringify_dict and isinstance(value, dict): return json.dumps(value) if isinstance(value, Decimal): return float(value) return value ``` ###Assistant : Takes a value from Postgres, and converts it to a value that's safe for JSON/Google Cloud Storage/BigQuery. Timezone aware Datetime are converted to UTC seconds. Unaware Datetime, Date and Time are converted to ISO formatted strings. Decimals are converted to floats. :param value: Postgres column value. :param schema_type: BigQuery data type. :param stringify_dict: Specify whether to convert dict to string.
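Note: a standalone illustration of the conversion rules listed above (not the hook method itself, just the same value handling on hand-picked inputs):

```python
import datetime
import json
from decimal import Decimal

import pendulum

aware = datetime.datetime(2023, 1, 1, 12, 0, tzinfo=datetime.timezone.utc)
print(pendulum.parse(aware.isoformat()).float_timestamp)  # tz-aware datetime -> UTC seconds
print(datetime.date(2023, 1, 1).isoformat())              # date -> '2023-01-01'
print(json.dumps({"a": 1}))                               # dict -> '{"a": 1}'
print(float(Decimal("1.25")))                             # Decimal -> 1.25
```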
Row 2,330
def _filter_top_k(x, k): _, top_k_idx = tf.math.top_k(x, k, sorted=False) top_k_mask = tf.reduce_sum( tf.one_hot(top_k_idx, tf.shape(x)[-1], axis=-1), axis=-2 ) return x * top_k_mask + NEG_INF * (1 - top_k_mask)
Filters top-k values in the last dim of x and set the rest to NEG_INF. Used for computing top-k prediction values in dense labels (which has the same shape as predictions) for recall and precision top-k metrics. Args: x: tensor with any dimensions. k: the number of values to keep. Returns: tensor with same shape and dtype as x.
doc_len: 59, words: 27, lang: Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _filter_top_k(x, k): _, top_k_idx = tf.math.top_k(x, k, sorted=False) top_k_mask = tf.reduce_sum( tf.one_hot(top_k_idx, tf.shape(x)[-1], axis=-1), axis=-2 ) return x * top_k_mask + NEG_INF * (1 - top_k_mask) ``` ###Assistant : Filters top-k values in the last dim of x and set the rest to NEG_INF. Used for computing top-k prediction values in dense labels (which has the same shape as predictions) for recall and precision top-k metrics. Args: x: tensor with any dimensions. k: the number of values to keep. Returns: tensor with same shape and dtype as x.
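Note: a standalone sketch of the masking trick described above; NEG_INF here is a stand-in for the module-level constant used by the original helper:

```python
import tensorflow as tf

NEG_INF = -1e10  # assumption: stand-in for the original module constant

x = tf.constant([[0.1, 0.9, 0.4, 0.7]])
_, top_k_idx = tf.math.top_k(x, k=2, sorted=False)
top_k_mask = tf.reduce_sum(tf.one_hot(top_k_idx, tf.shape(x)[-1], axis=-1), axis=-2)
print(x * top_k_mask + NEG_INF * (1 - top_k_mask))
# -> [[-1e10, 0.9, -1e10, 0.7]]: only the top-2 entries keep their values
```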
Row 2,331
def test_pick_two_individuals_eligible_for_crossover_bad(): ind1 = creator.Individual.from_string( 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)', tpot_obj._pset ) ind2 = creator.Individual.from_string( 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)', tpot_obj._pset ) ind3 = creator.Individual.from_string( 'GaussianNB(input_matrix)', tpot_obj._pset ) # Ind1 and ind2 are not a pair because they are the same, ind3 shares no primitive pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind2, ind3]) assert pick1 is None and pick2 is None # You can not do crossover with a population of only 1. pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1]) assert pick1 is None and pick2 is None # You can not do crossover with a population of 0. pick1, pick2 = pick_two_individuals_eligible_for_crossover([]) assert pick1 is None and pick2 is None
Assert that pick_two_individuals_eligible_for_crossover() returns the right output when no pair is eligible
12
102
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_pick_two_individuals_eligible_for_crossover_bad(): ind1 = creator.Individual.from_string( 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)', tpot_obj._pset ) ind2 = creator.Individual.from_string( 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)', tpot_obj._pset ) ind3 = creator.Individual.from_string( 'GaussianNB(input_matrix)', tpot_obj._pset ) # Ind1 and ind2 are not a pair because they are the same, ind3 shares no primitive pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind2, ind3]) assert pick1 is None and pick2 is None # You can not do crossover with a population of only 1. pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1]) assert pick1 is None and pick2 is None # You can not do crossover with a population of 0. pick1, pick2 = pick_two_individuals_eligible_for_crossover([]) assert pick1 is None and pick2 is None ``` ###Assistant : Assert that pick_two_individuals_eligible_for_crossover() returns the right output when no pair is eligible
2,332
def get_ips(v6=False): # type: (bool) -> Dict[NetworkInterface, List[str]] res = {} for iface in six.itervalues(conf.ifaces): if v6: res[iface] = iface.ips[6] else: res[iface] = iface.ips[4] return res
Returns all available IPs matching to interfaces, using the windows system. Should only be used as a WinPcapy fallback. :param v6: IPv6 addresses
23
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_ips(v6=False): # type: (bool) -> Dict[NetworkInterface, List[str]] res = {} for iface in six.itervalues(conf.ifaces): if v6: res[iface] = iface.ips[6] else: res[iface] = iface.ips[4] return res ``` ###Assistant : Returns all available IPs matching to interfaces, using the windows system. Should only be used as a WinPcapy fallback. :param v6: IPv6 addresses
2,333
def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (OPTDecoder)): module.gradient_checkpointing = value OPT_GENERATION_EXAMPLE = r OPT_INPUTS_DOCSTRING = r
Generation example: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> model = OPTForCausalLM.from_pretrained("ArthurZ/opt-350m") >>> tokenizer = GPT2Tokenizer.from_pretrained("patrickvonplaten/opt_gpt2_tokenizer") >>> TEXTS_TO_GENERATE = "Hey, are you consciours? Can you talk to me?" "Hi there, my name is Barack" >>> inputs = tokenizer([TEXTS_TO_GENERATE], max_length=1024, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs["input_ids"], num_beams=2, min_length=0, max_length=20) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] 'I'm not conscious.<\s>' ``` Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`GPT2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_inputs`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
470
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (OPTDecoder)): module.gradient_checkpointing = value OPT_GENERATION_EXAMPLE = r OPT_INPUTS_DOCSTRING = r ``` ###Assistant : Generation example: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> model = OPTForCausalLM.from_pretrained("ArthurZ/opt-350m") >>> tokenizer = GPT2Tokenizer.from_pretrained("patrickvonplaten/opt_gpt2_tokenizer") >>> TEXTS_TO_GENERATE = "Hey, are you consciours? Can you talk to me?" "Hi there, my name is Barack" >>> inputs = tokenizer([TEXTS_TO_GENERATE], max_length=1024, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs["input_ids"], num_beams=2, min_length=0, max_length=20) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] 'I'm not conscious.<\s>' ``` Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`GPT2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_inputs`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
2,334
def to(self, device=None, dtype=None) -> None: r # .to() on the tensors handles None correctly self.shadow_params = [ p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device) for p in self.shadow_params ]
Move internal buffers of the ExponentialMovingAverage to `device`. Args: device: like `device` argument to `torch.Tensor.to`
15
29
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to(self, device=None, dtype=None) -> None: r # .to() on the tensors handles None correctly self.shadow_params = [ p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device) for p in self.shadow_params ] ``` ###Assistant : Move internal buffers of the ExponentialMovingAverage to `device`. Args: device: like `device` argument to `torch.Tensor.to`
2,335
def get_granger_causality(dependent_series, independent_series, lags): granger_set = pd.concat([dependent_series, independent_series], axis=1) granger = grangercausalitytests(granger_set, [lags], verbose=False) return granger
Calculate granger tests Parameters ---------- dependent_series: Series The series you want to test Granger Causality for. independent_series: Series The series that you want to test whether it Granger-causes time_series_y lags : int The amount of lags for the Granger test. By default, this is set to 3.
47
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_granger_causality(dependent_series, independent_series, lags): granger_set = pd.concat([dependent_series, independent_series], axis=1) granger = grangercausalitytests(granger_set, [lags], verbose=False) return granger ``` ###Assistant : Calculate granger tests Parameters ---------- dependent_series: Series The series you want to test Granger Causality for. independent_series: Series The series that you want to test whether it Granger-causes time_series_y lags : int The amount of lags for the Granger test. By default, this is set to 3.
2,336
def _multi_decorate(decorators, method): if hasattr(decorators, "__iter__"): # Apply a list/tuple of decorators if 'decorators' is one. Decorator # functions are applied so that the call order is the same as the # order in which they appear in the iterable. decorators = decorators[::-1] else: decorators = [decorators]
Decorate `method` with one or more function decorators. `decorators` can be a single decorator or an iterable of decorators.
19
47
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _multi_decorate(decorators, method): if hasattr(decorators, "__iter__"): # Apply a list/tuple of decorators if 'decorators' is one. Decorator # functions are applied so that the call order is the same as the # order in which they appear in the iterable. decorators = decorators[::-1] else: decorators = [decorators] ``` ###Assistant : Decorate `method` with one or more function decorators. `decorators` can be a single decorator or an iterable of decorators.
2,337
def generate_level_targets(self, img_size, text_polys, ignore_polys): h, w = img_size lv_size_divs = self.level_size_divisors lv_proportion_range = self.level_proportion_range lv_text_polys = [[] for i in range(len(lv_size_divs))] lv_ignore_polys = [[] for i in range(len(lv_size_divs))] level_maps = [] for poly in text_polys: # assert len(poly) == 1 # text_instance = [[poly[i], poly[i + 1]] # for i in range(0, len(poly), 2)] polygon = np.array(poly, dtype=np.int).reshape((1, -1, 2)) _, _, box_w, box_h = cv2.boundingRect(polygon) proportion = max(box_h, box_w) / (h + 1e-8) for ind, proportion_range in enumerate(lv_proportion_range): if proportion_range[0] < proportion < proportion_range[1]: lv_text_polys[ind].append(poly / lv_size_divs[ind]) for ignore_poly in ignore_polys: # assert len(ignore_poly) == 1 # text_instance = [[ignore_poly[i], ignore_poly[i + 1]] # for i in range(0, len(ignore_poly), 2)] polygon = np.array(ignore_poly, dtype=np.int).reshape((1, -1, 2)) _, _, box_w, box_h = cv2.boundingRect(polygon) proportion = max(box_h, box_w) / (h + 1e-8) for ind, proportion_range in enumerate(lv_proportion_range): if proportion_range[0] < proportion < proportion_range[1]: lv_ignore_polys[ind].append(ignore_poly / lv_size_divs[ind]) for ind, size_divisor in enumerate(lv_size_divs): current_level_maps = [] level_img_size = (h // size_divisor, w // size_divisor) text_region = self.generate_text_region_mask( level_img_size, lv_text_polys[ind])[None] current_level_maps.append(text_region) center_region = self.generate_center_region_mask( level_img_size, lv_text_polys[ind])[None] current_level_maps.append(center_region) effective_mask = self.generate_effective_mask( level_img_size, lv_ignore_polys[ind])[None] current_level_maps.append(effective_mask) fourier_real_map, fourier_image_maps = self.generate_fourier_maps( level_img_size, lv_text_polys[ind]) current_level_maps.append(fourier_real_map) current_level_maps.append(fourier_image_maps) level_maps.append(np.concatenate(current_level_maps)) return level_maps
Generate ground truth target on each level. Args: img_size (list[int]): Shape of input image. text_polys (list[list[ndarray]]): A list of ground truth polygons. ignore_polys (list[list[ndarray]]): A list of ignored polygons. Returns: level_maps (list(ndarray)): A list of ground target on each level.
40
191
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def generate_level_targets(self, img_size, text_polys, ignore_polys): h, w = img_size lv_size_divs = self.level_size_divisors lv_proportion_range = self.level_proportion_range lv_text_polys = [[] for i in range(len(lv_size_divs))] lv_ignore_polys = [[] for i in range(len(lv_size_divs))] level_maps = [] for poly in text_polys: # assert len(poly) == 1 # text_instance = [[poly[i], poly[i + 1]] # for i in range(0, len(poly), 2)] polygon = np.array(poly, dtype=np.int).reshape((1, -1, 2)) _, _, box_w, box_h = cv2.boundingRect(polygon) proportion = max(box_h, box_w) / (h + 1e-8) for ind, proportion_range in enumerate(lv_proportion_range): if proportion_range[0] < proportion < proportion_range[1]: lv_text_polys[ind].append(poly / lv_size_divs[ind]) for ignore_poly in ignore_polys: # assert len(ignore_poly) == 1 # text_instance = [[ignore_poly[i], ignore_poly[i + 1]] # for i in range(0, len(ignore_poly), 2)] polygon = np.array(ignore_poly, dtype=np.int).reshape((1, -1, 2)) _, _, box_w, box_h = cv2.boundingRect(polygon) proportion = max(box_h, box_w) / (h + 1e-8) for ind, proportion_range in enumerate(lv_proportion_range): if proportion_range[0] < proportion < proportion_range[1]: lv_ignore_polys[ind].append(ignore_poly / lv_size_divs[ind]) for ind, size_divisor in enumerate(lv_size_divs): current_level_maps = [] level_img_size = (h // size_divisor, w // size_divisor) text_region = self.generate_text_region_mask( level_img_size, lv_text_polys[ind])[None] current_level_maps.append(text_region) center_region = self.generate_center_region_mask( level_img_size, lv_text_polys[ind])[None] current_level_maps.append(center_region) effective_mask = self.generate_effective_mask( level_img_size, lv_ignore_polys[ind])[None] current_level_maps.append(effective_mask) fourier_real_map, fourier_image_maps = self.generate_fourier_maps( level_img_size, lv_text_polys[ind]) current_level_maps.append(fourier_real_map) current_level_maps.append(fourier_image_maps) level_maps.append(np.concatenate(current_level_maps)) return level_maps ``` ###Assistant : Generate ground truth target on each level. Args: img_size (list[int]): Shape of input image. text_polys (list[list[ndarray]]): A list of ground truth polygons. ignore_polys (list[list[ndarray]]): A list of ignored polygons. Returns: level_maps (list(ndarray)): A list of ground target on each level.
2,338
def get_reserved_qty(item_code, warehouse): reserved_qty = frappe.db.sql( , (item_code, warehouse, item_code, warehouse), ) return flt(reserved_qty[0][0]) if reserved_qty else 0
select sum(dnpi_qty * ((so_item_qty - so_item_delivered_qty) / so_item_qty)) from ( (select qty as dnpi_qty, ( select qty from `tabSales Order Item` where name = dnpi.parent_detail_docname and (delivered_by_supplier is null or delivered_by_supplier = 0) ) as so_item_qty, ( select delivered_qty from `tabSales Order Item` where name = dnpi.parent_detail_docname and delivered_by_supplier = 0 ) as so_item_delivered_qty, parent, name from ( select qty, parent_detail_docname, parent, name from `tabPacked Item` dnpi_in where item_code = %s and warehouse = %s and parenttype="Sales Order" and item_code != parent_item and exists (select * from `tabSales Order` so where name = dnpi_in.parent and docstatus = 1 and status != 'Closed') ) dnpi) union (select stock_qty as dnpi_qty, qty as so_item_qty, delivered_qty as so_item_delivered_qty, parent, name from `tabSales Order Item` so_item where item_code = %s and warehouse = %s and (so_item.delivered_by_supplier is null or so_item.delivered_by_supplier = 0) and exists(select * from `tabSales Order` so where so.name = so_item.parent and so.docstatus = 1 and so.status != 'Closed')) ) tab where so_item_qty >= so_item_delivered_qty
163
18
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_reserved_qty(item_code, warehouse): reserved_qty = frappe.db.sql( , (item_code, warehouse, item_code, warehouse), ) return flt(reserved_qty[0][0]) if reserved_qty else 0 ``` ###Assistant : select sum(dnpi_qty * ((so_item_qty - so_item_delivered_qty) / so_item_qty)) from ( (select qty as dnpi_qty, ( select qty from `tabSales Order Item` where name = dnpi.parent_detail_docname and (delivered_by_supplier is null or delivered_by_supplier = 0) ) as so_item_qty, ( select delivered_qty from `tabSales Order Item` where name = dnpi.parent_detail_docname and delivered_by_supplier = 0 ) as so_item_delivered_qty, parent, name from ( select qty, parent_detail_docname, parent, name from `tabPacked Item` dnpi_in where item_code = %s and warehouse = %s and parenttype="Sales Order" and item_code != parent_item and exists (select * from `tabSales Order` so where name = dnpi_in.parent and docstatus = 1 and status != 'Closed') ) dnpi) union (select stock_qty as dnpi_qty, qty as so_item_qty, delivered_qty as so_item_delivered_qty, parent, name from `tabSales Order Item` so_item where item_code = %s and warehouse = %s and (so_item.delivered_by_supplier is null or so_item.delivered_by_supplier = 0) and exists(select * from `tabSales Order` so where so.name = so_item.parent and so.docstatus = 1 and so.status != 'Closed')) ) tab where so_item_qty >= so_item_delivered_qty
2,339
def cool(): set_cmap('cool') # Autogenerated by boilerplate.py. Do not edit as changes will be lost.
Set the colormap to 'cool'. This changes the default colormap as well as the colormap of the current image if there is one. See ``help(colormaps)`` for more information.
28
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cool(): set_cmap('cool') # Autogenerated by boilerplate.py. Do not edit as changes will be lost. ``` ###Assistant : Set the colormap to 'cool'. This changes the default colormap as well as the colormap of the current image if there is one. See ``help(colormaps)`` for more information.
2,340
def add_parent(self, parent): # type: (Packet) -> None self.parent = parent
Set packet parent. When packet is an element in PacketListField, parent field would point to the list owner packet.
19
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def add_parent(self, parent): # type: (Packet) -> None self.parent = parent ``` ###Assistant : Set packet parent. When packet is an element in PacketListField, parent field would point to the list owner packet.
2,341
def make_tempfile(name): open(name, 'w', encoding='utf-8').close() try: yield finally: os.unlink(name)
Create an empty, named, temporary file for the duration of the context.
12
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def make_tempfile(name): open(name, 'w', encoding='utf-8').close() try: yield finally: os.unlink(name) ``` ###Assistant : Create an empty, named, temporary file for the duration of the context.
2,342
def update_cached_response(self, request, response): cache_url = self.cache_url(request.url) cached_response = self.serializer.loads(request, self.cache.get(cache_url)) if not cached_response: # we didn't have a cached response return response # Lets update our headers with the headers from the new request: # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 # # The server isn't supposed to send headers that would make # the cached body invalid. But... just in case, we'll be sure # to strip out ones we know that might be problmatic due to # typical assumptions. excluded_headers = ["content-length"] cached_response.headers.update( dict( (k, v) for k, v in response.headers.items() if k.lower() not in excluded_headers ) ) # we want a 200 b/c we have content via the cache cached_response.status = 200 # update our cache self._cache_set(cache_url, request, cached_response) return cached_response
On a 304 we will get a new set of headers that we want to update our cached value with, assuming we have one. This should only ever be called when we've sent an ETag and gotten a 304 as the response.
42
120
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_cached_response(self, request, response): cache_url = self.cache_url(request.url) cached_response = self.serializer.loads(request, self.cache.get(cache_url)) if not cached_response: # we didn't have a cached response return response # Lets update our headers with the headers from the new request: # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 # # The server isn't supposed to send headers that would make # the cached body invalid. But... just in case, we'll be sure # to strip out ones we know that might be problmatic due to # typical assumptions. excluded_headers = ["content-length"] cached_response.headers.update( dict( (k, v) for k, v in response.headers.items() if k.lower() not in excluded_headers ) ) # we want a 200 b/c we have content via the cache cached_response.status = 200 # update our cache self._cache_set(cache_url, request, cached_response) return cached_response ``` ###Assistant : On a 304 we will get a new set of headers that we want to update our cached value with, assuming we have one. This should only ever be called when we've sent an ETag and gotten a 304 as the response.
2,343
def binary_xloss(logits, labels, ignore=None): logits, labels = flatten_binary_scores(logits, labels, ignore) loss = StableBCELoss()(logits, Variable(labels.float())) return loss # --------------------------- MULTICLASS LOSSES ---------------------------
Binary Cross entropy loss logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) ignore: void class id
33
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def binary_xloss(logits, labels, ignore=None): logits, labels = flatten_binary_scores(logits, labels, ignore) loss = StableBCELoss()(logits, Variable(labels.float())) return loss # --------------------------- MULTICLASS LOSSES --------------------------- ``` ###Assistant : Binary Cross entropy loss logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) ignore: void class id
2,344
def generate_navigator_js(os=None, navigator=None, platform=None, device_type=None): config = generate_navigator( os=os, navigator=navigator, platform=platform, device_type=device_type ) return { "appCodeName": config["app_code_name"], "appName": config["app_name"], "appVersion": config["app_version"], "platform": config["platform"], "userAgent": config["user_agent"], "oscpu": config["oscpu"], "product": config["product"], "productSub": config["product_sub"], "vendor": config["vendor"], "vendorSub": config["vendor_sub"], "buildID": config["build_id"], }
Generates web navigator's config with keys corresponding to keys of `windows.navigator` JavaScript object. :param os: limit list of oses for generation :type os: string or list/tuple or None :param navigator: limit list of browser engines for generation :type navigator: string or list/tuple or None :param device_type: limit possible oses by device type :type device_type: list/tuple or None, possible values: "desktop", "smartphone", "tablet", "all" :return: User-Agent config :rtype: dict with keys (TODO) :raises InvalidOption: if could not generate user-agent for any combination of allowed oses and navigators :raise InvalidOption: if any of passed options is invalid
95
38
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def generate_navigator_js(os=None, navigator=None, platform=None, device_type=None): config = generate_navigator( os=os, navigator=navigator, platform=platform, device_type=device_type ) return { "appCodeName": config["app_code_name"], "appName": config["app_name"], "appVersion": config["app_version"], "platform": config["platform"], "userAgent": config["user_agent"], "oscpu": config["oscpu"], "product": config["product"], "productSub": config["product_sub"], "vendor": config["vendor"], "vendorSub": config["vendor_sub"], "buildID": config["build_id"], } ``` ###Assistant : Generates web navigator's config with keys corresponding to keys of `windows.navigator` JavaScript object. :param os: limit list of oses for generation :type os: string or list/tuple or None :param navigator: limit list of browser engines for generation :type navigator: string or list/tuple or None :param device_type: limit possible oses by device type :type device_type: list/tuple or None, possible values: "desktop", "smartphone", "tablet", "all" :return: User-Agent config :rtype: dict with keys (TODO) :raises InvalidOption: if could not generate user-agent for any combination of allowed oses and navigators :raise InvalidOption: if any of passed options is invalid
2,345
def printable_text(text): # These functions want `str` for both Python2 and Python3, but in one case # it's a Unicode string and in the other it's a byte string. if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text elif isinstance(text, unicode): return text.encode("utf-8") else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?")
Returns text encoded in a way suitable for print or `tf.logging`.
11
79
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def printable_text(text): # These functions want `str` for both Python2 and Python3, but in one case # it's a Unicode string and in the other it's a byte string. if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text elif isinstance(text, unicode): return text.encode("utf-8") else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") ``` ###Assistant : Returns text encoded in a way suitable for print or `tf.logging`.
2,346
def is_tradesignal(self, action): # trade signal return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or (action == Actions.Short.value and self._position == Positions.Short) or (action == Actions.Long.value and self._position == Positions.Long))
Not a trade signal when: Action: Neutral, position: Neutral -> Nothing Action: Long, position: Long -> Hold Long Action: Short, position: Short -> Hold Short
25
31
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_tradesignal(self, action): # trade signal return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or (action == Actions.Short.value and self._position == Positions.Short) or (action == Actions.Long.value and self._position == Positions.Long)) ``` ###Assistant : Not a trade signal when: Action: Neutral, position: Neutral -> Nothing Action: Long, position: Long -> Hold Long Action: Short, position: Short -> Hold Short
2,347
def log_message(self, format, *args): sys.stderr.write("%s - - [%s] %s\n" % (self.address_string(), self.log_date_time_string(), format%args))
Log an arbitrary message. This is used by all other logging functions. Override it if you have specific logging wishes. The first argument, FORMAT, is a format string for the message to be logged. If the format string contains any % escapes requiring parameters, they should be specified as subsequent arguments (it's just like printf!). The client ip and current date/time are prefixed to every message.
66
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def log_message(self, format, *args): sys.stderr.write("%s - - [%s] %s\n" % (self.address_string(), self.log_date_time_string(), format%args)) ``` ###Assistant : Log an arbitrary message. This is used by all other logging functions. Override it if you have specific logging wishes. The first argument, FORMAT, is a format string for the message to be logged. If the format string contains any % escapes requiring parameters, they should be specified as subsequent arguments (it's just like printf!). The client ip and current date/time are prefixed to every message.
2,348
def warning_advice(self, *args, **kwargs): no_advisory_warnings = os.getenv("DIFFUSERS_NO_ADVISORY_WARNINGS", False) if no_advisory_warnings: return self.warning(*args, **kwargs) logging.Logger.warning_advice = warning_advice
This method is identical to `logger.warning()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this warning will not be printed
19
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def warning_advice(self, *args, **kwargs): no_advisory_warnings = os.getenv("DIFFUSERS_NO_ADVISORY_WARNINGS", False) if no_advisory_warnings: return self.warning(*args, **kwargs) logging.Logger.warning_advice = warning_advice ``` ###Assistant : This method is identical to `logger.warning()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this warning will not be printed
2,349
def zip_row_op(self, i, k, f): for j in range(self.cols): self[i, j] = f(self[i, j], self[k, j])
In-place operation on row ``i`` using two-arg functor whose args are interpreted as ``(self[i, j], self[k, j])``. Examples ======== >>> from sympy import eye >>> M = eye(3) >>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M Matrix([ [1, 0, 0], [2, 1, 0], [0, 0, 1]]) See Also ======== row row_op col_op
54
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def zip_row_op(self, i, k, f): for j in range(self.cols): self[i, j] = f(self[i, j], self[k, j]) ``` ###Assistant : In-place operation on row ``i`` using two-arg functor whose args are interpreted as ``(self[i, j], self[k, j])``. Examples ======== >>> from sympy import eye >>> M = eye(3) >>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M Matrix([ [1, 0, 0], [2, 1, 0], [0, 0, 1]]) See Also ======== row row_op col_op
2,350
def legendre_poly(n, x=None, polys=False): r return named_poly(n, dup_legendre, QQ, "Legendre polynomial", (x,), polys)
Generates the Legendre polynomial `P_n(x)`. Parameters ========== n : int Degree of the polynomial. x : optional polys : bool, optional If True, return a Poly, otherwise (default) return an expression.
31
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def legendre_poly(n, x=None, polys=False): r return named_poly(n, dup_legendre, QQ, "Legendre polynomial", (x,), polys) ``` ###Assistant : Generates the Legendre polynomial `P_n(x)`. Parameters ========== n : int Degree of the polynomial. x : optional polys : bool, optional If True, return a Poly, otherwise (default) return an expression.
2,351
def _concat_short_text_reuslts(self, input_texts, results): long_text_lens = [len(text) for text in input_texts] concat_results = [] single_results = {} count = 0 for text in input_texts: text_len = len(text) while True: if len(single_results) == 0 or len(single_results[ "text"]) < text_len: if len(single_results) == 0: single_results = copy.deepcopy(results[count]) else: single_results["text"] += results[count]["text"] single_results["items"].extend(results[count]["items"]) count += 1 elif len(single_results["text"]) == text_len: concat_results.append(single_results) single_results = {} break else: raise Exception( "The length of input text and raw text is not equal.") for result in concat_results: pred_words = result['items'] pred_words = self._reset_offset(pred_words) result['items'] = pred_words return concat_results
Concat the model output of short texts to the total result of long text.
14
91
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _concat_short_text_reuslts(self, input_texts, results): long_text_lens = [len(text) for text in input_texts] concat_results = [] single_results = {} count = 0 for text in input_texts: text_len = len(text) while True: if len(single_results) == 0 or len(single_results[ "text"]) < text_len: if len(single_results) == 0: single_results = copy.deepcopy(results[count]) else: single_results["text"] += results[count]["text"] single_results["items"].extend(results[count]["items"]) count += 1 elif len(single_results["text"]) == text_len: concat_results.append(single_results) single_results = {} break else: raise Exception( "The length of input text and raw text is not equal.") for result in concat_results: pred_words = result['items'] pred_words = self._reset_offset(pred_words) result['items'] = pred_words return concat_results ``` ###Assistant : Concat the model output of short texts to the total result of long text.
2,352
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): origin_shape = paddle.floor(im_shape / scale_factor + 0.5) origin_shape_list = [] scale_factor_list = [] # scale_factor: scale_y, scale_x for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:i + 1, :], [bbox_num[i], 2]) scale_y, scale_x = scale_factor[i][0], scale_factor[i][1] scale = paddle.concat([ scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x, scale_y ]) expand_scale = paddle.expand(scale, [bbox_num[i], 8]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) # bboxes: [N, 10], label, score, bbox pred_label_score = bboxes[:, 0:2] pred_bbox = bboxes[:, 2:] # rescale bbox to original image pred_bbox = pred_bbox.reshape([-1, 8]) scaled_bbox = pred_bbox / scale_factor_list origin_h = origin_shape_list[:, 0] origin_w = origin_shape_list[:, 1] bboxes = scaled_bbox zeros = paddle.zeros_like(origin_h) x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros) y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros) x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros) y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros) x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros) y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros) x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros) y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1) pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1) return pred_result
Rescale, clip and filter the bbox from the output of NMS to get final prediction. Args: bboxes(Tensor): bboxes [N, 10] bbox_num(Tensor): bbox_num im_shape(Tensor): [1 2] scale_factor(Tensor): [1 2] Returns: bbox_pred(Tensor): The output is the prediction with shape [N, 8] including labels, scores and bboxes. The size of bboxes are corresponding to the original image.
54
191
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): origin_shape = paddle.floor(im_shape / scale_factor + 0.5) origin_shape_list = [] scale_factor_list = [] # scale_factor: scale_y, scale_x for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:i + 1, :], [bbox_num[i], 2]) scale_y, scale_x = scale_factor[i][0], scale_factor[i][1] scale = paddle.concat([ scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x, scale_y ]) expand_scale = paddle.expand(scale, [bbox_num[i], 8]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) # bboxes: [N, 10], label, score, bbox pred_label_score = bboxes[:, 0:2] pred_bbox = bboxes[:, 2:] # rescale bbox to original image pred_bbox = pred_bbox.reshape([-1, 8]) scaled_bbox = pred_bbox / scale_factor_list origin_h = origin_shape_list[:, 0] origin_w = origin_shape_list[:, 1] bboxes = scaled_bbox zeros = paddle.zeros_like(origin_h) x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros) y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros) x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros) y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros) x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros) y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros) x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros) y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1) pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1) return pred_result ``` ###Assistant : Rescale, clip and filter the bbox from the output of NMS to get final prediction. Args: bboxes(Tensor): bboxes [N, 10] bbox_num(Tensor): bbox_num im_shape(Tensor): [1 2] scale_factor(Tensor): [1 2] Returns: bbox_pred(Tensor): The output is the prediction with shape [N, 8] including labels, scores and bboxes. The size of bboxes are corresponding to the original image.
2,353
def unset_existing_data(company): linked = frappe.db.sql( , as_dict=True, ) # remove accounts data from company update_values = {d.fieldname: "" for d in linked} frappe.db.set_value("Company", company, update_values, update_values) # remove accounts data from various doctypes for doctype in [ "Account", "Party Account", "Mode of Payment Account", "Tax Withholding Account", "Sales Taxes and Charges Template", "Purchase Taxes and Charges Template", ]: frappe.db.sql( .format(doctype) % (company) # nosec )
select fieldname from tabDocField where fieldtype="Link" and options="Account" and parent="Company"delete from `tab{0}` where `company`="%s"
14
65
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def unset_existing_data(company): linked = frappe.db.sql( , as_dict=True, ) # remove accounts data from company update_values = {d.fieldname: "" for d in linked} frappe.db.set_value("Company", company, update_values, update_values) # remove accounts data from various doctypes for doctype in [ "Account", "Party Account", "Mode of Payment Account", "Tax Withholding Account", "Sales Taxes and Charges Template", "Purchase Taxes and Charges Template", ]: frappe.db.sql( .format(doctype) % (company) # nosec ) ``` ###Assistant : select fieldname from tabDocField where fieldtype="Link" and options="Account" and parent="Company"delete from `tab{0}` where `company`="%s"
2,354
def simple_test(self, feats, batch_img_metas, rescale=False): outs = self.forward(feats) results_list = self.get_results( *outs, batch_img_metas=batch_img_metas, rescale=rescale) return results_list
Test function without test-time augmentation. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2).
91
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def simple_test(self, feats, batch_img_metas, rescale=False): outs = self.forward(feats) results_list = self.get_results( *outs, batch_img_metas=batch_img_metas, rescale=rescale) return results_list ``` ###Assistant : Test function without test-time augmentation. Args: feats (tuple[torch.Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2).
2,355
def compare_pt_tf_models(pt_model, pt_input, tf_model, tf_input): pt_outputs = pt_model(**pt_input, output_hidden_states=True) tf_outputs = tf_model(**tf_input, output_hidden_states=True) # 1. All output attributes must be the same pt_out_attrs = set(pt_outputs.keys()) tf_out_attrs = set(tf_outputs.keys()) if pt_out_attrs != tf_out_attrs: raise ValueError( f"The model outputs have different attributes, aborting. (Pytorch: {pt_out_attrs}, TensorFlow:" f" {tf_out_attrs})" ) # 2. For each output attribute, ALL values must be the same
Compares the TensorFlow and PyTorch models, given their inputs, returning a tuple with the maximum observed difference and its source.
20
59
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def compare_pt_tf_models(pt_model, pt_input, tf_model, tf_input): pt_outputs = pt_model(**pt_input, output_hidden_states=True) tf_outputs = tf_model(**tf_input, output_hidden_states=True) # 1. All output attributes must be the same pt_out_attrs = set(pt_outputs.keys()) tf_out_attrs = set(tf_outputs.keys()) if pt_out_attrs != tf_out_attrs: raise ValueError( f"The model outputs have different attributes, aborting. (Pytorch: {pt_out_attrs}, TensorFlow:" f" {tf_out_attrs})" ) # 2. For each output attribute, ALL values must be the same ``` ###Assistant : Compares the TensorFlow and PyTorch models, given their inputs, returning a tuple with the maximum observed difference and its source.
2,356
def _get_all_parser_float_precision_combinations(): params = [] ids = [] for parser, parser_id in zip(_all_parsers, _all_parser_ids): if hasattr(parser, "values"): # Wrapped in pytest.param, get the actual parser back parser = parser.values[0] for precision in parser.float_precision_choices: # Re-wrap in pytest.param for pyarrow mark = pytest.mark.single_cpu if parser.engine == "pyarrow" else () param = pytest.param((parser(), precision), marks=mark) params.append(param) ids.append(f"{parser_id}-{precision}") return {"params": params, "ids": ids} @pytest.fixture( params=_get_all_parser_float_precision_combinations()["params"], ids=_get_all_parser_float_precision_combinations()["ids"], )
Return all allowable parser and float precision combinations and corresponding ids.
11
64
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_all_parser_float_precision_combinations(): params = [] ids = [] for parser, parser_id in zip(_all_parsers, _all_parser_ids): if hasattr(parser, "values"): # Wrapped in pytest.param, get the actual parser back parser = parser.values[0] for precision in parser.float_precision_choices: # Re-wrap in pytest.param for pyarrow mark = pytest.mark.single_cpu if parser.engine == "pyarrow" else () param = pytest.param((parser(), precision), marks=mark) params.append(param) ids.append(f"{parser_id}-{precision}") return {"params": params, "ids": ids} @pytest.fixture( params=_get_all_parser_float_precision_combinations()["params"], ids=_get_all_parser_float_precision_combinations()["ids"], ) ``` ###Assistant : Return all allowable parser and float precision combinations and corresponding ids.
2,357
def test_egg3(self): egg_name = "%s/omelet.egg" % self.egg_dir with extend_sys_path(egg_name): with self.settings(INSTALLED_APPS=["omelet.app_with_models"]): models_module = apps.get_app_config("app_with_models").models_module self.assertIsNotNone(models_module) del apps.all_models["app_with_models"]
Models module can be loaded from an app located under an egg's top-level package
14
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_egg3(self): egg_name = "%s/omelet.egg" % self.egg_dir with extend_sys_path(egg_name): with self.settings(INSTALLED_APPS=["omelet.app_with_models"]): models_module = apps.get_app_config("app_with_models").models_module self.assertIsNotNone(models_module) del apps.all_models["app_with_models"] ``` ###Assistant : Models module can be loaded from an app located under an egg's top-level package
2,358
def test_auditing_case_names(lgpo, setting_name, setting, enable_legacy_auditing): lgpo.set_computer_policy(setting_name, setting) result = lgpo.get_policy(setting_name, "machine") assert result == setting @pytest.mark.parametrize("setting", ["Enabled", "Disabled"])
Helper function to set an audit setting and assert that it was successful
13
18
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_auditing_case_names(lgpo, setting_name, setting, enable_legacy_auditing): lgpo.set_computer_policy(setting_name, setting) result = lgpo.get_policy(setting_name, "machine") assert result == setting @pytest.mark.parametrize("setting", ["Enabled", "Disabled"]) ``` ###Assistant : Helper function to set an audit setting and assert that it was successful
2,359
def test_get(self): # Generate signature signature = generate_signature(self.image.id, "fill-800x600") # Get the image response = self.client.get( reverse( "wagtailimages_serve", args=(signature, self.image.id, "fill-800x600") ) ) # Check response self.assertEqual(response.status_code, 200) self.assertTrue(response.streaming) self.assertEqual(response["Content-Type"], "image/png")
Test a valid GET request to the view
8
31
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get(self): # Generate signature signature = generate_signature(self.image.id, "fill-800x600") # Get the image response = self.client.get( reverse( "wagtailimages_serve", args=(signature, self.image.id, "fill-800x600") ) ) # Check response self.assertEqual(response.status_code, 200) self.assertTrue(response.streaming) self.assertEqual(response["Content-Type"], "image/png") ``` ###Assistant : Test a valid GET request to the view
2,360
def _object2proto(self) -> Slice_PB: slice_pb = Slice_PB() if self.start: slice_pb.start = self.start slice_pb.has_start = True if self.stop: slice_pb.stop = self.stop slice_pb.has_stop = True if self.step: slice_pb.step = self.step slice_pb.has_step = True slice_pb.id.CopyFrom(serialize(obj=self._id)) return slice_pb
Serialize the Slice object instance returning a protobuf. Returns: Slice_PB: returns a protobuf object class representing this Slice object.
19
34
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _object2proto(self) -> Slice_PB: slice_pb = Slice_PB() if self.start: slice_pb.start = self.start slice_pb.has_start = True if self.stop: slice_pb.stop = self.stop slice_pb.has_stop = True if self.step: slice_pb.step = self.step slice_pb.has_step = True slice_pb.id.CopyFrom(serialize(obj=self._id)) return slice_pb ``` ###Assistant : Serialize the Slice object instance returning a protobuf. Returns: Slice_PB: returns a protobuf object class representing this Slice object.
2,361
def __call__(self) -> List[Tuple[int, int]]: logger.info("Sorting face distances. Depending on your dataset this may take some time...") if self._threshold: self._threshold = self._result_linkage[:, 2].max() * self._threshold result_order = self._seriation(self._result_linkage, self._num_predictions, self._num_predictions + self._num_predictions - 2) return result_order
Process the linkages. Transforms a distance matrix into a sorted distance matrix according to the order implied by the hierarchical tree (dendrogram). Returns ------- list: List of indices with the order implied by the hierarchical tree or list of tuples of (`index`, `bin`) if a binning threshold was provided
49
36
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __call__(self) -> List[Tuple[int, int]]: logger.info("Sorting face distances. Depending on your dataset this may take some time...") if self._threshold: self._threshold = self._result_linkage[:, 2].max() * self._threshold result_order = self._seriation(self._result_linkage, self._num_predictions, self._num_predictions + self._num_predictions - 2) return result_order ``` ###Assistant : Process the linkages. Transforms a distance matrix into a sorted distance matrix according to the order implied by the hierarchical tree (dendrogram). Returns ------- list: List of indices with the order implied by the hierarchical tree or list of tuples of (`index`, `bin`) if a binning threshold was provided
2,362
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): if self.export_eb: # enable rcnn models for edgeboard hw to skip the following postprocess. return bboxes, bboxes, bbox_num if not self.export_onnx: bboxes_list = [] bbox_num_list = [] id_start = 0 fake_bboxes = paddle.to_tensor( np.array( [[0., 0.0, 0.0, 0.0, 1.0, 1.0]], dtype='float32')) fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32')) # add fake bbox when output is empty for each batch for i in range(bbox_num.shape[0]): if bbox_num[i] == 0: bboxes_i = fake_bboxes bbox_num_i = fake_bbox_num else: bboxes_i = bboxes[id_start:id_start + bbox_num[i], :] bbox_num_i = bbox_num[i] id_start += bbox_num[i] bboxes_list.append(bboxes_i) bbox_num_list.append(bbox_num_i) bboxes = paddle.concat(bboxes_list) bbox_num = paddle.concat(bbox_num_list) origin_shape = paddle.floor(im_shape / scale_factor + 0.5) if not self.export_onnx: origin_shape_list = [] scale_factor_list = [] # scale_factor: scale_y, scale_x for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:i + 1, :], [bbox_num[i], 2]) scale_y, scale_x = scale_factor[i][0], scale_factor[i][1] scale = paddle.concat([scale_x, scale_y, scale_x, scale_y]) expand_scale = paddle.expand(scale, [bbox_num[i], 4]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) self.origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) else: # simplify the computation for bs=1 when exporting onnx scale_y, scale_x = scale_factor[0][0], scale_factor[0][1] scale = paddle.concat( [scale_x, scale_y, scale_x, scale_y]).unsqueeze(0) self.origin_shape_list = paddle.expand(origin_shape, [bbox_num[0], 2]) scale_factor_list = paddle.expand(scale, [bbox_num[0], 4]) # bboxes: [N, 6], label, score, bbox pred_label = bboxes[:, 0:1] pred_score = bboxes[:, 1:2] pred_bbox = bboxes[:, 2:] # rescale bbox to original image scaled_bbox = pred_bbox / scale_factor_list origin_h = self.origin_shape_list[:, 0] origin_w = self.origin_shape_list[:, 1] zeros = paddle.zeros_like(origin_h) # clip bbox to [0, original_size] x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros) y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros) x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros) y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1) # filter empty bbox keep_mask = nonempty_bbox(pred_bbox, return_mask=True) keep_mask = paddle.unsqueeze(keep_mask, [1]) pred_label = paddle.where(keep_mask, pred_label, paddle.ones_like(pred_label) * -1) pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1) return bboxes, pred_result, bbox_num
Rescale, clip and filter the bbox from the output of NMS to get final prediction. Notes: Currently only support bs = 1. Args: bboxes (Tensor): The output bboxes with shape [N, 6] after decode and NMS, including labels, scores and bboxes. bbox_num (Tensor): The number of prediction boxes of each batch with shape [1], and is N. im_shape (Tensor): The shape of the input image. scale_factor (Tensor): The scale factor of the input image. Returns: pred_result (Tensor): The final prediction results with shape [N, 6] including labels, scores and bboxes.
90
292
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): if self.export_eb: # enable rcnn models for edgeboard hw to skip the following postprocess. return bboxes, bboxes, bbox_num if not self.export_onnx: bboxes_list = [] bbox_num_list = [] id_start = 0 fake_bboxes = paddle.to_tensor( np.array( [[0., 0.0, 0.0, 0.0, 1.0, 1.0]], dtype='float32')) fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32')) # add fake bbox when output is empty for each batch for i in range(bbox_num.shape[0]): if bbox_num[i] == 0: bboxes_i = fake_bboxes bbox_num_i = fake_bbox_num else: bboxes_i = bboxes[id_start:id_start + bbox_num[i], :] bbox_num_i = bbox_num[i] id_start += bbox_num[i] bboxes_list.append(bboxes_i) bbox_num_list.append(bbox_num_i) bboxes = paddle.concat(bboxes_list) bbox_num = paddle.concat(bbox_num_list) origin_shape = paddle.floor(im_shape / scale_factor + 0.5) if not self.export_onnx: origin_shape_list = [] scale_factor_list = [] # scale_factor: scale_y, scale_x for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:i + 1, :], [bbox_num[i], 2]) scale_y, scale_x = scale_factor[i][0], scale_factor[i][1] scale = paddle.concat([scale_x, scale_y, scale_x, scale_y]) expand_scale = paddle.expand(scale, [bbox_num[i], 4]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) self.origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) else: # simplify the computation for bs=1 when exporting onnx scale_y, scale_x = scale_factor[0][0], scale_factor[0][1] scale = paddle.concat( [scale_x, scale_y, scale_x, scale_y]).unsqueeze(0) self.origin_shape_list = paddle.expand(origin_shape, [bbox_num[0], 2]) scale_factor_list = paddle.expand(scale, [bbox_num[0], 4]) # bboxes: [N, 6], label, score, bbox pred_label = bboxes[:, 0:1] pred_score = bboxes[:, 1:2] pred_bbox = bboxes[:, 2:] # rescale bbox to original image scaled_bbox = pred_bbox / scale_factor_list origin_h = self.origin_shape_list[:, 0] origin_w = self.origin_shape_list[:, 1] zeros = paddle.zeros_like(origin_h) # clip bbox to [0, original_size] x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros) y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros) x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros) y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1) # filter empty bbox keep_mask = nonempty_bbox(pred_bbox, return_mask=True) keep_mask = paddle.unsqueeze(keep_mask, [1]) pred_label = paddle.where(keep_mask, pred_label, paddle.ones_like(pred_label) * -1) pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1) return bboxes, pred_result, bbox_num ``` ###Assistant : Rescale, clip and filter the bbox from the output of NMS to get final prediction. Notes: Currently only support bs = 1. Args: bboxes (Tensor): The output bboxes with shape [N, 6] after decode and NMS, including labels, scores and bboxes. bbox_num (Tensor): The number of prediction boxes of each batch with shape [1], and is N. im_shape (Tensor): The shape of the input image. scale_factor (Tensor): The scale factor of the input image. Returns: pred_result (Tensor): The final prediction results with shape [N, 6] including labels, scores and bboxes.
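The postprocessing documented above reduces to plain box arithmetic: divide by the per-image scale factor, clip into [0, original size], and drop degenerate boxes. A minimal NumPy sketch of that arithmetic, using made-up box values and image sizes in place of Paddle tensors:
```Python
import numpy as np

# Hypothetical decoded boxes in the resized image: [x1, y1, x2, y2]
pred_bbox = np.array([[10.0, 20.0, 110.0, 220.0],
                      [5.0, 5.0, 5.0, 5.0]])        # degenerate box
scale = np.array([2.0, 2.0, 2.0, 2.0])               # scale_x, scale_y repeated per coordinate
origin_h, origin_w = 100.0, 80.0                     # original image size

# Rescale back to original-image coordinates
scaled = pred_bbox / scale

# Clip each coordinate into [0, original_size]
x1 = np.clip(scaled[:, 0], 0, origin_w)
y1 = np.clip(scaled[:, 1], 0, origin_h)
x2 = np.clip(scaled[:, 2], 0, origin_w)
y2 = np.clip(scaled[:, 3], 0, origin_h)
clipped = np.stack([x1, y1, x2, y2], axis=-1)

# Keep only boxes with positive width and height
keep = (clipped[:, 2] > clipped[:, 0]) & (clipped[:, 3] > clipped[:, 1])
print(clipped[keep])   # the degenerate box is filtered out
```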
2,363
def _signal_zone_update(self): async_dispatcher_send(self.hass, f"{SIGNAL_ZONE_UPDATE}-{self._zone.zone_id}")
Signal a zone update. Whenever the underlying library does an action against a zone, the data for the zone is updated. Update a single zone.
25
4
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _signal_zone_update(self): async_dispatcher_send(self.hass, f"{SIGNAL_ZONE_UPDATE}-{self._zone.zone_id}") ``` ###Assistant : Signal a zone update. Whenever the underlying library does an action against a zone, the data for the zone is updated. Update a single zone.
2,364
def test_asymmetric_error(quantile): n_samples = 10_000 rng = np.random.RandomState(42) # take care that X @ coef + intercept > 0 X = np.concatenate( ( np.abs(rng.randn(n_samples)[:, None]), -rng.randint(2, size=(n_samples, 1)), ), axis=1, ) intercept = 1.23 coef = np.array([0.5, -2]) # For an exponential distribution with rate lambda, e.g. exp(-lambda * x), # the quantile at level q is: # quantile(q) = - log(1 - q) / lambda # scale = 1/lambda = -quantile(q) / log(1-q) y = rng.exponential( scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples ) model = HistGradientBoostingRegressor( loss="quantile", quantile=quantile, max_iter=25, random_state=0, max_leaf_nodes=10, ).fit(X, y) assert_allclose(np.mean(model.predict(X) > y), quantile, rtol=1e-2) pinball_loss = PinballLoss(quantile=quantile) loss_true_quantile = pinball_loss(y, X @ coef + intercept) loss_pred_quantile = pinball_loss(y, model.predict(X)) # we are overfitting assert loss_pred_quantile <= loss_true_quantile @pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])])
Test quantile regression for asymmetrically distributed targets.
7
133
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_asymmetric_error(quantile): n_samples = 10_000 rng = np.random.RandomState(42) # take care that X @ coef + intercept > 0 X = np.concatenate( ( np.abs(rng.randn(n_samples)[:, None]), -rng.randint(2, size=(n_samples, 1)), ), axis=1, ) intercept = 1.23 coef = np.array([0.5, -2]) # For an exponential distribution with rate lambda, e.g. exp(-lambda * x), # the quantile at level q is: # quantile(q) = - log(1 - q) / lambda # scale = 1/lambda = -quantile(q) / log(1-q) y = rng.exponential( scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples ) model = HistGradientBoostingRegressor( loss="quantile", quantile=quantile, max_iter=25, random_state=0, max_leaf_nodes=10, ).fit(X, y) assert_allclose(np.mean(model.predict(X) > y), quantile, rtol=1e-2) pinball_loss = PinballLoss(quantile=quantile) loss_true_quantile = pinball_loss(y, X @ coef + intercept) loss_pred_quantile = pinball_loss(y, model.predict(X)) # we are overfitting assert loss_pred_quantile <= loss_true_quantile @pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])]) ``` ###Assistant : Test quantile regression for asymmetric distributed targets.
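The comment in the test above relies on the closed-form quantile of an exponential distribution, quantile(q) = -log(1 - q) / lambda. A quick numerical check of that identity (the rate and quantile level are toy values chosen for illustration):
```Python
import numpy as np

rng = np.random.RandomState(0)
q = 0.8                      # quantile level used as an example
lam = 0.5                    # rate of the exponential distribution
scale = 1.0 / lam

# Closed form: quantile(q) = -log(1 - q) / lambda
analytic = -np.log(1.0 - q) / lam

# Empirical check on a large sample
samples = rng.exponential(scale=scale, size=1_000_000)
empirical = np.quantile(samples, q)

print(analytic, empirical)   # the two values agree to a few decimal places
```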
2,365
def update(self) -> None: with self.lock: # Fetch valid stop information once if not self._origin: stops = self._pygtfs.stops_by_id(self.origin) if not stops: self._available = False _LOGGER.warning("Origin stop ID %s not found", self.origin) return self._origin = stops[0] if not self._destination: stops = self._pygtfs.stops_by_id(self.destination) if not stops: self._available = False _LOGGER.warning( "Destination stop ID %s not found", self.destination ) return self._destination = stops[0] self._available = True # Fetch next departure self._departure = get_next_departure( self._pygtfs, self.origin, self.destination, self._offset, self._include_tomorrow, ) # Define the state as a UTC timestamp with ISO 8601 format if not self._departure: self._state = None else: self._state = self._departure["departure_time"].replace( tzinfo=dt_util.UTC ) # Fetch trip and route details once, unless updated if not self._departure: self._trip = None else: trip_id = self._departure["trip_id"] if not self._trip or self._trip.trip_id != trip_id: _LOGGER.debug("Fetching trip details for %s", trip_id) self._trip = self._pygtfs.trips_by_id(trip_id)[0] route_id = self._departure["route_id"] if not self._route or self._route.route_id != route_id: _LOGGER.debug("Fetching route details for %s", route_id) self._route = self._pygtfs.routes_by_id(route_id)[0] # Fetch agency details exactly once if self._agency is None and self._route: _LOGGER.debug("Fetching agency details for %s", self._route.agency_id) try: self._agency = self._pygtfs.agencies_by_id(self._route.agency_id)[0] except IndexError: _LOGGER.warning( "Agency ID '%s' was not found in agency table, " "you may want to update the routes database table " "to fix this missing reference", self._route.agency_id, ) self._agency = False # Assign attributes, icon and name self.update_attributes() if self._agency: self._attr_attribution = self._agency.agency_name else: self._attr_attribution = None if self._route: self._icon = ICONS.get(self._route.route_type, ICON) else: self._icon = ICON name = ( f"{getattr(self._agency, 'agency_name', DEFAULT_NAME)} " f"{self.origin} to {self.destination} next departure" ) if not self._departure: name = f"{DEFAULT_NAME}" self._name = self._custom_name or name
Get the latest data from GTFS and update the states.
10
259
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update(self) -> None: with self.lock: # Fetch valid stop information once if not self._origin: stops = self._pygtfs.stops_by_id(self.origin) if not stops: self._available = False _LOGGER.warning("Origin stop ID %s not found", self.origin) return self._origin = stops[0] if not self._destination: stops = self._pygtfs.stops_by_id(self.destination) if not stops: self._available = False _LOGGER.warning( "Destination stop ID %s not found", self.destination ) return self._destination = stops[0] self._available = True # Fetch next departure self._departure = get_next_departure( self._pygtfs, self.origin, self.destination, self._offset, self._include_tomorrow, ) # Define the state as a UTC timestamp with ISO 8601 format if not self._departure: self._state = None else: self._state = self._departure["departure_time"].replace( tzinfo=dt_util.UTC ) # Fetch trip and route details once, unless updated if not self._departure: self._trip = None else: trip_id = self._departure["trip_id"] if not self._trip or self._trip.trip_id != trip_id: _LOGGER.debug("Fetching trip details for %s", trip_id) self._trip = self._pygtfs.trips_by_id(trip_id)[0] route_id = self._departure["route_id"] if not self._route or self._route.route_id != route_id: _LOGGER.debug("Fetching route details for %s", route_id) self._route = self._pygtfs.routes_by_id(route_id)[0] # Fetch agency details exactly once if self._agency is None and self._route: _LOGGER.debug("Fetching agency details for %s", self._route.agency_id) try: self._agency = self._pygtfs.agencies_by_id(self._route.agency_id)[0] except IndexError: _LOGGER.warning( "Agency ID '%s' was not found in agency table, " "you may want to update the routes database table " "to fix this missing reference", self._route.agency_id, ) self._agency = False # Assign attributes, icon and name self.update_attributes() if self._agency: self._attr_attribution = self._agency.agency_name else: self._attr_attribution = None if self._route: self._icon = ICONS.get(self._route.route_type, ICON) else: self._icon = ICON name = ( f"{getattr(self._agency, 'agency_name', DEFAULT_NAME)} " f"{self.origin} to {self.destination} next departure" ) if not self._departure: name = f"{DEFAULT_NAME}" self._name = self._custom_name or name ``` ###Assistant : Get the latest data from GTFS and update the states.
2,366
async def test_carbon_monoxide_sensor_read_state(hass, utcnow): helper = await setup_test_component(hass, create_carbon_monoxide_sensor_service) await helper.async_update( ServicesTypes.CARBON_MONOXIDE_SENSOR, {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 0}, ) state = await helper.poll_and_get_state() assert state.state == "off" await helper.async_update( ServicesTypes.CARBON_MONOXIDE_SENSOR, {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 1}, ) state = await helper.poll_and_get_state() assert state.state == "on" assert state.attributes["device_class"] == BinarySensorDeviceClass.CO
Test that we can read the state of a HomeKit carbon monoxide accessory.
12
41
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_carbon_monoxide_sensor_read_state(hass, utcnow): helper = await setup_test_component(hass, create_carbon_monoxide_sensor_service) await helper.async_update( ServicesTypes.CARBON_MONOXIDE_SENSOR, {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 0}, ) state = await helper.poll_and_get_state() assert state.state == "off" await helper.async_update( ServicesTypes.CARBON_MONOXIDE_SENSOR, {CharacteristicsTypes.CARBON_MONOXIDE_DETECTED: 1}, ) state = await helper.poll_and_get_state() assert state.state == "on" assert state.attributes["device_class"] == BinarySensorDeviceClass.CO ``` ###Assistant : Test that we can read the state of a HomeKit contact accessory.
2,367
def save_config(self) -> TritonArtifact: device = self.device if self.inference_stage != PREDICTOR: device = "cpu" self.config = TritonConfig( self.full_model_name, self.input_features, self.output_features, self.max_batch_size, self.max_queue_delay_microseconds, device, self.model_instance_count, self.inference_stage, ) config_path = os.path.join(self.base_path, "config.pbtxt") with open(config_path, "w") as f: formatted_config = remove_empty_lines(self.config.get_model_config()) f.write(formatted_config) config_artifact = TritonArtifact( model_name=self.full_model_name, model_version=self.model_version, platform="pytorch_libtorch", path=config_path, content_type="text/x-protobuf", content_length=os.path.getsize(config_path), ) return config_artifact @dataclass
Save the Triton config. Return the appropriate artifact.
8
52
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save_config(self) -> TritonArtifact: device = self.device if self.inference_stage != PREDICTOR: device = "cpu" self.config = TritonConfig( self.full_model_name, self.input_features, self.output_features, self.max_batch_size, self.max_queue_delay_microseconds, device, self.model_instance_count, self.inference_stage, ) config_path = os.path.join(self.base_path, "config.pbtxt") with open(config_path, "w") as f: formatted_config = remove_empty_lines(self.config.get_model_config()) f.write(formatted_config) config_artifact = TritonArtifact( model_name=self.full_model_name, model_version=self.model_version, platform="pytorch_libtorch", path=config_path, content_type="text/x-protobuf", content_length=os.path.getsize(config_path), ) return config_artifact @dataclass ``` ###Assistant : Save the Triton config. Return the appropriate artifact.
2,368
def test_unknown_device(self) -> None: url = "/_synapse/admin/v2/users/%s/devices/unknown_device" % urllib.parse.quote( self.other_user ) channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) channel = self.make_request( "PUT", url, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) channel = self.make_request( "DELETE", url, access_token=self.admin_user_tok, ) # Delete unknown device returns status 200 self.assertEqual(200, channel.code, msg=channel.json_body)
Tests that a lookup for a device that does not exist returns either 404 or 200.
16
50
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_unknown_device(self) -> None: url = "/_synapse/admin/v2/users/%s/devices/unknown_device" % urllib.parse.quote( self.other_user ) channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) channel = self.make_request( "PUT", url, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) channel = self.make_request( "DELETE", url, access_token=self.admin_user_tok, ) # Delete unknown device returns status 200 self.assertEqual(200, channel.code, msg=channel.json_body) ``` ###Assistant : Tests that a lookup for a device that does not exist returns either 404 or 200.
2,369
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs): gen_token_dict = defaultdict(list) # dict of list of generated tokens for step, batch in tqdm(enumerate(dataloader)): with torch.no_grad(): gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1] generated_tokens = accelerator.unwrap_model(model).generate( input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs ) # each task is generated batch_size times generated_tasks = batch["task_id"].repeat(batch_size) generated_tokens = accelerator.pad_across_processes( generated_tokens, dim=1, pad_index=tokenizer.pad_token_id ) generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks)) generated_tokens = generated_tokens.cpu().numpy() generated_tasks = generated_tasks.cpu().numpy() for task, generated_tokens in zip(generated_tasks, generated_tokens): gen_token_dict[task].append(generated_tokens) code_gens = [[] for _ in range(n_tasks)] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True) code_gens[task].append(remove_last_block(gen_code)) return code_gens
Generate multiple codes for each task in the dataset. This function leverages accelerator to distribute the processing to multiple GPUs. dataloader, a wrapper around a TokenizeDataset object, is supposed to send all the prompts from the evaluation dataset to the model as follows: [p_0_0, p_0_1, ..., p_0_nc-1, p_1_0, ..., p_nt-1_nc-1] where nc is the number of copies of the prompt, and nt is the number of tasks. nc is such that num_sample = nc * batch_size Parameters ---------- accelerator: Accelerator model: transformers.PreTrainedModel Code generation model. AutoTokenizer.from_pretrained(model_ckpt), ex model_ckpt = "lvwerra/codeparrot" tokenizer: transformers.AutoTokenizer The tokenizer used to train the model dataloader: DataLoader The dataloader is a wrapper around a TokenizeDataset object. It is designed to be used with multiple GPUs. n_tasks: int The number of tasks in the dataset. It is used to determine the length of the output. Should be aligned with the number of tasks in the TokenizeDataset. batch_size: int num_return_sequences per copy of the prompt such that num_sample = batch_size * n_copies gen_kwargs: dict Keyword arguments for the generation function of the model. Returns ------- code_gens: list of list of str, of length n_tasks List of generated codes for each task. Each element is a list of generated codes for each task, with length num_samples
207
96
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs): gen_token_dict = defaultdict(list) # dict of list of generated tokens for step, batch in tqdm(enumerate(dataloader)): with torch.no_grad(): gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1] generated_tokens = accelerator.unwrap_model(model).generate( input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs ) # each task is generated batch_size times generated_tasks = batch["task_id"].repeat(batch_size) generated_tokens = accelerator.pad_across_processes( generated_tokens, dim=1, pad_index=tokenizer.pad_token_id ) generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks)) generated_tokens = generated_tokens.cpu().numpy() generated_tasks = generated_tasks.cpu().numpy() for task, generated_tokens in zip(generated_tasks, generated_tokens): gen_token_dict[task].append(generated_tokens) code_gens = [[] for _ in range(n_tasks)] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True) code_gens[task].append(remove_last_block(gen_code)) return code_gens ``` ###Assistant : Generate multiple codes for each task in the dataset. This function leverage accelerator to distribute the processing to multiple GPUs. dataloader, a wrapper around a TokenizeDataset objectm is supposed to send all the prompts from the evalution dataset to the modelm as the following: [p_0_0, p_0_1, ..., p_0_nc-1, p_1_0, ..., p_nt-1_nc-1] where nc is the number of copies of the prompt, and nt is the number of tasks. nc is such that num_sample = nc * batch_size Parameters ---------- accelerator: Accelerator model: transformers.PreTrainedModel Code generation model. AutoTokenizer.from_pretrained(model_ckpt), ex model_ckpt = "lvwerra/codeparrot" tokenizer: transformers.AutoTokenizer The tokenizer used to train model dataloader: DataLoader The dataloader is a wrapper around a TokenizeDataset object. It is designed to be used with multiple GPUs. n_tasks: int The number of tasks in the dataset. It is used to determine the length of the output. Should be aligned with the number of tasks in the TokenizeDataset. batch_size: int num_return_sequences per copy of the prompt such that num_sample = batch_size * n_copies gen_kwargs: dict Keyword arguments for the generation function of the model. Returns ------- code_gens: list of list of str, of length n_tasks List of generated codes for each task. Each element is a list of generated codes for each task, with length num_samples
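The prompt layout described in that docstring means every task id appears nc times in the stream, and each copy yields batch_size completions, so each task collects nc * batch_size generations. A tiny pure-Python sketch of that regrouping, with placeholder strings standing in for generated token sequences:
```Python
from collections import defaultdict

n_tasks, n_copies, batch_size = 3, 2, 4   # toy numbers, so num_samples = 2 * 4 = 8 per task

# The dataloader emits task ids in the order p_0_0, p_0_1, ..., p_nt-1_nc-1
task_ids = [t for t in range(n_tasks) for _ in range(n_copies)]

gen_token_dict = defaultdict(list)
for task in task_ids:
    # each forward pass returns batch_size completions for this copy of the prompt
    gen_token_dict[task].extend(f"completion-{task}-{i}" for i in range(batch_size))

# every task ends up with n_copies * batch_size generations
assert all(len(v) == n_copies * batch_size for v in gen_token_dict.values())
```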
2,370
def get_status(start_date, end_date): if not end_date: return "Active" start_date = getdate(start_date) end_date = getdate(end_date) now_date = getdate(nowdate()) return "Active" if start_date <= now_date <= end_date else "Inactive"
Get a Contract's status based on the start, current and end dates Args: start_date (str): The start date of the contract end_date (str): The end date of the contract Returns: str: 'Active' if within range, otherwise 'Inactive'
37
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_status(start_date, end_date): if not end_date: return "Active" start_date = getdate(start_date) end_date = getdate(end_date) now_date = getdate(nowdate()) return "Active" if start_date <= now_date <= end_date else "Inactive" ``` ###Assistant : Get a Contract's status based on the start, current and end dates Args: start_date (str): The start date of the contract end_date (str): The end date of the contract Returns: str: 'Active' if within range, otherwise 'Inactive'
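Stripped of the frappe helpers, the status rule above is a plain date-range check. A self-contained sketch with datetime.date; the function name and the injectable today argument are illustrative, not part of the original:
```Python
from datetime import date
from typing import Optional

def contract_status(start_date: date, end_date: Optional[date], today: Optional[date] = None) -> str:
    """Return 'Active' when today falls inside [start_date, end_date], else 'Inactive'."""
    if not end_date:
        return "Active"
    today = today or date.today()
    return "Active" if start_date <= today <= end_date else "Inactive"

print(contract_status(date(2024, 1, 1), date(2024, 12, 31), today=date(2024, 6, 1)))  # Active
print(contract_status(date(2024, 1, 1), date(2024, 2, 1), today=date(2024, 6, 1)))    # Inactive
```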
2,371
def calc_position(self, s): x = self.sx.calc_position(s) y = self.sy.calc_position(s) return x, y
calc position Parameters ---------- s : float distance from the start point. if `s` is outside the data point's range, return None. Returns ------- x : float x position for given s. y : float y position for given s.
40
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def calc_position(self, s): x = self.sx.calc_position(s) y = self.sy.calc_position(s) return x, y ``` ###Assistant : calc position Parameters ---------- s : float distance from the start point. if `s` is outside the data point's range, return None. Returns ------- x : float x position for given s. y : float y position for given s.
2,372
def push(self, exit): # We use an unbound method rather than a bound method to follow # the standard lookup behaviour for special methods. _cb_type = type(exit) try: exit_method = _cb_type.__exit__ except AttributeError: # Not a context manager, so assume it's a callable. self._push_exit_callback(exit) else: self._push_cm_exit(exit, exit_method) return exit # Allow use as a decorator.
Registers a callback with the standard __exit__ method signature. Can suppress exceptions the same way __exit__ method can. Also accepts any object with an __exit__ method (registering a call to the method instead of the object itself).
37
55
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def push(self, exit): # We use an unbound method rather than a bound method to follow # the standard lookup behaviour for special methods. _cb_type = type(exit) try: exit_method = _cb_type.__exit__ except AttributeError: # Not a context manager, so assume it's a callable. self._push_exit_callback(exit) else: self._push_cm_exit(exit, exit_method) return exit # Allow use as a decorator. ``` ###Assistant : Registers a callback with the standard __exit__ method signature. Can suppress exceptions the same way __exit__ method can. Also accepts any object with an __exit__ method (registering a call to the method instead of the object itself).
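The method above mirrors the behaviour of contextlib.ExitStack.push in the standard library. A short usage sketch showing both accepted forms, an object exposing __exit__ and a bare callable with the __exit__ signature:
```Python
from contextlib import ExitStack

class Resource:
    def __exit__(self, exc_type, exc, tb):
        print("Resource.__exit__ called")
        return False          # do not suppress exceptions

def cleanup(exc_type, exc, tb):
    print("cleanup callable called")
    return False

with ExitStack() as stack:
    stack.push(Resource())    # registered via its __exit__ method
    stack.push(cleanup)       # plain callable with the __exit__ signature
    print("inside the with block")
# exits run in LIFO order: cleanup first, then Resource.__exit__
```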
2,373
async def async_media_play(self) -> None: if self._status["state"] == "pause": await self._client.pause(0) else: await self._client.play()
Service to send the MPD the command for play/pause.
9
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_media_play(self) -> None: if self._status["state"] == "pause": await self._client.pause(0) else: await self._client.play() ``` ###Assistant : Service to send the MPD the command for play/pause.
2,374
def download_datev_csv(filters): if isinstance(filters, str): filters = json.loads(filters) validate(filters) company = filters.get("company") fiscal_year = get_fiscal_year(date=filters.get("from_date"), company=company) filters["fiscal_year_start"] = fiscal_year[1] # set chart of accounts used coa = frappe.get_value("Company", company, "chart_of_accounts") filters["skr"] = "04" if "SKR04" in coa else ("03" if "SKR03" in coa else "") datev_settings = frappe.get_doc("DATEV Settings", company) filters["account_number_length"] = datev_settings.account_number_length filters["temporary_against_account_number"] = datev_settings.temporary_against_account_number transactions = get_transactions(filters) account_names = get_account_names(filters) customers = get_customers(filters) suppliers = get_suppliers(filters) zip_name = "{} DATEV.zip".format(frappe.utils.datetime.date.today()) zip_and_download( zip_name, [ { "file_name": "EXTF_Buchungsstapel.csv", "csv_data": get_datev_csv(transactions, filters, csv_class=Transactions), }, { "file_name": "EXTF_Kontenbeschriftungen.csv", "csv_data": get_datev_csv(account_names, filters, csv_class=AccountNames), }, { "file_name": "EXTF_Kunden.csv", "csv_data": get_datev_csv(customers, filters, csv_class=DebtorsCreditors), }, { "file_name": "EXTF_Lieferanten.csv", "csv_data": get_datev_csv(suppliers, filters, csv_class=DebtorsCreditors), }, ], )
Provide accounting entries for download in DATEV format. Validate the filters, get the data, produce the CSV file and provide it for download. Can be called like this: GET /api/method/erpnext.regional.report.datev.datev.download_datev_csv Arguments / Params: filters -- dict of filters to be passed to the sql query
45
109
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def download_datev_csv(filters): if isinstance(filters, str): filters = json.loads(filters) validate(filters) company = filters.get("company") fiscal_year = get_fiscal_year(date=filters.get("from_date"), company=company) filters["fiscal_year_start"] = fiscal_year[1] # set chart of accounts used coa = frappe.get_value("Company", company, "chart_of_accounts") filters["skr"] = "04" if "SKR04" in coa else ("03" if "SKR03" in coa else "") datev_settings = frappe.get_doc("DATEV Settings", company) filters["account_number_length"] = datev_settings.account_number_length filters["temporary_against_account_number"] = datev_settings.temporary_against_account_number transactions = get_transactions(filters) account_names = get_account_names(filters) customers = get_customers(filters) suppliers = get_suppliers(filters) zip_name = "{} DATEV.zip".format(frappe.utils.datetime.date.today()) zip_and_download( zip_name, [ { "file_name": "EXTF_Buchungsstapel.csv", "csv_data": get_datev_csv(transactions, filters, csv_class=Transactions), }, { "file_name": "EXTF_Kontenbeschriftungen.csv", "csv_data": get_datev_csv(account_names, filters, csv_class=AccountNames), }, { "file_name": "EXTF_Kunden.csv", "csv_data": get_datev_csv(customers, filters, csv_class=DebtorsCreditors), }, { "file_name": "EXTF_Lieferanten.csv", "csv_data": get_datev_csv(suppliers, filters, csv_class=DebtorsCreditors), }, ], ) ``` ###Assistant : Provide accounting entries for download in DATEV format. Validate the filters, get the data, produce the CSV file and provide it for download. Can be called like this: GET /api/method/erpnext.regional.report.datev.datev.download_datev_csv Arguments / Params: filters -- dict of filters to be passed to the sql query
2,375
def taggedsent_to_conll(sentence): for (i, (word, tag)) in enumerate(sentence, start=1): input_str = [str(i), word, "_", tag, tag, "_", "0", "a", "_", "_"] input_str = "\t".join(input_str) + "\n" yield input_str
A module to convert a single POS tagged sentence into CONLL format. >>> from nltk import word_tokenize, pos_tag >>> text = "This is a foobar sentence." >>> for line in taggedsent_to_conll(pos_tag(word_tokenize(text))): # doctest: +NORMALIZE_WHITESPACE ... print(line, end="") 1 This _ DT DT _ 0 a _ _ 2 is _ VBZ VBZ _ 0 a _ _ 3 a _ DT DT _ 0 a _ _ 4 foobar _ JJ JJ _ 0 a _ _ 5 sentence _ NN NN _ 0 a _ _ 6 . _ . . _ 0 a _ _ :param sentence: A single input sentence to parse :type sentence: list(tuple(str, str)) :rtype: iter(str) :return: a generator yielding a single sentence in CONLL format.
121
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def taggedsent_to_conll(sentence): for (i, (word, tag)) in enumerate(sentence, start=1): input_str = [str(i), word, "_", tag, tag, "_", "0", "a", "_", "_"] input_str = "\t".join(input_str) + "\n" yield input_str ``` ###Assistant : A module to convert a single POS tagged sentence into CONLL format. >>> from nltk import word_tokenize, pos_tag >>> text = "This is a foobar sentence." >>> for line in taggedsent_to_conll(pos_tag(word_tokenize(text))): # doctest: +NORMALIZE_WHITESPACE ... print(line, end="") 1 This _ DT DT _ 0 a _ _ 2 is _ VBZ VBZ _ 0 a _ _ 3 a _ DT DT _ 0 a _ _ 4 foobar _ JJ JJ _ 0 a _ _ 5 sentence _ NN NN _ 0 a _ _ 6 . _ . . _ 0 a _ _ :param sentence: A single input sentence to parse :type sentence: list(tuple(str, str)) :rtype: iter(str) :return: a generator yielding a single sentence in CONLL format.
2,376
def get_system_encoding(): try: encoding = locale.getdefaultlocale()[1] or "ascii" codecs.lookup(encoding) except Exception: encoding = "ascii" return encoding DEFAULT_LOCALE_ENCODING = get_system_encoding()
The encoding of the default system locale. Fallback to 'ascii' if the encoding is unsupported by Python or could not be determined. See tickets #10335 and #5846.
27
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_system_encoding(): try: encoding = locale.getdefaultlocale()[1] or "ascii" codecs.lookup(encoding) except Exception: encoding = "ascii" return encoding DEFAULT_LOCALE_ENCODING = get_system_encoding() ``` ###Assistant : The encoding of the default system locale. Fallback to 'ascii' if the encoding is unsupported by Python or could not be determined. See tickets #10335 and #5846.
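The codecs.lookup call in that snippet is what validates the candidate name: unknown names raise LookupError, which is why the function falls back to 'ascii'. A small illustration:
```Python
import codecs

for name in ("utf-8", "latin-1", "definitely-not-a-codec"):
    try:
        codecs.lookup(name)
        print(name, "-> valid codec")
    except LookupError:
        print(name, "-> unknown, would fall back to 'ascii'")
```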
2,377
def get_menu_item(self): if self.modeladmin_instances: submenu = Menu(items=self.get_submenu_items()) return GroupMenuItem(self, self.get_menu_order(), submenu)
Utilised by Wagtail's 'register_menu_item' hook to create a menu for this group with a submenu linking to listing pages for any associated ModelAdmin instances
24
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_menu_item(self): if self.modeladmin_instances: submenu = Menu(items=self.get_submenu_items()) return GroupMenuItem(self, self.get_menu_order(), submenu) ``` ###Assistant : Utilised by Wagtail's 'register_menu_item' hook to create a menu for this group with a submenu linking to listing pages for any associated ModelAdmin instances
2,378
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath): path, transform = self._get_text_path_transform( x, y, s, prop, angle, ismath) color = gc.get_rgb() gc.set_linewidth(0.0) self.draw_path(gc, path, transform, rgbFace=color)
Draw the text by converting them to paths using `.TextToPath`. Parameters ---------- x : float The x location of the text in display coords. y : float The y location of the text baseline in display coords. s : str The text to be converted. prop : `~matplotlib.font_manager.FontProperties` The font property. angle : float Angle in degrees to render the text at. ismath : bool or "TeX" If True, use mathtext parser. If "TeX", use tex for rendering.
78
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath): path, transform = self._get_text_path_transform( x, y, s, prop, angle, ismath) color = gc.get_rgb() gc.set_linewidth(0.0) self.draw_path(gc, path, transform, rgbFace=color) ``` ###Assistant : Draw the text by converting them to paths using `.TextToPath`. Parameters ---------- x : float The x location of the text in display coords. y : float The y location of the text baseline in display coords. s : str The text to be converted. prop : `~matplotlib.font_manager.FontProperties` The font property. angle : float Angle in degrees to render the text at. ismath : bool or "TeX" If True, use mathtext parser. If "TeX", use tex for rendering.
2,379
def __call__(self, match_quality_matrix): assert match_quality_matrix.dim() == 2 if match_quality_matrix.numel() == 0: default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64) # When no gt boxes exist, we define IOU = 0 and therefore set labels # to `self.labels[0]`, which usually defaults to background class 0 # To choose to ignore instead, # can make labels=[-1,0,-1,1] + set appropriate thresholds default_match_labels = match_quality_matrix.new_full( (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 ) return default_matches, default_match_labels assert torch.all(match_quality_matrix >= 0) # match_quality_matrix is M (gt) x N (predicted) # Max over gt elements (dim 0) to find best gt candidate for each prediction matched_vals, matches = match_quality_matrix.max(dim=0) match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): low_high = (matched_vals >= low) & (matched_vals < high) match_labels[low_high] = l if self.allow_low_quality_matches: self.set_low_quality_matches_(match_labels, match_quality_matrix) return matches, match_labels
Args: match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted elements. All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in :meth:`set_low_quality_matches_`). Returns: matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M) match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates true or false positive or ignored
69
128
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __call__(self, match_quality_matrix): assert match_quality_matrix.dim() == 2 if match_quality_matrix.numel() == 0: default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64) # When no gt boxes exist, we define IOU = 0 and therefore set labels # to `self.labels[0]`, which usually defaults to background class 0 # To choose to ignore instead, # can make labels=[-1,0,-1,1] + set appropriate thresholds default_match_labels = match_quality_matrix.new_full( (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 ) return default_matches, default_match_labels assert torch.all(match_quality_matrix >= 0) # match_quality_matrix is M (gt) x N (predicted) # Max over gt elements (dim 0) to find best gt candidate for each prediction matched_vals, matches = match_quality_matrix.max(dim=0) match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): low_high = (matched_vals >= low) & (matched_vals < high) match_labels[low_high] = l if self.allow_low_quality_matches: self.set_low_quality_matches_(match_labels, match_quality_matrix) return matches, match_labels ``` ###Assistant : Args: match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted elements. All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in :meth:`set_low_quality_matches_`). Returns: matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M) match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates true or false positive or ignored
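The thresholding loop above buckets each prediction's best match quality into a label. A NumPy sketch of that bucketing with assumed thresholds [0, 0.3, 0.7, inf] and labels [0, -1, 1] (negative, ignore, positive); the IoU values are made up:
```Python
import numpy as np

# Best IoU of each prediction against any ground-truth box (toy values)
matched_vals = np.array([0.05, 0.45, 0.72, 0.90])

# Assumed configuration: below 0.3 -> 0 (negative), 0.3-0.7 -> -1 (ignore), above 0.7 -> 1 (positive)
thresholds = [0.0, 0.3, 0.7, float("inf")]
labels = [0, -1, 1]

match_labels = np.empty_like(matched_vals, dtype=np.int8)
for label, low, high in zip(labels, thresholds[:-1], thresholds[1:]):
    in_bucket = (matched_vals >= low) & (matched_vals < high)
    match_labels[in_bucket] = label

print(match_labels)   # -> [ 0 -1  1  1]
```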
2,380
def ismemberdescriptor(object): return isinstance(object, types.MemberDescriptorType) else: # Other implementations
Return true if the object is a member descriptor. Member descriptors are specialized descriptors defined in extension modules.
18
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ismemberdescriptor(object): return isinstance(object, types.MemberDescriptorType) else: # Other implementations ``` ###Assistant : Return true if the object is a member descriptor. Member descriptors are specialized descriptors defined in extension modules.
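The standard library exposes the same predicate as inspect.ismemberdescriptor; member descriptors typically arise from __slots__. A short usage sketch:
```Python
import inspect

class Point:
    __slots__ = ("x", "y")    # creates member descriptors Point.x and Point.y

print(inspect.ismemberdescriptor(Point.x))         # True
print(inspect.ismemberdescriptor(Point.__init__))  # False: not a member descriptor
```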
2,381
def pandas_dtype_to_arrow_c(dtype) -> str: if isinstance(dtype, pandas.CategoricalDtype): return ArrowCTypes.INT64 elif dtype == np.dtype("O"): return ArrowCTypes.STRING format_str = getattr(ArrowCTypes, dtype.name.upper(), None) if format_str is not None: return format_str if is_datetime64_dtype(dtype): # Selecting the first char of resolution string: # dtype.str -> '<M8[ns]' resolution = re.findall(r"\[(.*)\]", dtype.str)[0][:1] return ArrowCTypes.TIMESTAMP.format(resolution=resolution, tz="") raise NotImplementedError( f"Convertion of {dtype} to Arrow C format string is not implemented." )
Represent pandas `dtype` as a format string in Apache Arrow C notation. Parameters ---------- dtype : np.dtype Datatype of pandas DataFrame to represent. Returns ------- str Format string in Apache Arrow C notation of the given `dtype`.
37
62
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pandas_dtype_to_arrow_c(dtype) -> str: if isinstance(dtype, pandas.CategoricalDtype): return ArrowCTypes.INT64 elif dtype == np.dtype("O"): return ArrowCTypes.STRING format_str = getattr(ArrowCTypes, dtype.name.upper(), None) if format_str is not None: return format_str if is_datetime64_dtype(dtype): # Selecting the first char of resolution string: # dtype.str -> '<M8[ns]' resolution = re.findall(r"\[(.*)\]", dtype.str)[0][:1] return ArrowCTypes.TIMESTAMP.format(resolution=resolution, tz="") raise NotImplementedError( f"Convertion of {dtype} to Arrow C format string is not implemented." ) ``` ###Assistant : Represent pandas `dtype` as a format string in Apache Arrow C notation. Parameters ---------- dtype : np.dtype Datatype of pandas DataFrame to represent. Returns ------- str Format string in Apache Arrow C notation of the given `dtype`.
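For a rough feel of the mapping, here is a simplified stand-in that covers a handful of dtypes. The format codes follow the Arrow C data interface as I understand it ('l' for int64, 'g' for float64, 'u' for utf-8 strings, 'b' for booleans); treat the exact codes and the helper name as illustrative assumptions rather than the pandas implementation:
```Python
import numpy as np

# Simplified stand-in for the ArrowCTypes constants used above (codes assumed from the
# Arrow C data interface: "l" = int64, "g" = float64, "u" = utf-8 string, "b" = bool).
ARROW_C_FORMAT = {
    np.dtype("int64"): "l",
    np.dtype("float64"): "g",
    np.dtype("bool"): "b",
    np.dtype("O"): "u",       # object dtype treated as string, as in the function above
}

def simple_dtype_to_arrow_c(dtype) -> str:
    try:
        return ARROW_C_FORMAT[np.dtype(dtype)]
    except KeyError:
        raise NotImplementedError(f"No Arrow C format string registered for {dtype}")

print(simple_dtype_to_arrow_c("int64"))     # 'l'
print(simple_dtype_to_arrow_c(np.float64))  # 'g'
```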
2,382
def is_file_like(obj) -> bool: if not (hasattr(obj, "read") or hasattr(obj, "write")): return False return bool(hasattr(obj, "__iter__"))
Check if the object is a file-like object. For objects to be considered file-like, they must be an iterator AND have either a `read` and/or `write` method as an attribute. Note: file-like objects must be iterable, but iterable objects need not be file-like. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` has file-like properties. Examples -------- >>> import io >>> buffer = io.StringIO("data") >>> is_file_like(buffer) True >>> is_file_like([1, 2, 3]) False
76
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_file_like(obj) -> bool: if not (hasattr(obj, "read") or hasattr(obj, "write")): return False return bool(hasattr(obj, "__iter__")) ``` ###Assistant : Check if the object is a file-like object. For objects to be considered file-like, they must be an iterator AND have either a `read` and/or `write` method as an attribute. Note: file-like objects must be iterable, but iterable objects need not be file-like. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` has file-like properties. Examples -------- >>> import io >>> buffer = io.StringIO("data") >>> is_file_like(buffer) True >>> is_file_like([1, 2, 3]) False
2,383
def update(self) -> Union[SourceRead, DestinationRead, ConnectionRead]: return self._create_or_update(self._update_fn, self.update_payload)
Public function to update the resource on the remote Airbyte instance. Returns: Union[SourceRead, DestinationRead, ConnectionRead]: The updated resource.
18
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update(self) -> Union[SourceRead, DestinationRead, ConnectionRead]: return self._create_or_update(self._update_fn, self.update_payload) ``` ###Assistant : Public function to update the resource on the remote Airbyte instance. Returns: Union[SourceRead, DestinationRead, ConnectionRead]: The updated resource.
2,384
def test_callback_session(self) -> None: request = Mock(spec=["args", "getCookie", "cookies"]) # Missing cookie request.args = {} request.getCookie.return_value = None self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("missing_session", "No session cookie found") # Missing session parameter request.args = {} request.getCookie.return_value = "session" self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request", "State parameter is missing") # Invalid cookie request.args = {} request.args[b"state"] = [b"state"] request.getCookie.return_value = "session" self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_session") # Mismatching session session = self._generate_oidc_session_token( state="state", nonce="nonce", client_redirect_url="http://client/redirect", ) request.args = {} request.args[b"state"] = [b"mismatching state"] request.getCookie.return_value = session self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("mismatching_session") # Valid session request.args = {} request.args[b"state"] = [b"state"] request.getCookie.return_value = session self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request")
The callback verifies the session presence and validity
8
90
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_callback_session(self) -> None: request = Mock(spec=["args", "getCookie", "cookies"]) # Missing cookie request.args = {} request.getCookie.return_value = None self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("missing_session", "No session cookie found") # Missing session parameter request.args = {} request.getCookie.return_value = "session" self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request", "State parameter is missing") # Invalid cookie request.args = {} request.args[b"state"] = [b"state"] request.getCookie.return_value = "session" self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_session") # Mismatching session session = self._generate_oidc_session_token( state="state", nonce="nonce", client_redirect_url="http://client/redirect", ) request.args = {} request.args[b"state"] = [b"mismatching state"] request.getCookie.return_value = session self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("mismatching_session") # Valid session request.args = {} request.args[b"state"] = [b"state"] request.getCookie.return_value = session self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request") ``` ###Assistant : The callback verifies the session presence and validity
2,385
def _create_repo_url(self) -> str: url_components = urllib.parse.urlparse(self.repository_url) if url_components.scheme == "https" and self.credentials is not None: repo_url = url_components.netloc + url_components.path updated_components = url_components._replace( netloc=f"{self.credentials.get_secret_value()}@{url_components.netloc}" ) full_url = urllib.parse.urlunparse(updated_components) else: full_url = self.repository_url return full_url
Format the URL provided to the `git clone` command. For private repos: https://<oauth-key>@github.com/<username>/<repo>.git All other repos should be the same as `self.repository`.
22
35
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_repo_url(self) -> str: url_components = urllib.parse.urlparse(self.repository_url) if url_components.scheme == "https" and self.credentials is not None: repo_url = url_components.netloc + url_components.path updated_components = url_components._replace( netloc=f"{self.credentials.get_secret_value()}@{url_components.netloc}" ) full_url = urllib.parse.urlunparse(updated_components) else: full_url = self.repository_url return full_url ``` ###Assistant : Format the URL provided to the `git clone` command. For private repos: https://<oauth-key>@github.com/<username>/<repo>.git All other repos should be the same as `self.repository`.
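The URL handling above is standard urllib.parse: parse the repository URL, prefix the host with the credential, and reassemble. A sketch with a placeholder repository and token (both hypothetical):
```Python
import urllib.parse

repository_url = "https://github.com/example-user/example-repo.git"   # hypothetical repo
token = "MY_OAUTH_TOKEN"                                               # placeholder credential

parts = urllib.parse.urlparse(repository_url)
if parts.scheme == "https" and token:
    # Prefix the host with the token so git can authenticate: https://<token>@github.com/...
    parts = parts._replace(netloc=f"{token}@{parts.netloc}")

clone_url = urllib.parse.urlunparse(parts)
print(clone_url)   # https://MY_OAUTH_TOKEN@github.com/example-user/example-repo.git
```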
2,386
def test_lookup_with_dynamic_value(self): modeladmin = DepartmentFilterDynamicValueBookAdmin(Book, site)
Ensure SimpleListFilter can access self.value() inside the lookup.
8
6
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_lookup_with_dynamic_value(self): modeladmin = DepartmentFilterDynamicValueBookAdmin(Book, site) ``` ###Assistant : Ensure SimpleListFilter can access self.value() inside the lookup.
2,387
def test_batch_encode_dynamic_overflowing(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"): if is_torch_available(): returned_tensor = "pt" elif is_tf_available(): returned_tensor = "tf" elif is_flax_available(): returned_tensor = "jax" else: return if not tokenizer.pad_token or tokenizer.pad_token_id < 0: return tokens = tokenizer.encode_plus( "HuggingFace is solving NLP one commit at a time", max_length=6, padding=True, truncation=True, return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) # Mono sample tokens = tokenizer.batch_encode_plus( ["HuggingFace is solving NLP one commit at a time"], max_length=6, padding=True, truncation="only_first", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6) # Multi sample tokens = tokenizer.batch_encode_plus( ["HuggingFace is solving NLP one commit at a time", "Very tiny input"], max_length=6, padding=True, truncation="only_first", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6)
When calling batch_encode with multiple sequences it can return a different number of overflowing encodings for each sequence: [ Sequence 1: [Encoding 1, Encoding 2], Sequence 2: [Encoding 1], Sequence 3: [Encoding 1, Encoding 2, ... Encoding N] ] This needs to be padded so that it can be represented as a tensor
51
144
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_batch_encode_dynamic_overflowing(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"): if is_torch_available(): returned_tensor = "pt" elif is_tf_available(): returned_tensor = "tf" elif is_flax_available(): returned_tensor = "jax" else: return if not tokenizer.pad_token or tokenizer.pad_token_id < 0: return tokens = tokenizer.encode_plus( "HuggingFace is solving NLP one commit at a time", max_length=6, padding=True, truncation=True, return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) # Mono sample tokens = tokenizer.batch_encode_plus( ["HuggingFace is solving NLP one commit at a time"], max_length=6, padding=True, truncation="only_first", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6) # Multi sample tokens = tokenizer.batch_encode_plus( ["HuggingFace is solving NLP one commit at a time", "Very tiny input"], max_length=6, padding=True, truncation="only_first", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6) ``` ###Assistant : When calling batch_encode with multiple sequence it can returns different number of overflowing encoding for each sequence: [ Sequence 1: [Encoding 1, Encoding 2], Sequence 2: [Encoding 1], Sequence 3: [Encoding 1, Encoding 2, ... Encoding N] ] This needs to be padded so that it can represented as a tensor
2,388
def deserialize(config, custom_objects=None): # loss_scale_optimizer has a direct dependency of optimizer, import here # rather than top to avoid the cyclic dependency. from keras.mixed_precision import ( loss_scale_optimizer, ) # pylint: disable=g-import-not-at-top all_classes = { "adadelta": adadelta_v2.Adadelta, "adagrad": adagrad_v2.Adagrad, "adam": adam_v2.Adam, "adamax": adamax_v2.Adamax, "experimentaladadelta": adadelta_experimental.Adadelta, "experimentaladagrad": adagrad_experimental.Adagrad, "experimentaladam": adam_experimental.Adam, "experimentalsgd": sgd_experimental.SGD, "nadam": nadam_v2.Nadam, "rmsprop": rmsprop_v2.RMSprop, "sgd": gradient_descent_v2.SGD, "ftrl": ftrl.Ftrl, "lossscaleoptimizer": loss_scale_optimizer.LossScaleOptimizer, "lossscaleoptimizerv3": loss_scale_optimizer.LossScaleOptimizerV3, # LossScaleOptimizerV1 was an old version of LSO that was removed. # Deserializing it turns it into a LossScaleOptimizer "lossscaleoptimizerv1": loss_scale_optimizer.LossScaleOptimizer, } # Make deserialization case-insensitive for built-in optimizers. if config["class_name"].lower() in all_classes: config["class_name"] = config["class_name"].lower() return deserialize_keras_object( config, module_objects=all_classes, custom_objects=custom_objects, printable_module_name="optimizer", ) @keras_export("keras.optimizers.get")
Inverse of the `serialize` function. Args: config: Optimizer configuration dictionary. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras Optimizer instance.
32
106
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deserialize(config, custom_objects=None): # loss_scale_optimizer has a direct dependency of optimizer, import here # rather than top to avoid the cyclic dependency. from keras.mixed_precision import ( loss_scale_optimizer, ) # pylint: disable=g-import-not-at-top all_classes = { "adadelta": adadelta_v2.Adadelta, "adagrad": adagrad_v2.Adagrad, "adam": adam_v2.Adam, "adamax": adamax_v2.Adamax, "experimentaladadelta": adadelta_experimental.Adadelta, "experimentaladagrad": adagrad_experimental.Adagrad, "experimentaladam": adam_experimental.Adam, "experimentalsgd": sgd_experimental.SGD, "nadam": nadam_v2.Nadam, "rmsprop": rmsprop_v2.RMSprop, "sgd": gradient_descent_v2.SGD, "ftrl": ftrl.Ftrl, "lossscaleoptimizer": loss_scale_optimizer.LossScaleOptimizer, "lossscaleoptimizerv3": loss_scale_optimizer.LossScaleOptimizerV3, # LossScaleOptimizerV1 was an old version of LSO that was removed. # Deserializing it turns it into a LossScaleOptimizer "lossscaleoptimizerv1": loss_scale_optimizer.LossScaleOptimizer, } # Make deserialization case-insensitive for built-in optimizers. if config["class_name"].lower() in all_classes: config["class_name"] = config["class_name"].lower() return deserialize_keras_object( config, module_objects=all_classes, custom_objects=custom_objects, printable_module_name="optimizer", ) @keras_export("keras.optimizers.get") ``` ###Assistant : Inverse of the `serialize` function. Args: config: Optimizer configuration dictionary. custom_objects: Optional dictionary mapping names (strings) to custom objects (classes and functions) to be considered during deserialization. Returns: A Keras Optimizer instance.
2,389
def commutes_with(self, other): a = self.array_form b = other.array_form return _af_commutes_with(a, b)
Checks if the elements are commuting. Examples ======== >>> from sympy.combinatorics import Permutation >>> a = Permutation([1, 4, 3, 0, 2, 5]) >>> b = Permutation([0, 1, 2, 3, 4, 5]) >>> a.commutes_with(b) True >>> b = Permutation([2, 3, 5, 4, 1, 0]) >>> a.commutes_with(b) False
46
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def commutes_with(self, other): a = self.array_form b = other.array_form return _af_commutes_with(a, b) ``` ###Assistant : Checks if the elements are commuting. Examples ======== >>> from sympy.combinatorics import Permutation >>> a = Permutation([1, 4, 3, 0, 2, 5]) >>> b = Permutation([0, 1, 2, 3, 4, 5]) >>> a.commutes_with(b) True >>> b = Permutation([2, 3, 5, 4, 1, 0]) >>> a.commutes_with(b) False
2,390
def get_all_styles(): yield from STYLE_MAP for name, _ in find_plugin_styles(): yield name
Return a generator for all styles by name, both builtin and plugin.
12
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_all_styles(): yield from STYLE_MAP for name, _ in find_plugin_styles(): yield name ``` ###Assistant : Return a generator for all styles by name, both builtin and plugin.
2,391
def lift(cooccurrence): diag_rows, diag_cols = _get_row_and_column_matrix(cooccurrence.diagonal()) with np.errstate(invalid="ignore", divide="ignore"): result = cooccurrence / (diag_rows * diag_cols) return np.array(result)
Helper method to calculate the Lift of a matrix of co-occurrences. In comparison with basic co-occurrence and Jaccard similarity, lift favours discoverability and serendipity, as opposed to co-occurrence that favours the most popular items, and Jaccard that is a compromise between the two. Args: cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items. Returns: numpy.ndarray: The matrix of Lifts between any two items.
63
18
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def lift(cooccurrence): diag_rows, diag_cols = _get_row_and_column_matrix(cooccurrence.diagonal()) with np.errstate(invalid="ignore", divide="ignore"): result = cooccurrence / (diag_rows * diag_cols) return np.array(result) ``` ###Assistant : Helper method to calculate the Lift of a matrix of co-occurrences. In comparison with basic co-occurrence and Jaccard similarity, lift favours discoverability and serendipity, as opposed to co-occurrence that favours the most popular items, and Jaccard that is a compromise between the two. Args: cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items. Returns: numpy.ndarray: The matrix of Lifts between any two items.
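Lift divides each co-occurrence count by the product of the two items' individual counts taken from the diagonal. A tiny NumPy example with a made-up 3x3 co-occurrence matrix:
```Python
import numpy as np

# Symmetric co-occurrence matrix for 3 items; the diagonal holds each item's own count
cooccurrence = np.array([[4.0, 2.0, 0.0],
                         [2.0, 5.0, 1.0],
                         [0.0, 1.0, 3.0]])

diag = cooccurrence.diagonal()
diag_rows = diag.reshape(-1, 1)   # column vector of item counts
diag_cols = diag.reshape(1, -1)   # row vector of item counts

with np.errstate(invalid="ignore", divide="ignore"):
    lift = cooccurrence / (diag_rows * diag_cols)

print(np.round(lift, 3))
```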
2,392
def update(self, props):
    return self._update_props(
        props, "{cls.__name__!r} object has no property {prop_name!r}")
Update this artist's properties from the dict *props*.

Parameters
----------
props : dict
13
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : 
```Python
def update(self, props):
    return self._update_props(
        props, "{cls.__name__!r} object has no property {prop_name!r}")
```
###Assistant : Update this artist's properties from the dict *props*.

Parameters
----------
props : dict
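A short, hedged example of how `update` is typically called on a Matplotlib artist; the property names below are standard Line2D properties, but the exact wording of the error message may vary between Matplotlib versions.
```Python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
(line,) = ax.plot([0, 1, 2], [0, 1, 4])

# Set several artist properties at once from a dict.
line.update({"color": "red", "linewidth": 2.0, "linestyle": "--"})

# An unknown key raises AttributeError using the message template shown above,
# e.g. "'Line2D' object has no property 'not_a_property'".
# line.update({"not_a_property": 1})
```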
2,393
def new_workers_size(self):
    remote_resources = ray.available_resources()
    max_remote_workers = self._max_workers
    new_remote_workers = min(remote_resources.get("CPU", 0), max_remote_workers)
    if self._use_gpu:
        new_remote_workers = min(remote_resources.get("GPU", 0), new_remote_workers)
    return new_remote_workers
Returns number of workers to create based on available resources.
10
22
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : 
```Python
def new_workers_size(self):
    remote_resources = ray.available_resources()
    max_remote_workers = self._max_workers
    new_remote_workers = min(remote_resources.get("CPU", 0), max_remote_workers)
    if self._use_gpu:
        new_remote_workers = min(remote_resources.get("GPU", 0), new_remote_workers)
    return new_remote_workers
```
###Assistant : Returns number of workers to create based on available resources.
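A standalone sketch of the same sizing logic outside the class, assuming a local Ray instance; the resource numbers reported by `ray.available_resources()` will of course differ per machine, and the two stand-in variables replace the `self._max_workers` / `self._use_gpu` attributes.
```Python
import ray

ray.init(ignore_reinit_error=True)

available = ray.available_resources()  # e.g. {'CPU': 8.0, 'GPU': 1.0, ...}
max_workers = 4        # stand-in for self._max_workers
use_gpu = True         # stand-in for self._use_gpu

new_workers = min(available.get("CPU", 0), max_workers)
if use_gpu:
    new_workers = min(available.get("GPU", 0), new_workers)

print(int(new_workers))  # number of remote workers that would be created
```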
2,394
def setmonitor(self, enable=True):
    # type: (bool) -> bool
    # We must reset the monitor cache
    if enable:
        res = self.setmode('monitor')
    else:
        res = self.setmode('managed')
    if not res:
        log_runtime.error("Npcap WlanHelper returned with an error code !")
    self.cache_mode = None
    tmp = self.cache_mode = self.ismonitor()
    return tmp if enable else (not tmp)
Alias for setmode('monitor') or setmode('managed').
Only available with Npcap.
9
50
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : 
```Python
def setmonitor(self, enable=True):
    # type: (bool) -> bool
    # We must reset the monitor cache
    if enable:
        res = self.setmode('monitor')
    else:
        res = self.setmode('managed')
    if not res:
        log_runtime.error("Npcap WlanHelper returned with an error code !")
    self.cache_mode = None
    tmp = self.cache_mode = self.ismonitor()
    return tmp if enable else (not tmp)
```
###Assistant : Alias for setmode('monitor') or setmode('managed').
Only available with Npcap.
2,395
def _rotated_rect_with_max_area(h, w, angle):
    angle = math.radians(angle)
    width_is_longer = w >= h
    side_long, side_short = (w, h) if width_is_longer else (h, w)

    # since the solutions for angle, -angle and 180-angle are all the same,
    # it is sufficient to look at the first quadrant and the absolute values of sin,cos:
    sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
    if side_short <= 2.0 * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10:
        # half constrained case: two crop corners touch the longer side,
        # the other two corners are on the mid-line parallel to the longer line
        x = 0.5 * side_short
        wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
    else:
        # fully constrained case: crop touches all 4 sides
        cos_2a = cos_a * cos_a - sin_a * sin_a
        wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a

    return dict(
        x_min=max(0, int(w / 2 - wr / 2)),
        x_max=min(w, int(w / 2 + wr / 2)),
        y_min=max(0, int(h / 2 - hr / 2)),
        y_max=min(h, int(h / 2 + hr / 2)),
    )
Given a rectangle of size wxh that has been rotated by 'angle' (in degrees), computes the width and height of the largest possible axis-aligned rectangle (maximal area) within the rotated rectangle. Code from: https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
34
195
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : 
```Python
def _rotated_rect_with_max_area(h, w, angle):
    angle = math.radians(angle)
    width_is_longer = w >= h
    side_long, side_short = (w, h) if width_is_longer else (h, w)

    # since the solutions for angle, -angle and 180-angle are all the same,
    # it is sufficient to look at the first quadrant and the absolute values of sin,cos:
    sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
    if side_short <= 2.0 * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10:
        # half constrained case: two crop corners touch the longer side,
        # the other two corners are on the mid-line parallel to the longer line
        x = 0.5 * side_short
        wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
    else:
        # fully constrained case: crop touches all 4 sides
        cos_2a = cos_a * cos_a - sin_a * sin_a
        wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a

    return dict(
        x_min=max(0, int(w / 2 - wr / 2)),
        x_max=min(w, int(w / 2 + wr / 2)),
        y_min=max(0, int(h / 2 - hr / 2)),
        y_max=min(h, int(h / 2 + hr / 2)),
    )
```
###Assistant : Given a rectangle of size wxh that has been rotated by 'angle' (in degrees), computes the width and height of the largest possible axis-aligned rectangle (maximal area) within the rotated rectangle.

Code from: https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
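A hedged usage sketch for the helper above: given an image size and rotation angle, it yields pixel bounds for the largest axis-aligned crop with no rotation borders. It assumes the function above is in scope (in practice it is a method on a rotation transform, so the exact import path may differ), and the image size and angle are arbitrary examples.
```Python
# Largest border-free crop for a 480x640 image rotated by 30 degrees
# (assumes _rotated_rect_with_max_area, defined above, is available in scope).
crop = _rotated_rect_with_max_area(h=480, w=640, angle=30)
print(crop)  # {'x_min': ..., 'x_max': ..., 'y_min': ..., 'y_max': ...}

# The crop box would then be applied to the rotated image roughly as:
# cropped = rotated[crop["y_min"]:crop["y_max"], crop["x_min"]:crop["x_max"]]
```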
2,396
def using(_other, **kwargs):
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)

    if _other is this:
Callback that processes the match with a different lexer. The keyword arguments are forwarded to the lexer, except `state` which is handled separately. `state` specifies the state that the new lexer will start in, and can be an enumerable such as ('root', 'inline', 'string') or a simple string which is assumed to be on top of the root state. Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
70
29
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : 
```Python
def using(_other, **kwargs):
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)

    if _other is this:
```
###Assistant : Callback that processes the match with a different lexer.

The keyword arguments are forwarded to the lexer, except `state` which is handled separately.

`state` specifies the state that the new lexer will start in, and can be an enumerable such as ('root', 'inline', 'string') or a simple string which is assumed to be on top of the root state.

Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
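The `using` callback is normally consumed inside a `RegexLexer` token table, often together with `bygroups`; the sketch below uses an invented mini-language that re-lexes the body of a `raw { ... }` block with the same lexer, starting from the 'root' state as described above, so the grammar itself is an assumption for illustration only.
```Python
from pygments.lexer import RegexLexer, bygroups, using, this
from pygments.token import Comment, Keyword, Punctuation, Text


class OuterLexer(RegexLexer):
    """Toy lexer showing `using(this, state=...)`; the grammar is made up."""
    name = 'Outer'
    tokens = {
        'root': [
            # Hand the contents of a `raw { ... }` block back to this same
            # lexer, starting from the 'root' state.
            (r'(raw)(\s*)(\{)([^}]*)(\})',
             bygroups(Keyword, Text, Punctuation,
                      using(this, state='root'), Punctuation)),
            (r'#[^\n]*', Comment),
            (r'\s+', Text),
            (r'[^\s#]+', Text),
        ],
    }
```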
2,397
def test_unset_document_storage_path(self):
    self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)

    bulk_edit.set_storage_path(
        [self.doc1.id],
        self.sp1.id,
    )

    self.assertEqual(Document.objects.filter(storage_path=None).count(), 4)

    bulk_edit.set_storage_path(
        [self.doc1.id],
        None,
    )

    self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)

    self.async_task.assert_called()
    args, kwargs = self.async_task.call_args

    self.assertCountEqual(kwargs["document_ids"], [self.doc1.id])
GIVEN:
    - 4 documents without a defined storage path
    - 1 document with a defined storage path
WHEN:
    - Bulk edit called to remove storage path from 1 document
THEN:
    - Single document storage path removed
34
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : 
```Python
def test_unset_document_storage_path(self):
    self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)

    bulk_edit.set_storage_path(
        [self.doc1.id],
        self.sp1.id,
    )

    self.assertEqual(Document.objects.filter(storage_path=None).count(), 4)

    bulk_edit.set_storage_path(
        [self.doc1.id],
        None,
    )

    self.assertEqual(Document.objects.filter(storage_path=None).count(), 5)

    self.async_task.assert_called()
    args, kwargs = self.async_task.call_args

    self.assertCountEqual(kwargs["document_ids"], [self.doc1.id])
```
###Assistant : GIVEN:
    - 4 documents without a defined storage path
    - 1 document with a defined storage path
WHEN:
    - Bulk edit called to remove storage path from 1 document
THEN:
    - Single document storage path removed
2,398
def test_with_spinner(self):
    function_with_spinner()
    self.assertFalse(self.forward_msg_queue.is_empty())
If the show_spinner flag is set, there should be one element in the report queue.
15
4
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : 
```Python
def test_with_spinner(self):
    function_with_spinner()
    self.assertFalse(self.forward_msg_queue.is_empty())
```
###Assistant : If the show_spinner flag is set, there should be one element in the report queue.
2,399
def test_update_device_too_long_display_name(self) -> None:
    # Set initial display name.
    update = {"display_name": "new display"}
    self.get_success(
        self.handler.update_device(
            self.other_user, self.other_user_device_id, update
        )
    )

    # Request to update a device display name with a new value that is longer than allowed.
    update = {
        "display_name": "a" * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1)
    }

    channel = self.make_request(
        "PUT",
        self.url,
        access_token=self.admin_user_tok,
        content=update,
    )

    self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
    self.assertEqual(Codes.TOO_LARGE, channel.json_body["errcode"])

    # Ensure the display name was not updated.
    channel = self.make_request(
        "GET",
        self.url,
        access_token=self.admin_user_tok,
    )

    self.assertEqual(200, channel.code, msg=channel.json_body)
    self.assertEqual("new display", channel.json_body["display_name"])
Update a device with a display name that is invalid (too long).
12
82
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : 
```Python
def test_update_device_too_long_display_name(self) -> None:
    # Set initial display name.
    update = {"display_name": "new display"}
    self.get_success(
        self.handler.update_device(
            self.other_user, self.other_user_device_id, update
        )
    )

    # Request to update a device display name with a new value that is longer than allowed.
    update = {
        "display_name": "a" * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1)
    }

    channel = self.make_request(
        "PUT",
        self.url,
        access_token=self.admin_user_tok,
        content=update,
    )

    self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
    self.assertEqual(Codes.TOO_LARGE, channel.json_body["errcode"])

    # Ensure the display name was not updated.
    channel = self.make_request(
        "GET",
        self.url,
        access_token=self.admin_user_tok,
    )

    self.assertEqual(200, channel.code, msg=channel.json_body)
    self.assertEqual("new display", channel.json_body["display_name"])
```
###Assistant : Update a device with a display name that is invalid (too long).