Dataset columns (viewer stats: type, min, max):

  Unnamed: 0   int64    0 to 2.93k
  code         string   lengths 101 to 62.2k
  docs         string   lengths 51 to 10.7k
  doc_len      int64    4 to 1.74k
  words        int64    4 to 4.82k
  lang         string   1 class
  prompt       string   lengths 320 to 71.2k

Each row below lists its index, the code and docs fields, and the doc_len / words / lang metadata. The prompt column wraps each row's code and docs in a fixed instruction template ('###User : Below is a Python method which does a task. Create a documentation for the below code : <code> ###Assistant : <docs>').
Row 1,800 (lang: Python, doc_len: 9, words: 170)

code:

```Python
def enter_positions(self) -> int:
    trades_created = 0

    whitelist = copy.deepcopy(self.active_pair_whitelist)
    if not whitelist:
        logger.info("Active pair whitelist is empty.")
        return trades_created
    # Remove pairs for currently opened trades from the whitelist
    for trade in Trade.get_open_trades():
        if trade.pair in whitelist:
            whitelist.remove(trade.pair)
            logger.debug('Ignoring %s in pair whitelist', trade.pair)

    if not whitelist:
        logger.info("No currency pair in active pair whitelist, "
                    "but checking to exit open trades.")
        return trades_created
    if PairLocks.is_global_lock(side='*'):
        # This only checks for total locks (both sides).
        # per-side locks will be evaluated by `is_pair_locked` within create_trade,
        # once the direction for the trade is clear.
        lock = PairLocks.get_pair_longest_lock('*')
        if lock:
            self.log_once(f"Global pairlock active until "
                          f"{lock.lock_end_time.strftime(constants.DATETIME_PRINT_FORMAT)}. "
                          f"Not creating new trades, reason: {lock.reason}.", logger.info)
        else:
            self.log_once("Global pairlock active. Not creating new trades.", logger.info)
        return trades_created
    # Create entity and execute trade for each pair from whitelist
    for pair in whitelist:
        try:
            trades_created += self.create_trade(pair)
        except DependencyException as exception:
            logger.warning('Unable to create trade for %s: %s', pair, exception)

    if not trades_created:
        logger.debug("Found no enter signals for whitelisted currencies. Trying again...")

    return trades_created
```

docs: Tries to execute entry orders for new trades (positions)
Row 1,801 (lang: Python, doc_len: 32, words: 22)

code (the f-string bodies were stripped in the dump; they are restored here from this row's docs field, with line-break placement inferred):

```Python
def validate_js_path(registered_paths, package_name, path_in_package_dist):
    if package_name not in registered_paths:
        raise exceptions.DependencyException(
            f'Error loading dependency. "{package_name}" is not a registered library. '
            f"Registered libraries are: {list(registered_paths.keys())}"
        )

    if path_in_package_dist not in registered_paths[package_name]:
        raise exceptions.DependencyException(
            f'"{package_name}" is registered but the path requested is not valid. '
            f'The path requested: "{path_in_package_dist}" '
            f"List of registered paths: {registered_paths}"
        )
```

docs: Error loading dependency. "{package_name}" is not a registered library. Registered libraries are: {list(registered_paths.keys())} "{package_name}" is registered but the path requested is not valid. The path requested: "{path_in_package_dist}" List of registered paths: {registered_paths}
Row 1,802 (lang: Python, doc_len: 27, words: 36)

code:

```Python
def get_all_node_ids() -> List[Tuple[str, str]]:
    node_ids = []
    # Sort on NodeID to ensure the ordering is deterministic across the cluster.
    for node in sorted(ray.nodes(), key=lambda entry: entry["NodeID"]):
        # print(node)
        if node["Alive"]:
            node_ids.append((node["NodeID"], node["NodeName"]))

    return node_ids
```

docs: Get IDs for all live nodes in the cluster. Returns a list of (node_id: str, ip_address: str). The node_id can be passed into the Ray SchedulingPolicy API.
Row 1,803 (lang: Python, doc_len: 15, words: 70)

code:

```Python
def _ignore_comments(self, block):
    comment_spans = False
    while True:
        comment_start = block.find(b"#")  # look for next comment
        if comment_start == -1:  # no comment found
            break
        comment_end = self._find_comment_end(block, comment_start)
        if comment_end != -1:  # comment ends in this block
            block = (
                block[:comment_start] + block[comment_end + 1 :]
            )  # delete comment
        else:  # last comment continues to next block(s)
            block = block[:comment_start]
            comment_spans = True
            break
    return block, comment_spans
```

docs: Deletes comments from block. If comment does not end in this block, raises a flag.
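The method above depends on a PIL-internal helper, `self._find_comment_end`, that is not shown in this row. A minimal standalone sketch of the same block-wise stripping, assuming comments run to end-of-line as in PPM headers (`find_comment_end` is a hypothetical stand-in for the real helper):

```Python
def find_comment_end(block: bytes, start: int) -> int:
    # Stand-in: a header comment runs to the end of the line.
    return block.find(b"\n", start)  # -1 if the comment continues into the next block

def ignore_comments(block: bytes) -> tuple[bytes, bool]:
    comment_spans = False
    while True:
        comment_start = block.find(b"#")
        if comment_start == -1:  # no comment in this block
            break
        comment_end = find_comment_end(block, comment_start)
        if comment_end != -1:  # comment ends here: splice it out
            block = block[:comment_start] + block[comment_end + 1:]
        else:  # comment is cut off by the block boundary
            block = block[:comment_start]
            comment_spans = True
            break
    return block, comment_spans

print(ignore_comments(b"P6 # a comment\n640 480\n255\n"))    # (b'P6 640 480\n255\n', False)
print(ignore_comments(b"P6 640 480\n255 # comment cut off"))  # (b'P6 640 480\n255 ', True)
```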
Row 1,804 (lang: Python, doc_len: 11, words: 9)

code (the row is truncated after the first assertion):

```Python
def test_torch_auto_gpu_to_cpu(ray_start_4_cpus_2_gpus):
    num_workers = 2
    assert os.environ["CUDA_VISIBLE_DEVICES"] == ""
```

docs: Tests if GPU tensors are auto converted to CPU on driver.
Row 1,805 (lang: Python, doc_len: 9, words: 60)

code:

```Python
def formatyear(self, theyear, width=3):
    v = []
    a = v.append
    width = max(width, 1)
    a('<table border="0" cellpadding="0" cellspacing="0" class="%s">' %
      self.cssclass_year)
    a('\n')
    a('<tr><th colspan="%d" class="%s">%s</th></tr>' % (
        width, self.cssclass_year_head, theyear))
    for i in range(January, January+12, width):
        # months in this row
        months = range(i, min(i+width, 13))
        a('<tr>')
        for m in months:
            a('<td>')
            a(self.formatmonth(theyear, m, withyear=False))
            a('</td>')
        a('</tr>')
    a('</table>')
    return ''.join(v)
```

docs: Return a formatted year as a table of tables.
Row 1,806 (lang: Python, doc_len: 6, words: 198)

code:

```Python
def predict(self, inputs):
    training = False
    user = inputs["user"]
    input_seq = inputs["input_seq"]
    candidate = inputs["candidate"]

    mask = tf.expand_dims(tf.cast(tf.not_equal(input_seq, 0), tf.float32), -1)
    seq_embeddings, positional_embeddings = self.embedding(input_seq)  # (1, s, h)

    u0_latent = self.user_embedding_layer(user)
    u0_latent = u0_latent * (self.user_embedding_dim ** 0.5)  # (1, 1, h)
    u0_latent = tf.squeeze(u0_latent, axis=0)  # (1, h)
    test_user_emb = tf.tile(u0_latent, [1 + self.num_neg_test, 1])  # (101, h)

    u_latent = self.user_embedding_layer(user)
    u_latent = u_latent * (self.user_embedding_dim ** 0.5)  # (b, 1, h)
    u_latent = tf.tile(u_latent, [1, tf.shape(input_seq)[1], 1])  # (b, s, h)

    seq_embeddings = tf.reshape(
        tf.concat([seq_embeddings, u_latent], 2),
        [tf.shape(input_seq)[0], -1, self.hidden_units],
    )
    seq_embeddings += positional_embeddings  # (b, s, h1 + h2)

    seq_embeddings *= mask
    seq_attention = seq_embeddings
    seq_attention = self.encoder(seq_attention, training, mask)
    seq_attention = self.layer_normalization(seq_attention)  # (b, s, h1+h2)
    seq_emb = tf.reshape(
        seq_attention,
        [tf.shape(input_seq)[0] * self.seq_max_len, self.hidden_units],
    )  # (b*s1, h1+h2)

    candidate_emb = self.item_embedding_layer(candidate)  # (b, s2, h2)
    candidate_emb = tf.squeeze(candidate_emb, axis=0)  # (s2, h2)
    candidate_emb = tf.reshape(
        tf.concat([candidate_emb, test_user_emb], 1), [-1, self.hidden_units]
    )  # (b*s2, h1+h2)
    candidate_emb = tf.transpose(candidate_emb, perm=[1, 0])  # (h1+h2, b*s2)

    test_logits = tf.matmul(seq_emb, candidate_emb)  # (b*s1, b*s2)

    test_logits = tf.reshape(
        test_logits,
        [tf.shape(input_seq)[0], self.seq_max_len, 1 + self.num_neg_test],
    )  # (1, s, 101)
    test_logits = test_logits[:, -1, :]  # (1, 101)

    return test_logits
```

docs: Model prediction for candidate (negative) items
Row 1,807 (lang: Python, doc_len: 44, words: 36)

code:

```Python
def tobitmap(self, name="image"):
    self.load()
    if self.mode != "1":
        msg = "not a bitmap"
        raise ValueError(msg)
    data = self.tobytes("xbm")
    return b"".join(
        [
            f"#define {name}_width {self.size[0]}\n".encode("ascii"),
            f"#define {name}_height {self.size[1]}\n".encode("ascii"),
            f"static char {name}_bits[] = {{\n".encode("ascii"),
            data,
            b"};",
        ]
    )
```

docs:

    Returns the image converted to an X11 bitmap.

    .. note:: This method only works for mode "1" images.

    :param name: The name prefix to use for the bitmap variables.
    :returns: A string containing an X11 bitmap.
    :raises ValueError: If the mode is not "1"
Row 1,808 (lang: Python, doc_len: 305, words: 102)

code:

```Python
def add_edges_from(self, ebunch_to_add, **attr):
    for e in ebunch_to_add:
        ne = len(e)
        if ne == 3:
            u, v, dd = e
        elif ne == 2:
            u, v = e
            dd = {}
        else:
            raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.")
        if u not in self._succ:
            if u is None:
                raise ValueError("None cannot be a node")
            self._succ[u] = self.adjlist_inner_dict_factory()
            self._pred[u] = self.adjlist_inner_dict_factory()
            self._node[u] = self.node_attr_dict_factory()
        if v not in self._succ:
            if v is None:
                raise ValueError("None cannot be a node")
            self._succ[v] = self.adjlist_inner_dict_factory()
            self._pred[v] = self.adjlist_inner_dict_factory()
            self._node[v] = self.node_attr_dict_factory()
        datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
        datadict.update(attr)
        datadict.update(dd)
        self._succ[u][v] = datadict
        self._pred[v][u] = datadict
```

docs:

    Add all the edges in ebunch_to_add.

    Parameters
    ----------
    ebunch_to_add : container of edges
        Each edge given in the container will be added to the graph.
        The edges must be given as 2-tuples (u, v) or 3-tuples (u, v, d)
        where d is a dictionary containing edge data.
    attr : keyword arguments, optional
        Edge data (or labels or objects) can be assigned using keyword arguments.

    See Also
    --------
    add_edge : add a single edge
    add_weighted_edges_from : convenient way to add weighted edges

    Notes
    -----
    Adding the same edge twice has no effect but any edge data will be updated
    when each duplicate edge is added.

    Edge attributes specified in an ebunch take precedence over attributes
    specified via keyword arguments.

    When adding edges from an iterator over the graph you are changing,
    a `RuntimeError` can be raised with message:
    `RuntimeError: dictionary changed size during iteration`. This happens
    when the graph's underlying dictionary is modified during iteration.
    To avoid this error, evaluate the iterator into a separate object,
    e.g. by using `list(iterator_of_edges)`, and pass this object to
    `G.add_edges_from`.

    Examples
    --------
    >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.add_edges_from([(0, 1), (1, 2)])  # using a list of edge tuples
    >>> e = zip(range(0, 3), range(1, 4))
    >>> G.add_edges_from(e)  # Add the path graph 0-1-2-3

    Associate data to edges

    >>> G.add_edges_from([(1, 2), (2, 3)], weight=3)
    >>> G.add_edges_from([(3, 4), (1, 4)], label="WN2898")

    Evaluate an iterator over a graph if using it to modify the same graph

    >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4)])
    >>> # Grow graph by one new node, adding edges to all existing nodes.
    >>> # wrong way - will raise RuntimeError
    >>> # G.add_edges_from(((5, n) for n in G.nodes))
    >>> # right way - note that there will be no self-edge for node 5
    >>> G.add_edges_from(list((5, n) for n in G.nodes))
Row 1,809 (lang: Python, doc_len: 27, words: 16)

code:

```Python
def _select_device(self) -> None:
    if os.path.exists(plaidml.settings.user_settings):  # pylint:disable=no-member
        self._log("debug", "Setting PlaidML devices from user_settings")
    else:
        self._select_largest_gpu()
```

docs: If the plaidml user configuration settings exist, then set the default GPU from the settings file; otherwise set the GPU to be the one with the most VRAM.
Row 1,810 (lang: Python, doc_len: 15, words: 26)

code:

```Python
def test_readlink_non_canonical(file, source):
    intermediate = source.parent / "intermediate.lnk"
    intermediate.symlink_to(source)
    target = source.parent / "symlink.lnk"
    target.symlink_to(intermediate)
    try:
        result = file.readlink(path=target)
        assert result == str(intermediate)
    finally:
        intermediate.unlink()
        target.unlink()
```

docs: Test readlink where there are nested symlinks and canonicalize=False. Should resolve to the first symlink.
Row 1,811 (lang: Python, doc_len: 29, words: 49)

code:

```Python
def select_proxy(url, proxies):
    proxies = proxies or {}
    urlparts = urlparse(url)
    if urlparts.hostname is None:
        return proxies.get(urlparts.scheme, proxies.get("all"))

    proxy_keys = [
        urlparts.scheme + "://" + urlparts.hostname,
        urlparts.scheme,
        "all://" + urlparts.hostname,
        "all",
    ]
    proxy = None
    for proxy_key in proxy_keys:
        if proxy_key in proxies:
            proxy = proxies[proxy_key]
            break

    return proxy
```

docs: Select a proxy for the url, if applicable. :param url: The url being used for the request :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
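A quick usage sketch of the selection order, with illustrative proxy addresses; the most specific key (scheme + host) wins, then scheme, then the "all" fallback:

```Python
proxies = {
    "http://example.com": "http://10.0.0.1:8080",  # scheme + host
    "http": "http://10.0.0.2:8080",                # scheme only
    "all": "socks5://10.0.0.3:1080",               # catch-all
}

print(select_proxy("http://example.com/path", proxies))  # http://10.0.0.1:8080
print(select_proxy("http://other.org/", proxies))        # http://10.0.0.2:8080
print(select_proxy("ftp://other.org/", proxies))         # socks5://10.0.0.3:1080
```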
Row 1,812 (lang: Python, doc_len: 38, words: 31)

code:

```Python
def eof_received(self):
    try:
        if self._loop.get_debug():
            logger.debug("%r received EOF", self)

        self._wakeup_waiter(ConnectionResetError)

        if not self._in_handshake:
            keep_open = self._app_protocol.eof_received()
            if keep_open:
                logger.warning('returning true from eof_received() '
                               'has no effect when using ssl')
    finally:
        self._transport.close()
```

docs: Called when the other end of the low-level stream is half-closed. If this returns a false value (including None), the transport will close itself. If it returns a true value, closing the transport is up to the protocol.
Row 1,813 (lang: Python, doc_len: 12, words: 49)

code:

```Python
def url_result(url, ie=None, video_id=None, video_title=None, *,
               url_transparent=False, **kwargs):
    if ie is not None:
        kwargs['ie_key'] = ie if isinstance(ie, str) else ie.ie_key()
    if video_id is not None:
        kwargs['id'] = video_id
    if video_title is not None:
        kwargs['title'] = video_title
    return {
        **kwargs,
        '_type': 'url_transparent' if url_transparent else 'url',
        'url': url,
    }
```

docs: Returns a URL that points to a page that should be processed
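In yt-dlp this is an `InfoExtractor` method; called standalone with hypothetical values, it would return a plain dict:

```Python
result = url_result(
    "https://example.com/watch?v=abc123",
    ie="Youtube",          # hypothetical extractor key; a string is used as-is
    video_id="abc123",
    video_title="A title",
)
# {'ie_key': 'Youtube', 'id': 'abc123', 'title': 'A title',
#  '_type': 'url', 'url': 'https://example.com/watch?v=abc123'}
```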
Row 1,814 (lang: Python, doc_len: 29, words: 48)

code (the row is truncated after the second `Normalize` call):

```Python
def _shade_colors(color, normals, lightsource=None):
    if lightsource is None:
        # chosen for backwards-compatibility
        lightsource = mcolors.LightSource(azdeg=225, altdeg=19.4712)

    with np.errstate(invalid="ignore"):
        shade = ((normals / np.linalg.norm(normals, axis=1, keepdims=True))
                 @ lightsource.direction)
    mask = ~np.isnan(shade)

    if mask.any():
        # convert dot product to allowed shading fractions
        in_norm = mcolors.Normalize(-1, 1)
        out_norm = mcolors.Normalize(0.3, 1).inverse
```

docs: Shade *color* using normal vectors given by *normals*, assuming a *lightsource* (using default position if not given). *color* can also be an array of the same length as *normals*.
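The row ends mid-function, but the dot-product step it contains can be run standalone. A sketch assuming matplotlib's `LightSource.direction` property is available:

```Python
import numpy as np
from matplotlib import colors as mcolors

# Two illustrative unit-ish face normals: one facing up, one facing sideways.
normals = np.array([[0.0, 0.0, 1.0],
                    [1.0, 0.0, 0.0]])
ls = mcolors.LightSource(azdeg=225, altdeg=19.4712)

with np.errstate(invalid="ignore"):
    # One dot product per face normal; values land in [-1, 1].
    shade = (normals / np.linalg.norm(normals, axis=1, keepdims=True)) @ ls.direction
print(shade)
```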
Row 1,815 (lang: Python, doc_len: 72, words: 10)

code:

```Python
def score(self, X, y, **fit_params):
    check_is_fitted(self)
    return self.estimator_.score(self.transform(X), y, **fit_params)
```

docs:

    Reduce X to the selected features and return the score of the estimator.

    Parameters
    ----------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples]
        The target values.
    **fit_params : dict
        Parameters to pass to the `score` method of the underlying estimator.

        .. versionadded:: 1.0

    Returns
    -------
    score : float
        Score of the underlying base estimator computed with the selected
        features returned by `rfe.transform(X)` and `y`.
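This is `RFE.score` from scikit-learn; a minimal usage sketch on synthetic data (exact score values will vary):

```Python
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, n_features=10,
                           n_informative=4, random_state=0)
rfe = RFE(LogisticRegression(max_iter=1000), n_features_to_select=4).fit(X, y)

# Internally equivalent to rfe.estimator_.score(rfe.transform(X), y)
print(rfe.score(X, y))
```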
Row 1,816 (lang: Python, doc_len: 17, words: 18)

code:

```Python
def reset(self, pos):
    self.value = pos
    self.velocity = 0
    if self.history:
        val = self.history[-1][1]
        self.history = [(time(), val)]
```

docs: (internal) Reset the value and the velocity to the `pos`. Mostly used when the bounds are checked.
Row 1,817 (lang: Python, doc_len: 10, words: 19)

code (the trailing `parametrize` decorator belongs to the next, unshown test):

```Python
def test_from_fan_speed(fan_speed, expected_result):
    assert _from_fan_speed(fan_speed) == expected_result


@pytest.mark.parametrize(
    "percentage, expected_result",
    [
        (1, 2),
        (100, 50),
        (50, 26),
    ],
)
```

docs: Test that we can convert fan speed to percentage value.
Row 1,818 (lang: Python, doc_len: 38, words: 9)

code (the trailing decorator belongs to the next, unshown fixture):

```Python
def task_runner():
    from sentry.testutils.helpers.task_runner import TaskRunner

    return TaskRunner


@pytest.fixture
```

docs: Context manager that ensures Celery tasks run directly inline where invoked. While this context manager is active any Celery tasks created will run immediately at the callsite rather than being sent to RabbitMQ and handled by a worker.
Row 1,819 (lang: Python, doc_len: 44, words: 9)

code:

```Python
def get_random_string(length, allowed_chars=RANDOM_STRING_CHARS):
    return "".join(secrets.choice(allowed_chars) for i in range(length))
```

docs:

    Return a securely generated random string.

    The bit length of the returned value can be calculated with the formula:
        log_2(len(allowed_chars)^length)

    For example, with default `allowed_chars` (26+26+10), this gives:
    * length: 12, bit length =~ 71 bits
    * length: 22, bit length =~ 131 bits
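A self-contained sketch with the default alphabet inlined (per the docstring it is the 62-character letters-plus-digits set), plus a check of the bit-length formula:

```Python
import math
import secrets
import string

RANDOM_STRING_CHARS = string.ascii_letters + string.digits  # 26+26+10 = 62 chars

def get_random_string(length, allowed_chars=RANDOM_STRING_CHARS):
    return "".join(secrets.choice(allowed_chars) for i in range(length))

print(get_random_string(12))                     # e.g. 'aZ3kQ9xLm2Bd'
print(12 * math.log2(len(RANDOM_STRING_CHARS)))  # ~71.45 bits
print(22 * math.log2(len(RANDOM_STRING_CHARS)))  # ~130.99 bits
```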
Row 1,820 (lang: Python, doc_len: 11, words: 17)

code:

```Python
def test_command_reply(tplaybook):
    tplaybook >> TEvent()
    tplaybook << TCommand()
    tplaybook >> tutils.reply()
    assert tplaybook
    assert tplaybook.actual[1] == tplaybook.actual[2].command
```

docs: CommandReplies can use relative offsets to point to the matching command.
Row 1,821 (lang: Python, doc_len: 47, words: 47)

code:

```Python
def _mirrored_cross_device_ops(all_reduce_alg, num_packs):
    if all_reduce_alg is None:
        return None
    mirrored_all_reduce_options = {
        "nccl": tf.distribute.NcclAllReduce,
        "hierarchical_copy": tf.distribute.HierarchicalCopyAllReduce,
    }
    if all_reduce_alg not in mirrored_all_reduce_options:
        raise ValueError(
            "When used with `mirrored`, valid values for all_reduce_alg are "
            "[`nccl`, `hierarchical_copy`]. Supplied value: {}".format(
                all_reduce_alg
            )
        )
    cross_device_ops_class = mirrored_all_reduce_options[all_reduce_alg]
    return cross_device_ops_class(num_packs=num_packs)
```

docs:

    Return a CrossDeviceOps based on all_reduce_alg and num_packs.

    Args:
      all_reduce_alg: a string specifying which cross device op to pick, or None.
      num_packs: an integer specifying number of packs for the cross device op.

    Returns:
      tf.distribute.CrossDeviceOps object or None.

    Raises:
      ValueError: if `all_reduce_alg` not in [None, "nccl", "hierarchical_copy"].
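A usage sketch wiring the returned op into a `tf.distribute.MirroredStrategy`; `hierarchical_copy` is shown since NCCL needs multiple GPUs at runtime:

```Python
import tensorflow as tf

# Pick an all-reduce implementation for multi-GPU gradient synchronization.
cross_device_ops = _mirrored_cross_device_ops("hierarchical_copy", num_packs=2)

strategy = tf.distribute.MirroredStrategy(cross_device_ops=cross_device_ops)
with strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    model.compile(optimizer="sgd", loss="mse")
```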
Row 1,822 (lang: Python, doc_len: 20, words: 11)

code:

```Python
def test_delete_queue(self, mock_sb_admin_client):
    hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id)
    hook.delete_queue(self.queue_name)
    expected_calls = [mock.call().__enter__().delete_queue(self.queue_name)]
    mock_sb_admin_client.assert_has_calls(expected_calls)
```

docs: Test delete-queue functionality by passing the queue name, asserting the call values, and mocking the Azure Service Bus function `delete_queue`.
Row 1,823 (lang: Python, doc_len: 23, words: 25)

code (the trailing import follows the method in the original class body):

```Python
def get_content_disposition(self):
    value = self.get('content-disposition')
    if value is None:
        return None
    c_d = _splitparam(value)[0].lower()
    return c_d

# I.e. def walk(self): ...
from email.iterators import walk
```

docs: Return the message's content-disposition if it exists, or None. The return values can be either 'inline', 'attachment' or None according to the rfc2183.
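This is `email.message.Message.get_content_disposition` from the standard library; a quick demonstration on a constructed message:

```Python
from email.message import EmailMessage

msg = EmailMessage()
msg["Content-Disposition"] = 'attachment; filename="report.pdf"'
print(msg.get_content_disposition())  # attachment

plain = EmailMessage()
print(plain.get_content_disposition())  # None (header absent)
```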
Row 1,824 (lang: Python, doc_len: 19, words: 51)

code:

```Python
def _get_or_create(self, s, name=None, dtype=None, broadcastable=None):
    # Defaults
    if name is None:
        name = s.name
    if dtype is None:
        dtype = 'floatX'
    if broadcastable is None:
        broadcastable = ()

    key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable)

    if key in self.cache:
        return self.cache[key]

    value = aet.tensor(name=name, dtype=dtype, shape=broadcastable)
    self.cache[key] = value
    return value
```

docs: Get the Aesara variable for a SymPy symbol from the cache, or create it if it does not exist.
Row 1,825 (lang: Python, doc_len: 46, words: 29)

code:

```Python
def defer(self, *fields):
    self._not_support_combined_queries("defer")
    if self._fields is not None:
        raise TypeError("Cannot call defer() after .values() or .values_list()")
    clone = self._chain()
    if fields == (None,):
        clone.query.clear_deferred_loading()
    else:
        clone.query.add_deferred_loading(fields)
    return clone
```

docs: Defer the loading of data for certain fields until they are accessed. Add the set of deferred fields to any existing set of deferred fields. The only exception to this is if None is passed in as the only parameter, in which case all deferrals are removed.
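A usage sketch against a hypothetical Django model named `Entry` (model and field names are illustrative):

```Python
# Assume: class Entry(models.Model) with headline, body, rating fields.
qs = Entry.objects.defer("body")  # 'body' loads lazily on first access
qs = qs.defer("rating")           # deferrals accumulate across calls

entry = qs.first()
print(entry.headline)             # already loaded
print(entry.body)                 # triggers an extra query now

qs = qs.defer(None)               # None as the only argument clears all deferrals
```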
Row 1,826 (lang: Python, doc_len: 14, words: 13)

code:

```Python
def match_seq(self, nodes, results=None):
    if len(nodes) != 1:
        return False
    return self.match(nodes[0], results)
```

docs: Does this pattern exactly match a sequence of nodes? Default implementation for non-wildcard patterns.
Row 1,827 (lang: Python, doc_len: 69, words: 10)

code:

```Python
def team_ids_with_membership(self) -> FrozenSet[int]:
    return frozenset(team.id for team in self._team_memberships.keys())
```

docs: Return the IDs of teams in which the user has actual membership. This represents the set of all teams for which `has_team_membership` returns true. Use that method where possible and use this property only when you need to iterate or query for all such teams. Compare to accessible_team_ids, which is equal to this property in the typical case but represents a superset of IDs in case of superuser access.
Row 1,828 (lang: Python, doc_len: 8, words: 99)

code:

```Python
def test_gumbel_softmax(self):
    for fw, sess in framework_iterator(frameworks=("tf2", "tf"), session=True):
        batch_size = 1000
        num_categories = 5
        input_space = Box(-1.0, 1.0, shape=(batch_size, num_categories))
        input_space.seed(42)

        # Batch of size=n and deterministic.
        inputs = input_space.sample()
        gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0)

        expected = softmax(inputs)
        # Sample n times, expect always mean value (deterministic draw).
        out = gumbel_softmax.deterministic_sample()
        check(out, expected)

        # Batch of size=n and non-deterministic -> expect roughly that
        # the max-likelihood (argmax) ints are output (most of the time).
        inputs = input_space.sample()
        gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0)
        expected_mean = np.mean(np.argmax(inputs, -1)).astype(np.float32)
        outs = gumbel_softmax.sample()
        if sess:
            outs = sess.run(outs)
        check(np.mean(np.argmax(outs, -1)), expected_mean, rtol=0.08)
```

docs: Tests the GumbelSoftmax ActionDistribution (tf + eager only).
Row 1,829 (lang: Python, doc_len: 30, words: 43)

code:

```Python
def get_confusion_matrix(gt_label, pred_label, num_classes):
    index = (gt_label * num_classes + pred_label).astype('int32')
    label_count = np.bincount(index)
    confusion_matrix = np.zeros((num_classes, num_classes))

    for i_label in range(num_classes):
        for i_pred_label in range(num_classes):
            cur_index = i_label * num_classes + i_pred_label
            if cur_index < len(label_count):
                confusion_matrix[i_label, i_pred_label] = label_count[cur_index]

    return confusion_matrix
```

docs:

    Calculate the confusion matrix by given label and pred

    :param gt_label: the ground truth label
    :param pred_label: the pred label
    :param num_classes: the number of classes
    :return: the confusion matrix
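A quick check of the function above with toy arrays; rows index the ground-truth class and columns the predicted class:

```Python
import numpy as np

gt   = np.array([0, 0, 1, 2, 2, 2])
pred = np.array([0, 1, 1, 2, 2, 0])

print(get_confusion_matrix(gt, pred, num_classes=3))
# [[1. 1. 0.]
#  [0. 1. 0.]
#  [1. 0. 2.]]
```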
Row 1,830 (lang: Python, doc_len: 417, words: 103)

code:

```Python
def make_pad_mask(lengths, xs=None, length_dim=-1):
    if length_dim == 0:
        raise ValueError('length_dim cannot be 0: {}'.format(length_dim))

    if not isinstance(lengths, list):
        lengths = lengths.tolist()
    bs = int(len(lengths))
    if xs is None:
        maxlen = int(max(lengths))
    else:
        maxlen = xs.size(length_dim)

    seq_range = torch.arange(0, maxlen, dtype=torch.int64)
    seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
    seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
    mask = seq_range_expand >= seq_length_expand

    if xs is not None:
        assert xs.size(0) == bs, (xs.size(0), bs)

        if length_dim < 0:
            length_dim = xs.dim() + length_dim
        # ind = (:, None, ..., None, :, , None, ..., None)
        ind = tuple(slice(None) if i in (0, length_dim) else None
                    for i in range(xs.dim()))
        mask = mask[ind].expand_as(xs).to(xs.device)
    return mask
```

docs:

    Make mask tensor containing indices of padded part.

    Args:
        lengths (LongTensor or List): Batch of lengths (B,).
        xs (Tensor, optional): The reference tensor.
            If set, masks will be the same shape as this tensor.
        length_dim (int, optional): Dimension indicator of the above tensor.
            See the example.

    Returns:
        Tensor: Mask tensor containing indices of padded part.
                dtype=torch.uint8 in PyTorch 1.2-
                dtype=torch.bool in PyTorch 1.2+ (including 1.2)

    Examples:
        With only lengths.

        >>> lengths = [5, 3, 2]
        >>> make_non_pad_mask(lengths)
        masks = [[0, 0, 0, 0 ,0],
                 [0, 0, 0, 1, 1],
                 [0, 0, 1, 1, 1]]

        With the reference tensor.

        >>> xs = torch.zeros((3, 2, 4))
        >>> make_pad_mask(lengths, xs)
        tensor([[[0, 0, 0, 0],
                 [0, 0, 0, 0]],
                [[0, 0, 0, 1],
                 [0, 0, 0, 1]],
                [[0, 0, 1, 1],
                 [0, 0, 1, 1]]], dtype=torch.uint8)
        >>> xs = torch.zeros((3, 2, 6))
        >>> make_pad_mask(lengths, xs)
        tensor([[[0, 0, 0, 0, 0, 1],
                 [0, 0, 0, 0, 0, 1]],
                [[0, 0, 0, 1, 1, 1],
                 [0, 0, 0, 1, 1, 1]],
                [[0, 0, 1, 1, 1, 1],
                 [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)

        With the reference tensor and dimension indicator.

        >>> xs = torch.zeros((3, 6, 6))
        >>> make_pad_mask(lengths, xs, 1)
        tensor([[[0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [1, 1, 1, 1, 1, 1]],
                [[0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1]],
                [[0, 0, 0, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1],
                 [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)
        >>> make_pad_mask(lengths, xs, 2)
        tensor([[[0, 0, 0, 0, 0, 1],
                 [0, 0, 0, 0, 0, 1],
                 [0, 0, 0, 0, 0, 1],
                 [0, 0, 0, 0, 0, 1],
                 [0, 0, 0, 0, 0, 1],
                 [0, 0, 0, 0, 0, 1]],
                [[0, 0, 0, 1, 1, 1],
                 [0, 0, 0, 1, 1, 1],
                 [0, 0, 0, 1, 1, 1],
                 [0, 0, 0, 1, 1, 1],
                 [0, 0, 0, 1, 1, 1],
                 [0, 0, 0, 1, 1, 1]],
                [[0, 0, 1, 1, 1, 1],
                 [0, 0, 1, 1, 1, 1],
                 [0, 0, 1, 1, 1, 1],
                 [0, 0, 1, 1, 1, 1],
                 [0, 0, 1, 1, 1, 1],
                 [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)
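A runnable check of the lengths-only case (requires PyTorch):

```Python
import torch

lengths = torch.tensor([5, 3, 2])
print(make_pad_mask(lengths).int())
# tensor([[0, 0, 0, 0, 0],
#         [0, 0, 0, 1, 1],
#         [0, 0, 1, 1, 1]], dtype=torch.int32)
```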
Row 1,831 (lang: Python, doc_len: 21, words: 58)

code (the trailing comment belongs to the next, unshown test):

```Python
def test_guess_content_type_from_filename(self) -> None:
    data, content_type = encode_multipart_formdata({"file": ("somefile", b"zulip!", None)})
    result = self.api_post(
        self.example_user("hamlet"), "/api/v1/user_uploads", data, content_type=content_type
    )
    self.assert_json_success(result)

    data, content_type = encode_multipart_formdata({"file": ("somefile.txt", b"zulip!", None)})
    result = self.api_post(
        self.example_user("hamlet"), "/api/v1/user_uploads", data, content_type=content_type
    )
    self.assert_json_success(result)

# This test will go through the code path for uploading files onto LOCAL storage
# when Zulip is in DEVELOPMENT mode.
```

docs: Test coverage for files without content-type in the metadata, in which case we try to guess the content-type from the filename.
Row 1,832 (lang: Python, doc_len: 53, words: 85)

code:

```Python
def get_member(name, members):
    # look first for a generic match - prepend lib and append .so
    expr = rf'lib{name}\.so'
    member = get_one_match(expr, members)
    if member:
        return member
    elif AIX_ABI == 64:
        expr = rf'lib{name}64\.so'
        member = get_one_match(expr, members)
    if member:
        return member
    # since an exact match with .so as suffix was not found
    # look for a versioned name
    # If a versioned name is not found, look for AIX legacy member name
    member = get_version(name, members)
    if member:
        return member
    else:
        return get_legacy(members)
```

docs: Return an archive member matching the request in name. Name is the library name without any prefix like lib, suffix like .so, or version number. Given a list of members find and return the most appropriate result. Priority is given to generic libXXX.so, then a versioned libXXX.so.a.b.c and finally, legacy AIX naming scheme.
Row 1,833 (lang: Python, doc_len: 62, words: 12)

code (the skip message was stripped in the dump; it is restored here from this row's docs field, with line-break placement inferred):

```Python
def check_original_docker_image():
    if not os.path.isfile('/.dockerenv') or os.environ.get('PYTHON_BASE_IMAGE') is None:
        raise pytest.skip(
            "Adding/removing a user as part of a test is very bad for host os "
            "(especially if the user already existed to begin with on the OS), "
            "therefore we check if we run inside the official docker container "
            "and only allow to run the test there."
        )
```

docs: Adding/removing a user as part of a test is very bad for host os (especially if the user already existed to begin with on the OS), therefore we check if we run inside the official docker container and only allow to run the test there. This is done by checking /.dockerenv file (always present inside container) and checking for PYTHON_BASE_IMAGE variable.
Row 1,834 (lang: Python, doc_len: 7, words: 253)

code:

```Python
def _useWizardInterface():
    if not conf.wizard:
        return

    logger.info("starting wizard interface")

    while not conf.url:
        message = "Please enter full target URL (-u): "
        conf.url = readInput(message, default=None)

    message = "%s data (--data) [Enter for None]: " % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST)
    conf.data = readInput(message, default=None)

    if not (any('=' in _ for _ in (conf.url, conf.data)) or '*' in conf.url):
        warnMsg = "no GET and/or %s parameter(s) found for testing " % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST)
        warnMsg += "(e.g. GET parameter 'id' in 'http://www.site.com/vuln.php?id=1'). "
        if not conf.crawlDepth and not conf.forms:
            warnMsg += "Will search for forms"
            conf.forms = True
        logger.warning(warnMsg)

    choice = None

    while choice is None or choice not in ("", "1", "2", "3"):
        message = "Injection difficulty (--level/--risk). Please choose:\n"
        message += "[1] Normal (default)\n[2] Medium\n[3] Hard"
        choice = readInput(message, default='1')

    if choice == '2':
        conf.risk = 2
        conf.level = 3
    elif choice == '3':
        conf.risk = 3
        conf.level = 5
    else:
        conf.risk = 1
        conf.level = 1

    if not conf.getAll:
        choice = None

        while choice is None or choice not in ("", "1", "2", "3"):
            message = "Enumeration (--banner/--current-user/etc). Please choose:\n"
            message += "[1] Basic (default)\n[2] Intermediate\n[3] All"
            choice = readInput(message, default='1')

        if choice == '2':
            options = WIZARD.INTERMEDIATE
        elif choice == '3':
            options = WIZARD.ALL
        else:
            options = WIZARD.BASIC

        for _ in options:
            conf.__setitem__(_, True)

    logger.debug("muting sqlmap.. it will do the magic for you")
    conf.verbose = 0

    conf.batch = True
    conf.threads = 4

    dataToStdout("\nsqlmap is running, please wait..\n\n")

    kb.wizardMode = True
```

docs: Presents simple wizard interface for beginner users
1,835
async def run_migrations_online() -> None: engine = await db_interface.engine() versions_dir = context.get_x_argument(as_dictionary=True).get("versions_dir", None) if versions_dir is None: # if version dir is not explicitly provided determine versions location from dialect dialect = get_dialect(engine=engine) if dialect.name == "postgresql": versions_dir = Path(context.script.dir / "postgresql") elif dialect.name == "sqlite": versions_dir = Path(context.script.dir / "sqlite") else: raise ValueError(f"No versions dir exists for dialect: {dialect.name}") context.script.version_locations = [versions_dir]
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
21
63
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def run_migrations_online() -> None: engine = await db_interface.engine() versions_dir = context.get_x_argument(as_dictionary=True).get("versions_dir", None) if versions_dir is None: # if version dir is not explicitly provided determine versions location from dialect dialect = get_dialect(engine=engine) if dialect.name == "postgresql": versions_dir = Path(context.script.dir / "postgresql") elif dialect.name == "sqlite": versions_dir = Path(context.script.dir / "sqlite") else: raise ValueError(f"No versions dir exists for dialect: {dialect.name}") context.script.version_locations = [versions_dir] ``` ###Assistant : Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
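A hedged note on how the `versions_dir` x-argument consumed by `run_migrations_online` is supplied from the command line; the path below is illustrative:

```Python
# Alembic forwards "-x key=value" pairs to env.py, where they surface via
# context.get_x_argument(as_dictionary=True):
#
#   alembic -x versions_dir=/path/to/custom/versions upgrade head
```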
1,836
def get_mop_query(doctype, txt, searchfield, start, page_len, filters): return frappe.db.sql( , {"parent": filters.get("parent"), "start": start, "page_len": page_len, "txt": "%%%s%%" % txt}, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
select mode_of_payment from `tabPayment Order Reference` where parent = %(parent)s and mode_of_payment like %(txt)s limit %(start)s, %(page_len)s
17
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_mop_query(doctype, txt, searchfield, start, page_len, filters): return frappe.db.sql( , {"parent": filters.get("parent"), "start": start, "page_len": page_len, "txt": "%%%s%%" % txt}, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs ``` ###Assistant : select mode_of_payment from `tabPayment Order Reference` where parent = %(parent)s and mode_of_payment like %(txt)s limit %(start)s, %(page_len)s
1,837
def test_doc_pyplot_summary(): pyplot_docs = Path(__file__).parent / '../../../doc/api/pyplot_summary.rst' if not pyplot_docs.exists(): pytest.skip("Documentation sources not available") lines = pyplot_docs.read_text() m = re.search(r':nosignatures:\n\n(.*?)\n\n', lines, re.DOTALL) doc_functions = set(line.strip() for line in m.group(1).split('\n')) plot_commands = set(plt.get_plot_commands()) missing = plot_commands.difference(doc_functions) if missing: raise AssertionError( f"The following pyplot functions are not listed in the " f"documentation. Please add them to doc/api/pyplot_summary.rst: " f"{missing!r}") extra = doc_functions.difference(plot_commands) if extra: raise AssertionError( f"The following functions are listed in the pyplot documentation, " f"but they do not exist in pyplot. " f"Please remove them from doc/api/pyplot_summary.rst: {extra!r}")
Test that pyplot_summary lists all the plot functions.
8
88
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_doc_pyplot_summary(): pyplot_docs = Path(__file__).parent / '../../../doc/api/pyplot_summary.rst' if not pyplot_docs.exists(): pytest.skip("Documentation sources not available") lines = pyplot_docs.read_text() m = re.search(r':nosignatures:\n\n(.*?)\n\n', lines, re.DOTALL) doc_functions = set(line.strip() for line in m.group(1).split('\n')) plot_commands = set(plt.get_plot_commands()) missing = plot_commands.difference(doc_functions) if missing: raise AssertionError( f"The following pyplot functions are not listed in the " f"documentation. Please add them to doc/api/pyplot_summary.rst: " f"{missing!r}") extra = doc_functions.difference(plot_commands) if extra: raise AssertionError( f"The following functions are listed in the pyplot documentation, " f"but they do not exist in pyplot. " f"Please remove them from doc/api/pyplot_summary.rst: {extra!r}") ``` ###Assistant : Test that pyplot_summary lists all the plot functions.
1,838
def validate_child_on_delete(row, parent): if parent.doctype == "Sales Order": if flt(row.delivered_qty): frappe.throw( _("Row #{0}: Cannot delete item {1} which has already been delivered").format( row.idx, row.item_code ) ) if flt(row.work_order_qty): frappe.throw( _("Row #{0}: Cannot delete item {1} which has work order assigned to it.").format( row.idx, row.item_code ) ) if flt(row.ordered_qty): frappe.throw( _("Row #{0}: Cannot delete item {1} which is assigned to customer's purchase order.").format( row.idx, row.item_code ) ) if parent.doctype == "Purchase Order" and flt(row.received_qty): frappe.throw( _("Row #{0}: Cannot delete item {1} which has already been received").format( row.idx, row.item_code ) ) if flt(row.billed_amt): frappe.throw( _("Row #{0}: Cannot delete item {1} which has already been billed.").format( row.idx, row.item_code ) )
Check if partially transacted item (row) is being deleted.
9
107
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def validate_child_on_delete(row, parent): if parent.doctype == "Sales Order": if flt(row.delivered_qty): frappe.throw( _("Row #{0}: Cannot delete item {1} which has already been delivered").format( row.idx, row.item_code ) ) if flt(row.work_order_qty): frappe.throw( _("Row #{0}: Cannot delete item {1} which has work order assigned to it.").format( row.idx, row.item_code ) ) if flt(row.ordered_qty): frappe.throw( _("Row #{0}: Cannot delete item {1} which is assigned to customer's purchase order.").format( row.idx, row.item_code ) ) if parent.doctype == "Purchase Order" and flt(row.received_qty): frappe.throw( _("Row #{0}: Cannot delete item {1} which has already been received").format( row.idx, row.item_code ) ) if flt(row.billed_amt): frappe.throw( _("Row #{0}: Cannot delete item {1} which has already been billed.").format( row.idx, row.item_code ) ) ``` ###Assistant : Check if partially transacted item (row) is being deleted.
1,839
def _get_items(self): postprocess_items = {} # Debug Landmarks if (hasattr(self._args, 'debug_landmarks') and self._args.debug_landmarks): postprocess_items["DebugLandmarks"] = None # Face Filter post processing if ((hasattr(self._args, "filter") and self._args.filter is not None) or (hasattr(self._args, "nfilter") and self._args.nfilter is not None)): if hasattr(self._args, "detector"): detector = self._args.detector.replace("-", "_").lower() else: detector = "cv2_dnn" if hasattr(self._args, "aligner"): aligner = self._args.aligner.replace("-", "_").lower() else: aligner = "cv2_dnn" face_filter = dict(detector=detector, aligner=aligner, multiprocess=not self._args.singleprocess) filter_lists = {} if hasattr(self._args, "ref_threshold"): face_filter["ref_threshold"] = self._args.ref_threshold for filter_type in ('filter', 'nfilter'): filter_args = getattr(self._args, filter_type, None) filter_args = None if not filter_args else filter_args filter_lists[filter_type] = filter_args face_filter["filter_lists"] = filter_lists postprocess_items["FaceFilter"] = {"kwargs": face_filter} logger.debug("Postprocess Items: %s", postprocess_items) return postprocess_items
Check the passed-in command line arguments for requested actions. For any requested actions, add the item to the actions list along with any relevant arguments and keyword arguments. Returns ------- dict The name of the action to be performed as the key. Any action-specific arguments and keyword arguments as the value.
53
108
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_items(self): postprocess_items = {} # Debug Landmarks if (hasattr(self._args, 'debug_landmarks') and self._args.debug_landmarks): postprocess_items["DebugLandmarks"] = None # Face Filter post processing if ((hasattr(self._args, "filter") and self._args.filter is not None) or (hasattr(self._args, "nfilter") and self._args.nfilter is not None)): if hasattr(self._args, "detector"): detector = self._args.detector.replace("-", "_").lower() else: detector = "cv2_dnn" if hasattr(self._args, "aligner"): aligner = self._args.aligner.replace("-", "_").lower() else: aligner = "cv2_dnn" face_filter = dict(detector=detector, aligner=aligner, multiprocess=not self._args.singleprocess) filter_lists = {} if hasattr(self._args, "ref_threshold"): face_filter["ref_threshold"] = self._args.ref_threshold for filter_type in ('filter', 'nfilter'): filter_args = getattr(self._args, filter_type, None) filter_args = None if not filter_args else filter_args filter_lists[filter_type] = filter_args face_filter["filter_lists"] = filter_lists postprocess_items["FaceFilter"] = {"kwargs": face_filter} logger.debug("Postprocess Items: %s", postprocess_items) return postprocess_items ``` ###Assistant : Check the passed-in command line arguments for requested actions. For any requested actions, add the item to the actions list along with any relevant arguments and keyword arguments. Returns ------- dict The name of the action to be performed as the key. Any action-specific arguments and keyword arguments as the value.
1,840
def _collapse_addresses_internal(addresses): # First merge to_merge = list(addresses) subnets = {} while to_merge: net = to_merge.pop() supernet = net.supernet() existing = subnets.get(supernet) if existing is None: subnets[supernet] = net elif existing != net: # Merge consecutive subnets del subnets[supernet] to_merge.append(supernet) # Then iterate over resulting networks, skipping subsumed subnets last = None for net in sorted(subnets.values()): if last is not None: # Since they are sorted, last.network_address <= net.network_address # is a given. if last.broadcast_address >= net.broadcast_address: continue yield net last = net
Loops through the addresses, collapsing concurrent netblocks. Example: ip1 = IPv4Network('192.0.2.0/26') ip2 = IPv4Network('192.0.2.64/26') ip3 = IPv4Network('192.0.2.128/26') ip4 = IPv4Network('192.0.2.192/26') _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> [IPv4Network('192.0.2.0/24')] This shouldn't be called directly; it is called via collapse_addresses([]). Args: addresses: A list of IPv4Network's or IPv6Network's Returns: A list of IPv4Network's or IPv6Network's depending on what we were passed.
57
83
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _collapse_addresses_internal(addresses): # First merge to_merge = list(addresses) subnets = {} while to_merge: net = to_merge.pop() supernet = net.supernet() existing = subnets.get(supernet) if existing is None: subnets[supernet] = net elif existing != net: # Merge consecutive subnets del subnets[supernet] to_merge.append(supernet) # Then iterate over resulting networks, skipping subsumed subnets last = None for net in sorted(subnets.values()): if last is not None: # Since they are sorted, last.network_address <= net.network_address # is a given. if last.broadcast_address >= net.broadcast_address: continue yield net last = net ``` ###Assistant : Loops through the addresses, collapsing concurrent netblocks. Example: ip1 = IPv4Network('192.0.2.0/26') ip2 = IPv4Network('192.0.2.64/26') ip3 = IPv4Network('192.0.2.128/26') ip4 = IPv4Network('192.0.2.192/26') _collapse_addresses_internal([ip1, ip2, ip3, ip4]) -> [IPv4Network('192.0.2.0/24')] This shouldn't be called directly; it is called via collapse_addresses([]). Args: addresses: A list of IPv4Network's or IPv6Network's Returns: A list of IPv4Network's or IPv6Network's depending on what we were passed.
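A minimal usage sketch via the public wrapper `ipaddress.collapse_addresses` from the standard library, which drives `_collapse_addresses_internal` and reproduces the docstring's example:

```Python
import ipaddress

# Four consecutive /26 blocks that together tile 192.0.2.0/24.
nets = [ipaddress.IPv4Network(f"192.0.2.{i * 64}/26") for i in range(4)]

# collapse_addresses() is the public entry point for the internal helper.
print(list(ipaddress.collapse_addresses(nets)))
# [IPv4Network('192.0.2.0/24')]
```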
1,841
def inaxes(self, xy): axes_list = [a for a in self.figure.get_axes() if a.patch.contains_point(xy) and a.get_visible()] if axes_list: axes = cbook._topmost_artist(axes_list) else: axes = None return axes
Return the topmost visible `~.axes.Axes` containing the point *xy*. Parameters ---------- xy : (float, float) (x, y) pixel positions from left/bottom of the canvas. Returns ------- `~matplotlib.axes.Axes` or None The topmost visible Axes containing the point, or None if there is no Axes at the point.
46
25
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def inaxes(self, xy): axes_list = [a for a in self.figure.get_axes() if a.patch.contains_point(xy) and a.get_visible()] if axes_list: axes = cbook._topmost_artist(axes_list) else: axes = None return axes ``` ###Assistant : Return the topmost visible `~.axes.Axes` containing the point *xy*. Parameters ---------- xy : (float, float) (x, y) pixel positions from left/bottom of the canvas. Returns ------- `~matplotlib.axes.Axes` or None The topmost visible Axes containing the point, or None if there is no Axes at the point.
1,842
def upgrade(): conn = op.get_bind() is_sqlite = bool(conn.dialect.name == "sqlite") is_mssql = bool(conn.dialect.name == "mssql") if is_sqlite: op.execute("PRAGMA foreign_keys=off") with op.batch_alter_table('dag_run', schema=None) as batch_op: batch_op.add_column(sa.Column('last_scheduling_decision', TIMESTAMP, nullable=True)) batch_op.create_index('idx_last_scheduling_decision', ['last_scheduling_decision'], unique=False) batch_op.add_column(sa.Column('dag_hash', sa.String(32), nullable=True)) with op.batch_alter_table('dag', schema=None) as batch_op: batch_op.add_column(sa.Column('next_dagrun', TIMESTAMP, nullable=True)) batch_op.add_column(sa.Column('next_dagrun_create_after', TIMESTAMP, nullable=True)) # Create with nullable and no default, then ALTER to set values, to avoid table level lock batch_op.add_column(sa.Column('concurrency', sa.Integer(), nullable=True)) batch_op.add_column(sa.Column('has_task_concurrency_limits', sa.Boolean(), nullable=True)) batch_op.create_index('idx_next_dagrun_create_after', ['next_dagrun_create_after'], unique=False) try: from airflow.configuration import conf concurrency = conf.getint('core', 'dag_concurrency', fallback=16) except: # noqa concurrency = 16 # Set it to true here as it makes us take the slow/more complete path, and when it's next parsed by the # DagParser it will get set to correct value. op.execute( f ) with op.batch_alter_table('dag', schema=None) as batch_op: batch_op.alter_column('concurrency', type_=sa.Integer(), nullable=False) batch_op.alter_column('has_task_concurrency_limits', type_=sa.Boolean(), nullable=False) if is_sqlite: op.execute("PRAGMA foreign_keys=on")
Apply Add ``scheduling_decision`` to ``DagRun`` and ``DAG`` UPDATE dag SET concurrency={concurrency}, has_task_concurrency_limits={1 if is_sqlite or is_mssql else sa.true()} where concurrency IS NULL
22
135
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def upgrade(): conn = op.get_bind() is_sqlite = bool(conn.dialect.name == "sqlite") is_mssql = bool(conn.dialect.name == "mssql") if is_sqlite: op.execute("PRAGMA foreign_keys=off") with op.batch_alter_table('dag_run', schema=None) as batch_op: batch_op.add_column(sa.Column('last_scheduling_decision', TIMESTAMP, nullable=True)) batch_op.create_index('idx_last_scheduling_decision', ['last_scheduling_decision'], unique=False) batch_op.add_column(sa.Column('dag_hash', sa.String(32), nullable=True)) with op.batch_alter_table('dag', schema=None) as batch_op: batch_op.add_column(sa.Column('next_dagrun', TIMESTAMP, nullable=True)) batch_op.add_column(sa.Column('next_dagrun_create_after', TIMESTAMP, nullable=True)) # Create with nullable and no default, then ALTER to set values, to avoid table level lock batch_op.add_column(sa.Column('concurrency', sa.Integer(), nullable=True)) batch_op.add_column(sa.Column('has_task_concurrency_limits', sa.Boolean(), nullable=True)) batch_op.create_index('idx_next_dagrun_create_after', ['next_dagrun_create_after'], unique=False) try: from airflow.configuration import conf concurrency = conf.getint('core', 'dag_concurrency', fallback=16) except: # noqa concurrency = 16 # Set it to true here as it makes us take the slow/more complete path, and when it's next parsed by the # DagParser it will get set to correct value. op.execute( f ) with op.batch_alter_table('dag', schema=None) as batch_op: batch_op.alter_column('concurrency', type_=sa.Integer(), nullable=False) batch_op.alter_column('has_task_concurrency_limits', type_=sa.Boolean(), nullable=False) if is_sqlite: op.execute("PRAGMA foreign_keys=on") ``` ###Assistant : Apply Add ``scheduling_decision`` to ``DagRun`` and ``DAG`` UPDATE dag SET concurrency={concurrency}, has_task_concurrency_limits={1 if is_sqlite or is_mssql else sa.true()} where concurrency IS NULL
1,843
def enrich_ledger_entries_with_event_data(self, ledger_entries): # Build up a list of the subset of ledger entries we are expected # to enrich with event metadata. event_id_to_ledger_entry = {} for entry in ledger_entries: maybe_event_id: Optional[str] = entry.get("event_id") if maybe_event_id: event_id_to_ledger_entry[maybe_event_id] = entry # Nothing to enrich; short-circuit if len(event_id_to_ledger_entry) == 0: return ledger_entries
Enriches a list of ledger entries with event metadata (applies only to decrements that have an event_id property set, i.e. automated decrements to the ledger applied by Orb).
28
50
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def enrich_ledger_entries_with_event_data(self, ledger_entries): # Build up a list of the subset of ledger entries we are expected # to enrich with event metadata. event_id_to_ledger_entry = {} for entry in ledger_entries: maybe_event_id: Optional[str] = entry.get("event_id") if maybe_event_id: event_id_to_ledger_entry[maybe_event_id] = entry # Nothing to enrich; short-circuit if len(event_id_to_ledger_entry) == 0: return ledger_entries ``` ###Assistant : Enriches a list of ledger entries with event metadata (applies only to decrements that have an event_id property set, i.e. automated decrements to the ledger applied by Orb).
1,844
def test_cache() -> None: ledger_store = DictLedgerStore() user_key = b"1322" ledger = DataSubjectLedger.get_or_create(store=ledger_store, user_key=user_key) assert ( ledger._cache_constant2epsilon[0] == 0.05372712063485988 ), "The first value in the cache is incorrect" assert ( ledger._cache_constant2epsilon[1] == 0.07773597369831031 ), "Has the DP cache been changed?" rdp_700k = convert_constants_to_indices(np.array([700_000])) assert ( ledger._cache_constant2epsilon.take(rdp_700k)[0] == 706213.1816144075 ), "Has the DP cache been changed?" rdp_50 = convert_constants_to_indices(np.array([50])) assert ( ledger._cache_constant2epsilon.take(rdp_50)[0] == 100.68990516105825 ), "Has the DP cache been changed?" assert ( len(ledger._cache_constant2epsilon) >= 1_200_000 ), "Has the cache been changed?"
Ensure the most up to date RDP-to-epsilon cache is being used.
11
81
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_cache() -> None: ledger_store = DictLedgerStore() user_key = b"1322" ledger = DataSubjectLedger.get_or_create(store=ledger_store, user_key=user_key) assert ( ledger._cache_constant2epsilon[0] == 0.05372712063485988 ), "The first value in the cache is incorrect" assert ( ledger._cache_constant2epsilon[1] == 0.07773597369831031 ), "Has the DP cache been changed?" rdp_700k = convert_constants_to_indices(np.array([700_000])) assert ( ledger._cache_constant2epsilon.take(rdp_700k)[0] == 706213.1816144075 ), "Has the DP cache been changed?" rdp_50 = convert_constants_to_indices(np.array([50])) assert ( ledger._cache_constant2epsilon.take(rdp_50)[0] == 100.68990516105825 ), "Has the DP cache been changed?" assert ( len(ledger._cache_constant2epsilon) >= 1_200_000 ), "Has the cache been changed?" ``` ###Assistant : Ensure the most up to date RDP-to-epsilon cache is being used.
1,845
def get_lexer_for_mimetype(_mime, **options): for modname, name, _, _, mimetypes in LEXERS.values(): if _mime in mimetypes: if name not in _lexer_cache: _load_lexers(modname) return _lexer_cache[name](**options) for cls in find_plugin_lexers(): if _mime in cls.mimetypes: return cls(**options) raise ClassNotFound('no lexer for mimetype %r found' % _mime)
Get a lexer for a mimetype. Raises ClassNotFound if not found.
11
42
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_lexer_for_mimetype(_mime, **options): for modname, name, _, _, mimetypes in LEXERS.values(): if _mime in mimetypes: if name not in _lexer_cache: _load_lexers(modname) return _lexer_cache[name](**options) for cls in find_plugin_lexers(): if _mime in cls.mimetypes: return cls(**options) raise ClassNotFound('no lexer for mimetype %r found' % _mime) ``` ###Assistant : Get a lexer for a mimetype. Raises ClassNotFound if not found.
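A short usage sketch for `get_lexer_for_mimetype`, assuming the standard Pygments package layout where this function and `ClassNotFound` are importable:

```Python
from pygments.lexers import get_lexer_for_mimetype
from pygments.util import ClassNotFound

try:
    lexer = get_lexer_for_mimetype("text/x-python")
    print(lexer.name)  # "Python"
except ClassNotFound:
    print("no lexer registered for this mimetype")
```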
1,846
def test_glm_regression(solver, fit_intercept, glm_dataset): model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset params = dict( alpha=alpha, fit_intercept=fit_intercept, # While _GeneralizedLinearRegressor exposes the solver parameter, public # estimators currently do not, and lbfgs is the only solver anyway. # TODO: Expose solver as soon as we have a second solver to choose from. # solver=solver, # only lbfgs available tol=1e-12, max_iter=1000, ) model = clone(model).set_params(**params) X = X[:, :-1] # remove intercept if fit_intercept: coef = coef_with_intercept intercept = coef[-1] coef = coef[:-1] else: coef = coef_without_intercept intercept = 0 model.fit(X, y) rtol = 5e-5 assert model.intercept_ == pytest.approx(intercept, rel=rtol) assert_allclose(model.coef_, coef, rtol=rtol) # Same with sample_weight. model = ( clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0])) ) assert model.intercept_ == pytest.approx(intercept, rel=rtol) assert_allclose(model.coef_, coef, rtol=rtol) @pytest.mark.parametrize("solver", SOLVERS) @pytest.mark.parametrize("fit_intercept", [True, False])
Test that GLM converges for all solvers to correct solution. We work with a simple constructed data set with known solution.
21
127
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_glm_regression(solver, fit_intercept, glm_dataset): model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset params = dict( alpha=alpha, fit_intercept=fit_intercept, # While _GeneralizedLinearRegressor exposes the solver parameter, public # estimators currently do not, and lbfgs is the only solver anyway. # TODO: Expose solver as soon as we have a second solver to choose from. # solver=solver, # only lbfgs available tol=1e-12, max_iter=1000, ) model = clone(model).set_params(**params) X = X[:, :-1] # remove intercept if fit_intercept: coef = coef_with_intercept intercept = coef[-1] coef = coef[:-1] else: coef = coef_without_intercept intercept = 0 model.fit(X, y) rtol = 5e-5 assert model.intercept_ == pytest.approx(intercept, rel=rtol) assert_allclose(model.coef_, coef, rtol=rtol) # Same with sample_weight. model = ( clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0])) ) assert model.intercept_ == pytest.approx(intercept, rel=rtol) assert_allclose(model.coef_, coef, rtol=rtol) @pytest.mark.parametrize("solver", SOLVERS) @pytest.mark.parametrize("fit_intercept", [True, False]) ``` ###Assistant : Test that GLM converges for all solvers to correct solution. We work with a simple constructed data set with known solution.
1,847
def _cuda_check(self): with Popen("nvcc -V", shell=True, stdout=PIPE, stderr=PIPE) as chk: stdout, stderr = chk.communicate() if not stderr: version = re.search(r".*release (?P<cuda>\d+\.\d+)", stdout.decode(locale.getpreferredencoding())) self.cuda_version = version.groupdict().get("cuda", None) locate = "where" if self._os == "windows" else "which" path = os.popen(f"{locate} nvcc").read() if path: path = path.split("\n")[0] # Split multiple entries and take first found while True: # Get Cuda root folder path, split = os.path.split(path) if split == "bin": break self.cuda_path = path return # Failed to load nvcc, manual check getattr(self, f"_cuda_check_{self._os}")()
Obtain the location and version of Cuda and populate :attr:`cuda_version` and :attr:`cuda_path` Initially just calls `nvcc -V` to get the installed version of Cuda currently in use. If this fails, drills down to more OS specific checking methods.
38
81
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _cuda_check(self): with Popen("nvcc -V", shell=True, stdout=PIPE, stderr=PIPE) as chk: stdout, stderr = chk.communicate() if not stderr: version = re.search(r".*release (?P<cuda>\d+\.\d+)", stdout.decode(locale.getpreferredencoding())) self.cuda_version = version.groupdict().get("cuda", None) locate = "where" if self._os == "windows" else "which" path = os.popen(f"{locate} nvcc").read() if path: path = path.split("\n")[0] # Split multiple entries and take first found while True: # Get Cuda root folder path, split = os.path.split(path) if split == "bin": break self.cuda_path = path return # Failed to load nvcc, manual check getattr(self, f"_cuda_check_{self._os}")() ``` ###Assistant : Obtain the location and version of Cuda and populate :attr:`cuda_version` and :attr:`cuda_path` Initially just calls `nvcc -V` to get the installed version of Cuda currently in use. If this fails, drills down to more OS specific checking methods.
1,848
def test_map_product_same(self, dag_maker, session): outputs = [] with dag_maker(dag_id="product_same", session=session) as dag:
Test a mapped task can refer to the same source multiple times.
12
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_map_product_same(self, dag_maker, session): outputs = [] with dag_maker(dag_id="product_same", session=session) as dag: ``` ###Assistant : Test a mapped task can refer to the same source multiple times.
1,849
def get_deepbooru_tags(pil_image, threshold=0.5): from modules import shared # prevents circular reference create_deepbooru_process(threshold) shared.deepbooru_process_return["value"] = -1 shared.deepbooru_process_queue.put(pil_image) while shared.deepbooru_process_return["value"] == -1: time.sleep(0.2) ret = shared.deepbooru_process_return["value"] release_process() return ret
This method is for running only one image at a time for simple use. Used by the img2img interrogate feature.
19
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_deepbooru_tags(pil_image, threshold=0.5): from modules import shared # prevents circular reference create_deepbooru_process(threshold) shared.deepbooru_process_return["value"] = -1 shared.deepbooru_process_queue.put(pil_image) while shared.deepbooru_process_return["value"] == -1: time.sleep(0.2) ret = shared.deepbooru_process_return["value"] release_process() return ret ``` ###Assistant : This method is for running only one image at a time for simple use. Used by the img2img interrogate feature.
1,850
def test_update_from_select(self, mock_handler): self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # --- use predictor --- predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical, 'c': dtype.datetime }, 'predicted_value': 'ccc' } self.set_predictor(predictor) sql = ret = self.command_executor.execute_command( parse_sql(sql, dialect='mindsdb')) assert ret.error_code is None # 1 select and 2 updates assert mock_handler().query.call_count == 3 # second is update assert mock_handler().query.call_args_list[1][0][0].to_string() == "update table2 set a1=1, c1='ccc' where (a1 = 1) AND (b1 = 'ccc')" # @patch('mindsdb.integrations.handlers.postgres_handler.Handler') # def test_union_type_mismatch(self, mock_handler): # self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # # sql = # from mindsdb.api.mysql.mysql_proxy.utilities import ErSqlWrongArguments # with pytest.raises(ErSqlWrongArguments): # self.command_executor.execute_command(parse_sql(sql, dialect='mindsdb'))
update pg.table2 set a1 = df.a, c1 = df.c from ( SELECT model.a as a, model.b as b, model.p as c FROM pg.tasks as t JOIN mindsdb.task_model as model WHERE t.a=1 ) as df where table2.a1 = df.a and table2.b1 = df.b # SELECT a, b FROM pg.tasks # UNION # SELECT b, a FROM pg.tasks #
57
101
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_update_from_select(self, mock_handler): self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # --- use predictor --- predictor = { 'name': 'task_model', 'predict': 'p', 'dtypes': { 'p': dtype.float, 'a': dtype.integer, 'b': dtype.categorical, 'c': dtype.datetime }, 'predicted_value': 'ccc' } self.set_predictor(predictor) sql = ret = self.command_executor.execute_command( parse_sql(sql, dialect='mindsdb')) assert ret.error_code is None # 1 select and 2 updates assert mock_handler().query.call_count == 3 # second is update assert mock_handler().query.call_args_list[1][0][0].to_string() == "update table2 set a1=1, c1='ccc' where (a1 = 1) AND (b1 = 'ccc')" # @patch('mindsdb.integrations.handlers.postgres_handler.Handler') # def test_union_type_mismatch(self, mock_handler): # self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # # sql = # from mindsdb.api.mysql.mysql_proxy.utilities import ErSqlWrongArguments # with pytest.raises(ErSqlWrongArguments): # self.command_executor.execute_command(parse_sql(sql, dialect='mindsdb')) ``` ###Assistant : update pg.table2 set a1 = df.a, c1 = df.c from ( SELECT model.a as a, model.b as b, model.p as c FROM pg.tasks as t JOIN mindsdb.task_model as model WHERE t.a=1 ) as df where table2.a1 = df.a and table2.b1 = df.b # SELECT a, b FROM pg.tasks # UNION # SELECT b, a FROM pg.tasks #
1,851
def get_total_shipments(scorecard): supplier = frappe.get_doc("Supplier", scorecard.supplier) # Look up all PO Items with delivery dates between our dates data = frappe.db.sql( , {"supplier": supplier.name, "start_date": scorecard.start_date, "end_date": scorecard.end_date}, as_dict=0, )[0][0] if not data: data = 0 return data
Gets the total number of ordered shipments to arrive in the period (based on Purchase Receipts) SELECT COUNT(po_item.base_amount) FROM `tabPurchase Order Item` po_item, `tabPurchase Order` po WHERE po.supplier = %(supplier)s AND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s AND po_item.docstatus = 1 AND po_item.parent = po.name
44
38
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_total_shipments(scorecard): supplier = frappe.get_doc("Supplier", scorecard.supplier) # Look up all PO Items with delivery dates between our dates data = frappe.db.sql( , {"supplier": supplier.name, "start_date": scorecard.start_date, "end_date": scorecard.end_date}, as_dict=0, )[0][0] if not data: data = 0 return data ``` ###Assistant : Gets the total number of ordered shipments to arrive in the period (based on Purchase Receipts) SELECT COUNT(po_item.base_amount) FROM `tabPurchase Order Item` po_item, `tabPurchase Order` po WHERE po.supplier = %(supplier)s AND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s AND po_item.docstatus = 1 AND po_item.parent = po.name
1,852
def source(object): print('In file: %s' % inspect.getsourcefile(object)) print(inspect.getsource(object))
Prints the source code of a given object. .. deprecated:: 1.3 The ``source()`` function is deprecated. Use ``inspect.getsource()`` or ``??`` in IPython/Jupyter instead.
23
8
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def source(object): print('In file: %s' % inspect.getsourcefile(object)) print(inspect.getsource(object)) ``` ###Assistant : Prints the source code of a given object. .. deprecated:: 1.3 The ``source()`` function is deprecated. Use ``inspect.getsource()`` or ``??`` in IPython/Jupyter instead.
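Since the docstring points users at `inspect.getsource()` as the replacement, a minimal sketch of the recommended alternative (run from a file, since source lookup can fail for REPL-defined functions):

```Python
import inspect

def greet(name):
    return f"hello, {name}"

print(inspect.getsourcefile(greet))  # path of the defining file
print(inspect.getsource(greet))      # the function's source text
```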
1,853
def validate_axis(axis, input_shape): input_shape = tf.TensorShape(input_shape) rank = input_shape.rank if not rank: raise ValueError( f"Input has undefined rank. Received: input_shape={input_shape}" ) # Convert axis to list and resolve negatives if isinstance(axis, int): axis = [axis] else: axis = list(axis) for idx, x in enumerate(axis): if x < 0: axis[idx] = rank + x # Validate axes for x in axis: if x < 0 or x >= rank: raise ValueError( "Invalid value for `axis` argument. " "Expected 0 <= axis < inputs.rank (with " f"inputs.rank={rank}). Received: axis={tuple(axis)}" ) if len(axis) != len(set(axis)): raise ValueError(f"Duplicate axis: {tuple(axis)}") return axis
Validate an axis value and returns its standardized form. Args: axis: Value to validate. Can be an integer or a list/tuple of integers. Integers may be negative. input_shape: Reference input shape that the axis/axes refer to. Returns: Normalized form of `axis`, i.e. a list with all-positive values.
47
98
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def validate_axis(axis, input_shape): input_shape = tf.TensorShape(input_shape) rank = input_shape.rank if not rank: raise ValueError( f"Input has undefined rank. Received: input_shape={input_shape}" ) # Convert axis to list and resolve negatives if isinstance(axis, int): axis = [axis] else: axis = list(axis) for idx, x in enumerate(axis): if x < 0: axis[idx] = rank + x # Validate axes for x in axis: if x < 0 or x >= rank: raise ValueError( "Invalid value for `axis` argument. " "Expected 0 <= axis < inputs.rank (with " f"inputs.rank={rank}). Received: axis={tuple(axis)}" ) if len(axis) != len(set(axis)): raise ValueError(f"Duplicate axis: {tuple(axis)}") return axis ``` ###Assistant : Validate an axis value and returns its standardized form. Args: axis: Value to validate. Can be an integer or a list/tuple of integers. Integers may be negative. input_shape: Reference input shape that the axis/axes refer to. Returns: Normalized form of `axis`, i.e. a list with all-positive values.
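A self-contained sketch of the same normalization as `validate_axis`, assuming a plain integer rank in place of `tf.TensorShape` (the function name is illustrative):

```Python
def normalize_axis(axis, rank):
    # Resolve negative axes against the rank, mirroring the logic above.
    axes = [axis] if isinstance(axis, int) else list(axis)
    axes = [a + rank if a < 0 else a for a in axes]
    for a in axes:
        if not 0 <= a < rank:
            raise ValueError(f"axis {a} out of range for rank {rank}")
    if len(axes) != len(set(axes)):
        raise ValueError(f"duplicate axis: {tuple(axes)}")
    return axes

print(normalize_axis(-1, 4))       # [3]
print(normalize_axis((1, -1), 4))  # [1, 3]
```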
1,854
def _filter_to(self, it, pred): buf = '' idx = 0 for i, t, v in it: if pred(t): if buf: yield idx, None, buf buf = '' yield i, t, v else: if not buf: idx = i buf += v if buf: yield idx, None, buf
Keep only the tokens that match `pred`, merge the others together
11
47
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _filter_to(self, it, pred): buf = '' idx = 0 for i, t, v in it: if pred(t): if buf: yield idx, None, buf buf = '' yield i, t, v else: if not buf: idx = i buf += v if buf: yield idx, None, buf ``` ###Assistant : Keep only the tokens that match `pred`, merge the others together
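A standalone sketch of the merge behavior of `_filter_to` on a toy `(index, type, value)` token stream; the token types here are hypothetical:

```Python
def filter_to(it, pred):
    # Same logic as above: tokens failing pred are merged into one buffer.
    buf, idx = "", 0
    for i, t, v in it:
        if pred(t):
            if buf:
                yield idx, None, buf
                buf = ""
            yield i, t, v
        else:
            if not buf:
                idx = i
            buf += v
    if buf:
        yield idx, None, buf

tokens = [(0, "kept", "a"), (1, "drop", "b"), (2, "drop", "c"), (3, "kept", "d")]
print(list(filter_to(tokens, lambda t: t == "kept")))
# [(0, 'kept', 'a'), (1, None, 'bc'), (3, 'kept', 'd')]
```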
1,855
def entity_registry_enabled_default(self) -> bool: return bool(self._config[CONF_ENABLED_BY_DEFAULT])
Return if the entity should be enabled when first added to the entity registry.
14
6
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def entity_registry_enabled_default(self) -> bool: return bool(self._config[CONF_ENABLED_BY_DEFAULT]) ``` ###Assistant : Return if the entity should be enabled when first added to the entity registry.
1,856
def bisectors(self): # use lines containing sides so containment check during # intersection calculation can be avoided, thus reducing # the processing time for calculating the bisectors s = [Line(l) for l in self.sides] v = self.vertices c = self.incenter l1 = Segment(v[0], Line(v[0], c).intersection(s[1])[0]) l2 = Segment(v[1], Line(v[1], c).intersection(s[2])[0]) l3 = Segment(v[2], Line(v[2], c).intersection(s[0])[0]) return {v[0]: l1, v[1]: l2, v[2]: l3}
The angle bisectors of the triangle. An angle bisector of a triangle is a straight line through a vertex which cuts the corresponding angle in half. Returns ======= bisectors : dict Each key is a vertex (Point) and each value is the corresponding bisector (Segment). See Also ======== sympy.geometry.point.Point, sympy.geometry.line.Segment Examples ======== >>> from sympy import Point, Triangle, Segment >>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1) >>> t = Triangle(p1, p2, p3) >>> from sympy import sqrt >>> t.bisectors()[p2] == Segment(Point(1, 0), Point(0, sqrt(2) - 1)) True
91
62
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def bisectors(self): # use lines containing sides so containment check during # intersection calculation can be avoided, thus reducing # the processing time for calculating the bisectors s = [Line(l) for l in self.sides] v = self.vertices c = self.incenter l1 = Segment(v[0], Line(v[0], c).intersection(s[1])[0]) l2 = Segment(v[1], Line(v[1], c).intersection(s[2])[0]) l3 = Segment(v[2], Line(v[2], c).intersection(s[0])[0]) return {v[0]: l1, v[1]: l2, v[2]: l3} ``` ###Assistant : The angle bisectors of the triangle. An angle bisector of a triangle is a straight line through a vertex which cuts the corresponding angle in half. Returns ======= bisectors : dict Each key is a vertex (Point) and each value is the corresponding bisector (Segment). See Also ======== sympy.geometry.point.Point, sympy.geometry.line.Segment Examples ======== >>> from sympy import Point, Triangle, Segment >>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1) >>> t = Triangle(p1, p2, p3) >>> from sympy import sqrt >>> t.bisectors()[p2] == Segment(Point(1, 0), Point(0, sqrt(2) - 1)) True
1,857
def query(query, filters={}, top_k_reader=5, top_k_retriever=5) -> Tuple[List[Dict[str, Any]], Dict[str, str]]: url = f"{API_ENDPOINT}/{DOC_REQUEST}" params = {"filters": filters, "Retriever": {"top_k": top_k_retriever}, "Reader": {"top_k": top_k_reader}} req = {"query": query, "params": params} response_raw = requests.post(url, json=req) if response_raw.status_code >= 400 and response_raw.status_code != 503: raise Exception(f"{vars(response_raw)}") response = response_raw.json() if "errors" in response: raise Exception(", ".join(response["errors"])) # Format response results = [] answers = response["answers"] for answer in answers: if answer.get("answer", None): results.append( { "context": "..." + answer["context"] + "...", "answer": answer.get("answer", None), "source": answer["meta"]["name"], "relevance": round(answer["score"] * 100, 2), "document": [doc for doc in response["documents"] if doc["id"] == answer["document_id"]][0], "offset_start_in_doc": answer["offsets_in_document"][0]["start"], "_raw": answer, } ) else: results.append( { "context": None, "answer": None, "document": None, "relevance": round(answer["score"] * 100, 2), "_raw": answer, } ) return results, response
Send a query to the REST API and parse the answer. Returns both a ready-to-use representation of the results and the raw JSON.
23
124
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def query(query, filters={}, top_k_reader=5, top_k_retriever=5) -> Tuple[List[Dict[str, Any]], Dict[str, str]]: url = f"{API_ENDPOINT}/{DOC_REQUEST}" params = {"filters": filters, "Retriever": {"top_k": top_k_retriever}, "Reader": {"top_k": top_k_reader}} req = {"query": query, "params": params} response_raw = requests.post(url, json=req) if response_raw.status_code >= 400 and response_raw.status_code != 503: raise Exception(f"{vars(response_raw)}") response = response_raw.json() if "errors" in response: raise Exception(", ".join(response["errors"])) # Format response results = [] answers = response["answers"] for answer in answers: if answer.get("answer", None): results.append( { "context": "..." + answer["context"] + "...", "answer": answer.get("answer", None), "source": answer["meta"]["name"], "relevance": round(answer["score"] * 100, 2), "document": [doc for doc in response["documents"] if doc["id"] == answer["document_id"]][0], "offset_start_in_doc": answer["offsets_in_document"][0]["start"], "_raw": answer, } ) else: results.append( { "context": None, "answer": None, "document": None, "relevance": round(answer["score"] * 100, 2), "_raw": answer, } ) return results, response ``` ###Assistant : Send a query to the REST API and parse the answer. Returns both a ready-to-use representation of the results and the raw JSON.
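A hedged usage sketch for `query`, assuming a Haystack-style REST backend is reachable at the module's `API_ENDPOINT`; the question text is illustrative:

```Python
# Assumes API_ENDPOINT/DOC_REQUEST are configured and the service is running.
results, raw_response = query(
    "Who is the father of Arya Stark?",
    filters={},
    top_k_reader=3,
    top_k_retriever=10,
)
for r in results:
    print(f"{r['relevance']:>5}%  {r['answer']}  ({r['source']})")
```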
1,858
def format_target_temperature(target_temperature): return str(round(float(target_temperature) * 2, 0) / 2).rstrip("0").rstrip(".")
Format target temperature to be sent to the Daikin unit, rounding to nearest half degree.
15
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def format_target_temperature(target_temperature): return str(round(float(target_temperature) * 2, 0) / 2).rstrip("0").rstrip(".") ``` ###Assistant : Format target temperature to be sent to the Daikin unit, rounding to nearest half degree.
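A worked sketch of the rounding and trailing-zero stripping in `format_target_temperature`; the input values are illustrative:

```Python
def format_target_temperature(target_temperature):
    return str(round(float(target_temperature) * 2, 0) / 2).rstrip("0").rstrip(".")

for t in ("22.0", "21.8", "23.3"):
    print(t, "->", format_target_temperature(t))
# 22.0 -> 22      (trailing ".0" stripped)
# 21.8 -> 22      (rounded to the nearest half degree)
# 23.3 -> 23.5
```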
1,859
def get_data(filters=None): data = [] emirates, amounts_by_emirate = append_vat_on_sales(data, filters) append_vat_on_expenses(data, filters) return data, emirates, amounts_by_emirate
Returns the list of dictionaries. Each dictionary is a row in the datatable and chart data.
16
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_data(filters=None): data = [] emirates, amounts_by_emirate = append_vat_on_sales(data, filters) append_vat_on_expenses(data, filters) return data, emirates, amounts_by_emirate ``` ###Assistant : Returns the list of dictionaries. Each dictionary is a row in the datatable and chart data.
1,860
def dict(self, *args, **kwargs): kwargs.setdefault("exclude_none", True) return super().dict(*args, **kwargs)
Exclude `None` fields by default to comply with the OpenAPI spec.
11
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dict(self, *args, **kwargs): kwargs.setdefault("exclude_none", True) return super().dict(*args, **kwargs) ``` ###Assistant : Exclude `None` fields by default to comply with the OpenAPI spec.
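A minimal sketch of the `dict` override in context, assuming pydantic v1-style models where `BaseModel.dict()` accepts `exclude_none`:

```Python
from typing import Optional
from pydantic import BaseModel

class Answer(BaseModel):
    answer: Optional[str] = None
    score: float = 0.0

    def dict(self, *args, **kwargs):
        kwargs.setdefault("exclude_none", True)  # drop None fields by default
        return super().dict(*args, **kwargs)

print(Answer(score=0.9).dict())  # {'score': 0.9} -- 'answer' is omitted
```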
1,861
def extract_column_names(self) -> Dict[str, Tuple[str, str]]: fields = [] for field in self.properties.keys(): if not is_airbyte_column(field): fields.append(field) result = {} field_names = set() for field in fields: field_name = self.name_transformer.normalize_column_name(field, in_jinja=False) field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name) jinja_name = self.name_transformer.normalize_column_name(field, in_jinja=True) if field_name_lookup in field_names: # TODO handle column name duplicates or collisions deterministically in this stream for i in range(1, 1000): field_name = self.name_transformer.normalize_column_name(f"{field}_{i}", in_jinja=False) field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name) jinja_name = self.name_transformer.normalize_column_name(f"{field}_{i}", in_jinja=True) if field_name_lookup not in field_names: break field_names.add(field_name_lookup) result[field] = (field_name, jinja_name) return result
Generate a mapping of JSON properties to normalized SQL column names, handling collisions and avoiding duplicate names. The value mapped to each field property is a tuple where: - the first value is the normalized "raw" column name - the second value is the normalized quoted column name to be used in jinja context
54
83
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def extract_column_names(self) -> Dict[str, Tuple[str, str]]: fields = [] for field in self.properties.keys(): if not is_airbyte_column(field): fields.append(field) result = {} field_names = set() for field in fields: field_name = self.name_transformer.normalize_column_name(field, in_jinja=False) field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name) jinja_name = self.name_transformer.normalize_column_name(field, in_jinja=True) if field_name_lookup in field_names: # TODO handle column name duplicates or collisions deterministically in this stream for i in range(1, 1000): field_name = self.name_transformer.normalize_column_name(f"{field}_{i}", in_jinja=False) field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name) jinja_name = self.name_transformer.normalize_column_name(f"{field}_{i}", in_jinja=True) if field_name_lookup not in field_names: break field_names.add(field_name_lookup) result[field] = (field_name, jinja_name) return result ``` ###Assistant : Generate a mapping of JSON properties to normalized SQL column names, handling collisions and avoiding duplicate names. The value mapped to each field property is a tuple where: - the first value is the normalized "raw" column name - the second value is the normalized quoted column name to be used in jinja context
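A simplified standalone sketch of the collision handling in `extract_column_names`, substituting a hypothetical lower-casing normalizer for the real name transformer; the quoting shown is illustrative:

```Python
def extract_column_names(properties):
    result, seen = {}, set()
    for field in properties:
        candidate, i = field.lower(), 0
        while candidate in seen:      # probe field_1, field_2, ... on collision
            i += 1
            candidate = f"{field.lower()}_{i}"
        seen.add(candidate)
        # (raw name, quoted name) -- mirrors the (field_name, jinja_name) pair
        result[field] = (candidate, f'"{candidate}"')
    return result

print(extract_column_names(["Name", "name", "NAME"]))
# {'Name': ('name', '"name"'), 'name': ('name_1', '"name_1"'),
#  'NAME': ('name_2', '"name_2"')}
```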
1,862
def _build_paths_from_predecessors(sources, target, pred): if target not in pred: raise nx.NetworkXNoPath(f"Target {target} cannot be reached from given sources") seen = {target} stack = [[target, 0]] top = 0 while top >= 0: node, i = stack[top] if node in sources: yield [p for p, n in reversed(stack[: top + 1])] if len(pred[node]) > i: stack[top][1] = i + 1 next = pred[node][i] if next in seen: continue else: seen.add(next) top += 1 if top == len(stack): stack.append([next, 0]) else: stack[top][:] = [next, 0] else: seen.discard(node) top -= 1
Compute all simple paths to target, given the predecessors found in pred, terminating when any source in sources is found. Parameters ---------- sources : set Starting nodes for path. target : node Ending node for path. pred : dict A dictionary of predecessor lists, keyed by node Returns ------- paths : generator of lists A generator of all paths between source and target. Raises ------ NetworkXNoPath If `target` cannot be reached from `source`. Notes ----- There may be many paths between the sources and target. If there are cycles among the predecessors, this function will not produce all possible paths because doing so would produce infinitely many paths of unbounded length -- instead, we only produce simple paths. See Also -------- shortest_path single_source_shortest_path all_pairs_shortest_path all_shortest_paths bellman_ford_path
126
88
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _build_paths_from_predecessors(sources, target, pred): if target not in pred: raise nx.NetworkXNoPath(f"Target {target} cannot be reached from given sources") seen = {target} stack = [[target, 0]] top = 0 while top >= 0: node, i = stack[top] if node in sources: yield [p for p, n in reversed(stack[: top + 1])] if len(pred[node]) > i: stack[top][1] = i + 1 next = pred[node][i] if next in seen: continue else: seen.add(next) top += 1 if top == len(stack): stack.append([next, 0]) else: stack[top][:] = [next, 0] else: seen.discard(node) top -= 1 ``` ###Assistant : Compute all simple paths to target, given the predecessors found in pred, terminating when any source in sources is found. Parameters ---------- sources : set Starting nodes for path. target : node Ending node for path. pred : dict A dictionary of predecessor lists, keyed by node Returns ------- paths : generator of lists A generator of all paths between source and target. Raises ------ NetworkXNoPath If `target` cannot be reached from `source`. Notes ----- There may be many paths between the sources and target. If there are cycles among the predecessors, this function will not produce all possible paths because doing so would produce infinitely many paths of unbounded length -- instead, we only produce simple paths. See Also -------- shortest_path single_source_shortest_path all_pairs_shortest_path all_shortest_paths bellman_ford_path
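A usage sketch for `_build_paths_from_predecessors` through the public NetworkX API: `nx.predecessor` produces the predecessor lists this helper consumes, and `nx.all_shortest_paths` exercises it internally in recent NetworkX releases:

```Python
import networkx as nx

# A 4-cycle: two equally short paths exist from 1 to 4.
G = nx.Graph([(1, 2), (1, 3), (2, 4), (3, 4)])

pred = nx.predecessor(G, 1)
print(pred[4])                                 # [2, 3]
print(sorted(nx.all_shortest_paths(G, 1, 4)))  # [[1, 2, 4], [1, 3, 4]]
```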
1,863
def is_connected(self) -> bool: return self._backend is not None and self._backend.is_connected
Return True if the client is connected to a device.
10
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_connected(self) -> bool: return self._backend is not None and self._backend.is_connected ``` ###Assistant : Return True if the client is connected to a device.
1,864
def _create_gnu_long_header(cls, name, type, encoding, errors): name = name.encode(encoding, errors) + NUL info = {} info["name"] = "././@LongLink" info["type"] = type info["size"] = len(name) info["magic"] = GNU_MAGIC # create extended header + name blocks. return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ cls._create_payload(name)
Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence for name.
8
42
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_gnu_long_header(cls, name, type, encoding, errors): name = name.encode(encoding, errors) + NUL info = {} info["name"] = "././@LongLink" info["type"] = type info["size"] = len(name) info["magic"] = GNU_MAGIC # create extended header + name blocks. return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ cls._create_payload(name) ``` ###Assistant : Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence for name.
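A round-trip sketch using the public `tarfile` API; names longer than 100 bytes force exactly this GNUTYPE_LONGNAME sequence when writing in GNU format:

```Python
import io
import tarfile

long_name = "dir/" + "a" * 120  # > 100 bytes, needs a LongLink block
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w", format=tarfile.GNU_FORMAT) as tf:
    tf.addfile(tarfile.TarInfo(long_name), io.BytesIO(b""))

buf.seek(0)
with tarfile.open(fileobj=buf) as tf:
    print(tf.getnames()[0] == long_name)  # True -- the long name round-trips
```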
1,865
def genocchi_poly(n, x=None, polys=False): if n < 0: raise ValueError("Cannot generate Genocchi polynomial of degree %s" % (n-1)) poly = DMP(dup_genocchi(int(n), ZZ), ZZ) if x is not None: poly = Poly.new(poly, x) else: poly = PurePoly.new(poly, Dummy('x')) return poly if polys else poly.as_expr()
Generates the Genocchi polynomial `\operatorname{G}_n(x)`. `\operatorname{G}_n(x)` is twice the difference between the plain and central Bernoulli polynomials, so has degree `n-1`: .. math :: \operatorname{G}_n(x) = 2 (\operatorname{B}_n(x) - \operatorname{B}_n^c(x)) The factor of 2 in the definition endows `\operatorname{G}_n(x)` with integer coefficients. Parameters ========== n : int Degree of the polynomial plus one. x : optional polys : bool, optional If True, return a Poly, otherwise (default) return an expression.
70
43
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def genocchi_poly(n, x=None, polys=False): if n < 0: raise ValueError("Cannot generate Genocchi polynomial of degree %s" % (n-1)) poly = DMP(dup_genocchi(int(n), ZZ), ZZ) if x is not None: poly = Poly.new(poly, x) else: poly = PurePoly.new(poly, Dummy('x')) return poly if polys else poly.as_expr() ``` ###Assistant : Generates the Genocchi polynomial `\operatorname{G}_n(x)`. `\operatorname{G}_n(x)` is twice the difference between the plain and central Bernoulli polynomials, so has degree `n-1`: .. math :: \operatorname{G}_n(x) = 2 (\operatorname{B}_n(x) - \operatorname{B}_n^c(x)) The factor of 2 in the definition endows `\operatorname{G}_n(x)` with integer coefficients. Parameters ========== n : int Degree of the polynomial plus one. x : optional polys : bool, optional If True, return a Poly, otherwise (default) return an expression.
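A hedged usage sketch for `genocchi_poly`; the import path below is the one used by recent SymPy releases that ship the Appell sequence generators, and may differ in older versions:

```Python
from sympy import symbols
from sympy.polys.appellseqs import genocchi_poly

x = symbols("x")
for n in range(1, 5):
    # Each G_n(x) has degree n - 1 and integer coefficients.
    print(n, genocchi_poly(n, x))
```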
1,866
def _triage_segments(window, nperseg, input_length): # parse window; if array like, then set nperseg = win.shape if isinstance(window, (str, tuple)): # if nperseg not specified if nperseg is None: nperseg = 256 # then change to default if nperseg > input_length: warnings.warn(f'nperseg = {nperseg} is greater than input length ' f'= {input_length}, using nperseg = {input_length}') nperseg = input_length win = jnp.array(osp_signal.get_window(window, nperseg)) else: win = jnp.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') if input_length < win.shape[-1]: raise ValueError('window is longer than input signal') if nperseg is None: nperseg = win.shape[0] elif nperseg is not None: if nperseg != win.shape[0]: raise ValueError("value specified for nperseg is different" " from length of window") return win, nperseg
Parses window and nperseg arguments for spectrogram and _spectral_helper. This is a helper function, not meant to be called externally. Parameters ---------- window : string, tuple, or ndarray If window is specified by a string or tuple and nperseg is not specified, nperseg is set to the default of 256 and returns a window of that length. If instead the window is array_like and nperseg is not specified, then nperseg is set to the length of the window. A ValueError is raised if the user supplies both an array_like window and a value for nperseg but nperseg does not equal the length of the window. nperseg : int Length of each segment input_length: int Length of input signal, i.e. x.shape[-1]. Used to test for errors. Returns ------- win : ndarray window. If function was called with string or tuple than this will hold the actual array used as a window. nperseg : int Length of each segment. If window is str or tuple, nperseg is set to 256. If window is array_like, nperseg is set to the length of the 6 window.
182
118
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _triage_segments(window, nperseg, input_length): # parse window; if array like, then set nperseg = win.shape if isinstance(window, (str, tuple)): # if nperseg not specified if nperseg is None: nperseg = 256 # then change to default if nperseg > input_length: warnings.warn(f'nperseg = {nperseg} is greater than input length ' f'= {input_length}, using nperseg = {input_length}') nperseg = input_length win = jnp.array(osp_signal.get_window(window, nperseg)) else: win = jnp.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') if input_length < win.shape[-1]: raise ValueError('window is longer than input signal') if nperseg is None: nperseg = win.shape[0] elif nperseg is not None: if nperseg != win.shape[0]: raise ValueError("value specified for nperseg is different" " from length of window") return win, nperseg ``` ###Assistant : Parses window and nperseg arguments for spectrogram and _spectral_helper. This is a helper function, not meant to be called externally. Parameters ---------- window : string, tuple, or ndarray If window is specified by a string or tuple and nperseg is not specified, nperseg is set to the default of 256 and returns a window of that length. If instead the window is array_like and nperseg is not specified, then nperseg is set to the length of the window. A ValueError is raised if the user supplies both an array_like window and a value for nperseg but nperseg does not equal the length of the window. nperseg : int Length of each segment input_length: int Length of input signal, i.e. x.shape[-1]. Used to test for errors. Returns ------- win : ndarray window. If the function was called with a string or tuple then this will hold the actual array used as a window. nperseg : int Length of each segment. If window is str or tuple, nperseg is set to 256. If window is array_like, nperseg is set to the length of the window.
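A sketch of the two input conventions `_triage_segments` reconciles, shown with NumPy/SciPy directly (the JAX version wraps the same `get_window` call):

```Python
import numpy as np
from scipy.signal import get_window

# A window *name* is materialized at the requested nperseg...
win = get_window("hann", 256)
print(win.shape)          # (256,)

# ...while an explicit window *array* pins nperseg to its own length.
explicit = np.hamming(128)
print(explicit.shape[0])  # 128 -> nperseg becomes 128
```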
1,867
def register_for_auto_class(cls, auto_class="AutoModel"): if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class # To update the docstring, we need to copy the method, otherwise we change the original docstring. PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub) PreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format( object="model", object_class="AutoModel", object_files="model checkpoint" )
Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class. Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoModel"`): The auto class to register this new model with.
47
57
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def register_for_auto_class(cls, auto_class="AutoModel"): if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class # To update the docstring, we need to copy the method, otherwise we change the original docstring. PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub) PreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format( object="model", object_class="AutoModel", object_files="model checkpoint" ) ``` ###Assistant : Register this class with a given auto class. This should only be used for custom models as the ones in the library are already mapped with an auto class. Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoModel"`): The auto class to register this new model with.
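A hedged registration sketch for `register_for_auto_class`, following the custom-model pattern from the Transformers docs; the class names are illustrative:

```Python
from transformers import PretrainedConfig, PreTrainedModel

class MyConfig(PretrainedConfig):
    model_type = "my-model"

class MyModel(PreTrainedModel):
    config_class = MyConfig

# Map the custom class onto AutoModel so AutoModel.from_pretrained(...,
# trust_remote_code=True) can resolve it.
MyModel.register_for_auto_class("AutoModel")
print(MyModel._auto_class)  # 'AutoModel'
```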
1,868
def is_accelerate_available(): return _accelerate_available # docstyle-ignore FLAX_IMPORT_ERROR = # docstyle-ignore INFLECT_IMPORT_ERROR = # docstyle-ignore PYTORCH_IMPORT_ERROR = # docstyle-ignore ONNX_IMPORT_ERROR = # docstyle-ignore SCIPY_IMPORT_ERROR = # docstyle-ignore TENSORFLOW_IMPORT_ERROR = # docstyle-ignore TRANSFORMERS_IMPORT_ERROR = # docstyle-ignore UNIDECODE_IMPORT_ERROR = BACKENDS_MAPPING = OrderedDict( [ ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), ("inflect", (is_inflect_available, INFLECT_IMPORT_ERROR)), ("onnx", (is_onnx_available, ONNX_IMPORT_ERROR)), ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), ("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)), ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), ("transformers", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)), ("unidecode", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)), ] )
{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the installation page: https://github.com/google/flax and follow the ones that match your environment. {0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install inflect` {0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. {0} requires the onnxruntime library but it was not found in your environment. You can install it with pip: `pip install onnxruntime` {0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install scipy` {0} requires the TensorFlow library but it was not found in your environment. Checkout the instructions on the installation page: https://www.tensorflow.org/install and follow the ones that match your environment. {0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip install transformers` {0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install Unidecode`
197
66
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_accelerate_available(): return _accelerate_available # docstyle-ignore FLAX_IMPORT_ERROR = # docstyle-ignore INFLECT_IMPORT_ERROR = # docstyle-ignore PYTORCH_IMPORT_ERROR = # docstyle-ignore ONNX_IMPORT_ERROR = # docstyle-ignore SCIPY_IMPORT_ERROR = # docstyle-ignore TENSORFLOW_IMPORT_ERROR = # docstyle-ignore TRANSFORMERS_IMPORT_ERROR = # docstyle-ignore UNIDECODE_IMPORT_ERROR = BACKENDS_MAPPING = OrderedDict( [ ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), ("inflect", (is_inflect_available, INFLECT_IMPORT_ERROR)), ("onnx", (is_onnx_available, ONNX_IMPORT_ERROR)), ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), ("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)), ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), ("transformers", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)), ("unidecode", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)), ] ) ``` ###Assistant : {0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the installation page: https://github.com/google/flax and follow the ones that match your environment. {0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install inflect` {0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. {0} requires the onnxruntime library but it was not found in your environment. You can install it with pip: `pip install onnxruntime` {0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install scipy` {0} requires the TensorFlow library but it was not found in your environment. Checkout the instructions on the installation page: https://www.tensorflow.org/install and follow the ones that match your environment. {0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip install transformers` {0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install Unidecode`
1,869
def set_horizontalalignment(self, align): _api.check_in_list(['center', 'right', 'left'], align=align) self._horizontalalignment = align self.stale = True
Set the horizontal alignment relative to the anchor point. See also :doc:`/gallery/text_labels_and_annotations/text_alignment`. Parameters ---------- align : {'left', 'center', 'right'}
19
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_horizontalalignment(self, align): _api.check_in_list(['center', 'right', 'left'], align=align) self._horizontalalignment = align self.stale = True ``` ###Assistant : Set the horizontal alignment relative to the anchor point. See also :doc:`/gallery/text_labels_and_annotations/text_alignment`. Parameters ---------- align : {'left', 'center', 'right'}
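A minimal usage sketch for the record above; the figure and text are hypothetical, not from the source:

```Python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
t = ax.text(0.5, 0.5, "hello")
# After this call the anchor point (0.5, 0.5) sits at the text's right edge.
t.set_horizontalalignment("right")
plt.show()
```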
1,870
def _wrap_awaitable(awaitable): return (yield from awaitable.__await__()) _wrap_awaitable._is_coroutine = _is_coroutine
Helper for asyncio.ensure_future(). Wraps awaitable (an object with __await__) into a coroutine that will later be wrapped in a Task by ensure_future().
22
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _wrap_awaitable(awaitable): return (yield from awaitable.__await__()) _wrap_awaitable._is_coroutine = _is_coroutine ``` ###Assistant : Helper for asyncio.ensure_future(). Wraps awaitable (an object with __await__) into a coroutine that will later be wrapped in a Task by ensure_future().
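To see why such a wrapper is needed, a hedged sketch: a plain object with `__await__` is awaitable but is not a coroutine, so `ensure_future()` first coerces it through a generator like the one above. The `Ticket` class here is purely illustrative:

```Python
import asyncio

class Ticket:
    # Awaitable, but neither a coroutine nor a Future.
    def __await__(self):
        return asyncio.sleep(0, result=42).__await__()

async def main():
    # ensure_future() wraps the bare awaitable into a coroutine, then a Task.
    task = asyncio.ensure_future(Ticket())
    print(await task)  # -> 42

asyncio.run(main())
```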
1,871
def normalize_file(file, separators=None): # Normalize path separators. if separators is None: separators = NORMALIZE_PATH_SEPS # Convert path object to string. norm_file = str(file) for sep in separators: norm_file = norm_file.replace(sep, posixpath.sep) # Remove current directory prefix. if norm_file.startswith('./'): norm_file = norm_file[2:] return norm_file
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``). *file* (:class:`str` or :class:`pathlib.PurePath`) is the file path. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. This does not need to include the POSIX path separator (``'/'``), but including it will not affect the results. Default is :data:`None` for :data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty container (e.g., an empty tuple ``()``). Returns the normalized file path (:class:`str`).
75
43
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def normalize_file(file, separators=None): # Normalize path separators. if separators is None: separators = NORMALIZE_PATH_SEPS # Convert path object to string. norm_file = str(file) for sep in separators: norm_file = norm_file.replace(sep, posixpath.sep) # Remove current directory prefix. if norm_file.startswith('./'): norm_file = norm_file[2:] return norm_file ``` ###Assistant : Normalizes the file path to use the POSIX path separator (i.e., ``'/'``). *file* (:class:`str` or :class:`pathlib.PurePath`) is the file path. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. This does not need to include the POSIX path separator (``'/'``), but including it will not affect the results. Default is :data:`None` for :data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty container (e.g., an empty tuple ``()``). Returns the normalized file path (:class:`str`).
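A brief usage sketch of the function above; the separator tuple is passed explicitly so the behaviour does not depend on the platform default:

```Python
from pathlib import PurePath

# Backslashes become '/', and a leading './' is stripped.
print(normalize_file('.\\docs\\readme.md', separators=('\\',)))          # docs/readme.md
print(normalize_file(PurePath('src') / 'main.py', separators=('\\',)))  # src/main.py
```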
1,872
def wildcard_types(self) -> List[str]: return [t for t, state_keys in self.types.items() if state_keys is None]
Returns a list of event types which require us to fetch all state keys. This will be empty unless `has_wildcards` returns True. Returns: A list of event types.
28
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wildcard_types(self) -> List[str]: return [t for t, state_keys in self.types.items() if state_keys is None] ``` ###Assistant : Returns a list of event types which require us to fetch all state keys. This will be empty unless `has_wildcards` returns True. Returns: A list of event types.
1,873
def get_columns(self, table_name) -> Response: q = f"SHOW COLUMNS IN TABLE {table_name};" result = self.native_query(q) return result
List the columns in the tables to which the user has access
12
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_columns(self, table_name) -> Response: q = f"SHOW COLUMNS IN TABLE {table_name};" result = self.native_query(q) return result ``` ###Assistant : List the columns in the tables to which the user has access
1,874
def get_on_time_shipments(scorecard): supplier = frappe.get_doc("Supplier", scorecard.supplier) # Look up all PO Items with delivery dates between our dates total_items_delivered_on_time = frappe.db.sql( , {"supplier": supplier.name, "start_date": scorecard.start_date, "end_date": scorecard.end_date}, as_dict=0, )[0][0] if not total_items_delivered_on_time: total_items_delivered_on_time = 0 return total_items_delivered_on_time
Gets the number of on-time shipments (counting each item) in the period (based on Purchase Receipts vs POs) SELECT COUNT(pr_item.qty) FROM `tabPurchase Order Item` po_item, `tabPurchase Receipt Item` pr_item, `tabPurchase Order` po, `tabPurchase Receipt` pr WHERE po.supplier = %(supplier)s AND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s AND po_item.schedule_date <= pr.posting_date AND po_item.qty = pr_item.qty AND pr_item.docstatus = 1 AND pr_item.purchase_order_item = po_item.name AND po_item.parent = po.name AND pr_item.parent = pr.name
69
38
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_on_time_shipments(scorecard): supplier = frappe.get_doc("Supplier", scorecard.supplier) # Look up all PO Items with delivery dates between our dates total_items_delivered_on_time = frappe.db.sql( , {"supplier": supplier.name, "start_date": scorecard.start_date, "end_date": scorecard.end_date}, as_dict=0, )[0][0] if not total_items_delivered_on_time: total_items_delivered_on_time = 0 return total_items_delivered_on_time ``` ###Assistant : Gets the number of on-time shipments (counting each item) in the period (based on Purchase Receipts vs POs) SELECT COUNT(pr_item.qty) FROM `tabPurchase Order Item` po_item, `tabPurchase Receipt Item` pr_item, `tabPurchase Order` po, `tabPurchase Receipt` pr WHERE po.supplier = %(supplier)s AND po_item.schedule_date BETWEEN %(start_date)s AND %(end_date)s AND po_item.schedule_date <= pr.posting_date AND po_item.qty = pr_item.qty AND pr_item.docstatus = 1 AND pr_item.purchase_order_item = po_item.name AND po_item.parent = po.name AND pr_item.parent = pr.name
1,875
def test_commit_comment_deleted(self) -> None: expected_message = self.check_webhook("commit_comment_deleted", TOPIC, expected_message)
[hypro999](http://139.59.64.214:7990/users/hypro999) deleted their comment on [508d1b67f1f](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/commits/508d1b67f1f8f3a25f543a030a7a178894aa9907):\n~~~ quote\n~~Just an arbitrary comment on a commit. Nothing to see here...~~\n~~~
17
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_commit_comment_deleted(self) -> None: expected_message = self.check_webhook("commit_comment_deleted", TOPIC, expected_message) ``` ###Assistant : [hypro999](http://139.59.64.214:7990/users/hypro999) deleted their comment on [508d1b67f1f](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/commits/508d1b67f1f8f3a25f543a030a7a178894aa9907):\n~~~ quote\n~~Just an arbitrary comment on a commit. Nothing to see here...~~\n~~~
1,876
async def get_work_queues(self) -> Optional[UUID]: work_queues = [] for name in self.work_queues: try: # support IDs and names if isinstance(name, UUID): work_queue = await self.client.read_work_queue(id=name) else: work_queue = await self.client.read_work_queue_by_name(name) except ObjectNotFound: work_queue = await self.client.create_work_queue( name=name, return_id=False ) work_queues.append(work_queue) return work_queues
Loads the work queue objects corresponding to the agent's target work queues. If any of them don't exist, they are created.
21
42
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def get_work_queues(self) -> Optional[UUID]: work_queues = [] for name in self.work_queues: try: # support IDs and names if isinstance(name, UUID): work_queue = await self.client.read_work_queue(id=name) else: work_queue = await self.client.read_work_queue_by_name(name) except ObjectNotFound: work_queue = await self.client.create_work_queue( name=name, return_id=False ) work_queues.append(work_queue) return work_queues ``` ###Assistant : Loads the work queue objects corresponding to the agent's target work queues. If any of them don't exist, they are created.
1,877
def test_read_nonexistent_stream_raises_exception(mocker): s1 = MockStream(name="s1") s2 = MockStream(name="this_stream_doesnt_exist_in_the_source") mocker.patch.object(MockStream, "get_json_schema", return_value={}) src = MockSource(streams=[s1]) catalog = ConfiguredAirbyteCatalog(streams=[_configured_stream(s2, SyncMode.full_refresh)]) with pytest.raises(KeyError): list(src.read(logger, {}, catalog)) GLOBAL_EMITTED_AT = 1
Tests that attempting to sync a stream which the source does not return from the `streams` method raises an exception
20
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_read_nonexistent_stream_raises_exception(mocker): s1 = MockStream(name="s1") s2 = MockStream(name="this_stream_doesnt_exist_in_the_source") mocker.patch.object(MockStream, "get_json_schema", return_value={}) src = MockSource(streams=[s1]) catalog = ConfiguredAirbyteCatalog(streams=[_configured_stream(s2, SyncMode.full_refresh)]) with pytest.raises(KeyError): list(src.read(logger, {}, catalog)) GLOBAL_EMITTED_AT = 1 ``` ###Assistant : Tests that attempting to sync a stream which the source does not return from the `streams` method raises an exception
1,878
def _should_queue(self, link, referrer, rel): scheme, netloc, path, _, _, _ = urlparse(link) if path.endswith(self.source_extensions + self.binary_extensions + self.excluded_extensions): result = False elif self.skip_externals and not link.startswith(self.base_url): result = False elif not referrer.startswith(self.base_url): result = False elif rel not in ('homepage', 'download'): result = False elif scheme not in ('http', 'https', 'ftp'): result = False elif self._is_platform_dependent(link): result = False else: host = netloc.split(':', 1)[0] if host.lower() == 'localhost': result = False else: result = True logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, referrer, result) return result
Determine whether a link URL from a referring page and with a particular "rel" attribute should be queued for scraping.
20
89
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _should_queue(self, link, referrer, rel): scheme, netloc, path, _, _, _ = urlparse(link) if path.endswith(self.source_extensions + self.binary_extensions + self.excluded_extensions): result = False elif self.skip_externals and not link.startswith(self.base_url): result = False elif not referrer.startswith(self.base_url): result = False elif rel not in ('homepage', 'download'): result = False elif scheme not in ('http', 'https', 'ftp'): result = False elif self._is_platform_dependent(link): result = False else: host = netloc.split(':', 1)[0] if host.lower() == 'localhost': result = False else: result = True logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, referrer, result) return result ``` ###Assistant : Determine whether a link URL from a referring page and with a particular "rel" attribute should be queued for scraping.
1,879
def receive(): header = _in_file.read(16) _logger.debug('Received command, header: [%s]', header) if header is None or len(header) < 16: # Pipe EOF encountered _logger.debug('Pipe EOF encountered') return None, None length = int(header[2:]) data = _in_file.read(length) command = CommandType(header[:2]) data = data.decode('utf8') _logger.debug('Received command, data: [%s]', data) return command, data
Receive a command from Training Service. Returns a tuple of command (CommandType) and payload (str)
15
48
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def receive(): header = _in_file.read(16) _logger.debug('Received command, header: [%s]', header) if header is None or len(header) < 16: # Pipe EOF encountered _logger.debug('Pipe EOF encountered') return None, None length = int(header[2:]) data = _in_file.read(length) command = CommandType(header[:2]) data = data.decode('utf8') _logger.debug('Received command, data: [%s]', data) return command, data ``` ###Assistant : Receive a command from Training Service. Returns a tuple of command (CommandType) and payload (str)
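The framing implied by the code above is a 16-byte header — a 2-byte command code followed by the payload length left-padded to 14 digits — then the UTF-8 payload. A hedged sketch of the matching sender; the command code shown is a placeholder, not necessarily one NNI uses:

```Python
def send(out_file, command: bytes, data: str) -> None:
    # Mirror of receive(): 2-byte command + 14-byte length field = 16-byte header.
    payload = data.encode('utf8')
    header = command + str(len(payload)).encode('ascii').zfill(14)
    assert len(header) == 16
    out_file.write(header + payload)
    out_file.flush()

# e.g. send(pipe, b'IN', '{"trial": 1}') writes b'IN00000000000012{"trial": 1}'
```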
1,880
def bettertitle(value): return ' '.join([w[0].upper() + w[1:] for w in value.split()]) @register.filter()
Alternative to the builtin title(). Ensures that the first letter of each word is uppercase but retains the original case of all others.
23
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def bettertitle(value): return ' '.join([w[0].upper() + w[1:] for w in value.split()]) @register.filter() ``` ###Assistant : Alternative to the builtin title(). Ensures that the first letter of each word is uppercase but retains the original case of all others.
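Calling the underlying function directly (outside the template engine) shows the difference from the builtin str.title():

```Python
value = "iSCSI and NVMe over TCP"
print(value.title())       # Iscsi And Nvme Over Tcp  (case of the tail is lost)
print(bettertitle(value))  # ISCSI And NVMe Over TCP  (original case retained)
```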
1,881
def load_pascal_annotation(index, pascal_root): classes = ('__background__', # always index 0 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') class_to_ind = dict(zip(classes, xrange(21))) filename = osp.join(pascal_root, 'Annotations', index + '.xml') # print 'Loading: {}'.format(filename)
This code is borrowed from Ross Girshick's FAST-RCNN code (https://github.com/rbgirshick/fast-rcnn). It parses the PASCAL .xml metadata files. See publication for further details: (http://arxiv.org/abs/1504.08083). Thanks Ross!
25
45
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_pascal_annotation(index, pascal_root): classes = ('__background__', # always index 0 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') class_to_ind = dict(zip(classes, xrange(21))) filename = osp.join(pascal_root, 'Annotations', index + '.xml') # print 'Loading: {}'.format(filename) ``` ###Assistant : This code is borrowed from Ross Girshick's FAST-RCNN code (https://github.com/rbgirshick/fast-rcnn). It parses the PASCAL .xml metadata files. See publication for further details: (http://arxiv.org/abs/1504.08083). Thanks Ross!
1,882
def get(self): logger = get_logger() logger.debug(f"ENTER::Partition.get::{self._identity}") if len(self.call_queue): self.drain_call_queue() result = UnidistWrapper.materialize(self._data) logger.debug(f"EXIT::Partition.get::{self._identity}") return result
Get the object wrapped by this partition out of the object store. Returns ------- pandas.DataFrame The object from the object store.
21
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get(self): logger = get_logger() logger.debug(f"ENTER::Partition.get::{self._identity}") if len(self.call_queue): self.drain_call_queue() result = UnidistWrapper.materialize(self._data) logger.debug(f"EXIT::Partition.get::{self._identity}") return result ``` ###Assistant : Get the object wrapped by this partition out of the object store. Returns ------- pandas.DataFrame The object from the object store.
1,883
def test_custom_function_action_no_perm_response(self): action_data = { ACTION_CHECKBOX_NAME: [self.s1.pk], "action": "no_perm", "index": 0, } response = self.client.post( reverse("admin:admin_views_externalsubscriber_changelist"), action_data ) self.assertEqual(response.status_code, 403) self.assertEqual(response.content, b"No permission to perform this action")
A custom action may return an HttpResponse with a 403 code.
11
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_custom_function_action_no_perm_response(self): action_data = { ACTION_CHECKBOX_NAME: [self.s1.pk], "action": "no_perm", "index": 0, } response = self.client.post( reverse("admin:admin_views_externalsubscriber_changelist"), action_data ) self.assertEqual(response.status_code, 403) self.assertEqual(response.content, b"No permission to perform this action") ``` ###Assistant : A custom action may return an HttpResponse with a 403 code.
1,884
def limit(self, *args): return self.applyfunc(lambda x: x.limit(*args)) # https://github.com/sympy/sympy/pull/12854
Calculate the limit of each element in the matrix. ``args`` will be passed to the ``limit`` function. Examples ======== >>> from sympy import Matrix >>> from sympy.abc import x, y >>> M = Matrix([[x, y], [1, 0]]) >>> M.limit(x, 2) Matrix([ [2, y], [1, 0]]) See Also ======== integrate diff
50
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def limit(self, *args): return self.applyfunc(lambda x: x.limit(*args)) # https://github.com/sympy/sympy/pull/12854 ``` ###Assistant : Calculate the limit of each element in the matrix. ``args`` will be passed to the ``limit`` function. Examples ======== >>> from sympy import Matrix >>> from sympy.abc import x, y >>> M = Matrix([[x, y], [1, 0]]) >>> M.limit(x, 2) Matrix([ [2, y], [1, 0]]) See Also ======== integrate diff
1,885
def test_device_classes_aligned(): non_numeric_device_classes = { SensorDeviceClass.DATE, SensorDeviceClass.DURATION, SensorDeviceClass.TIMESTAMP, } for device_class in SensorDeviceClass: if device_class in non_numeric_device_classes: continue assert hasattr(NumberDeviceClass, device_class.name) assert getattr(NumberDeviceClass, device_class.name).value == device_class.value
Make sure all sensor device classes are also available in NumberDeviceClass.
11
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_device_classes_aligned(): non_numeric_device_classes = { SensorDeviceClass.DATE, SensorDeviceClass.DURATION, SensorDeviceClass.TIMESTAMP, } for device_class in SensorDeviceClass: if device_class in non_numeric_device_classes: continue assert hasattr(NumberDeviceClass, device_class.name) assert getattr(NumberDeviceClass, device_class.name).value == device_class.value ``` ###Assistant : Make sure all sensor device classes are also available in NumberDeviceClass.
1,886
def valid_tess_config(outdir): cfg_file = outdir / 'test.cfg' with cfg_file.open('w') as f: f.write( ) yield cfg_file
\ load_system_dawg 0 language_model_penalty_non_dict_word 0 language_model_penalty_non_freq_dict_word 0
7
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def valid_tess_config(outdir): cfg_file = outdir / 'test.cfg' with cfg_file.open('w') as f: f.write( ) yield cfg_file ``` ###Assistant : \ load_system_dawg 0 language_model_penalty_non_dict_word 0 language_model_penalty_non_freq_dict_word 0
1,887
def year_lookup_bounds_for_datetime_field(self, value, iso_year=False): if iso_year: first = datetime.datetime.fromisocalendar(value, 1, 1) second = datetime.datetime.fromisocalendar( value + 1, 1, 1 ) - datetime.timedelta(microseconds=1) else: first = datetime.datetime(value, 1, 1) second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999) if settings.USE_TZ: tz = timezone.get_current_timezone() first = timezone.make_aware(first, tz) second = timezone.make_aware(second, tz) first = self.adapt_datetimefield_value(first) second = self.adapt_datetimefield_value(second) return [first, second]
Return a two-element list with the lower and upper bound to be used with a BETWEEN operator to query a DateTimeField value using a year lookup. `value` is an int, containing the looked-up year. If `iso_year` is True, return bounds for ISO-8601 week-numbering years.
44
59
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def year_lookup_bounds_for_datetime_field(self, value, iso_year=False): if iso_year: first = datetime.datetime.fromisocalendar(value, 1, 1) second = datetime.datetime.fromisocalendar( value + 1, 1, 1 ) - datetime.timedelta(microseconds=1) else: first = datetime.datetime(value, 1, 1) second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999) if settings.USE_TZ: tz = timezone.get_current_timezone() first = timezone.make_aware(first, tz) second = timezone.make_aware(second, tz) first = self.adapt_datetimefield_value(first) second = self.adapt_datetimefield_value(second) return [first, second] ``` ###Assistant : Return a two-element list with the lower and upper bound to be used with a BETWEEN operator to query a DateTimeField value using a year lookup. `value` is an int, containing the looked-up year. If `iso_year` is True, return bounds for ISO-8601 week-numbering years.
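A worked example with USE_TZ disabled, assuming value=2023; the calendar and ISO-8601 branches differ only at the year boundary:

```Python
import datetime

value = 2023

# Calendar-year branch:
first = datetime.datetime(value, 1, 1)                          # 2023-01-01 00:00:00
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)   # 2023-12-31 23:59:59.999999

# ISO-8601 branch: week 1 of 2023 starts on Monday 2023-01-02.
iso_first = datetime.datetime.fromisocalendar(value, 1, 1)      # 2023-01-02 00:00:00
iso_second = (datetime.datetime.fromisocalendar(value + 1, 1, 1)
              - datetime.timedelta(microseconds=1))             # 2023-12-31 23:59:59.999999
```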
1,888
def gegenbauer_poly(n, a, x=None, polys=False): r return named_poly(n, dup_gegenbauer, None, "Gegenbauer polynomial", (x, a), polys)
Generates the Gegenbauer polynomial `C_n^{(a)}(x)`. Parameters ========== n : int Degree of the polynomial. x : optional a Decides minimal domain for the list of coefficients. polys : bool, optional If True, return a Poly, otherwise (default) return an expression.
40
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def gegenbauer_poly(n, a, x=None, polys=False): r return named_poly(n, dup_gegenbauer, None, "Gegenbauer polynomial", (x, a), polys) ``` ###Assistant : Generates the Gegenbauer polynomial `C_n^{(a)}(x)`. Parameters ========== n : int Degree of the polynomial. x : optional a Decides minimal domain for the list of coefficients. polys : bool, optional If True, return a Poly, otherwise (default) return an expression.
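A quick usage sketch via the module the function lives in; the printed form may differ between SymPy versions:

```Python
from sympy.abc import a, x
from sympy.polys.orthopolys import gegenbauer_poly

# C_2^(a)(x) = 2*a*(a + 1)*x**2 - a
print(gegenbauer_poly(2, a, x))
```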
1,889
async def async_update(self, now=None): if not self.pollable_characteristics: self.async_update_available_state() _LOGGER.debug( "HomeKit connection not polling any characteristics: %s", self.unique_id ) return if self._polling_lock.locked(): if not self._polling_lock_warned: _LOGGER.warning( ( "HomeKit controller update skipped as previous poll still in" " flight: %s" ), self.unique_id, ) self._polling_lock_warned = True return if self._polling_lock_warned: _LOGGER.info( ( "HomeKit controller no longer detecting back pressure - not" " skipping poll: %s" ), self.unique_id, ) self._polling_lock_warned = False
Poll state of all entities attached to this bridge/accessory.
9
68
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_update(self, now=None): if not self.pollable_characteristics: self.async_update_available_state() _LOGGER.debug( "HomeKit connection not polling any characteristics: %s", self.unique_id ) return if self._polling_lock.locked(): if not self._polling_lock_warned: _LOGGER.warning( ( "HomeKit controller update skipped as previous poll still in" " flight: %s" ), self.unique_id, ) self._polling_lock_warned = True return if self._polling_lock_warned: _LOGGER.info( ( "HomeKit controller no longer detecting back pressure - not" " skipping poll: %s" ), self.unique_id, ) self._polling_lock_warned = False ``` ###Assistant : Poll state of all entities attached to this bridge/accessory.
1,890
def _merge(self, start, end, left, right): # type: (int, int, int, int) -> Iterator[Tuple[int, int]] lslice, rslice = self._left[left:right], self._right[left:right] i = start = min([start]+lslice[:1]) end = max([end]+rslice[-1:]) for j, k in zip(lslice, rslice): if j > i: yield i, j-1 i = k + 1 if i <= end: yield i, end self._left[left:right], self._right[left:right] = [start], [end]
Return an iterator of intervals to be fetched. Args: start (int): Start of needed interval end (int): End of needed interval left (int): Index of first overlapping downloaded data right (int): Index after last overlapping downloaded data
37
58
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _merge(self, start, end, left, right): # type: (int, int, int, int) -> Iterator[Tuple[int, int]] lslice, rslice = self._left[left:right], self._right[left:right] i = start = min([start]+lslice[:1]) end = max([end]+rslice[-1:]) for j, k in zip(lslice, rslice): if j > i: yield i, j-1 i = k + 1 if i <= end: yield i, end self._left[left:right], self._right[left:right] = [start], [end] ``` ###Assistant : Return an iterator of intervals to be fetched. Args: start (int): Start of needed interval end (int): End of needed interval left (int): Index of first overlapping downloaded data right (int): Index after last overlapping downloaded data
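A standalone re-statement of the generator's logic may make the trace concrete; this sketch drops the `self._left`/`self._right` bookkeeping and just yields the gaps that still need fetching:

```Python
from typing import Iterator, List, Tuple

def merge_intervals(start: int, end: int,
                   lefts: List[int], rights: List[int]) -> Iterator[Tuple[int, int]]:
    # lefts/rights are the boundaries of already-downloaded chunks.
    i = start = min([start] + lefts[:1])
    end = max([end] + rights[-1:])
    for j, k in zip(lefts, rights):
        if j > i:
            yield i, j - 1   # gap before this chunk
        i = k + 1
    if i <= end:
        yield i, end         # trailing gap

# Chunks 10-20 and 30-40 are cached; a read of bytes 15-35 needs only the gap:
print(list(merge_intervals(15, 35, [10, 30], [20, 40])))  # -> [(21, 29)]
```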
1,891
def is_python_identifier(self): # type: (str) -> bool # Ref: https://stackoverflow.com/a/55802320/595220 return bool(re.match(_VALID_IDENTIFIER_STRING_REGEX, self)) PB_EXTENSIONS = ('.yml', '.yaml')
Determine whether the given string is a Python identifier.
9
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_python_identifier(self): # type: (str) -> bool # Ref: https://stackoverflow.com/a/55802320/595220 return bool(re.match(_VALID_IDENTIFIER_STRING_REGEX, self)) PB_EXTENSIONS = ('.yml', '.yaml') ``` ###Assistant : Determine whether the given string is a Python identifier.
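On Python 3 the same check can be sketched with the stdlib; note the keyword exclusion here is an extra refinement, not necessarily part of the original regex:

```Python
import keyword

def is_python_identifier(s: str) -> bool:
    # str.isidentifier() enforces the grammar; keywords are rejected explicitly.
    return s.isidentifier() and not keyword.iskeyword(s)

print(is_python_identifier("my_var"))  # True
print(is_python_identifier("2fast"))   # False
print(is_python_identifier("class"))   # False (keyword)
```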
1,892
def _stream_response(self, start, end, base_headers=HEADERS): # type: (int, int, Dict[str, str]) -> Response headers = base_headers.copy() headers['Range'] = f'bytes={start}-{end}' # TODO: Get range requests to be correctly cached headers['Cache-Control'] = 'no-cache' return self._session.get(self._url, headers=headers, stream=True)
Return HTTP response to a range request from start to end.
11
35
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _stream_response(self, start, end, base_headers=HEADERS): # type: (int, int, Dict[str, str]) -> Response headers = base_headers.copy() headers['Range'] = f'bytes={start}-{end}' # TODO: Get range requests to be correctly cached headers['Cache-Control'] = 'no-cache' return self._session.get(self._url, headers=headers, stream=True) ``` ###Assistant : Return HTTP response to a range request from start to end.
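The same request outside the class, sketched with requests directly; the URL is a placeholder:

```Python
import requests

url = "https://example.com/pkg.whl"  # placeholder
headers = {"Range": "bytes=0-1023", "Cache-Control": "no-cache"}
resp = requests.get(url, headers=headers, stream=True)
# A server honoring range requests replies 206 Partial Content.
print(resp.status_code, resp.headers.get("Content-Range"))
```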
1,893
def _inject_greasemonkey_scripts(self, scripts): if sip.isdeleted(self._widget): return # Since we are inserting scripts into a per-tab collection, # rather than just injecting scripts on page load, we need to # make sure we replace existing scripts, not just add new ones. # While, taking care not to remove any other scripts that might # have been added elsewhere, like the one for stylesheets. page_scripts = self._widget.page().scripts() self._remove_all_greasemonkey_scripts() seen_names = set() for script in scripts: while script.full_name() in seen_names: script.dedup_suffix += 1 seen_names.add(script.full_name()) new_script = QWebEngineScript() try: world = int(script.jsworld) if not 0 <= world <= qtutils.MAX_WORLD_ID: log.greasemonkey.error( f"script {script.name} has invalid value for '@qute-js-world'" f": {script.jsworld}, should be between 0 and " f"{qtutils.MAX_WORLD_ID}") continue except ValueError: try: world = _JS_WORLD_MAP[usertypes.JsWorld[script.jsworld.lower()]] except KeyError: log.greasemonkey.error( f"script {script.name} has invalid value for '@qute-js-world'" f": {script.jsworld}") continue new_script.setWorldId(world) # Corresponds to "@run-at document-end" which is the default according to # https://wiki.greasespot.net/Metadata_Block#.40run-at - however, # QtWebEngine uses QWebEngineScript.InjectionPoint.Deferred (@run-at document-idle) as # default. # # NOTE that this needs to be done before setSourceCode, so that # QtWebEngine's parsing of GreaseMonkey tags will override it if there is a # @run-at comment. new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady) new_script.setSourceCode(script.code()) new_script.setName(script.full_name()) new_script.setRunsOnSubFrames(script.runs_on_sub_frames) if script.needs_document_end_workaround(): log.greasemonkey.debug( f"Forcing @run-at document-end for {script.name}") new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady) log.greasemonkey.debug(f'adding script: {new_script.name()}') page_scripts.insert(new_script)
Register user JavaScript files with the current tab. Args: scripts: A list of GreasemonkeyScripts.
14
203
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _inject_greasemonkey_scripts(self, scripts): if sip.isdeleted(self._widget): return # Since we are inserting scripts into a per-tab collection, # rather than just injecting scripts on page load, we need to # make sure we replace existing scripts, not just add new ones. # While, taking care not to remove any other scripts that might # have been added elsewhere, like the one for stylesheets. page_scripts = self._widget.page().scripts() self._remove_all_greasemonkey_scripts() seen_names = set() for script in scripts: while script.full_name() in seen_names: script.dedup_suffix += 1 seen_names.add(script.full_name()) new_script = QWebEngineScript() try: world = int(script.jsworld) if not 0 <= world <= qtutils.MAX_WORLD_ID: log.greasemonkey.error( f"script {script.name} has invalid value for '@qute-js-world'" f": {script.jsworld}, should be between 0 and " f"{qtutils.MAX_WORLD_ID}") continue except ValueError: try: world = _JS_WORLD_MAP[usertypes.JsWorld[script.jsworld.lower()]] except KeyError: log.greasemonkey.error( f"script {script.name} has invalid value for '@qute-js-world'" f": {script.jsworld}") continue new_script.setWorldId(world) # Corresponds to "@run-at document-end" which is the default according to # https://wiki.greasespot.net/Metadata_Block#.40run-at - however, # QtWebEngine uses QWebEngineScript.InjectionPoint.Deferred (@run-at document-idle) as # default. # # NOTE that this needs to be done before setSourceCode, so that # QtWebEngine's parsing of GreaseMonkey tags will override it if there is a # @run-at comment. new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady) new_script.setSourceCode(script.code()) new_script.setName(script.full_name()) new_script.setRunsOnSubFrames(script.runs_on_sub_frames) if script.needs_document_end_workaround(): log.greasemonkey.debug( f"Forcing @run-at document-end for {script.name}") new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady) log.greasemonkey.debug(f'adding script: {new_script.name()}') page_scripts.insert(new_script) ``` ###Assistant : Register user JavaScript files with the current tab. Args: scripts: A list of GreasemonkeyScripts.
1,894
def config(self) -> dict: global _CONFIG # pylint: disable=global-statement if not _CONFIG: model_name = self._config_section logger.debug("Loading config for: %s", model_name) _CONFIG = Config(model_name, configfile=self._configfile).config_dict return _CONFIG
dict: The configuration dictionary for current plugin, as set by the user's configuration settings.
14
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def config(self) -> dict: global _CONFIG # pylint: disable=global-statement if not _CONFIG: model_name = self._config_section logger.debug("Loading config for: %s", model_name) _CONFIG = Config(model_name, configfile=self._configfile).config_dict return _CONFIG ``` ###Assistant : dict: The configuration dictionary for current plugin, as set by the user's configuration settings.
1,895
def execute(): frappe.reload_doc("stock", "doctype", "stock_entry") if frappe.db.has_column("Stock Entry", "add_to_transit"): frappe.db.sql( ) frappe.db.sql( ) frappe.reload_doc("stock", "doctype", "warehouse_type") if not frappe.db.exists("Warehouse Type", "Transit"): doc = frappe.new_doc("Warehouse Type") doc.name = "Transit" doc.insert() frappe.reload_doc("stock", "doctype", "stock_entry_type") frappe.delete_doc_if_exists("Stock Entry Type", "Send to Warehouse") frappe.delete_doc_if_exists("Stock Entry Type", "Receive at Warehouse")
UPDATE `tabStock Entry` SET stock_entry_type = 'Material Transfer', purpose = 'Material Transfer', add_to_transit = 1 WHERE stock_entry_type = 'Send to Warehouse' UPDATE `tabStock Entry` SET stock_entry_type = 'Material Transfer', purpose = 'Material Transfer' WHERE stock_entry_type = 'Receive at Warehouse'
39
44
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def execute(): frappe.reload_doc("stock", "doctype", "stock_entry") if frappe.db.has_column("Stock Entry", "add_to_transit"): frappe.db.sql( ) frappe.db.sql( ) frappe.reload_doc("stock", "doctype", "warehouse_type") if not frappe.db.exists("Warehouse Type", "Transit"): doc = frappe.new_doc("Warehouse Type") doc.name = "Transit" doc.insert() frappe.reload_doc("stock", "doctype", "stock_entry_type") frappe.delete_doc_if_exists("Stock Entry Type", "Send to Warehouse") frappe.delete_doc_if_exists("Stock Entry Type", "Receive at Warehouse") ``` ###Assistant : UPDATE `tabStock Entry` SET stock_entry_type = 'Material Transfer', purpose = 'Material Transfer', add_to_transit = 1 WHERE stock_entry_type = 'Send to Warehouse' UPDATE `tabStock Entry` SET stock_entry_type = 'Material Transfer', purpose = 'Material Transfer' WHERE stock_entry_type = 'Receive at Warehouse'
1,896
def query_task(doctype, txt, searchfield, start, page_len, filters): from frappe.desk.reportview import build_match_conditions search_string = "%%%s%%" % txt order_by_string = "%s%%" % txt match_conditions = build_match_conditions("Task") match_conditions = ("and" + match_conditions) if match_conditions else "" return frappe.db.sql( % (searchfield, "%s", "%s", match_conditions, "%s", searchfield, "%s", searchfield, "%s", "%s"), (search_string, search_string, order_by_string, order_by_string, start, page_len), )
select name, subject from `tabTask` where (`%s` like %s or `subject` like %s) %s order by case when `subject` like %s then 0 else 1 end, case when `%s` like %s then 0 else 1 end, `%s`, subject limit %s, %s
41
53
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def query_task(doctype, txt, searchfield, start, page_len, filters): from frappe.desk.reportview import build_match_conditions search_string = "%%%s%%" % txt order_by_string = "%s%%" % txt match_conditions = build_match_conditions("Task") match_conditions = ("and" + match_conditions) if match_conditions else "" return frappe.db.sql( % (searchfield, "%s", "%s", match_conditions, "%s", searchfield, "%s", searchfield, "%s", "%s"), (search_string, search_string, order_by_string, order_by_string, start, page_len), ) ``` ###Assistant : select name, subject from `tabTask` where (`%s` like %s or `subject` like %s) %s order by case when `subject` like %s then 0 else 1 end, case when `%s` like %s then 0 else 1 end, `%s`, subject limit %s, %s
1,897
def _parse_name(self, name): if name.endswith("_float32_vars"): error_msg = ( "Policies ending in '_float32_vars' have been removed " "from TensorFlow." ) if name in ("infer_float32_vars", "infer_with_float32_vars"): error_msg += ( " Please use the 'mixed_float16' or 'mixed_bfloat16' " "policy instead." ) elif name == "float16_with_float32_vars": error_msg += " Please use the 'mixed_float16' policy instead." elif name == "bfloat16_with_float32_vars": error_msg += " Please use the 'mixed_bfloat16' policy instead." error_msg += " Got policy name: '%s'" % name raise ValueError(error_msg) if name == "mixed_float16": return "float16", "float32" elif name == "mixed_bfloat16": return "bfloat16", "float32" elif name == "_infer": # The "_infer" policy exists only for compatibility with TF 1, where # "_infer" is the default. The behavior matches the behavior of TF 1's # behavior before policies were introduced. With "_infer", the computation # and variable dtype are inferred from the first input the first time the # layer is called. Once the layer is called for the first time, the # layer's policy will change to the dtype of the first input, and it will # no longer have the "_infer" policy. # # The infer policy should be considered an implementation detail and may # be removed in the future. return None, None try: dtype = tf.as_dtype(name).name except TypeError: error = ( "Cannot convert value %s to a mixed precision Policy. " "Valid policies include 'mixed_float16', 'mixed_bfloat16', " "and the name of any dtype such as 'float32'." % (name,) ) raise ValueError(error) return dtype, dtype
Parses a Policy name into a compute and variable dtype. Args: name: The name of the policy: Returns: The (compute_dtype, variable_dtype) pair.
22
242
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _parse_name(self, name): if name.endswith("_float32_vars"): error_msg = ( "Policies ending in '_float32_vars' have been removed " "from TensorFlow." ) if name in ("infer_float32_vars", "infer_with_float32_vars"): error_msg += ( " Please use the 'mixed_float16' or 'mixed_bfloat16' " "policy instead." ) elif name == "float16_with_float32_vars": error_msg += " Please use the 'mixed_float16' policy instead." elif name == "bfloat16_with_float32_vars": error_msg += " Please use the 'mixed_bfloat16' policy instead." error_msg += " Got policy name: '%s'" % name raise ValueError(error_msg) if name == "mixed_float16": return "float16", "float32" elif name == "mixed_bfloat16": return "bfloat16", "float32" elif name == "_infer": # The "_infer" policy exists only for compatibility with TF 1, where # "_infer" is the default. The behavior matches the behavior of TF 1's # behavior before policies were introduced. With "_infer", the computation # and variable dtype are inferred from the first input the first time the # layer is called. Once the layer is called for the first time, the # layer's policy will change to the dtype of the first input, and it will # no longer have the "_infer" policy. # # The infer policy should be considered an implementation detail and may # be removed in the future. return None, None try: dtype = tf.as_dtype(name).name except TypeError: error = ( "Cannot convert value %s to a mixed precision Policy. " "Valid policies include 'mixed_float16', 'mixed_bfloat16', " "and the name of any dtype such as 'float32'." % (name,) ) raise ValueError(error) return dtype, dtype ``` ###Assistant : Parses a Policy name into a compute and variable dtype. Args: name: The name of the policy: Returns: The (compute_dtype, variable_dtype) pair.
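The public API that wraps this parser can be exercised directly; these values follow from the branches above:

```Python
import tensorflow as tf

policy = tf.keras.mixed_precision.Policy("mixed_float16")
print(policy.compute_dtype)   # float16
print(policy.variable_dtype)  # float32

plain = tf.keras.mixed_precision.Policy("float32")
print(plain.compute_dtype, plain.variable_dtype)  # float32 float32
```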
1,898
def reraise_exceptions_as_crashes(): try: yield except BaseException as exc: state = exception_to_crashed_state(exc) raise Crash(message=state.message, cause=exc, state=state) from exc
Detect crashes during this context, wrapping unexpected exceptions into `Crash` signals.
11
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def reraise_exceptions_as_crashes(): try: yield except BaseException as exc: state = exception_to_crashed_state(exc) raise Crash(message=state.message, cause=exc, state=state) from exc ``` ###Assistant : Detect crashes during this context, wrapping unexpected exceptions into `Crash` signals.
1,899
def resoure_check(): MINIMUM_ALLOWED_MEMORY = 4 MINIMUM_ALLOWED_CPUS = 2 MINIMUM_ALLOWED_DISK = 20 print("\nChecking resources.\n") # Memory current available svmem = psutil.virtual_memory() mem_available = get_size(svmem.available) # Cpus current available cpus_available = psutil.cpu_count(logical=True) # Disk current available partitions = psutil.disk_partitions() partition_usage = psutil.disk_usage(partitions[0].mountpoint) disk_available = get_size(partition_usage.free) resources: Dict[str, Resource] = { 'Memory': Resource(current=mem_available, minimumAllowed=MINIMUM_ALLOWED_MEMORY), 'Cpus': Resource(current=cpus_available, minimumAllowed=MINIMUM_ALLOWED_CPUS), 'Disk': Resource(current=disk_available, minimumAllowed=MINIMUM_ALLOWED_DISK), } return resources
Use psutil to get resources in bytes for memory and disk
11
60
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def resoure_check(): MINIMUM_ALLOWED_MEMORY = 4 MINIMUM_ALLOWED_CPUS = 2 MINIMUM_ALLOWED_DISK = 20 print("\nChecking resources.\n") # Memory current available svmem = psutil.virtual_memory() mem_available = get_size(svmem.available) # Cpus current available cpus_available = psutil.cpu_count(logical=True) # Disk current available partitions = psutil.disk_partitions() partition_usage = psutil.disk_usage(partitions[0].mountpoint) disk_available = get_size(partition_usage.free) resources: Dict[str, Resource] = { 'Memory': Resource(current=mem_available, minimumAllowed=MINIMUM_ALLOWED_MEMORY), 'Cpus': Resource(current=cpus_available, minimumAllowed=MINIMUM_ALLOWED_CPUS), 'Disk': Resource(current=disk_available, minimumAllowed=MINIMUM_ALLOWED_DISK), } return resources ``` ###Assistant : Use psutil to get resources in bytes for memory and disk