Dataset schema (column name, dtype, observed range):
- Unnamed: 0 (int64): 0 to 2.93k
- code (string): lengths 101 to 62.2k characters
- docs (string): lengths 51 to 10.7k characters
- doc_len (int64): 4 to 1.74k
- words (int64): 4 to 4.82k
- lang (string): 1 distinct value
- prompt (string): lengths 320 to 71.2k characters
Row 1,500 (lang: Python, doc_len: 11, words: 8)

code:
```python
async def read_settings() -> prefect.settings.Settings:
    return prefect.settings.get_current_settings().with_obfuscated_secrets()


@router.get("/version")
```

docs:
Get the current Orion settings. Secret setting values will be obfuscated.
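The snippet above is only a fragment of Prefect's server code, so as a point of reference, here is a minimal, self-contained sketch of the same mask-secrets-before-returning pattern; the SETTINGS dict, SECRET_KEYS set, and route path are hypothetical stand-ins, not Prefect's real internals.

```python
# Minimal sketch of the obfuscated-settings pattern (hypothetical names).
from fastapi import APIRouter, FastAPI

router = APIRouter()

SETTINGS = {"api_url": "http://localhost:4200", "api_key": "super-secret"}
SECRET_KEYS = {"api_key"}  # hypothetical set of secret setting names


@router.get("/settings")
async def read_settings() -> dict:
    # Mask secret values before returning them to the client.
    return {k: "********" if k in SECRET_KEYS else v for k, v in SETTINGS.items()}


app = FastAPI()
app.include_router(router)
```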
Row 1,501 (lang: Python, doc_len: 435, words: 20)

code:
```python
def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):
    from dask.array.overlap import map_overlap

    return map_overlap(
        func, self, depth=depth, boundary=boundary, trim=trim, **kwargs
    )
```

docs:
Map a function over blocks of the array with some overlap.

We share neighboring zones between blocks of the array, then map a function, then trim away the neighboring strips.

Note that this function will attempt to automatically determine the output array type before computing it, please refer to the ``meta`` keyword argument in :func:`map_blocks <dask.array.core.map_blocks>` if you expect that the function will not succeed when operating on 0-d arrays.

Parameters
----------
func: function
    The function to apply to each extended block
depth: int, tuple, or dict
    The number of elements that each block should share with its neighbors.
    If a tuple or dict then this can be different per axis
boundary: str, tuple, dict
    How to handle the boundaries. Values include 'reflect', 'periodic',
    'nearest', 'none', or any constant value like 0 or np.nan
trim: bool
    Whether or not to trim ``depth`` elements from each block after calling
    the map function. Set this to False if your mapping function already
    does this for you
**kwargs:
    Other keyword arguments valid in :func:`map_blocks <dask.array.core.map_blocks>`.

Examples
--------
>>> import dask.array as da
>>> x = np.array([1, 1, 2, 3, 3, 3, 2, 1, 1])
>>> x = da.from_array(x, chunks=5)
>>> def derivative(x):
...     return x - np.roll(x, 1)
>>> y = x.map_overlap(derivative, depth=1, boundary=0)
>>> y.compute()
array([ 1,  0,  1,  1,  0,  0, -1, -1,  0])

>>> import dask.array as da
>>> x = np.arange(16).reshape((4, 4))
>>> d = da.from_array(x, chunks=(2, 2))
>>> y = d.map_overlap(lambda x: x + x.size, depth=1, boundary='reflect')
>>> y.compute()
array([[16, 17, 18, 19],
       [20, 21, 22, 23],
       [24, 25, 26, 27],
       [28, 29, 30, 31]])

>>> func = lambda x: x + x.size
>>> depth = {0: 1, 1: 1}
>>> boundary = {0: 'reflect', 1: 'none'}
>>> d.map_overlap(func, depth, boundary).compute()  # doctest: +NORMALIZE_WHITESPACE
array([[12, 13, 14, 15],
       [16, 17, 18, 19],
       [20, 21, 22, 23],
       [24, 25, 26, 27]])

>>> x = np.arange(16).reshape((4, 4))
>>> d = da.from_array(x, chunks=(2, 2))
>>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=np.array(()))
>>> y
dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=numpy.ndarray>
>>> y.compute()
array([[ 4,  6,  8, 10],
       [ 8, 10, 12, 14],
       [20, 22, 24, 26],
       [24, 26, 28, 30]])

>>> import cupy  # doctest: +SKIP
>>> x = cupy.arange(16).reshape((4, 4))  # doctest: +SKIP
>>> d = da.from_array(x, chunks=(2, 2))  # doctest: +SKIP
>>> y = d.map_overlap(lambda x: x + x[2], depth=1, boundary='reflect', meta=cupy.array(()))  # doctest: +SKIP
>>> y  # doctest: +SKIP
dask.array<_trim, shape=(4, 4), dtype=float64, chunksize=(2, 2), chunktype=cupy.ndarray>
>>> y.compute()  # doctest: +SKIP
array([[ 4,  6,  8, 10],
       [ 8, 10, 12, 14],
       [20, 22, 24, 26],
       [24, 26, 28, 30]])
Row 1,502 (lang: Python, doc_len: 35, words: 33)

code:
```python
def _flush(self):
    # Raise an Exception if the value is being changed in read mode.
    if not self._write:
        raise GDALException(
            "Raster needs to be opened in write mode to change values."
        )
    capi.flush_ds(self._ptr)
```

docs:
Flush all data from memory into the source file if it exists. The data that needs flushing includes geotransforms, coordinate systems, nodata_values and pixel values. This function will be called automatically wherever it is needed.
Row 1,503 (lang: Python, doc_len: 39, words: 82)

code:
```python
def _round(self, places, rounding):
    if places <= 0:
        raise ValueError("argument should be at least 1 in _round")
    if self._is_special or not self:
        return Decimal(self)
    ans = self._rescale(self.adjusted()+1-places, rounding)
    # it can happen that the rescale alters the adjusted exponent;
    # for example when rounding 99.97 to 3 significant figures.
    # When this happens we end up with an extra 0 at the end of
    # the number; a second rescale fixes this.
    if ans.adjusted() != self.adjusted():
        ans = ans._rescale(ans.adjusted()+1-places, rounding)
    return ans
```

docs:
Round a nonzero, nonspecial Decimal to a fixed number of significant figures, using the given rounding mode. Infinities, NaNs and zeros are returned unaltered. This operation is quiet: it raises no flags, and uses no information from the context.
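The `_rescale`/`adjusted` calls above are private `Decimal` internals; a rough equivalent of the same double-rescale trick can be sketched with the public `decimal` API (the function name and rounding default below are my own choices, and the sketch assumes a nonzero, finite input as the docstring does):

```python
from decimal import Decimal, ROUND_HALF_EVEN


def round_sig(value: Decimal, places: int, rounding=ROUND_HALF_EVEN) -> Decimal:
    # Quantize so that `places` significant digits remain.
    if places <= 0:
        raise ValueError("places must be at least 1")
    ans = value.quantize(Decimal(1).scaleb(value.adjusted() + 1 - places),
                         rounding=rounding)
    # A second pass handles cases like 99.97 -> 100.0, where rounding bumps
    # the exponent and leaves an extra trailing digit.
    if ans.adjusted() != value.adjusted():
        ans = ans.quantize(Decimal(1).scaleb(ans.adjusted() + 1 - places),
                           rounding=rounding)
    return ans


print(round_sig(Decimal("99.97"), 3))  # 100, not 100.0
```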
Row 1,504 (lang: Python, doc_len: 17, words: 10)

code:
```python
def is_redirect(self):
    return "location" in self.headers and self.status_code in REDIRECT_STATI
```

docs:
True if this Response is a well-formed HTTP redirect that could have been processed automatically (by :meth:`Session.resolve_redirects`).
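A short usage sketch against the public `requests` API; the URL is just an example redirecting endpoint:

```python
import requests

# Disable automatic redirect resolution so the redirect response itself
# is returned to us rather than being followed.
resp = requests.get("https://httpbin.org/redirect/1", allow_redirects=False)
print(resp.status_code, resp.is_redirect)  # e.g. 302 True
```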
Row 1,505 (lang: Python, doc_len: 10, words: 19)

code:
```python
def write_eqapo_graphic_eq(self, file_path, normalize=True):
    file_path = os.path.abspath(file_path)
    s = self.eqapo_graphic_eq(normalize=normalize)
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write(s)
    return s
```

docs:
Writes equalization graph to a file as Equalizer APO config.
Row 1,506 (lang: Python, doc_len: 102, words: 29)

code:
```python
def test_export_pipeline_6():
    pipeline_string = (
        'DecisionTreeClassifier(SelectPercentile(input_matrix, SelectPercentile__percentile=20),'
        'DecisionTreeClassifier__criterion=gini, DecisionTreeClassifier__max_depth=8,'
        'DecisionTreeClassifier__min_samples_leaf=5, DecisionTreeClassifier__min_samples_split=5)'
    )
    pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    expected_code = exported_code = export_pipeline(
        pipeline, tpot_obj.operators, tpot_obj._pset,
        random_state=42, data_file_path='test_path'
    )
    assert expected_code == exported_code
```

docs:
Assert that export_pipeline() generates a compiled source file with random_state and data_file_path.

```python
import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectPercentile, f_classif
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeClassifier
from tpot.export_utils import set_param_recursive

# NOTE: Make sure that the outcome column is labeled 'target' in the data file
tpot_data = pd.read_csv('test_path', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1)
training_features, testing_features, training_target, testing_target = \
    train_test_split(features, tpot_data['target'], random_state=42)

exported_pipeline = make_pipeline(
    SelectPercentile(score_func=f_classif, percentile=20),
    DecisionTreeClassifier(criterion="gini", max_depth=8,
                           min_samples_leaf=5, min_samples_split=5)
)
# Fix random state for all the steps in exported pipeline
set_param_recursive(exported_pipeline.steps, 'random_state', 42)

exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
```
Row 1,507 (lang: Python, doc_len: 33, words: 275)

code:
```python
def _check_edge_connectivity(G):
    # Construct the auxiliary graph that can be used to make each k-cc or k-sub
    aux_graph = EdgeComponentAuxGraph.construct(G)

    # memoize the local connectivity in this graph
    memo = {}

    for k in it.count(1):
        # Test "local" k-edge-components and k-edge-subgraphs
        ccs_local = fset(aux_graph.k_edge_components(k))
        ccs_subgraph = fset(aux_graph.k_edge_subgraphs(k))

        # Check connectivity properties that should be guaranteed by the
        # algorithms.
        _assert_local_cc_edge_connectivity(G, ccs_local, k, memo)
        _assert_subgraph_edge_connectivity(G, ccs_subgraph, k)

        if k == 1 or k == 2 and not G.is_directed():
            assert (
                ccs_local == ccs_subgraph
            ), "Subgraphs and components should be the same when k == 1 or (k == 2 and not G.directed())"

        if G.is_directed():
            # Test special case methods are the same as the aux graph
            if k == 1:
                alt_sccs = fset(nx.strongly_connected_components(G))
                assert alt_sccs == ccs_local, "k=1 failed alt"
                assert alt_sccs == ccs_subgraph, "k=1 failed alt"
        else:
            # Test special case methods are the same as the aux graph
            if k == 1:
                alt_ccs = fset(nx.connected_components(G))
                assert alt_ccs == ccs_local, "k=1 failed alt"
                assert alt_ccs == ccs_subgraph, "k=1 failed alt"
            elif k == 2:
                alt_bridge_ccs = fset(bridge_components(G))
                assert alt_bridge_ccs == ccs_local, "k=2 failed alt"
                assert alt_bridge_ccs == ccs_subgraph, "k=2 failed alt"
            # if new methods for k == 3 or k == 4 are implemented add them here

        # Check the general subgraph method works by itself
        alt_subgraph_ccs = fset(
            [set(C.nodes()) for C in general_k_edge_subgraphs(G, k=k)]
        )
        assert alt_subgraph_ccs == ccs_subgraph, "alt subgraph method failed"

        # Stop once k is larger than all special case methods
        # and we cannot break down ccs any further.
        if k > 2 and all(len(cc) == 1 for cc in ccs_local):
            break


# ----------------
# Misc tests
# ----------------
```

docs:
Helper - generates all k-edge-components using the aux graph. Checks both the local and subgraph edge connectivity of each cc. Also checks that alternate methods of computing the k-edge-ccs generate the same result.
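For context, a hedged sketch of the public NetworkX entry point this helper exercises; the barbell graph below is an arbitrary choice with exactly one bridge:

```python
import networkx as nx

# Two 4-cliques joined by a single bridge edge.
G = nx.barbell_graph(4, 0)

# k=1 components are just the connected components; k=2 components split
# at the bridge.
print(list(nx.k_edge_components(G, k=1)))  # [{0, 1, 2, 3, 4, 5, 6, 7}]
print(list(nx.k_edge_components(G, k=2)))  # [{0, 1, 2, 3}, {4, 5, 6, 7}]
```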
Row 1,508 (lang: Python, doc_len: 19, words: 53)

code:
```python
def test_sequence_name_length_limits_flush(self):
    # A full flush is expensive to the full test, so we dig into the
    # internals to generate the likely offending SQL and run it manually
    # Some convenience aliases
    VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
    VLM_m2m = (
        VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
    )
    tables = [
        VLM._meta.db_table,
        VLM_m2m._meta.db_table,
    ]
    sql_list = connection.ops.sql_flush(no_style(), tables, reset_sequences=True)
    connection.ops.execute_sql_flush(sql_list)
```

docs:
Sequence resetting as part of a flush with a model with a long name and a long pk name doesn't error (#8901).
Row 1,509 (lang: Python, doc_len: 7, words: 159)

code:
```python
def test_delete_media(self) -> None:
    download_resource = self.media_repo.children[b"download"]
    upload_resource = self.media_repo.children[b"upload"]

    # Upload some media into the room
    response = self.helper.upload_media(
        upload_resource,
        SMALL_PNG,
        tok=self.admin_user_tok,
        expect_code=200,
    )
    # Extract media ID from the response
    server_and_media_id = response["content_uri"][6:]  # Cut off 'mxc://'
    server_name, media_id = server_and_media_id.split("/")

    self.assertEqual(server_name, self.server_name)

    # Attempt to access media
    channel = make_request(
        self.reactor,
        FakeSite(download_resource, self.reactor),
        "GET",
        server_and_media_id,
        shorthand=False,
        access_token=self.admin_user_tok,
    )

    # Should be successful
    self.assertEqual(
        200,
        channel.code,
        msg=(
            "Expected to receive a 200 on accessing media: %s" % server_and_media_id
        ),
    )

    # Test if the file exists
    local_path = self.filepaths.local_media_filepath(media_id)
    self.assertTrue(os.path.exists(local_path))

    url = "/_synapse/admin/v1/media/%s/%s" % (self.server_name, media_id)

    # Delete media
    channel = self.make_request(
        "DELETE",
        url,
        access_token=self.admin_user_tok,
    )
    self.assertEqual(200, channel.code, msg=channel.json_body)
    self.assertEqual(1, channel.json_body["total"])
    self.assertEqual(
        media_id,
        channel.json_body["deleted_media"][0],
    )

    # Attempt to access media
    channel = make_request(
        self.reactor,
        FakeSite(download_resource, self.reactor),
        "GET",
        server_and_media_id,
        shorthand=False,
        access_token=self.admin_user_tok,
    )
    self.assertEqual(
        HTTPStatus.NOT_FOUND,
        channel.code,
        msg=(
            "Expected to receive a HTTPStatus.NOT_FOUND on accessing deleted media: %s"
            % server_and_media_id
        ),
    )

    # Test if the file is deleted
    self.assertFalse(os.path.exists(local_path))
```

docs:
Tests that deleting a media item succeeds.
Row 1,510 (lang: Python, doc_len: 42, words: 40)

code:
```python
def load_data_for_viz(load_type, model_file_statistics, **kwargs):
    supported_load_types = dict(
        load_json=load_json,
        load_from_file=partial(
            load_from_file,
            dtype=kwargs.get("dtype", int),
            ground_truth_split=kwargs.get("ground_truth_split", 2),
        ),
    )
    loader = supported_load_types[load_type]
    try:
        stats_per_model = [loader(stats_f) for stats_f in model_file_statistics]
    except (TypeError, AttributeError):
        logger.exception(f"Unable to open model statistics file {model_file_statistics}!")
        raise
    return stats_per_model
```

docs:
Load model file data into a list of training statistics.

:param load_type: type of the data loader to be used.
:param model_file_statistics: JSON file or list of JSON files containing any model experiment stats.
:return: List of training statistics loaded as json objects.
Row 1,511 (lang: Python, doc_len: 218, words: 52)

code:
```python
def scan_vocab(self, corpus_iterable=None, corpus_file=None, progress_per=100000, trim_rule=None):
    logger.info("collecting all words and their counts")
    if corpus_file is not None:
        corpus_iterable = TaggedLineDocument(corpus_file)

    total_words, corpus_count = self._scan_vocab(corpus_iterable, progress_per, trim_rule)

    logger.info(
        "collected %i word types and %i unique tags from a corpus of %i examples and %i words",
        len(self.raw_vocab), len(self.dv), corpus_count, total_words,
    )

    return total_words, corpus_count
```

docs:
Create the model's vocabulary: a mapping from unique words in the corpus to their frequency count.

Parameters
----------
documents : iterable of :class:`~gensim.models.doc2vec.TaggedDocument`, optional
    The tagged documents used to create the vocabulary. Their tags can be either str tokens or ints (faster).
corpus_file : str, optional
    Path to a corpus file in :class:`~gensim.models.word2vec.LineSentence` format.
    You may use this argument instead of `documents` to get performance boost. Only one of `documents` or
    `corpus_file` arguments need to be passed (not both of them).
progress_per : int
    Progress will be logged every `progress_per` documents.
trim_rule : function, optional
    Vocabulary trimming rule, specifies whether certain words should remain in the vocabulary,
    be trimmed away, or handled using the default (discard if word count < min_count).
    Can be None (min_count will be used, look to :func:`~gensim.utils.keep_vocab_item`),
    or a callable that accepts parameters (word, count, min_count) and returns either
    :attr:`gensim.utils.RULE_DISCARD`, :attr:`gensim.utils.RULE_KEEP` or :attr:`gensim.utils.RULE_DEFAULT`.
    The rule, if given, is only used to prune vocabulary during
    :meth:`~gensim.models.doc2vec.Doc2Vec.build_vocab` and is not stored as part of the model.

    The input parameters are of the following types:
        * `word` (str) - the word we are examining
        * `count` (int) - the word's frequency count in the corpus
        * `min_count` (int) - the minimum count threshold.

Returns
-------
(int, int)
    Tuple of `(total words in the corpus, number of documents)`.
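A small usage sketch of the public gensim API that reaches this method via `build_vocab`; the toy documents are made up:

```python
from gensim.models.doc2vec import Doc2Vec, TaggedDocument

# Two toy documents tagged with integer ids.
docs = [
    TaggedDocument(words=["machine", "learning", "is", "fun"], tags=[0]),
    TaggedDocument(words=["deep", "learning", "is", "fun", "too"], tags=[1]),
]

model = Doc2Vec(vector_size=16, min_count=1)
model.build_vocab(docs)  # vocabulary scanning happens inside build_vocab
print(len(model.wv), "word types;", len(model.dv), "document tags")
```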
Row 1,512 (lang: Python, doc_len: 75, words: 337)

code:
```python
def _parse_command_opts(self, parser, args):
    # late import because of mutual dependence between these modules
    from distutils.cmd import Command

    # Pull the current command from the head of the command line
    command = args[0]
    if not command_re.match(command):
        raise SystemExit("invalid command name '%s'" % command)
    self.commands.append(command)

    # Dig up the command class that implements this command, so we
    # 1) know that it's a valid command, and 2) know which options
    # it takes.
    try:
        cmd_class = self.get_command_class(command)
    except DistutilsModuleError as msg:
        raise DistutilsArgError(msg)

    # Require that the command class be derived from Command -- want
    # to be sure that the basic "command" interface is implemented.
    if not issubclass(cmd_class, Command):
        raise DistutilsClassError(
            "command class %s must subclass Command" % cmd_class)

    # Also make sure that the command object provides a list of its
    # known options.
    if not (hasattr(cmd_class, 'user_options') and
            isinstance(cmd_class.user_options, list)):
        msg = ("command class %s must provide "
               "'user_options' attribute (a list of tuples)")
        raise DistutilsClassError(msg % cmd_class)

    # If the command class has a list of negative alias options,
    # merge it in with the global negative aliases.
    negative_opt = self.negative_opt
    if hasattr(cmd_class, 'negative_opt'):
        negative_opt = negative_opt.copy()
        negative_opt.update(cmd_class.negative_opt)

    # Check for help_options in command class. They have a different
    # format (tuple of four) so we need to preprocess them here.
    if (hasattr(cmd_class, 'help_options') and
            isinstance(cmd_class.help_options, list)):
        help_options = fix_help_options(cmd_class.help_options)
    else:
        help_options = []

    # All commands support the global options too, just by adding
    # in 'global_options'.
    parser.set_option_table(self.global_options +
                            cmd_class.user_options +
                            help_options)
    parser.set_negative_aliases(negative_opt)
    (args, opts) = parser.getopt(args[1:])
    if hasattr(opts, 'help') and opts.help:
        self._show_help(parser, display_options=0, commands=[cmd_class])
        return

    if (hasattr(cmd_class, 'help_options') and
            isinstance(cmd_class.help_options, list)):
        help_option_found = 0
        for (help_option, short, desc, func) in cmd_class.help_options:
            if hasattr(opts, parser.get_attr_name(help_option)):
                help_option_found = 1
                if callable(func):
                    func()
                else:
                    raise DistutilsClassError(
                        "invalid help function %r for help option '%s': "
                        "must be a callable object (function, etc.)"
                        % (func, help_option))
        if help_option_found:
            return

    # Put the options from the command-line into their official
    # holding pen, the 'command_options' dictionary.
    opt_dict = self.get_option_dict(command)
    for (name, value) in vars(opts).items():
        opt_dict[name] = ("command line", value)

    return args
```

docs:
Parse the command-line options for a single command. 'parser' must be a FancyGetopt instance; 'args' must be the list of arguments, starting with the current command (whose options we are about to parse). Returns a new version of 'args' with the next command at the front of the list; will be the empty list if there are no more commands on the command line. Returns None if the user asked for help on this command.
Row 1,513 (lang: Python, doc_len: 128, words: 213)

code:
```python
def interval(self, confidence=None, *args, **kwds):
    # This function was originally written with parameter `alpha`, but
    # `alpha` is also the name of a shape parameter of two distributions.
    # This block allows the function to accept both `alpha` and its
    # replacement `confidence` during a deprecation period; it can be
    # removed in the second release after 1.9.0.
    # See description of logic in `moment` method.
    has_shape_alpha = (self.shapes is not None
                       and "alpha" in (self.shapes.split(", ")))
    got_confidence = confidence is not None
    got_keyword_alpha = kwds.get("alpha", None) is not None

    if not got_confidence and ((not got_keyword_alpha)
                               or (got_keyword_alpha and has_shape_alpha)):
        message = ("interval() missing 1 required positional argument: "
                   "`confidence`")
        raise TypeError(message)

    if got_keyword_alpha and not has_shape_alpha:
        if got_confidence:
            # this will change to "interval got unexpected argument alpha"
            message = "interval() got multiple values for first argument"
            raise TypeError(message)
        else:
            message = ("Use of keyword argument `alpha` for method "
                       "`interval` is deprecated. Use first positional "
                       "argument or keyword argument `confidence` "
                       "instead.")
            confidence = kwds.pop("alpha")
            warnings.warn(message, DeprecationWarning, stacklevel=2)
    alpha = confidence

    alpha = asarray(alpha)
    if np.any((alpha > 1) | (alpha < 0)):
        raise ValueError("alpha must be between 0 and 1 inclusive")
    q1 = (1.0-alpha)/2
    q2 = (1.0+alpha)/2
    a = self.ppf(q1, *args, **kwds)
    b = self.ppf(q2, *args, **kwds)
    return a, b
```

docs:
Confidence interval with equal areas around the median.

.. deprecated:: 1.9.0
    Parameter `alpha` is replaced by parameter `confidence` to avoid name collisions with the shape parameter `alpha` of some distributions. Parameter `alpha` will be removed in the second release after 1.9.0.

Parameters
----------
confidence : array_like of float
    Probability that an rv will be drawn from the returned range.
    Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
    The shape parameter(s) for the distribution (see docstring of the
    instance object for more information).
loc : array_like, optional
    location parameter, Default is 0.
scale : array_like, optional
    scale parameter, Default is 1.

Returns
-------
a, b : ndarray of float
    end-points of range that contain ``100 * alpha %`` of the rv's possible values.
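A usage sketch of the public SciPy API described above:

```python
from scipy import stats

# 95% central interval of the standard normal distribution.
lo, hi = stats.norm.interval(0.95, loc=0, scale=1)
print(lo, hi)  # roughly -1.96 and 1.96
```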
Row 1,514 (lang: Python, doc_len: 24, words: 80)

code:
```python
def __call__(self) -> bool:
    for meta in tqdm(self._face_alignments,
                     desc="Updating Alignments File from PNG Header",
                     leave=False):
        src = meta["source"]
        alignment = meta["alignments"]
        if not any(alignment.get(key, {}) for key in self._updatable_keys):
            continue

        faces = self._alignments.get_faces_in_frame(src["source_filename"])
        if len(faces) < src["face_index"] + 1:  # list index out of range
            logger.debug("Skipped face '%s'. Index does not exist in alignments file",
                         src["original_filename"])
            continue

        face = faces[src["face_index"]]
        self._check_and_update(alignment, face)

    retval = False
    if self._counts:
        retval = True
        logger.info("Updated alignments file from PNG Data: %s", self._counts)
    return retval
```

docs:
Parse through the face data updating any entries in the alignments file.

Returns
-------
bool
    ``True`` if any alignment information was updated otherwise ``False``
Row 1,515 (lang: Python, doc_len: 31, words: 22)

code:
```python
def test_dynamic_path(self):
    doc = Document.objects.create(
        title="does not matter",
        created=timezone.make_aware(datetime.datetime(2020, 6, 25, 7, 36, 51, 153)),
        mime_type="application/pdf",
        pk=2,
        checksum="2",
        storage_path=StoragePath.objects.create(path="TestFolder/{created}"),
    )
    self.assertEqual(generate_filename(doc), "TestFolder/2020-06-25.pdf")
```

docs:
GIVEN:
    - A document with a defined storage path
WHEN:
    - the filename is generated for the document
THEN:
    - the generated filename uses the defined storage path for the document
Row 1,516 (lang: Python, doc_len: 11, words: 17)

code:
```python
async def follower_loop(self):
    try:
        await self._connect_to_leaders()
    except Exception as e:
        logger.error("Exception occurred in follower loop: ")
        logger.exception(e)
```

docs:
Main follower coroutine. This starts all of the leader connection coros.
Row 1,517 (lang: Python, doc_len: 34, words: 18)

code:
```python
def test_launcher_ensures_stdio(self):
    from kitty.constants import kitty_exe
    import subprocess
    exe = kitty_exe()
    cp = subprocess.run([exe, '+runpy', ])
    self.assertEqual(cp.returncode, 0)
```

docs (the docstring here is the source passed to `+runpy`):
\
import os, sys
if sys.stdin:
    os.close(sys.stdin.fileno())
if sys.stdout:
    os.close(sys.stdout.fileno())
if sys.stderr:
    os.close(sys.stderr.fileno())
os.execlp('kitty', 'kitty', '+runpy', 'import sys; raise SystemExit(1 if sys.stdout is None or sys.stdin is None or sys.stderr is None else 0)')
Row 1,518 (lang: Python, doc_len: 17, words: 20)

code:
```python
def test_interface_label_count_mismatch(self):
    bad_interface_data = {
        'device': self.device.pk,
        'name': 'eth[0-9]',
        'label': 'Interface[0-1]',
        'type': InterfaceTypeChoices.TYPE_1GE_GBIC,
    }
    form = InterfaceCreateForm(bad_interface_data)

    self.assertFalse(form.is_valid())
    self.assertIn('label', form.errors)
```

docs:
Check that attempting to generate a differing number of names and labels results in a validation error.
Row 1,519 (lang: Python, doc_len: 421, words: 13)

code:
```python
def power_transform(X, method="yeo-johnson", *, standardize=True, copy=True):
    pt = PowerTransformer(method=method, standardize=standardize, copy=copy)
    return pt.fit_transform(X)
```

docs:
Parametric, monotonic transformation to make data more Gaussian-like.

Power transforms are a family of parametric, monotonic transformations that are applied to make data more Gaussian-like. This is useful for modeling issues related to heteroscedasticity (non-constant variance), or other situations where normality is desired.

Currently, power_transform supports the Box-Cox transform and the Yeo-Johnson transform. The optimal parameter for stabilizing variance and minimizing skewness is estimated through maximum likelihood.

Box-Cox requires input data to be strictly positive, while Yeo-Johnson supports both positive or negative data.

By default, zero-mean, unit-variance normalization is applied to the transformed data.

Read more in the :ref:`User Guide <preprocessing_transformer>`.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    The data to be transformed using a power transformation.
method : {'yeo-johnson', 'box-cox'}, default='yeo-johnson'
    The power transform method. Available methods are:

    - 'yeo-johnson' [1]_, works with positive and negative values
    - 'box-cox' [2]_, only works with strictly positive values

    .. versionchanged:: 0.23
        The default value of the `method` parameter changed from 'box-cox' to 'yeo-johnson' in 0.23.
standardize : bool, default=True
    Set to True to apply zero-mean, unit-variance normalization to the transformed output.
copy : bool, default=True
    Set to False to perform inplace computation during transformation.

Returns
-------
X_trans : ndarray of shape (n_samples, n_features)
    The transformed data.

See Also
--------
PowerTransformer : Equivalent transformation with the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).
quantile_transform : Maps data to a standard normal distribution with the parameter `output_distribution='normal'`.

Notes
-----
NaNs are treated as missing values: disregarded in ``fit``, and maintained in ``transform``.

For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.

References
----------
.. [1] I.K. Yeo and R.A. Johnson, "A new family of power transformations to improve normality or symmetry." Biometrika, 87(4), pp.954-959, (2000).
.. [2] G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the Royal Statistical Society B, 26, 211-252 (1964).

Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import power_transform
>>> data = [[1, 2], [3, 2], [4, 5]]
>>> print(power_transform(data, method='box-cox'))
[[-1.332... -0.707...]
 [ 0.256... -0.707...]
 [ 1.076...  1.414...]]

.. warning:: Risk of data leak.
    Do not use :func:`~sklearn.preprocessing.power_transform` unless you know what you are doing. A common mistake is to apply it to the entire data *before* splitting into training and test sets. This will bias the model evaluation because information would have leaked from the test set to the training set. In general, we recommend using :class:`~sklearn.preprocessing.PowerTransformer` within a :ref:`Pipeline <pipeline>` in order to prevent most risks of data leaking, e.g.: `pipe = make_pipeline(PowerTransformer(), LogisticRegression())`.
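The warning above recommends wrapping the transformer in a pipeline; a minimal sketch of that recommendation (dataset and split parameters are arbitrary):

```python
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PowerTransformer

X, y = make_classification(n_samples=200, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# The transformer is fit only on the training fold, avoiding leakage.
pipe = make_pipeline(PowerTransformer(), LogisticRegression())
pipe.fit(X_train, y_train)
print(pipe.score(X_test, y_test))
```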
Row 1,520 (lang: Python, doc_len: 77, words: 17)

code:
```python
def desargues_graph(create_using=None):
    G = LCF_graph(20, [5, -5, 9, -9], 5, create_using)
    G.name = "Desargues Graph"
    return G
```

docs:
Returns the Desargues Graph.

The Desargues Graph is a non-planar, distance-transitive cubic graph with 20 nodes and 30 edges [1]_. It is a symmetric graph. It can be represented in LCF notation as [5,-5,9,-9]^5 [2]_.

Parameters
----------
create_using : NetworkX graph constructor, optional (default=nx.Graph)
    Graph type to create. If graph instance, then cleared before populated.

Returns
-------
G : networkx Graph
    Desargues Graph with 20 nodes and 30 edges

References
----------
.. [1] https://en.wikipedia.org/wiki/Desargues_graph
.. [2] https://mathworld.wolfram.com/DesarguesGraph.html
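A short usage sketch via the public NetworkX constructor:

```python
import networkx as nx

G = nx.desargues_graph()
print(G.number_of_nodes(), G.number_of_edges())  # 20 30
print(nx.check_planarity(G)[0])  # False: the graph is non-planar
```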
Row 1,521 (lang: Python, doc_len: 26, words: 36)

code:
```python
def get_output_feature_jsonschema():
    output_feature_types = sorted(list(output_type_registry.keys()))
    return {
        "type": "array",
        "items": {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "type": {"type": "string", "enum": output_feature_types},
                "column": {"type": "string"},
            },
            "additionalProperties": True,
            "allOf": get_output_feature_conds(),
            "required": ["name", "type"],
        },
    }
```

docs:
This function returns a JSON schema structured to only require a `type` key and then conditionally apply a corresponding output feature's field constraints.

Returns: JSON Schema
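A hedged sketch of validating data against a schema of this shape, using the third-party `jsonschema` package; the enum values and instance below are hypothetical, not Ludwig's actual feature registry:

```python
from jsonschema import validate

# A trimmed-down schema of the same shape as the one returned above.
schema = {
    "type": "array",
    "items": {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "type": {"type": "string", "enum": ["binary", "category", "number"]},
            "column": {"type": "string"},
        },
        "required": ["name", "type"],
    },
}

# Passes silently; raises jsonschema.ValidationError on a bad instance.
validate(instance=[{"name": "label", "type": "category"}], schema=schema)
```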
1,522
def greet(str): return str with gr.Blocks() as demo: with gr.Row(): text1 = gr.component("textarea") text2 = gr.TextArea() text3 = gr.templates.TextArea() text1.change(greet, text1, text2) text2.change(greet, text2, text3) text3.change(greet, text3, text1) demo.launch()
You can make use of str shortcuts you use in Interface within Blocks as well. Interface shortcut example: Interface(greet, "textarea", "textarea") You can use 1. gr.component() 2. gr.templates.Template() 3. gr.Template() All the templates are listed in gradio/templates.py
37
29
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def greet(str): return str with gr.Blocks() as demo: with gr.Row(): text1 = gr.component("textarea") text2 = gr.TextArea() text3 = gr.templates.TextArea() text1.change(greet, text1, text2) text2.change(greet, text2, text3) text3.change(greet, text3, text1) demo.launch() ``` ###Assistant : You can make use of str shortcuts you use in Interface within Blocks as well. Interface shortcut example: Interface(greet, "textarea", "textarea") You can use 1. gr.component() 2. gr.templates.Template() 3. gr.Template() All the templates are listed in gradio/templates.py
1,523
def _impute_values(self, features): if self.verbosity > 1: print("Imputing missing values in feature set") if self._fitted_imputer is None: self._fitted_imputer = SimpleImputer(strategy="median") self._fitted_imputer.fit(features) return self._fitted_imputer.transform(features)
Impute missing values in a feature set. Parameters ---------- features: array-like {n_samples, n_features} A feature matrix Returns ------- array-like {n_samples, n_features}
21
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _impute_values(self, features): if self.verbosity > 1: print("Imputing missing values in feature set") if self._fitted_imputer is None: self._fitted_imputer = SimpleImputer(strategy="median") self._fitted_imputer.fit(features) return self._fitted_imputer.transform(features) ``` ###Assistant : Impute missing values in a feature set. Parameters ---------- features: array-like {n_samples, n_features} A feature matrix Returns ------- array-like {n_samples, n_features}
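The median strategy above can be demonstrated directly with scikit-learn, without the surrounding class context; a minimal sketch assuming scikit-learn is available:

```Python
import numpy as np
from sklearn.impute import SimpleImputer

features = np.array([[1.0, np.nan],
                     [3.0, 4.0],
                     [np.nan, 6.0]])
imputer = SimpleImputer(strategy="median")
print(imputer.fit_transform(features))
# [[1. 5.]
#  [3. 4.]
#  [2. 6.]]
```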
1,524
def get_current_site(request): # Import is inside the function because its point is to avoid importing the # Site models when django.contrib.sites isn't installed. if apps.is_installed("django.contrib.sites"): from .models import Site return Site.objects.get_current(request) else: return RequestSite(request)
Check if contrib.sites is installed and return either the current ``Site`` object or a ``RequestSite`` object based on the request.
20
34
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_current_site(request): # Import is inside the function because its point is to avoid importing the # Site models when django.contrib.sites isn't installed. if apps.is_installed("django.contrib.sites"): from .models import Site return Site.objects.get_current(request) else: return RequestSite(request) ``` ###Assistant : Check if contrib.sites is installed and return either the current ``Site`` object or a ``RequestSite`` object based on the request.
1,525
def get_scripts(use_names=False): scripts = OrderedDict() # Iterate through all modules within the scripts path. These are the user-created files in which reports are # defined. for importer, module_name, _ in pkgutil.iter_modules([settings.SCRIPTS_ROOT]): # Remove cached module to ensure consistency with filesystem if module_name in sys.modules: del sys.modules[module_name] module = importer.find_module(module_name).load_module(module_name) if use_names and hasattr(module, 'name'): module_name = module.name module_scripts = OrderedDict() script_order = getattr(module, "script_order", ()) ordered_scripts = [cls for cls in script_order if is_script(cls)] unordered_scripts = [cls for _, cls in inspect.getmembers(module, is_script) if cls not in script_order] for cls in [*ordered_scripts, *unordered_scripts]: module_scripts[cls.__name__] = cls if module_scripts: scripts[module_name] = module_scripts return scripts
Return a dict of dicts mapping all scripts to their modules. Set use_names to True to use each module's human-defined name in place of the actual module name.
29
103
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_scripts(use_names=False): scripts = OrderedDict() # Iterate through all modules within the scripts path. These are the user-created files in which reports are # defined. for importer, module_name, _ in pkgutil.iter_modules([settings.SCRIPTS_ROOT]): # Remove cached module to ensure consistency with filesystem if module_name in sys.modules: del sys.modules[module_name] module = importer.find_module(module_name).load_module(module_name) if use_names and hasattr(module, 'name'): module_name = module.name module_scripts = OrderedDict() script_order = getattr(module, "script_order", ()) ordered_scripts = [cls for cls in script_order if is_script(cls)] unordered_scripts = [cls for _, cls in inspect.getmembers(module, is_script) if cls not in script_order] for cls in [*ordered_scripts, *unordered_scripts]: module_scripts[cls.__name__] = cls if module_scripts: scripts[module_name] = module_scripts return scripts ``` ###Assistant : Return a dict of dicts mapping all scripts to their modules. Set use_names to True to use each module's human-defined name in place of the actual module name.
1,526
def render(self, template_name, extra_context=None): if extra_context is None: extra_context = {} elif not isinstance(extra_context, dict): raise TypeError("extra_context must be a dictionary") return get_template(template_name).render({**self.context, **extra_context})
Convenience method for rendering the specified Django template using the default context data. An additional context dictionary may be passed as `extra_context`.
22
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def render(self, template_name, extra_context=None): if extra_context is None: extra_context = {} elif not isinstance(extra_context, dict): raise TypeError("extra_context must be a dictionary") return get_template(template_name).render({**self.context, **extra_context}) ``` ###Assistant : Convenience method for rendering the specified Django template using the default context data. An additional context dictionary may be passed as `extra_context`.
1,527
def preprocess_input(x, data_format=None): return x @keras_export("keras.applications.mobilenet_v3.decode_predictions")
A placeholder method for backward compatibility. The preprocessing logic has been included in the mobilenet_v3 model implementation. Users are no longer required to call this method to normalize the input data. This method does nothing and is only kept as a placeholder to align the API surface between old and new versions of the model. Args: x: A floating point `numpy.array` or a `tf.Tensor`. data_format: Optional data format of the image tensor/array. Defaults to None, in which case the global setting `tf.keras.backend.image_data_format()` is used (unless you changed it, it defaults to "channels_last").{mode} Returns: Unchanged `numpy.array` or `tf.Tensor`.
95
6
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def preprocess_input(x, data_format=None): return x @keras_export("keras.applications.mobilenet_v3.decode_predictions") ``` ###Assistant : A placeholder method for backward compatibility. The preprocessing logic has been included in the mobilenet_v3 model implementation. Users are no longer required to call this method to normalize the input data. This method does nothing and is only kept as a placeholder to align the API surface between old and new versions of the model. Args: x: A floating point `numpy.array` or a `tf.Tensor`. data_format: Optional data format of the image tensor/array. Defaults to None, in which case the global setting `tf.keras.backend.image_data_format()` is used (unless you changed it, it defaults to "channels_last").{mode} Returns: Unchanged `numpy.array` or `tf.Tensor`.
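The documented no-op behaviour can be checked directly; a minimal sketch assuming TensorFlow/Keras is installed:

```Python
import numpy as np
from tensorflow.keras.applications.mobilenet_v3 import preprocess_input

x = np.random.uniform(0, 255, (1, 224, 224, 3)).astype("float32")
# preprocess_input is a placeholder: the array comes back unchanged.
assert np.array_equal(preprocess_input(x), x)
```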
1,528
def message_level_tag(message): return MESSAGE_TAGS.get(message.level) @register.simple_tag
Return the tag for this message's level as defined in django.contrib.messages.constants.DEFAULT_TAGS, ignoring the project-level MESSAGE_TAGS setting (which end-users might customise).
20
5
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def message_level_tag(message): return MESSAGE_TAGS.get(message.level) @register.simple_tag ``` ###Assistant : Return the tag for this message's level as defined in django.contrib.messages.constants.DEFAULT_TAGS, ignoring the project-level MESSAGE_TAGS setting (which end-users might customise).
1,529
def _check_reg_targets(y_true, y_pred, multioutput, dtype="numeric"): check_consistent_length(y_true, y_pred) y_true = check_array(y_true, ensure_2d=False, dtype=dtype) y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype) if y_true.ndim == 1: y_true = y_true.reshape((-1, 1)) if y_pred.ndim == 1: y_pred = y_pred.reshape((-1, 1)) if y_true.shape[1] != y_pred.shape[1]: raise ValueError( "y_true and y_pred have different number of output ({0}!={1})".format( y_true.shape[1], y_pred.shape[1] ) ) n_outputs = y_true.shape[1] allowed_multioutput_str = ("raw_values", "uniform_average", "variance_weighted") if isinstance(multioutput, str): if multioutput not in allowed_multioutput_str: raise ValueError( "Allowed 'multioutput' string values are {}. " "You provided multioutput={!r}".format( allowed_multioutput_str, multioutput ) ) elif multioutput is not None: multioutput = check_array(multioutput, ensure_2d=False) if n_outputs == 1: raise ValueError("Custom weights are useful only in multi-output cases.") elif n_outputs != len(multioutput): raise ValueError( "There must be equally many custom weights (%d) as outputs (%d)." % (len(multioutput), n_outputs) ) y_type = "continuous" if n_outputs == 1 else "continuous-multioutput" return y_type, y_true, y_pred, multioutput
Check that y_true and y_pred belong to the same regression task. Parameters ---------- y_true : array-like y_pred : array-like multioutput : array-like or string in ['raw_values', 'uniform_average', 'variance_weighted'] or None None is accepted due to backward compatibility of r2_score(). dtype : str or list, default="numeric" the dtype argument passed to check_array. Returns ------- type_true : one of {'continuous', 'continuous-multioutput'} The type of the true target data, as output by 'utils.multiclass.type_of_target'. y_true : array-like of shape (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape (n_samples, n_outputs) Estimated target values. multioutput : array-like of shape (n_outputs) or string in ['raw_values', 'uniform_average', 'variance_weighted'] or None Custom output weights if ``multioutput`` is array-like or just the corresponding argument if ``multioutput`` is a correct keyword.
124
141
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_reg_targets(y_true, y_pred, multioutput, dtype="numeric"): check_consistent_length(y_true, y_pred) y_true = check_array(y_true, ensure_2d=False, dtype=dtype) y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype) if y_true.ndim == 1: y_true = y_true.reshape((-1, 1)) if y_pred.ndim == 1: y_pred = y_pred.reshape((-1, 1)) if y_true.shape[1] != y_pred.shape[1]: raise ValueError( "y_true and y_pred have different number of output ({0}!={1})".format( y_true.shape[1], y_pred.shape[1] ) ) n_outputs = y_true.shape[1] allowed_multioutput_str = ("raw_values", "uniform_average", "variance_weighted") if isinstance(multioutput, str): if multioutput not in allowed_multioutput_str: raise ValueError( "Allowed 'multioutput' string values are {}. " "You provided multioutput={!r}".format( allowed_multioutput_str, multioutput ) ) elif multioutput is not None: multioutput = check_array(multioutput, ensure_2d=False) if n_outputs == 1: raise ValueError("Custom weights are useful only in multi-output cases.") elif n_outputs != len(multioutput): raise ValueError( "There must be equally many custom weights (%d) as outputs (%d)." % (len(multioutput), n_outputs) ) y_type = "continuous" if n_outputs == 1 else "continuous-multioutput" return y_type, y_true, y_pred, multioutput ``` ###Assistant : Check that y_true and y_pred belong to the same regression task. Parameters ---------- y_true : array-like y_pred : array-like multioutput : array-like or string in ['raw_values', 'uniform_average', 'variance_weighted'] or None None is accepted due to backward compatibility of r2_score(). dtype : str or list, default="numeric" the dtype argument passed to check_array. Returns ------- type_true : one of {'continuous', 'continuous-multioutput'} The type of the true target data, as output by 'utils.multiclass.type_of_target'. y_true : array-like of shape (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape (n_samples, n_outputs) Estimated target values. multioutput : array-like of shape (n_outputs) or string in ['raw_values', 'uniform_average', 'variance_weighted'] or None Custom output weights if ``multioutput`` is array-like or just the corresponding argument if ``multioutput`` is a correct keyword.
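A hedged illustration of the reshaping this helper performs; note the import path is scikit-learn internal (a private module) and may differ between versions:

```Python
from sklearn.metrics._regression import _check_reg_targets

y_type, y_true, y_pred, multioutput = _check_reg_targets(
    [3.0, 2.5, 4.0], [2.8, 2.6, 3.9], multioutput="uniform_average"
)
print(y_type)        # continuous
print(y_true.shape)  # (3, 1) -- 1-D inputs are promoted to column vectors
```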
1,530
def bind(self, *args, **kwargs) -> Union[ClassNode, FunctionNode]: copied_self = copy(self) copied_self._func_or_class = "dummpy.module" schema_shell = deployment_to_schema(copied_self) if inspect.isfunction(self._func_or_class): return FunctionNode( self._func_or_class, args, # Used to bind and resolve DAG only, can take user input kwargs, # Used to bind and resolve DAG only, can take user input self._ray_actor_options or dict(), other_args_to_resolve={ "deployment_schema": schema_shell, "is_from_serve_deployment": True, }, ) else: return ClassNode( self._func_or_class, args, kwargs, cls_options=self._ray_actor_options or dict(), other_args_to_resolve={ "deployment_schema": schema_shell, "is_from_serve_deployment": True, }, )
Bind the provided arguments and return a class or function node. The returned bound deployment can be deployed or bound to other deployments to create a deployment graph.
28
73
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def bind(self, *args, **kwargs) -> Union[ClassNode, FunctionNode]: copied_self = copy(self) copied_self._func_or_class = "dummpy.module" schema_shell = deployment_to_schema(copied_self) if inspect.isfunction(self._func_or_class): return FunctionNode( self._func_or_class, args, # Used to bind and resolve DAG only, can take user input kwargs, # Used to bind and resolve DAG only, can take user input self._ray_actor_options or dict(), other_args_to_resolve={ "deployment_schema": schema_shell, "is_from_serve_deployment": True, }, ) else: return ClassNode( self._func_or_class, args, kwargs, cls_options=self._ray_actor_options or dict(), other_args_to_resolve={ "deployment_schema": schema_shell, "is_from_serve_deployment": True, }, ) ``` ###Assistant : Bind the provided arguments and return a class or function node. The returned bound deployment can be deployed or bound to other deployments to create a deployment graph.
1,531
def _obtain_mask(cls, detected_face, mask_type): mask = detected_face.mask.get(mask_type) if not mask: return None if mask.stored_centering != "face": face = AlignedFace(detected_face.landmarks_xy) mask.set_sub_crop(face.pose.offset[mask.stored_centering], face.pose.offset["face"], centering="face") return mask.mask.squeeze()
Obtain the mask for the correct "face" centering that is used in the thumbnail display. Parameters ----------- detected_face: :class:`lib.align.DetectedFace` The Detected Face object to obtain the mask for mask_type: str The type of mask to obtain Returns ------- :class:`numpy.ndarray` or ``None`` The single channel mask of requested mask type, if it exists, otherwise ``None``
54
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _obtain_mask(cls, detected_face, mask_type): mask = detected_face.mask.get(mask_type) if not mask: return None if mask.stored_centering != "face": face = AlignedFace(detected_face.landmarks_xy) mask.set_sub_crop(face.pose.offset[mask.stored_centering], face.pose.offset["face"], centering="face") return mask.mask.squeeze() ``` ###Assistant : Obtain the mask for the correct "face" centering that is used in the thumbnail display. Parameters ----------- detected_face: :class:`lib.align.DetectedFace` The Detected Face object to obtain the mask for mask_type: str The type of mask to obtain Returns ------- :class:`numpy.ndarray` or ``None`` The single channel mask of requested mask type, if it exists, otherwise ``None``
1,532
def forward(self, input, mask=None): forward_input, backward_input = paddle.chunk(input, chunks=2, axis=2) # elementwise-sum forward_x and backward_x # Shape: (batch_size, max_seq_len, hidden_size) h = paddle.add_n([forward_input, backward_input]) # Shape: (batch_size, hidden_size, 1) att_weight = self.att_weight.tile( repeat_times=(paddle.shape(h)[0], 1, 1)) # Shape: (batch_size, max_seq_len, 1) att_score = paddle.bmm(paddle.tanh(h), att_weight) if mask is not None: # mask, remove the effect of 'PAD' mask = paddle.cast(mask, dtype='float32') mask = mask.unsqueeze(axis=-1) inf_tensor = paddle.full( shape=mask.shape, dtype='float32', fill_value=-INF) att_score = paddle.multiply(att_score, mask) + paddle.multiply( inf_tensor, (1 - mask)) # Shape: (batch_size, max_seq_len, 1) att_weight = F.softmax(att_score, axis=1) # Shape: (batch_size, lstm_hidden_size) reps = paddle.bmm(h.transpose(perm=(0, 2, 1)), att_weight).squeeze(axis=-1) reps = paddle.tanh(reps) return reps, att_weight
Args: input (paddle.Tensor) of shape (batch, seq_len, input_size): Tensor containing the features of the input sequence. mask (paddle.Tensor) of shape (batch, seq_len) : Tensor is a bool tensor, each element of which identifies whether the input word id is the pad token or not. Defaults to `None`.
45
104
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def forward(self, input, mask=None): forward_input, backward_input = paddle.chunk(input, chunks=2, axis=2) # elementwise-sum forward_x and backward_x # Shape: (batch_size, max_seq_len, hidden_size) h = paddle.add_n([forward_input, backward_input]) # Shape: (batch_size, hidden_size, 1) att_weight = self.att_weight.tile( repeat_times=(paddle.shape(h)[0], 1, 1)) # Shape: (batch_size, max_seq_len, 1) att_score = paddle.bmm(paddle.tanh(h), att_weight) if mask is not None: # mask, remove the effect of 'PAD' mask = paddle.cast(mask, dtype='float32') mask = mask.unsqueeze(axis=-1) inf_tensor = paddle.full( shape=mask.shape, dtype='float32', fill_value=-INF) att_score = paddle.multiply(att_score, mask) + paddle.multiply( inf_tensor, (1 - mask)) # Shape: (batch_size, max_seq_len, 1) att_weight = F.softmax(att_score, axis=1) # Shape: (batch_size, lstm_hidden_size) reps = paddle.bmm(h.transpose(perm=(0, 2, 1)), att_weight).squeeze(axis=-1) reps = paddle.tanh(reps) return reps, att_weight ``` ###Assistant : Args: input (paddle.Tensor) of shape (batch, seq_len, input_size): Tensor containing the features of the input sequence. mask (paddle.Tensor) of shape (batch, seq_len) : Tensor is a bool tensor, each element of which identifies whether the input word id is the pad token or not. Defaults to `None`.
1,533
def simple_test(self, feats, img_metas, **kwargs): all_cls_scores, all_mask_preds = self(feats, img_metas) mask_cls_results = all_cls_scores[-1] mask_pred_results = all_mask_preds[-1] # upsample masks img_shape = img_metas[0]['batch_input_shape'] mask_pred_results = F.interpolate( mask_pred_results, size=(img_shape[0], img_shape[1]), mode='bilinear', align_corners=False) return mask_cls_results, mask_pred_results
Test without augmentation. Args: feats (list[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. Returns: tuple: A tuple containing two tensors. - mask_cls_results (Tensor): Mask classification logits,\ shape (batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should include background. - mask_pred_results (Tensor): Mask logits, shape \ (batch_size, num_queries, h, w).
55
33
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def simple_test(self, feats, img_metas, **kwargs): all_cls_scores, all_mask_preds = self(feats, img_metas) mask_cls_results = all_cls_scores[-1] mask_pred_results = all_mask_preds[-1] # upsample masks img_shape = img_metas[0]['batch_input_shape'] mask_pred_results = F.interpolate( mask_pred_results, size=(img_shape[0], img_shape[1]), mode='bilinear', align_corners=False) return mask_cls_results, mask_pred_results ``` ###Assistant : Test without augmentation. Args: feats (list[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. img_metas (list[dict]): List of image information. Returns: tuple: A tuple containing two tensors. - mask_cls_results (Tensor): Mask classification logits,\ shape (batch_size, num_queries, cls_out_channels). Note `cls_out_channels` should include background. - mask_pred_results (Tensor): Mask logits, shape \ (batch_size, num_queries, h, w).
1,534
def _format_list(self, extracted_list): Colors = self.Colors list = [] for ind, (filename, lineno, name, line) in enumerate(extracted_list): normalCol, nameCol, fileCol, lineCol = ( # Emphasize the last entry (Colors.normalEm, Colors.nameEm, Colors.filenameEm, Colors.line) if ind == len(extracted_list) - 1 else (Colors.Normal, Colors.name, Colors.filename, "") ) fns = _format_filename(filename, fileCol, normalCol, lineno=lineno) item = f"{normalCol} {fns}" if name != "<module>": item += f" in {nameCol}{name}{normalCol}\n" else: item += "\n" if line: item += f"{lineCol} {line.strip()}{normalCol}\n" list.append(item) return list
Format a list of traceback entry tuples for printing. Given a list of tuples as returned by extract_tb() or extract_stack(), return a list of strings ready for printing. Each string in the resulting list corresponds to the item with the same index in the argument list. Each string ends in a newline; the strings may contain internal newlines as well, for those items whose source text line is not None. Lifted almost verbatim from traceback.py
75
76
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _format_list(self, extracted_list): Colors = self.Colors list = [] for ind, (filename, lineno, name, line) in enumerate(extracted_list): normalCol, nameCol, fileCol, lineCol = ( # Emphasize the last entry (Colors.normalEm, Colors.nameEm, Colors.filenameEm, Colors.line) if ind == len(extracted_list) - 1 else (Colors.Normal, Colors.name, Colors.filename, "") ) fns = _format_filename(filename, fileCol, normalCol, lineno=lineno) item = f"{normalCol} {fns}" if name != "<module>": item += f" in {nameCol}{name}{normalCol}\n" else: item += "\n" if line: item += f"{lineCol} {line.strip()}{normalCol}\n" list.append(item) return list ``` ###Assistant : Format a list of traceback entry tuples for printing. Given a list of tuples as returned by extract_tb() or extract_stack(), return a list of strings ready for printing. Each string in the resulting list corresponds to the item with the same index in the argument list. Each string ends in a newline; the strings may contain internal newlines as well, for those items whose source text line is not None. Lifted almost verbatim from traceback.py
1,535
def euler_equations(L, funcs=(), vars=()): funcs = tuple(funcs) if iterable(funcs) else (funcs,) if not funcs: funcs = tuple(L.atoms(Function)) else: for f in funcs: if not isinstance(f, Function): raise TypeError('Function expected, got: %s' % f) vars = tuple(vars) if iterable(vars) else (vars,) if not vars: vars = funcs[0].args else: vars = tuple(sympify(var) for var in vars) if not all(isinstance(v, Symbol) for v in vars): raise TypeError('Variables are not symbols, got %s' % vars) for f in funcs: if not vars == f.args: raise ValueError("Variables %s do not match args: %s" % (vars, f)) order = max([len(d.variables) for d in L.atoms(Derivative) if d.expr in funcs] + [0]) eqns = [] for f in funcs: eq = diff(L, f) for i in range(1, order + 1): for p in combinations_with_replacement(vars, i): eq = eq + S.NegativeOne**i*diff(L, diff(f, *p), *p) new_eq = Eq(eq, 0) if isinstance(new_eq, Eq): eqns.append(new_eq) return eqns
Find the Euler-Lagrange equations [1]_ for a given Lagrangian. Parameters ========== L : Expr The Lagrangian that should be a function of the functions listed in the second argument and their derivatives. For example, in the case of two functions `f(x,y)`, `g(x,y)` and two independent variables `x`, `y` the Lagrangian would have the form: .. math:: L\left(f(x,y),g(x,y),\frac{\partial f(x,y)}{\partial x}, \frac{\partial f(x,y)}{\partial y}, \frac{\partial g(x,y)}{\partial x}, \frac{\partial g(x,y)}{\partial y},x,y\right) In many cases it is not necessary to provide anything, except the Lagrangian, it will be auto-detected (and an error raised if this couldn't be done). funcs : Function or an iterable of Functions The functions that the Lagrangian depends on. The Euler equations are differential equations for each of these functions. vars : Symbol or an iterable of Symbols The Symbols that are the independent variables of the functions. Returns ======= eqns : list of Eq The list of differential equations, one for each function. Examples ======== >>> from sympy import euler_equations, Symbol, Function >>> x = Function('x') >>> t = Symbol('t') >>> L = (x(t).diff(t))**2/2 - x(t)**2/2 >>> euler_equations(L, x(t), t) [Eq(-x(t) - Derivative(x(t), (t, 2)), 0)] >>> u = Function('u') >>> x = Symbol('x') >>> L = (u(t, x).diff(t))**2/2 - (u(t, x).diff(x))**2/2 >>> euler_equations(L, u(t, x), [t, x]) [Eq(-Derivative(u(t, x), (t, 2)) + Derivative(u(t, x), (x, 2)), 0)] References ========== .. [1] https://en.wikipedia.org/wiki/Euler%E2%80%93Lagrange_equation
224
146
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def euler_equations(L, funcs=(), vars=()): funcs = tuple(funcs) if iterable(funcs) else (funcs,) if not funcs: funcs = tuple(L.atoms(Function)) else: for f in funcs: if not isinstance(f, Function): raise TypeError('Function expected, got: %s' % f) vars = tuple(vars) if iterable(vars) else (vars,) if not vars: vars = funcs[0].args else: vars = tuple(sympify(var) for var in vars) if not all(isinstance(v, Symbol) for v in vars): raise TypeError('Variables are not symbols, got %s' % vars) for f in funcs: if not vars == f.args: raise ValueError("Variables %s do not match args: %s" % (vars, f)) order = max([len(d.variables) for d in L.atoms(Derivative) if d.expr in funcs] + [0]) eqns = [] for f in funcs: eq = diff(L, f) for i in range(1, order + 1): for p in combinations_with_replacement(vars, i): eq = eq + S.NegativeOne**i*diff(L, diff(f, *p), *p) new_eq = Eq(eq, 0) if isinstance(new_eq, Eq): eqns.append(new_eq) return eqns ``` ###Assistant : Find the Euler-Lagrange equations [1]_ for a given Lagrangian. Parameters ========== L : Expr The Lagrangian that should be a function of the functions listed in the second argument and their derivatives. For example, in the case of two functions `f(x,y)`, `g(x,y)` and two independent variables `x`, `y` the Lagrangian would have the form: .. math:: L\left(f(x,y),g(x,y),\frac{\partial f(x,y)}{\partial x}, \frac{\partial f(x,y)}{\partial y}, \frac{\partial g(x,y)}{\partial x}, \frac{\partial g(x,y)}{\partial y},x,y\right) In many cases it is not necessary to provide anything, except the Lagrangian, it will be auto-detected (and an error raised if this couldn't be done). funcs : Function or an iterable of Functions The functions that the Lagrangian depends on. The Euler equations are differential equations for each of these functions. vars : Symbol or an iterable of Symbols The Symbols that are the independent variables of the functions. Returns ======= eqns : list of Eq The list of differential equations, one for each function. Examples ======== >>> from sympy import euler_equations, Symbol, Function >>> x = Function('x') >>> t = Symbol('t') >>> L = (x(t).diff(t))**2/2 - x(t)**2/2 >>> euler_equations(L, x(t), t) [Eq(-x(t) - Derivative(x(t), (t, 2)), 0)] >>> u = Function('u') >>> x = Symbol('x') >>> L = (u(t, x).diff(t))**2/2 - (u(t, x).diff(x))**2/2 >>> euler_equations(L, u(t, x), [t, x]) [Eq(-Derivative(u(t, x), (t, 2)) + Derivative(u(t, x), (x, 2)), 0)] References ========== .. [1] https://en.wikipedia.org/wiki/Euler%E2%80%93Lagrange_equation
1,536
def delay_update(self, skip_if_already_set=False, **kwargs): for key, value in kwargs.items(): if key in self.extra_update_fields and skip_if_already_set: continue elif key in self.extra_update_fields and key in ('job_explanation', 'result_traceback'): if str(value) in self.extra_update_fields.get(key, ''): continue # if already set, avoid duplicating messages # In the case of these fields, we do not want to lose any prior information, so combine values self.extra_update_fields[key] = '\n'.join([str(self.extra_update_fields[key]), str(value)]) else: self.extra_update_fields[key] = value
Stash fields that should be saved along with the job status change
12
65
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def delay_update(self, skip_if_already_set=False, **kwargs): for key, value in kwargs.items(): if key in self.extra_update_fields and skip_if_already_set: continue elif key in self.extra_update_fields and key in ('job_explanation', 'result_traceback'): if str(value) in self.extra_update_fields.get(key, ''): continue # if already set, avoid duplicating messages # In the case of these fields, we do not want to lose any prior information, so combine values self.extra_update_fields[key] = '\n'.join([str(self.extra_update_fields[key]), str(value)]) else: self.extra_update_fields[key] = value ``` ###Assistant : Stash fields that should be saved along with the job status change
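A standalone sketch of the merge rule: for 'job_explanation' and 'result_traceback' new messages are appended rather than overwritten. The class here is a hypothetical stand-in exposing only the attribute the method touches, with `delay_update` assumed to be in scope as the module-level function shown above:

```Python
class FakeCallback:
    def __init__(self):
        self.extra_update_fields = {}
    delay_update = delay_update  # reuse the method documented above

cb = FakeCallback()
cb.delay_update(job_explanation="first error")
cb.delay_update(job_explanation="second error")
print(cb.extra_update_fields["job_explanation"])
# first error
# second error
```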
1,537
def adjacent_tmp_file(path, **kwargs): # type: (str, **Any) -> Iterator[BinaryIO] with NamedTemporaryFile( delete=False, dir=os.path.dirname(path), prefix=os.path.basename(path), suffix=".tmp", **kwargs, ) as f: result = cast(BinaryIO, f) try: yield result finally: result.flush() os.fsync(result.fileno()) # Tenacity raises RetryError by default, explicitly raise the original exception _replace_retry = retry(reraise=True, stop=stop_after_delay(1), wait=wait_fixed(0.25)) replace = _replace_retry(os.replace) # test_writable_dir and _test_writable_dir_win are copied from Flit, # with the author's agreement to also place them under pip's license.
Return a file-like object pointing to a tmp file next to path. The file is created securely and is ensured to be written to disk after the context reaches its end. kwargs will be passed to tempfile.NamedTemporaryFile to control the way the temporary file will be opened.
47
68
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def adjacent_tmp_file(path, **kwargs): # type: (str, **Any) -> Iterator[BinaryIO] with NamedTemporaryFile( delete=False, dir=os.path.dirname(path), prefix=os.path.basename(path), suffix=".tmp", **kwargs, ) as f: result = cast(BinaryIO, f) try: yield result finally: result.flush() os.fsync(result.fileno()) # Tenacity raises RetryError by default, explicitly raise the original exception _replace_retry = retry(reraise=True, stop=stop_after_delay(1), wait=wait_fixed(0.25)) replace = _replace_retry(os.replace) # test_writable_dir and _test_writable_dir_win are copied from Flit, # with the author's agreement to also place them under pip's license. ``` ###Assistant : Return a file-like object pointing to a tmp file next to path. The file is created securely and is ensured to be written to disk after the context reaches its end. kwargs will be passed to tempfile.NamedTemporaryFile to control the way the temporary file will be opened.
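In pip this helper is paired with the retry-wrapped `replace` defined in the same snippet to get an atomic write; a minimal usage sketch (the target filename is illustrative):

```Python
target = "settings.json"
with adjacent_tmp_file(target) as f:
    f.write(b'{"debug": false}\n')
# f.name is the sibling *.tmp path; os.replace makes the swap atomic.
replace(f.name, target)
```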
1,538
def _lsb_release_info(self): # type: () -> Dict[str, str] if not self.include_lsb: return {} with open(os.devnull, "wb") as devnull: try: cmd = ("lsb_release", "-a") stdout = subprocess.check_output(cmd, stderr=devnull) # Command not found or lsb_release returned error except (OSError, subprocess.CalledProcessError): return {} content = self._to_str(stdout).splitlines() return self._parse_lsb_release_content(content)
Get the information items from the lsb_release command output. Returns: A dictionary containing all information items.
16
45
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _lsb_release_info(self): # type: () -> Dict[str, str] if not self.include_lsb: return {} with open(os.devnull, "wb") as devnull: try: cmd = ("lsb_release", "-a") stdout = subprocess.check_output(cmd, stderr=devnull) # Command not found or lsb_release returned error except (OSError, subprocess.CalledProcessError): return {} content = self._to_str(stdout).splitlines() return self._parse_lsb_release_content(content) ``` ###Assistant : Get the information items from the lsb_release command output. Returns: A dictionary containing all information items.
1,539
def as_dict(self) -> Dict[Text, Any]: serializable_graph_schema: Dict[Text, Dict[Text, Any]] = {"nodes": {}} for node_name, node in self.nodes.items(): serializable = dataclasses.asdict(node) # Classes are not JSON serializable (surprise) serializable["uses"] = f"{node.uses.__module__}.{node.uses.__name__}" serializable_graph_schema["nodes"][node_name] = serializable return serializable_graph_schema
Returns graph schema in a serializable format. Returns: The graph schema in a format which can be dumped as JSON or other formats.
23
35
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def as_dict(self) -> Dict[Text, Any]: serializable_graph_schema: Dict[Text, Dict[Text, Any]] = {"nodes": {}} for node_name, node in self.nodes.items(): serializable = dataclasses.asdict(node) # Classes are not JSON serializable (surprise) serializable["uses"] = f"{node.uses.__module__}.{node.uses.__name__}" serializable_graph_schema["nodes"][node_name] = serializable return serializable_graph_schema ``` ###Assistant : Returns graph schema in a serializable format. Returns: The graph schema in a format which can be dumped as JSON or other formats.
1,540
def _create_project_state(self, with_applied_migrations=False): state = ProjectState(real_apps=self.loader.unmigrated_apps) if with_applied_migrations: # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan( self.loader.graph.leaf_nodes(), clean_start=True ) applied_migrations = { self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes } for migration, _ in full_plan: if migration in applied_migrations: migration.mutate_state(state, preserve=False) return state
Create a project state including all the applications without migrations and applied migrations if with_applied_migrations=True.
15
52
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_project_state(self, with_applied_migrations=False): state = ProjectState(real_apps=self.loader.unmigrated_apps) if with_applied_migrations: # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan( self.loader.graph.leaf_nodes(), clean_start=True ) applied_migrations = { self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes } for migration, _ in full_plan: if migration in applied_migrations: migration.mutate_state(state, preserve=False) return state ``` ###Assistant : Create a project state including all the applications without migrations and applied migrations if with_applied_migrations=True.
1,541
def revoke(state, task_id, terminate=False, signal=None, **kwargs): # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. # supports list argument since 3.1 task_ids, task_id = set(maybe_list(task_id) or []), None task_ids = _revoke(state, task_ids, terminate, signal, **kwargs) return ok(f'tasks {task_ids} flagged as revoked') @control_command( variadic='headers', signature='[key1=value1 [key2=value2 [... [keyN=valueN]]]]', )
Revoke task by task id (or list of ids). Keyword Arguments: terminate (bool): Also terminate the process if the task is active. signal (str): Name of signal to use for terminate (e.g., ``KILL``).
33
58
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def revoke(state, task_id, terminate=False, signal=None, **kwargs): # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. # supports list argument since 3.1 task_ids, task_id = set(maybe_list(task_id) or []), None task_ids = _revoke(state, task_ids, terminate, signal, **kwargs) return ok(f'tasks {task_ids} flagged as revoked') @control_command( variadic='headers', signature='[key1=value1 [key2=value2 [... [keyN=valueN]]]]', ) ``` ###Assistant : Revoke task by task id (or list of ids). Keyword Arguments: terminate (bool): Also terminate the process if the task is active. signal (str): Name of signal to use for terminate (e.g., ``KILL``).
1,542
def inplace_swap_row_csc(X, m, n): for t in [m, n]: if isinstance(t, np.ndarray): raise TypeError("m and n should be valid integers") if m < 0: m += X.shape[0] if n < 0: n += X.shape[0] m_mask = X.indices == m X.indices[X.indices == n] = m X.indices[m_mask] = n
Swap two rows of a CSC matrix in-place. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) Matrix whose two rows are to be swapped. It should be of CSC format. m : int Index of the row of X to be swapped. n : int Index of the row of X to be swapped.
56
47
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def inplace_swap_row_csc(X, m, n): for t in [m, n]: if isinstance(t, np.ndarray): raise TypeError("m and n should be valid integers") if m < 0: m += X.shape[0] if n < 0: n += X.shape[0] m_mask = X.indices == m X.indices[X.indices == n] = m X.indices[m_mask] = n ``` ###Assistant : Swap two rows of a CSC matrix in-place. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) Matrix whose two rows are to be swapped. It should be of CSC format. m : int Index of the row of X to be swapped. n : int Index of the row of X to be swapped.
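A minimal sketch assuming scipy; note the function mutates `X.indices` in place, so `X` must really be in CSC format:

```Python
import numpy as np
from scipy.sparse import csc_matrix

X = csc_matrix(np.array([[0, 1], [2, 0], [0, 3]]))
inplace_swap_row_csc(X, 0, 2)
print(X.toarray())
# [[0 3]
#  [2 0]
#  [0 1]]
```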
1,543
def test_render_empty_table(self): block = TableBlock() result = block.render( { "first_row_is_table_header": False, "first_col_is_header": False, "data": [[None, None, None], [None, None, None], [None, None, None]], } ) expected = ( "<table><tbody>" "<tr><td></td><td></td><td></td></tr>" "<tr><td></td><td></td><td></td></tr>" "<tr><td></td><td></td><td></td></tr>" "</tbody></table>" ) self.assertHTMLEqual(result, expected)
An empty table should render okay. <table> <tbody> <tr><td></td><td></td><td></td></tr> <tr><td></td><td></td><td></td></tr> <tr><td></td><td></td><td></td></tr> </tbody> </table>
13
29
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_render_empty_table(self): block = TableBlock() result = block.render( { "first_row_is_table_header": False, "first_col_is_header": False, "data": [[None, None, None], [None, None, None], [None, None, None]], } ) expected = ( "<table><tbody>" "<tr><td></td><td></td><td></td></tr>" "<tr><td></td><td></td><td></td></tr>" "<tr><td></td><td></td><td></td></tr>" "</tbody></table>" ) self.assertHTMLEqual(result, expected) ``` ###Assistant : An empty table should render okay. <table> <tbody> <tr><td></td><td></td><td></td></tr> <tr><td></td><td></td><td></td></tr> <tr><td></td><td></td><td></td></tr> </tbody> </table>
1,544
def rot_axis3(theta): ct = cos(theta) st = sin(theta) lil = ((ct, st, 0), (-st, ct, 0), (0, 0, 1)) return Matrix(lil)
Returns a rotation matrix for a rotation of theta (in radians) about the 3-axis. Examples ======== >>> from sympy import pi, rot_axis3 A rotation of pi/3 (60 degrees): >>> theta = pi/3 >>> rot_axis3(theta) Matrix([ [ 1/2, sqrt(3)/2, 0], [-sqrt(3)/2, 1/2, 0], [ 0, 0, 1]]) If we rotate by pi/2 (90 degrees): >>> rot_axis3(pi/2) Matrix([ [ 0, 1, 0], [-1, 0, 0], [ 0, 0, 1]]) See Also ======== rot_axis1: Returns a rotation matrix for a rotation of theta (in radians) about the 1-axis rot_axis2: Returns a rotation matrix for a rotation of theta (in radians) about the 2-axis
100
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rot_axis3(theta): ct = cos(theta) st = sin(theta) lil = ((ct, st, 0), (-st, ct, 0), (0, 0, 1)) return Matrix(lil) ``` ###Assistant : Returns a rotation matrix for a rotation of theta (in radians) about the 3-axis. Examples ======== >>> from sympy import pi, rot_axis3 A rotation of pi/3 (60 degrees): >>> theta = pi/3 >>> rot_axis3(theta) Matrix([ [ 1/2, sqrt(3)/2, 0], [-sqrt(3)/2, 1/2, 0], [ 0, 0, 1]]) If we rotate by pi/2 (90 degrees): >>> rot_axis3(pi/2) Matrix([ [ 0, 1, 0], [-1, 0, 0], [ 0, 0, 1]]) See Also ======== rot_axis1: Returns a rotation matrix for a rotation of theta (in radians) about the 1-axis rot_axis2: Returns a rotation matrix for a rotation of theta (in radians) about the 2-axis
1,545
def current(self): rv = self._current or '0' if not isinstance(rv, str): rv = bin(rv)[2:] return rv.rjust(self.n, '0')
Returns the currently referenced Gray code as a bit string. Examples ======== >>> from sympy.combinatorics import GrayCode >>> GrayCode(3, start='100').current '100'
21
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def current(self): rv = self._current or '0' if not isinstance(rv, str): rv = bin(rv)[2:] return rv.rjust(self.n, '0') ``` ###Assistant : Returns the currently referenced Gray code as a bit string. Examples ======== >>> from sympy.combinatorics import GrayCode >>> GrayCode(3, start='100').current '100'
1,546
def upsample_conv_2d(x, w, k=None, factor=2, gain=1): assert isinstance(factor, int) and factor >= 1 # Check weight shape. assert len(w.shape) == 4 convH = w.shape[2] convW = w.shape[3] inC = w.shape[1] assert convW == convH # Setup filter kernel. if k is None: k = [1] * factor k = _setup_kernel(k) * (gain * (factor**2)) p = (k.shape[0] - factor) - (convW - 1) stride = (factor, factor) # Determine data dimensions. stride = [1, 1, factor, factor] output_shape = ((_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW) output_padding = ( output_shape[0] - (_shape(x, 2) - 1) * stride[0] - convH, output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW, ) assert output_padding[0] >= 0 and output_padding[1] >= 0 num_groups = _shape(x, 1) // inC # Transpose weights. w = torch.reshape(w, (num_groups, -1, inC, convH, convW)) w = w[..., ::-1, ::-1].permute(0, 2, 1, 3, 4) w = torch.reshape(w, (num_groups * inC, -1, convH, convW)) x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0) # Original TF code. # x = tf.nn.conv2d_transpose( # x, # w, # output_shape=output_shape, # strides=stride, # padding='VALID', # data_format=data_format) # JAX equivalent return upfirdn2d(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2 + factor - 1, p // 2 + 1))
Fused `upsample_2d()` followed by `tf.nn.conv2d()`. Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary order. Args: x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as `x`.
139
210
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def upsample_conv_2d(x, w, k=None, factor=2, gain=1): assert isinstance(factor, int) and factor >= 1 # Check weight shape. assert len(w.shape) == 4 convH = w.shape[2] convW = w.shape[3] inC = w.shape[1] assert convW == convH # Setup filter kernel. if k is None: k = [1] * factor k = _setup_kernel(k) * (gain * (factor**2)) p = (k.shape[0] - factor) - (convW - 1) stride = (factor, factor) # Determine data dimensions. stride = [1, 1, factor, factor] output_shape = ((_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW) output_padding = ( output_shape[0] - (_shape(x, 2) - 1) * stride[0] - convH, output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW, ) assert output_padding[0] >= 0 and output_padding[1] >= 0 num_groups = _shape(x, 1) // inC # Transpose weights. w = torch.reshape(w, (num_groups, -1, inC, convH, convW)) w = w[..., ::-1, ::-1].permute(0, 2, 1, 3, 4) w = torch.reshape(w, (num_groups * inC, -1, convH, convW)) x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0) # Original TF code. # x = tf.nn.conv2d_transpose( # x, # w, # output_shape=output_shape, # strides=stride, # padding='VALID', # data_format=data_format) # JAX equivalent return upfirdn2d(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2 + factor - 1, p // 2 + 1)) ``` ###Assistant : Fused `upsample_2d()` followed by `tf.nn.conv2d()`. Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary order. Args: x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as `x`.
1,547
def read(self, size=None): if size is None: t = [] while True: buf = self._read(self.bufsize) if not buf: break t.append(buf) buf = "".join(t) else: buf = self._read(size) self.pos += len(buf) return buf
Return the next size number of bytes from the stream. If size is not defined, return all bytes of the stream up to EOF.
24
32
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def read(self, size=None): if size is None: t = [] while True: buf = self._read(self.bufsize) if not buf: break t.append(buf) buf = "".join(t) else: buf = self._read(size) self.pos += len(buf) return buf ``` ###Assistant : Return the next size number of bytes from the stream. If size is not defined, return all bytes of the stream up to EOF.
1,548
def state(self) -> Mapping[str, Any]: if self._cursor_value: return { self.cursor_field: self._cursor_value, "include_deleted": self._include_deleted, } return {}
State getter, get current state and serialize it to emit an Airbyte STATE message
13
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def state(self) -> Mapping[str, Any]: if self._cursor_value: return { self.cursor_field: self._cursor_value, "include_deleted": self._include_deleted, } return {} ``` ###Assistant : State getter, get current state and serialize it to emit an Airbyte STATE message
1,549
def set_task_factory(self, factory): if factory is not None and not callable(factory): raise TypeError('task factory must be a callable or None') self._task_factory = factory
Set a task factory that will be used by loop.create_task(). If factory is None the default task factory will be set. If factory is a callable, it should have a signature matching '(loop, coro)', where 'loop' will be a reference to the active event loop, 'coro' will be a coroutine object. The callable must return a Future.
57
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_task_factory(self, factory): if factory is not None and not callable(factory): raise TypeError('task factory must be a callable or None') self._task_factory = factory ``` ###Assistant : Set a task factory that will be used by loop.create_task(). If factory is None the default task factory will be set. If factory is a callable, it should have a signature matching '(loop, coro)', where 'loop' will be a reference to the active event loop, 'coro' will be a coroutine object. The callable must return a Future.
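A hedged sketch of the documented contract: the factory receives `(loop, coro)` and must return a Future-compatible object. The extra `**kwargs` soak up keyword arguments (such as `context`) that newer Python versions forward to the factory; the factory name is illustrative:

```Python
import asyncio

def logging_factory(loop, coro, **kwargs):
    print(f"scheduling {coro!r}")
    # Default behaviour plus the log line above.
    return asyncio.Task(coro, loop=loop, **kwargs)

async def main():
    asyncio.get_running_loop().set_task_factory(logging_factory)
    await asyncio.create_task(asyncio.sleep(0))  # created via the factory

asyncio.run(main())
```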
1,550
def date(self) -> npt.NDArray[np.object_]: # If the Timestamps have a timezone that is not UTC, # convert them into their i8 representation while # keeping their timezone and not using UTC timestamps = self._local_timestamps() return ints_to_pydatetime(timestamps, box="date")
Returns numpy array of python :class:`datetime.date` objects. Namely, the date part of Timestamps without time and timezone information.
18
37
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def date(self) -> npt.NDArray[np.object_]: # If the Timestamps have a timezone that is not UTC, # convert them into their i8 representation while # keeping their timezone and not using UTC timestamps = self._local_timestamps() return ints_to_pydatetime(timestamps, box="date") ``` ###Assistant : Returns numpy array of python :class:`datetime.date` objects. Namely, the date part of Timestamps without time and timezone information.
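A short usage sketch assuming pandas: the accessor drops both the time of day and the timezone, returning plain `datetime.date` objects in the index's local wall time:

```Python
import pandas as pd

idx = pd.DatetimeIndex(["2023-01-01 12:30", "2023-01-02 08:00"], tz="US/Eastern")
print(idx.date)
# [datetime.date(2023, 1, 1) datetime.date(2023, 1, 2)]
```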
1,551
def apply_transparency(self): if self.mode != "P" or "transparency" not in self.info: return from . import ImagePalette palette = self.getpalette("RGBA") transparency = self.info["transparency"] if isinstance(transparency, bytes): for i, alpha in enumerate(transparency): palette[i * 4 + 3] = alpha else: palette[transparency * 4 + 3] = 0 self.palette = ImagePalette.ImagePalette("RGBA", bytes(palette)) self.palette.dirty = 1 del self.info["transparency"]
If a P mode image has a "transparency" key in the info dictionary, remove the key and apply the transparency to the palette instead.
24
54
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def apply_transparency(self): if self.mode != "P" or "transparency" not in self.info: return from . import ImagePalette palette = self.getpalette("RGBA") transparency = self.info["transparency"] if isinstance(transparency, bytes): for i, alpha in enumerate(transparency): palette[i * 4 + 3] = alpha else: palette[transparency * 4 + 3] = 0 self.palette = ImagePalette.ImagePalette("RGBA", bytes(palette)) self.palette.dirty = 1 del self.info["transparency"] ``` ###Assistant : If a P mode image has a "transparency" key in the info dictionary, remove the key and apply the transparency to the palette instead.
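A minimal sketch assuming Pillow 9.0+ (where `apply_transparency` exists); the filename is hypothetical and stands for any P-mode image carrying a 'transparency' entry:

```Python
from PIL import Image

im = Image.open("indexed.png")  # hypothetical P-mode file
if im.mode == "P" and "transparency" in im.info:
    im.apply_transparency()
    # The key is gone; alpha now lives in the RGBA palette.
    assert "transparency" not in im.info
```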
1,552
def get_frontend_app_asset_url(module, key): args = (settings.STATIC_FRONTEND_APP_URL.rstrip("/"), module, key.lstrip("/")) return "{}/{}/{}".format(*args)
Returns an asset URL that is unversioned. These assets should have a `Cache-Control: max-age=0, must-revalidate` so that clients must validate with the origin server before using their locally cached asset. Example: {% frontend_app_asset_url 'sentry' 'sentry.css' %} => "/_static/dist/sentry/sentry.css"
38
10
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_frontend_app_asset_url(module, key): args = (settings.STATIC_FRONTEND_APP_URL.rstrip("/"), module, key.lstrip("/")) return "{}/{}/{}".format(*args) ``` ###Assistant : Returns an asset URL that is unversioned. These assets should have a `Cache-Control: max-age=0, must-revalidate` so that clients must validate with the origin server before using their locally cached asset. Example: {% frontend_app_asset_url 'sentry' 'sentry.css' %} => "/_static/dist/sentry/sentry.css"
1,553
def tridiagonal_solve(dl, d, du, b): if dl.ndim != 1 or d.ndim != 1 or du.ndim != 1: raise ValueError('dl, d and du must be vectors') if dl.shape != d.shape or d.shape != du.shape: raise ValueError( f'dl={dl.shape}, d={d.shape} and du={du.shape} must all be `[m]`') if b.ndim != 2: raise ValueError(f'b={b.shape} must be a matrix') m, = dl.shape if m < 3: raise ValueError(f'm ({m}) must be >= 3') ldb, n = b.shape if ldb < max(1, m): raise ValueError(f'Leading dimension of b={ldb} must be ≥ max(1, {m})') if dl.dtype != d.dtype or d.dtype != du.dtype or du.dtype != b.dtype: raise ValueError(f'dl={dl.dtype}, d={d.dtype}, du={du.dtype} and ' f'b={b.dtype} must be the same dtype,') t = dl.dtype if t not in (np.float32, np.float64): raise ValueError(f'Only f32/f64 are supported, got {t}') return tridiagonal_solve_p.bind(dl, d, du, b, m=m, n=n, ldb=ldb, t=t) # Schur Decomposition
Computes the solution of a tridiagonal linear system. This function computes the solution of a tridiagonal linear system: .. math:: A . X = B Args: dl: The lower diagonal of A: ``dl[i] := A[i, i-1]`` for i in ``[0,m)``. Note that ``dl[0] = 0``. d: The middle diagonal of A: ``d[i] := A[i, i]`` for i in ``[0,m)``. du: The upper diagonal of A: ``du[i] := A[i, i+1]`` for i in ``[0,m)``. Note that ``du[m - 1] = 0``. b: Right hand side matrix. Returns: Solution ``X`` of tridiagonal system.
91
139
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def tridiagonal_solve(dl, d, du, b): if dl.ndim != 1 or d.ndim != 1 or du.ndim != 1: raise ValueError('dl, d and du must be vectors') if dl.shape != d.shape or d.shape != du.shape: raise ValueError( f'dl={dl.shape}, d={d.shape} and du={du.shape} must all be `[m]`') if b.ndim != 2: raise ValueError(f'b={b.shape} must be a matrix') m, = dl.shape if m < 3: raise ValueError(f'm ({m}) must be >= 3') ldb, n = b.shape if ldb < max(1, m): raise ValueError(f'Leading dimension of b={ldb} must be ≥ max(1, {m})') if dl.dtype != d.dtype or d.dtype != du.dtype or du.dtype != b.dtype: raise ValueError(f'dl={dl.dtype}, d={d.dtype}, du={du.dtype} and ' f'b={b.dtype} must be the same dtype,') t = dl.dtype if t not in (np.float32, np.float64): raise ValueError(f'Only f32/f64 are supported, got {t}') return tridiagonal_solve_p.bind(dl, d, du, b, m=m, n=n, ldb=ldb, t=t) # Schur Decomposition ``` ###Assistant : Computes the solution of a tridiagonal linear system. This function computes the solution of a tridiagonal linear system: .. math:: A . X = B Args: dl: The lower diagonal of A: ``dl[i] := A[i, i-1]`` for i in ``[0,m)``. Note that ``dl[0] = 0``. d: The middle diagonal of A: ``d[i] := A[i, i]`` for i in ``[0,m)``. du: The upper diagonal of A: ``du[i] := A[i, i+1]`` for i in ``[0,m)``. Note that ``du[m - 1] = 0``. b: Right hand side matrix. Returns: Solution ``X`` of tridiagonal system.
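A hedged sanity check in plain numpy of the dl/d/du convention spelled out above: build the dense tridiagonal matrix and solve it with `numpy.linalg.solve` (this does not exercise the JAX primitive itself):

```Python
import numpy as np

m = 4
dl = np.array([0.0, 1.0, 1.0, 1.0])  # dl[0] must be 0
d  = np.array([4.0, 4.0, 4.0, 4.0])
du = np.array([2.0, 2.0, 2.0, 0.0])  # du[m - 1] must be 0
b  = np.ones((m, 1))

A = np.diag(d) + np.diag(dl[1:], -1) + np.diag(du[:-1], 1)
x = np.linalg.solve(A, b)
print(np.allclose(A @ x, b))  # True
```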
1,554
def _get_dependency_info() -> dict[str, JSONSerializable]: deps = [ "pandas", # required "numpy", "pytz", "dateutil", # install / build, "setuptools", "pip", "Cython", # test "pytest", "hypothesis", # docs "sphinx", # Other, need a min version "blosc", "feather", "xlsxwriter", "lxml.etree", "html5lib", "pymysql", "psycopg2", "jinja2", # Other, not imported. "IPython", "pandas_datareader", ] deps.extend(list(VERSIONS)) result: dict[str, JSONSerializable] = {} for modname in deps: mod = import_optional_dependency(modname, errors="ignore") result[modname] = get_version(mod) if mod else None return result
Returns dependency information as a JSON serializable dictionary.
8
72
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_dependency_info() -> dict[str, JSONSerializable]: deps = [ "pandas", # required "numpy", "pytz", "dateutil", # install / build, "setuptools", "pip", "Cython", # test "pytest", "hypothesis", # docs "sphinx", # Other, need a min version "blosc", "feather", "xlsxwriter", "lxml.etree", "html5lib", "pymysql", "psycopg2", "jinja2", # Other, not imported. "IPython", "pandas_datareader", ] deps.extend(list(VERSIONS)) result: dict[str, JSONSerializable] = {} for modname in deps: mod = import_optional_dependency(modname, errors="ignore") result[modname] = get_version(mod) if mod else None return result ``` ###Assistant : Returns dependency information as a JSON serializable dictionary.
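This helper feeds the dependency table printed by the public entry point; a one-line usage note, assuming a standard pandas install:

```Python
import pandas as pd

pd.show_versions()  # prints INSTALLED VERSIONS plus the dependency table above
```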
1,555
def execute(): company_list = frappe.get_all("Company", filters={"country": "Germany"}) for company in company_list: party_account_list = frappe.get_all( "Party Account", filters={"company": company.name}, fields=["name", "account", "debtor_creditor_number"], ) for party_account in party_account_list: if (not party_account.account) or party_account.debtor_creditor_number: # account empty or debtor_creditor_number already filled continue account_number = frappe.db.get_value("Account", party_account.account, "account_number") if not account_number: continue frappe.db.set_value( "Party Account", party_account.name, "debtor_creditor_number", account_number ) frappe.db.set_value("Party Account", party_account.name, "account", "")
Move account number into the new custom field debtor_creditor_number. German companies used to use a dedicated payable/receivable account for every party to mimic party accounts in the external accounting software "DATEV". This is no longer necessary. The reference ID for DATEV will be stored in a new custom field "debtor_creditor_number".
50
60
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def execute(): company_list = frappe.get_all("Company", filters={"country": "Germany"}) for company in company_list: party_account_list = frappe.get_all( "Party Account", filters={"company": company.name}, fields=["name", "account", "debtor_creditor_number"], ) for party_account in party_account_list: if (not party_account.account) or party_account.debtor_creditor_number: # account empty or debtor_creditor_number already filled continue account_number = frappe.db.get_value("Account", party_account.account, "account_number") if not account_number: continue frappe.db.set_value( "Party Account", party_account.name, "debtor_creditor_number", account_number ) frappe.db.set_value("Party Account", party_account.name, "account", "") ``` ###Assistant : Move account number into the new custom field debtor_creditor_number. German companies used to use a dedicated payable/receivable account for every party to mimic party accounts in the external accounting software "DATEV". This is no longer necessary. The reference ID for DATEV will be stored in a new custom field "debtor_creditor_number".
1,556
def compute_or_load(self, wav_file): pitch_file = self.create_pitch_file_path(wav_file, self.cache_path) if not os.path.exists(pitch_file): pitch = self._compute_and_save_pitch(self.ap, wav_file, pitch_file) else: pitch = np.load(pitch_file) return pitch.astype(np.float32)
Compute pitch and return a numpy array of pitch values.
10
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def compute_or_load(self, wav_file): pitch_file = self.create_pitch_file_path(wav_file, self.cache_path) if not os.path.exists(pitch_file): pitch = self._compute_and_save_pitch(self.ap, wav_file, pitch_file) else: pitch = np.load(pitch_file) return pitch.astype(np.float32) ``` ###Assistant : Compute pitch and return a numpy array of pitch values.
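Stripped of the TTS specifics, this is a compute-or-load cache keyed by file path. A generic sketch under assumed names (`cached_array` and the demo path are ours):

```Python
import os
import numpy as np

def cached_array(cache_path, compute):
    # Reuse the cached result when present; otherwise compute and persist it.
    if os.path.exists(cache_path):
        return np.load(cache_path).astype(np.float32)
    values = np.asarray(compute(), dtype=np.float32)
    np.save(cache_path, values)  # path should end in ".npy" to match the exists() check
    return values

# The expensive callable runs only on the first call for a given path.
result = cached_array("/tmp/pitch_demo.npy", lambda: [100.0, 110.5, 98.2])
```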
1,557
def check_output(self, want, got, optionflags): # Handle the common case first, for efficiency: # if they're string-identical, always return true. if got == want: return True # TODO parse integers as well ? # Parse floats and compare them. If some of the parsed floats contain # ellipses, skip the comparison. matches = self.num_got_rgx.finditer(got) numbers_got = [match.group(1) for match in matches] # list of strs matches = self.num_want_rgx.finditer(want) numbers_want = [match.group(1) for match in matches] # list of strs if len(numbers_got) != len(numbers_want): return False if len(numbers_got) > 0: nw_ = [] for ng, nw in zip(numbers_got, numbers_want): if '...' in nw: nw_.append(ng) continue else: nw_.append(nw) if abs(float(ng)-float(nw)) > 1e-5: return False got = self.num_got_rgx.sub(r'%s', got) got = got % tuple(nw_) # <BLANKLINE> can be used as a special sequence to signify a # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE): # Replace <BLANKLINE> in want with a blank line. want = re.sub(r'(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER), '', want) # If a line in got contains only spaces, then remove the # spaces. got = re.sub(r'(?m)^\s*?$', '', got) if got == want: return True # This flag causes doctest to ignore any differences in the # contents of whitespace strings. Note that this can be used # in conjunction with the ELLIPSIS flag. if optionflags & pdoctest.NORMALIZE_WHITESPACE: got = ' '.join(got.split()) want = ' '.join(want.split()) if got == want: return True # The ELLIPSIS flag says to let the sequence "..." in `want` # match any substring in `got`. if optionflags & pdoctest.ELLIPSIS: if pdoctest._ellipsis_match(want, got): return True # We didn't find any match; return false. return False
Return True iff the actual output from an example (`got`) matches the expected output (`want`). These strings are always considered to match if they are identical; but depending on what option flags the test runner is using, several non-exact match types are also possible. See the documentation for `TestRunner` for more information about option flags.
55
272
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def check_output(self, want, got, optionflags): # Handle the common case first, for efficiency: # if they're string-identical, always return true. if got == want: return True # TODO parse integers as well ? # Parse floats and compare them. If some of the parsed floats contain # ellipses, skip the comparison. matches = self.num_got_rgx.finditer(got) numbers_got = [match.group(1) for match in matches] # list of strs matches = self.num_want_rgx.finditer(want) numbers_want = [match.group(1) for match in matches] # list of strs if len(numbers_got) != len(numbers_want): return False if len(numbers_got) > 0: nw_ = [] for ng, nw in zip(numbers_got, numbers_want): if '...' in nw: nw_.append(ng) continue else: nw_.append(nw) if abs(float(ng)-float(nw)) > 1e-5: return False got = self.num_got_rgx.sub(r'%s', got) got = got % tuple(nw_) # <BLANKLINE> can be used as a special sequence to signify a # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE): # Replace <BLANKLINE> in want with a blank line. want = re.sub(r'(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER), '', want) # If a line in got contains only spaces, then remove the # spaces. got = re.sub(r'(?m)^\s*?$', '', got) if got == want: return True # This flag causes doctest to ignore any differences in the # contents of whitespace strings. Note that this can be used # in conjunction with the ELLIPSIS flag. if optionflags & pdoctest.NORMALIZE_WHITESPACE: got = ' '.join(got.split()) want = ' '.join(want.split()) if got == want: return True # The ELLIPSIS flag says to let the sequence "..." in `want` # match any substring in `got`. if optionflags & pdoctest.ELLIPSIS: if pdoctest._ellipsis_match(want, got): return True # We didn't find any match; return false. return False ``` ###Assistant : Return True iff the actual output from an example (`got`) matches the expected output (`want`). These strings are always considered to match if they are identical; but depending on what option flags the test runner is using, several non-exact match types are also possible. See the documentation for `TestRunner` for more information about option flags.
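The numeric core of the matcher can be shown in isolation. A simplified sketch, not SymPy's full checker (which also honors doctest option flags and the `...` wildcard); it compares numbers token by token and ignores the surrounding text:

```Python
import re

_NUM = re.compile(r'-?\d+\.\d*(?:[eE][-+]?\d+)?')

def floats_match(want, got, tol=1e-5):
    # Extract the floats from both strings and compare them pairwise.
    w, g = _NUM.findall(want), _NUM.findall(got)
    if len(w) != len(g):
        return False
    return all(abs(float(a) - float(b)) <= tol for a, b in zip(w, g))

assert floats_match("x = 0.333333", "x = 0.3333334")
assert not floats_match("x = 0.5", "x = 0.6")
```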
1,558
def start(self, workflow_state, user=None): task_state = self.get_task_state_class()(workflow_state=workflow_state) task_state.status = TaskState.STATUS_IN_PROGRESS task_state.page_revision = workflow_state.page.get_latest_revision() task_state.task = self task_state.save() task_submitted.send( sender=task_state.specific.__class__, instance=task_state.specific, user=user, ) return task_state
Start this task on the provided workflow state by creating an instance of TaskState
14
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def start(self, workflow_state, user=None): task_state = self.get_task_state_class()(workflow_state=workflow_state) task_state.status = TaskState.STATUS_IN_PROGRESS task_state.page_revision = workflow_state.page.get_latest_revision() task_state.task = self task_state.save() task_submitted.send( sender=task_state.specific.__class__, instance=task_state.specific, user=user, ) return task_state ``` ###Assistant : Start this task on the provided workflow state by creating an instance of TaskState
1,559
def get_datev_csv(data, filters, csv_class): empty_df = pd.DataFrame(columns=csv_class.COLUMNS) data_df = pd.DataFrame.from_records(data) result = empty_df.append(data_df, sort=True) if csv_class.DATA_CATEGORY == DataCategory.TRANSACTIONS: result["Belegdatum"] = pd.to_datetime(result["Belegdatum"]) result["Beleginfo - Inhalt 6"] = pd.to_datetime(result["Beleginfo - Inhalt 6"]) result["Beleginfo - Inhalt 6"] = result["Beleginfo - Inhalt 6"].dt.strftime("%d%m%Y") result["Fälligkeit"] = pd.to_datetime(result["Fälligkeit"]) result["Fälligkeit"] = result["Fälligkeit"].dt.strftime("%d%m%y") result.sort_values(by="Belegdatum", inplace=True, kind="stable", ignore_index=True) if csv_class.DATA_CATEGORY == DataCategory.ACCOUNT_NAMES: result["Sprach-ID"] = "de-DE" data = result.to_csv( # Reason for str(';'): https://github.com/pandas-dev/pandas/issues/6035 sep=";", # European decimal separator decimal=",", # Windows "ANSI" encoding encoding="latin_1", # format date as DDMM date_format="%d%m", # Windows line terminator line_terminator="\r\n", # Do not number rows index=False, # Use all columns defined above columns=csv_class.COLUMNS, # Quote most fields, even currency values with "," separator quoting=QUOTE_NONNUMERIC, ) data = data.encode("latin_1", errors="replace") header = get_header(filters, csv_class) header = ";".join(header).encode("latin_1", errors="replace") # 1st Row: Header with meta data # 2nd Row: Data heading (Überschrift der Nutzdaten), included in `data` here. # 3rd - nth Row: Data (Nutzdaten) return header + b"\r\n" + data
Fill in missing columns and return a CSV in DATEV Format. For automatic processing, DATEV requires the first line of the CSV file to hold meta data such as the length of account numbers or the category of the data. Arguments: data -- array of dictionaries filters -- dict csv_class -- defines DATA_CATEGORY, FORMAT_NAME and COLUMNS
56
155
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_datev_csv(data, filters, csv_class): empty_df = pd.DataFrame(columns=csv_class.COLUMNS) data_df = pd.DataFrame.from_records(data) result = empty_df.append(data_df, sort=True) if csv_class.DATA_CATEGORY == DataCategory.TRANSACTIONS: result["Belegdatum"] = pd.to_datetime(result["Belegdatum"]) result["Beleginfo - Inhalt 6"] = pd.to_datetime(result["Beleginfo - Inhalt 6"]) result["Beleginfo - Inhalt 6"] = result["Beleginfo - Inhalt 6"].dt.strftime("%d%m%Y") result["Fälligkeit"] = pd.to_datetime(result["Fälligkeit"]) result["Fälligkeit"] = result["Fälligkeit"].dt.strftime("%d%m%y") result.sort_values(by="Belegdatum", inplace=True, kind="stable", ignore_index=True) if csv_class.DATA_CATEGORY == DataCategory.ACCOUNT_NAMES: result["Sprach-ID"] = "de-DE" data = result.to_csv( # Reason for str(';'): https://github.com/pandas-dev/pandas/issues/6035 sep=";", # European decimal separator decimal=",", # Windows "ANSI" encoding encoding="latin_1", # format date as DDMM date_format="%d%m", # Windows line terminator line_terminator="\r\n", # Do not number rows index=False, # Use all columns defined above columns=csv_class.COLUMNS, # Quote most fields, even currency values with "," separator quoting=QUOTE_NONNUMERIC, ) data = data.encode("latin_1", errors="replace") header = get_header(filters, csv_class) header = ";".join(header).encode("latin_1", errors="replace") # 1st Row: Header with meta data # 2nd Row: Data heading (Überschrift der Nutzdaten), included in `data` here. # 3rd - nth Row: Data (Nutzdaten) return header + b"\r\n" + data ``` ###Assistant : Fill in missing columns and return a CSV in DATEV Format. For automatic processing, DATEV requires the first line of the CSV file to hold meta data such as the length of account numbers or the category of the data. Arguments: data -- array of dictionaries filters -- dict csv_class -- defines DATA_CATEGORY, FORMAT_NAME and COLUMNS
1,560
def CircularSymplecticEnsemble(sym, dim): sym, dim = _symbol_converter(sym), _sympify(dim) model = CircularSymplecticEnsembleModel(sym, dim) rmp = RandomMatrixPSpace(sym, model=model) return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
Represents Circular Symplectic Ensembles. Examples ======== >>> from sympy.stats import CircularSymplecticEnsemble as CSE >>> from sympy.stats import joint_eigen_distribution >>> C = CSE('S', 1) >>> joint_eigen_distribution(C) Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**4, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi)) Note ==== As can be seen above in the example, the density of CircularSymplecticEnsemble is not evaluated because the exact definition is based on the Haar measure of the unitary group, which is not unique.
69
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def CircularSymplecticEnsemble(sym, dim): sym, dim = _symbol_converter(sym), _sympify(dim) model = CircularSymplecticEnsembleModel(sym, dim) rmp = RandomMatrixPSpace(sym, model=model) return RandomMatrixSymbol(sym, dim, dim, pspace=rmp) ``` ###Assistant : Represents Circular Symplectic Ensembles. Examples ======== >>> from sympy.stats import CircularSymplecticEnsemble as CSE >>> from sympy.stats import joint_eigen_distribution >>> C = CSE('S', 1) >>> joint_eigen_distribution(C) Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**4, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi)) Note ==== As can be seen above in the example, the density of CircularSymplecticEnsemble is not evaluated because the exact definition is based on the Haar measure of the unitary group, which is not unique.
1,561
def _executor_config_comparator(x, y): try: return x == y except AttributeError: return False
The TaskInstance.executor_config attribute is a pickled object that may contain kubernetes objects. If the installed library version has changed since the object was originally pickled, due to the underlying ``__eq__`` method on these objects (which converts them to JSON), we may encounter attribute errors. In this case we should replace the stored object.
53
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _executor_config_comparator(x, y): try: return x == y except AttributeError: return False ``` ###Assistant : The TaskInstance.executor_config attribute is a pickled object that may contain kubernetes objects. If the installed library version has changed since the object was originally pickled, due to the underlying ``__eq__`` method on these objects (which converts them to JSON), we may encounter attribute errors. In this case we should replace the stored object.
1,562
def test_color_temperature_to_rgbww(): # Coldest color temperature -> only cold channel enabled assert color_util.color_temperature_to_rgbww(6535, 255, 2000, 6535) == ( 0, 0, 0, 255, 0, ) assert color_util.color_temperature_to_rgbww(6535, 128, 2000, 6535) == ( 0, 0, 0, 128, 0, ) # Warmest color temperature -> only warm channel enabled assert color_util.color_temperature_to_rgbww(2000, 255, 2000, 6535) == ( 0, 0, 0, 0, 255, ) assert color_util.color_temperature_to_rgbww(2000, 128, 2000, 6535) == ( 0, 0, 0, 0, 128, ) # Warmer than mid point color temperature -> More warm than cold channel enabled assert color_util.color_temperature_to_rgbww(2881, 255, 2000, 6535) == ( 0, 0, 0, 112, 143, ) assert color_util.color_temperature_to_rgbww(2881, 128, 2000, 6535) == ( 0, 0, 0, 56, 72, )
Test color temp to warm, cold conversion. Temperature values must be in kelvin. Home Assistant uses rgbcw for rgbww.
19
112
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_color_temperature_to_rgbww(): # Coldest color temperature -> only cold channel enabled assert color_util.color_temperature_to_rgbww(6535, 255, 2000, 6535) == ( 0, 0, 0, 255, 0, ) assert color_util.color_temperature_to_rgbww(6535, 128, 2000, 6535) == ( 0, 0, 0, 128, 0, ) # Warmest color temperature -> only warm channel enabled assert color_util.color_temperature_to_rgbww(2000, 255, 2000, 6535) == ( 0, 0, 0, 0, 255, ) assert color_util.color_temperature_to_rgbww(2000, 128, 2000, 6535) == ( 0, 0, 0, 0, 128, ) # Warmer than mid point color temperature -> More warm than cold channel enabled assert color_util.color_temperature_to_rgbww(2881, 255, 2000, 6535) == ( 0, 0, 0, 112, 143, ) assert color_util.color_temperature_to_rgbww(2881, 128, 2000, 6535) == ( 0, 0, 0, 56, 72, ) ``` ###Assistant : Test color temp to warm, cold conversion. Temperature values must be in kelvin. Home Assistant uses rgbcw for rgbww.
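The expected tuples pin down the conversion: the warm/cold split is linear in mired space (1,000,000 / kelvin). The sketch below reproduces the values asserted above; it is a reconstruction, not Home Assistant's actual `homeassistant.util.color` implementation:

```Python
def kelvin_to_rgbww(temp, brightness, min_temp, max_temp):
    # Blend the warm and cold white channels linearly in mired space.
    mired = 1_000_000 / temp
    mired_min = 1_000_000 / max_temp  # coldest temperature -> smallest mired
    mired_max = 1_000_000 / min_temp
    warm_fraction = (mired - mired_min) / (mired_max - mired_min)
    warm = round(brightness * warm_fraction)
    return (0, 0, 0, brightness - warm, warm)

assert kelvin_to_rgbww(6535, 255, 2000, 6535) == (0, 0, 0, 255, 0)
assert kelvin_to_rgbww(2000, 255, 2000, 6535) == (0, 0, 0, 0, 255)
assert kelvin_to_rgbww(2881, 255, 2000, 6535) == (0, 0, 0, 112, 143)
assert kelvin_to_rgbww(2881, 128, 2000, 6535) == (0, 0, 0, 56, 72)
```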
1,563
def test_set_page_config_first(self): fake_enqueue = lambda msg: None ctx = ScriptRunContext( "TestSessionID", fake_enqueue, "", SessionState(), UploadedFileManager(), ) ctx.on_script_start() markdown_msg = ForwardMsg() markdown_msg.delta.new_element.markdown.body = "foo" msg = ForwardMsg() msg.page_config_changed.title = "foo" ctx.enqueue(markdown_msg) with self.assertRaises(StreamlitAPIException): ctx.enqueue(msg)
st.set_page_config must be called before other st commands when the script has been marked as started
16
33
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_set_page_config_first(self): fake_enqueue = lambda msg: None ctx = ScriptRunContext( "TestSessionID", fake_enqueue, "", SessionState(), UploadedFileManager(), ) ctx.on_script_start() markdown_msg = ForwardMsg() markdown_msg.delta.new_element.markdown.body = "foo" msg = ForwardMsg() msg.page_config_changed.title = "foo" ctx.enqueue(markdown_msg) with self.assertRaises(StreamlitAPIException): ctx.enqueue(msg) ``` ###Assistant : st.set_page_config must be called before other st commands when the script has been marked as started
1,564
def set_vars(self) -> None: tk_vars = super().set_vars() smoothgraph = tk.DoubleVar() smoothgraph.set(0.900) tk_vars["smoothgraph"] = smoothgraph raw_var = tk.BooleanVar() raw_var.set(True) tk_vars["raw_data"] = raw_var smooth_var = tk.BooleanVar() smooth_var.set(True) tk_vars["smooth_data"] = smooth_var iterations_var = tk.IntVar() iterations_var.set(10000) tk_vars["display_iterations"] = iterations_var logger.debug(tk_vars) return tk_vars
Add graphing specific variables to the default variables. Overrides original method. Returns ------- dict The variable names with their corresponding tkinter variable
22
38
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_vars(self) -> None: tk_vars = super().set_vars() smoothgraph = tk.DoubleVar() smoothgraph.set(0.900) tk_vars["smoothgraph"] = smoothgraph raw_var = tk.BooleanVar() raw_var.set(True) tk_vars["raw_data"] = raw_var smooth_var = tk.BooleanVar() smooth_var.set(True) tk_vars["smooth_data"] = smooth_var iterations_var = tk.IntVar() iterations_var.set(10000) tk_vars["display_iterations"] = iterations_var logger.debug(tk_vars) return tk_vars ``` ###Assistant : Add graphing specific variables to the default variables. Overrides original method. Returns ------- dict The variable names with their corresponding tkinter variable
1,565
def _configure_kubernetes_library_client(self) -> None: # TODO: Investigate returning a configured client so calls on other threads # will not invalidate the config needed here # if a k8s cluster block is provided to the flow runner, use that if self.cluster_config: self.cluster_config.configure_client() else: # If no block specified, try to load Kubernetes configuration within a cluster. If that doesn't # work, try to load the configuration from the local environment, allowing # any further ConfigExceptions to bubble up. try: kubernetes.config.load_incluster_config() except kubernetes.config.ConfigException: kubernetes.config.load_kube_config()
Set the correct kubernetes client configuration. WARNING: This action is not threadsafe and may override the configuration specified by another `KubernetesJob` instance.
22
82
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _configure_kubernetes_library_client(self) -> None: # TODO: Investigate returning a configured client so calls on other threads # will not invalidate the config needed here # if a k8s cluster block is provided to the flow runner, use that if self.cluster_config: self.cluster_config.configure_client() else: # If no block specified, try to load Kubernetes configuration within a cluster. If that doesn't # work, try to load the configuration from the local environment, allowing # any further ConfigExceptions to bubble up. try: kubernetes.config.load_incluster_config() except kubernetes.config.ConfigException: kubernetes.config.load_kube_config() ``` ###Assistant : Set the correct kubernetes client configuration. WARNING: This action is not threadsafe and may override the configuration specified by another `KubernetesJob` instance.
1,566
def test_runs_alert_rule_action_creator(self, mock_alert_rule_action_creator): self.login_as(user=self.user) project = self.create_project() self.create_sentry_app( name="Pied Piper", organization=project.organization, schema={"elements": [self.create_alert_rule_action_schema()]}, ) install = self.create_sentry_app_installation( slug="pied-piper", organization=project.organization ) actions = [ { "id": "sentry.rules.actions.notify_event_sentry_app.NotifyEventSentryAppAction", "settings": [ {"name": "title", "value": "Team Rocket"}, {"name": "summary", "value": "We're blasting off again."}, ], "sentryAppInstallationUuid": install.uuid, "hasSchemaFormConfig": True, }, ] url = reverse( "sentry-api-0-project-rules", kwargs={"organization_slug": project.organization.slug, "project_slug": project.slug}, ) response = self.client.post( url, data={ "name": "my super cool rule", "owner": f"user:{self.user.id}", "conditions": [], "filters": [], "actions": actions, "filterMatch": "any", "actionMatch": "any", "frequency": 30, }, format="json", ) assert response.status_code == 200, response.content assert response.data["id"] rule = Rule.objects.get(id=response.data["id"]) assert rule.data["actions"] == actions kwargs = { "install": install, "fields": actions[0].get("settings"), } call_kwargs = mock_alert_rule_action_creator.call_args[1] assert call_kwargs["install"].id == kwargs["install"].id assert call_kwargs["fields"] == kwargs["fields"]
Ensures that Sentry Apps with schema forms (UI components) receive a payload when an alert rule is created with them.
20
116
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_runs_alert_rule_action_creator(self, mock_alert_rule_action_creator): self.login_as(user=self.user) project = self.create_project() self.create_sentry_app( name="Pied Piper", organization=project.organization, schema={"elements": [self.create_alert_rule_action_schema()]}, ) install = self.create_sentry_app_installation( slug="pied-piper", organization=project.organization ) actions = [ { "id": "sentry.rules.actions.notify_event_sentry_app.NotifyEventSentryAppAction", "settings": [ {"name": "title", "value": "Team Rocket"}, {"name": "summary", "value": "We're blasting off again."}, ], "sentryAppInstallationUuid": install.uuid, "hasSchemaFormConfig": True, }, ] url = reverse( "sentry-api-0-project-rules", kwargs={"organization_slug": project.organization.slug, "project_slug": project.slug}, ) response = self.client.post( url, data={ "name": "my super cool rule", "owner": f"user:{self.user.id}", "conditions": [], "filters": [], "actions": actions, "filterMatch": "any", "actionMatch": "any", "frequency": 30, }, format="json", ) assert response.status_code == 200, response.content assert response.data["id"] rule = Rule.objects.get(id=response.data["id"]) assert rule.data["actions"] == actions kwargs = { "install": install, "fields": actions[0].get("settings"), } call_kwargs = mock_alert_rule_action_creator.call_args[1] assert call_kwargs["install"].id == kwargs["install"].id assert call_kwargs["fields"] == kwargs["fields"] ``` ###Assistant : Ensures that Sentry Apps with schema forms (UI components) receive a payload when an alert rule is created with them.
1,567
def _determine_interval(self) -> int: intervals = {"default": self._max_interval} for device in self._devices.values(): # Max interval if no location if device.location is None: continue current_zone = run_callback_threadsafe( self.hass.loop, async_active_zone, self.hass, device.location[DEVICE_LOCATION_LATITUDE], device.location[DEVICE_LOCATION_LONGITUDE], device.location[DEVICE_LOCATION_HORIZONTAL_ACCURACY], ).result() # Max interval if in zone if current_zone is not None: continue zones = ( self.hass.states.get(entity_id) for entity_id in sorted(self.hass.states.entity_ids("zone")) ) distances = [] for zone_state in zones: if zone_state is None: continue zone_state_lat = zone_state.attributes[DEVICE_LOCATION_LATITUDE] zone_state_long = zone_state.attributes[DEVICE_LOCATION_LONGITUDE] zone_distance = distance( device.location[DEVICE_LOCATION_LATITUDE], device.location[DEVICE_LOCATION_LONGITUDE], zone_state_lat, zone_state_long, ) if zone_distance is not None: distances.append(round(zone_distance / 1000, 1)) # Max interval if no zone if not distances: continue mindistance = min(distances) # Calculate how long it would take for the device to drive # to the nearest zone at 120 km/h: interval = round(mindistance / 2) # Never poll more than once per minute interval = max(interval, 1) if interval > 180: # Three hour drive? # This is far enough that they might be flying interval = self._max_interval if ( device.battery_level is not None and device.battery_level <= 33 and mindistance > 3 ): # Low battery - let's check half as often interval = interval * 2 intervals[device.name] = interval return max( int(min(intervals.items(), key=operator.itemgetter(1))[1]), self._max_interval, )
Calculate the new interval between two API fetches (in minutes).
9
199
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _determine_interval(self) -> int: intervals = {"default": self._max_interval} for device in self._devices.values(): # Max interval if no location if device.location is None: continue current_zone = run_callback_threadsafe( self.hass.loop, async_active_zone, self.hass, device.location[DEVICE_LOCATION_LATITUDE], device.location[DEVICE_LOCATION_LONGITUDE], device.location[DEVICE_LOCATION_HORIZONTAL_ACCURACY], ).result() # Max interval if in zone if current_zone is not None: continue zones = ( self.hass.states.get(entity_id) for entity_id in sorted(self.hass.states.entity_ids("zone")) ) distances = [] for zone_state in zones: if zone_state is None: continue zone_state_lat = zone_state.attributes[DEVICE_LOCATION_LATITUDE] zone_state_long = zone_state.attributes[DEVICE_LOCATION_LONGITUDE] zone_distance = distance( device.location[DEVICE_LOCATION_LATITUDE], device.location[DEVICE_LOCATION_LONGITUDE], zone_state_lat, zone_state_long, ) if zone_distance is not None: distances.append(round(zone_distance / 1000, 1)) # Max interval if no zone if not distances: continue mindistance = min(distances) # Calculate how long it would take for the device to drive # to the nearest zone at 120 km/h: interval = round(mindistance / 2) # Never poll more than once per minute interval = max(interval, 1) if interval > 180: # Three hour drive? # This is far enough that they might be flying interval = self._max_interval if ( device.battery_level is not None and device.battery_level <= 33 and mindistance > 3 ): # Low battery - let's check half as often interval = interval * 2 intervals[device.name] = interval return max( int(min(intervals.items(), key=operator.itemgetter(1))[1]), self._max_interval, ) ``` ###Assistant : Calculate the new interval between two API fetches (in minutes).
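The distance-to-minutes step deserves a worked example: at 120 km/h a device covers 2 km per minute, so the polling interval is half the distance in kilometres, clamped to at least one minute. The distance here is hypothetical:

```Python
mindistance = 7.4                  # km to the nearest zone (hypothetical)
interval = round(mindistance / 2)  # minutes of driving at 120 km/h
interval = max(interval, 1)        # never poll more than once per minute
print(interval)                    # 4
```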
1,568
def prepare_cookies(self, cookies): if isinstance(cookies, cookielib.CookieJar): self._cookies = cookies else: self._cookies = cookiejar_from_dict(cookies) cookie_header = get_cookie_header(self._cookies, self) if cookie_header is not None: self.headers["Cookie"] = cookie_header
Prepares the given HTTP cookie data. This function eventually generates a ``Cookie`` header from the given cookies using cookielib. Due to cookielib's design, the header will not be regenerated if it already exists, meaning this function can only be called once for the life of the :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls to ``prepare_cookies`` will have no actual effect, unless the "Cookie" header is removed beforehand.
66
25
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def prepare_cookies(self, cookies): if isinstance(cookies, cookielib.CookieJar): self._cookies = cookies else: self._cookies = cookiejar_from_dict(cookies) cookie_header = get_cookie_header(self._cookies, self) if cookie_header is not None: self.headers["Cookie"] = cookie_header ``` ###Assistant : Prepares the given HTTP cookie data. This function eventually generates a ``Cookie`` header from the given cookies using cookielib. Due to cookielib's design, the header will not be regenerated if it already exists, meaning this function can only be called once for the life of the :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls to ``prepare_cookies`` will have no actual effect, unless the "Cookie" header is removed beforehand.
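A short demonstration of the one-shot behaviour the docstring describes, driving requests' own `PreparedRequest` directly (assumes the requests library is installed; the URL and cookie values are placeholders):

```Python
from requests.models import PreparedRequest

req = PreparedRequest()
req.prepare(method="GET", url="https://example.com/", cookies={"session": "abc123"})
print(req.headers["Cookie"])  # session=abc123

# A second call has no effect: cookielib sees the existing header
# and will not regenerate it.
req.prepare_cookies({"other": "xyz"})
print(req.headers["Cookie"])  # still session=abc123
```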
1,569
def wait_for_contains_text(self, selector, text, timeout=None): return self._wait_for( method=contains_text, args=(selector, text), timeout=timeout, msg=f"text -> {text} not found inside element within {timeout or self._wait_timeout}s", )
Explicit wait until the element's text contains the expected `text`. If `timeout` is not set, it defaults to the fixture's `wait_timeout`. A shortcut to `WebDriverWait` with the customized `contains_text` condition.
26
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wait_for_contains_text(self, selector, text, timeout=None): return self._wait_for( method=contains_text, args=(selector, text), timeout=timeout, msg=f"text -> {text} not found inside element within {timeout or self._wait_timeout}s", ) ``` ###Assistant : Explicit wait until the element's text contains the expected `text`. If `timeout` is not set, it defaults to the fixture's `wait_timeout`. A shortcut to `WebDriverWait` with the customized `contains_text` condition.
1,570
def remove_member(self, label): if label not in list(self._members): raise ValueError("No such member exists in the Truss") else: self._nodes_occupied.pop(tuple([self._members[label][0], self._members[label][1]])) self._nodes_occupied.pop(tuple([self._members[label][1], self._members[label][0]])) self._members.pop(label) self._internal_forces.pop(label)
This method removes a member from the given truss. Parameters ========== label: String or Symbol The label for the member to be removed. Examples ======== >>> from sympy.physics.continuum_mechanics.truss import Truss >>> t = Truss() >>> t.add_node('A', 0, 0) >>> t.add_node('B', 3, 0) >>> t.add_node('C', 2, 2) >>> t.add_member('AB', 'A', 'B') >>> t.add_member('AC', 'A', 'C') >>> t.add_member('BC', 'B', 'C') >>> t.members {'AB': ['A', 'B'], 'AC': ['A', 'C'], 'BC': ['B', 'C']} >>> t.remove_member('AC') >>> t.members {'AB': ['A', 'B'], 'BC': ['B', 'C']}
79
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def remove_member(self, label): if label not in list(self._members): raise ValueError("No such member exists in the Truss") else: self._nodes_occupied.pop(tuple([self._members[label][0], self._members[label][1]])) self._nodes_occupied.pop(tuple([self._members[label][1], self._members[label][0]])) self._members.pop(label) self._internal_forces.pop(label) ``` ###Assistant : This method removes a member from the given truss. Parameters ========== label: String or Symbol The label for the member to be removed. Examples ======== >>> from sympy.physics.continuum_mechanics.truss import Truss >>> t = Truss() >>> t.add_node('A', 0, 0) >>> t.add_node('B', 3, 0) >>> t.add_node('C', 2, 2) >>> t.add_member('AB', 'A', 'B') >>> t.add_member('AC', 'A', 'C') >>> t.add_member('BC', 'B', 'C') >>> t.members {'AB': ['A', 'B'], 'AC': ['A', 'C'], 'BC': ['B', 'C']} >>> t.remove_member('AC') >>> t.members {'AB': ['A', 'B'], 'BC': ['B', 'C']}
1,571
def _create_mock_app_session(*args, **kwargs): mock_id = mock.PropertyMock( return_value="mock_id:%s" % ServerTestCase._next_session_id ) ServerTestCase._next_session_id += 1 mock_session = mock.MagicMock(AppSession, autospec=True, *args, **kwargs) type(mock_session).id = mock_id return mock_session
Create a mock AppSession. Each mocked instance will have its own unique ID.
13
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_mock_app_session(*args, **kwargs): mock_id = mock.PropertyMock( return_value="mock_id:%s" % ServerTestCase._next_session_id ) ServerTestCase._next_session_id += 1 mock_session = mock.MagicMock(AppSession, autospec=True, *args, **kwargs) type(mock_session).id = mock_id return mock_session ``` ###Assistant : Create a mock AppSession. Each mocked instance will have its own unique ID.
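The non-obvious move here is attaching the `PropertyMock` to `type(mock_session)` rather than to the instance; properties live on the class, so that is where the mock must go. A minimal standalone sketch of the same pattern:

```Python
from unittest import mock

m = mock.MagicMock()
# Set the property on the mock's type, exactly as the fixture above does.
type(m).id = mock.PropertyMock(return_value="mock_id:1")
print(m.id)  # mock_id:1
```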
1,572
def taggedsents_to_conll(sentences): for sentence in sentences: yield from taggedsent_to_conll(sentence) yield "\n\n" ###################################################################### # { Test Suites ######################################################################
A module to convert a POS tagged document stream (i.e. list of list of tuples, a list of sentences) and yield lines in CONLL format. This module yields one line per word and two newlines for end of sentence. >>> from nltk import word_tokenize, sent_tokenize, pos_tag >>> text = "This is a foobar sentence. Is that right?" >>> sentences = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)] >>> for line in taggedsents_to_conll(sentences): # doctest: +NORMALIZE_WHITESPACE ... if line: ... print(line, end="") 1 This _ DT DT _ 0 a _ _ 2 is _ VBZ VBZ _ 0 a _ _ 3 a _ DT DT _ 0 a _ _ 4 foobar _ JJ JJ _ 0 a _ _ 5 sentence _ NN NN _ 0 a _ _ 6 . _ . . _ 0 a _ _ <BLANKLINE> <BLANKLINE> 1 Is _ VBZ VBZ _ 0 a _ _ 2 that _ IN IN _ 0 a _ _ 3 right _ NN NN _ 0 a _ _ 4 ? _ . . _ 0 a _ _ <BLANKLINE> <BLANKLINE> :param sentences: Input sentences to parse :type sentences: list(list(tuple(str, str))) :rtype: iter(str) :return: a generator yielding sentences in CONLL format.
204
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def taggedsents_to_conll(sentences): for sentence in sentences: yield from taggedsent_to_conll(sentence) yield "\n\n" ###################################################################### # { Test Suites ###################################################################### ``` ###Assistant : A module to convert a POS tagged document stream (i.e. list of list of tuples, a list of sentences) and yield lines in CONLL format. This module yields one line per word and two newlines for end of sentence. >>> from nltk import word_tokenize, sent_tokenize, pos_tag >>> text = "This is a foobar sentence. Is that right?" >>> sentences = [pos_tag(word_tokenize(sent)) for sent in sent_tokenize(text)] >>> for line in taggedsents_to_conll(sentences): # doctest: +NORMALIZE_WHITESPACE ... if line: ... print(line, end="") 1 This _ DT DT _ 0 a _ _ 2 is _ VBZ VBZ _ 0 a _ _ 3 a _ DT DT _ 0 a _ _ 4 foobar _ JJ JJ _ 0 a _ _ 5 sentence _ NN NN _ 0 a _ _ 6 . _ . . _ 0 a _ _ <BLANKLINE> <BLANKLINE> 1 Is _ VBZ VBZ _ 0 a _ _ 2 that _ IN IN _ 0 a _ _ 3 right _ NN NN _ 0 a _ _ 4 ? _ . . _ 0 a _ _ <BLANKLINE> <BLANKLINE> :param sentences: Input sentences to parse :type sentences: list(list(tuple(str, str))) :rtype: iter(str) :return: a generator yielding sentences in CONLL format.
1,573
def _get_input_shape(self): arch = self.config["enc_architecture"] enforce_size = _MODEL_MAPPING[arch].get("enforce_for_weights", False) default_size = _MODEL_MAPPING[arch]["default_size"] scaling = self.config["enc_scaling"] / 100 min_size = _MODEL_MAPPING[arch].get("min_size", 32) size = int(max(min_size, min(default_size, ((default_size * scaling) // 16) * 16))) if self.config["enc_load_weights"] and enforce_size and scaling != 1.0: logger.warning("%s requires input size to be %spx when loading imagenet weights. " "Adjusting input size from %spx to %spx", arch, default_size, size, default_size) retval = (default_size, default_size, 3) else: retval = (size, size, 3) logger.debug("Encoder input set to: %s", retval) return retval
Obtain the input shape for the model. Input shape is calculated from the selected Encoder's input size, scaled to the user selected Input Scaling, rounded down to the nearest 16 pixels. Notes ----- Some models (NasNet) require the input size to be of a certain dimension if loading imagenet weights. In these instances, inputs are resized and a warning is logged. Returns ------- tuple The shape tuple for the input size to the Phaze-A model
73
82
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_input_shape(self): arch = self.config["enc_architecture"] enforce_size = _MODEL_MAPPING[arch].get("enforce_for_weights", False) default_size = _MODEL_MAPPING[arch]["default_size"] scaling = self.config["enc_scaling"] / 100 min_size = _MODEL_MAPPING[arch].get("min_size", 32) size = int(max(min_size, min(default_size, ((default_size * scaling) // 16) * 16))) if self.config["enc_load_weights"] and enforce_size and scaling != 1.0: logger.warning("%s requires input size to be %spx when loading imagenet weights. " "Adjusting input size from %spx to %spx", arch, default_size, size, default_size) retval = (default_size, default_size, 3) else: retval = (size, size, 3) logger.debug("Encoder input set to: %s", retval) return retval ``` ###Assistant : Obtain the input shape for the model. Input shape is calculated from the selected Encoder's input size, scaled to the user selected Input Scaling, rounded down to the nearest 16 pixels. Notes ----- Some models (NasNet) require the input size to be of a certain dimension if loading imagenet weights. In these instances, inputs are resized and a warning is logged. Returns ------- tuple The shape tuple for the input size to the Phaze-A model
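The sizing rule is easiest to see with concrete numbers; a worked example assuming a 224 px default size and 75 % scaling:

```Python
default_size, min_size = 224, 32
scaling = 75 / 100
size = int(max(min_size, min(default_size, ((default_size * scaling) // 16) * 16)))
print(size)  # 160: 224 * 0.75 = 168.0, floored to the nearest multiple of 16
```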
1,574
def get_form_options(self): options = {} if not getattr(self.widget_overrides, "is_original_method", False): warn( "The `widget_overrides` method (on %r) is deprecated; " "these should be returned from `get_form_options` as a " "`widgets` item instead." % type(self), category=RemovedInWagtail219Warning, ) options["widgets"] = self.widget_overrides() if not getattr(self.required_fields, "is_original_method", False): warn( "The `required_fields` method (on %r) is deprecated; " "these should be returned from `get_form_options` as a " "`fields` item instead." % type(self), category=RemovedInWagtail219Warning, ) options["fields"] = self.required_fields() if not getattr(self.required_formsets, "is_original_method", False): warn( "The `required_formsets` method (on %r) is deprecated; " "these should be returned from `get_form_options` as a " "`formsets` item instead." % type(self), category=RemovedInWagtail219Warning, ) options["formsets"] = self.required_formsets() if not getattr(self.field_permissions, "is_original_method", False): warn( "The `field_permissions` method (on %r) is deprecated; " "these should be returned from `get_form_options` as a " "`field_permissions` item instead." % type(self), category=RemovedInWagtail219Warning, ) options["field_permissions"] = self.field_permissions() return options # RemovedInWagtail219Warning - edit handlers should override get_form_options instead
Return a dictionary of attributes such as 'fields', 'formsets' and 'widgets' which should be incorporated into the form class definition to generate a form that this EditHandler can use. This will only be called after binding to a model (i.e. self.model is available).
43
148
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_form_options(self): options = {} if not getattr(self.widget_overrides, "is_original_method", False): warn( "The `widget_overrides` method (on %r) is deprecated; " "these should be returned from `get_form_options` as a " "`widgets` item instead." % type(self), category=RemovedInWagtail219Warning, ) options["widgets"] = self.widget_overrides() if not getattr(self.required_fields, "is_original_method", False): warn( "The `required_fields` method (on %r) is deprecated; " "these should be returned from `get_form_options` as a " "`fields` item instead." % type(self), category=RemovedInWagtail219Warning, ) options["fields"] = self.required_fields() if not getattr(self.required_formsets, "is_original_method", False): warn( "The `required_formsets` method (on %r) is deprecated; " "these should be returned from `get_form_options` as a " "`formsets` item instead." % type(self), category=RemovedInWagtail219Warning, ) options["formsets"] = self.required_formsets() if not getattr(self.field_permissions, "is_original_method", False): warn( "The `field_permissions` method (on %r) is deprecated; " "these should be returned from `get_form_options` as a " "`field_permissions` item instead." % type(self), category=RemovedInWagtail219Warning, ) options["field_permissions"] = self.field_permissions() return options # RemovedInWagtail219Warning - edit handlers should override get_form_options instead ``` ###Assistant : Return a dictionary of attributes such as 'fields', 'formsets' and 'widgets' which should be incorporated into the form class definition to generate a form that this EditHandler can use. This will only be called after binding to a model (i.e. self.model is available).
1,575
def evaluate_links(self, link_evaluator, links): # type: (LinkEvaluator, Iterable[Link]) -> List[InstallationCandidate] candidates = [] for link in self._sort_links(links): candidate = self.get_install_candidate(link_evaluator, link) if candidate is not None: candidates.append(candidate) return candidates
Convert links that are candidates to InstallationCandidate objects.
8
29
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def evaluate_links(self, link_evaluator, links): # type: (LinkEvaluator, Iterable[Link]) -> List[InstallationCandidate] candidates = [] for link in self._sort_links(links): candidate = self.get_install_candidate(link_evaluator, link) if candidate is not None: candidates.append(candidate) return candidates ``` ###Assistant : Convert links that are candidates to InstallationCandidate objects.
1,576
def columnarize(self): if len(self.columns) != 1 or ( len(self.index) == 1 and self.index[0] == MODIN_UNNAMED_SERIES_LABEL ): return self.transpose() return self
Transpose this QueryCompiler if it has a single row but multiple columns. This method should be called for QueryCompilers representing a Series object, i.e. ``self.is_series_like()`` should be True. Returns ------- BaseQueryCompiler Transposed new QueryCompiler or self.
36
20
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def columnarize(self): if len(self.columns) != 1 or ( len(self.index) == 1 and self.index[0] == MODIN_UNNAMED_SERIES_LABEL ): return self.transpose() return self ``` ###Assistant : Transpose this QueryCompiler if it has a single row but multiple columns. This method should be called for QueryCompilers representing a Series object, i.e. ``self.is_series_like()`` should be True. Returns ------- BaseQueryCompiler Transposed new QueryCompiler or self.
1,577
def get_formatted_file_tags(self): # type: () -> List[str] return sorted(str(tag) for tag in self.file_tags)
Return the wheel's tags as a sorted list of strings.
10
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_formatted_file_tags(self): # type: () -> List[str] return sorted(str(tag) for tag in self.file_tags) ``` ###Assistant : Return the wheel's tags as a sorted list of strings.
1,578
def test_python_render(): syntax = Panel.fit( Syntax( CODE, lexer="python", line_numbers=True, line_range=(2, 10), theme="monokai", code_width=60, word_wrap=True, ), padding=0, ) rendered_syntax = render(syntax) print(repr(rendered_syntax)) expected = '╭─────────────────────────────────────────────────────────────────╮\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 2 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;230;219;116;48;2;39;40;34m\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 3 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 4 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mtry\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 5 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mnext\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 6 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mexcept\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34mStopIteration\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 7 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mreturn\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 8 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mfirst\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mTrue\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 9 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mfor\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34min\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m10 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m 
\x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mfirst\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mFalse\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n╰─────────────────────────────────────────────────────────────────╯\n' assert rendered_syntax == expected
Iterate and generate a tuple with a flag for first and last value.
15
85
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_python_render(): syntax = Panel.fit( Syntax( CODE, lexer="python", line_numbers=True, line_range=(2, 10), theme="monokai", code_width=60, word_wrap=True, ), padding=0, ) rendered_syntax = render(syntax) print(repr(rendered_syntax)) expected = '╭─────────────────────────────────────────────────────────────────╮\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 2 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;230;219;116;48;2;39;40;34m\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 3 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 4 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mtry\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 5 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mnext\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 6 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mexcept\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34mStopIteration\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 7 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mreturn\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 8 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mfirst\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mTrue\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 9 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mfor\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34min\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[1;38;2;227;227;221;48;2;39;40;34m 
\x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m10 \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mfirst\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mFalse\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n╰─────────────────────────────────────────────────────────────────╯\n' assert rendered_syntax == expected ``` ###Assistant : Iterate and generate a tuple with a flag for first \x1b[0m\x1b[48;2;39;40;34m \x1b[0m│\n│\x1b[48;2;39;40;34m \x1b[0m\x1b[38;2;230;219;116;48;2;39;40;34mand last value.
1,579
def test_state_changes_during_period_multiple_entities_single_test(hass_recorder): hass = hass_recorder() start = dt_util.utcnow() test_entites = {f"sensor.{i}": str(i) for i in range(30)} for entity_id, value in test_entites.items(): hass.states.set(entity_id, value) wait_recording_done(hass) end = dt_util.utcnow() hist = history.state_changes_during_period(hass, start, end, None) for entity_id, value in test_entites.items(): hist[entity_id][0].state == value for entity_id, value in test_entites.items(): hist = history.state_changes_during_period(hass, start, end, entity_id) assert len(hist) == 1 hist[entity_id][0].state == value hist = history.state_changes_during_period(hass, start, end, None) for entity_id, value in test_entites.items(): hist[entity_id][0].state == value
Test state change during period with multiple entities in the same test. This test ensures the sqlalchemy query cache does not generate incorrect results.
24
73
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_state_changes_during_period_multiple_entities_single_test(hass_recorder): hass = hass_recorder() start = dt_util.utcnow() test_entites = {f"sensor.{i}": str(i) for i in range(30)} for entity_id, value in test_entites.items(): hass.states.set(entity_id, value) wait_recording_done(hass) end = dt_util.utcnow() hist = history.state_changes_during_period(hass, start, end, None) for entity_id, value in test_entites.items(): hist[entity_id][0].state == value for entity_id, value in test_entites.items(): hist = history.state_changes_during_period(hass, start, end, entity_id) assert len(hist) == 1 hist[entity_id][0].state == value hist = history.state_changes_during_period(hass, start, end, None) for entity_id, value in test_entites.items(): hist[entity_id][0].state == value ``` ###Assistant : Test state change during period with multiple entities in the same test. This test ensures the sqlalchemy query cache does not generate incorrect results.
1,580
def O(self): # NOQA: E743, E741 if self._no_timezone_or_datetime_is_ambiguous_or_imaginary: return "" seconds = self.Z() sign = "-" if seconds < 0 else "+" seconds = abs(seconds) return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)
Difference to Greenwich time in hours; e.g. '+0200', '-0430'. If timezone information is not available, return an empty string.
19
37
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def O(self): # NOQA: E743, E741 if self._no_timezone_or_datetime_is_ambiguous_or_imaginary: return "" seconds = self.Z() sign = "-" if seconds < 0 else "+" seconds = abs(seconds) return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60) ``` ###Assistant : Difference to Greenwich time in hours; e.g. '+0200', '-0430'. If timezone information is not available, return an empty string.
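The sign-and-padding arithmetic can be checked directly; a worked example for an offset of minus four and a half hours:

```Python
seconds = -16200  # offset from Greenwich: -4 h 30 min
sign = "-" if seconds < 0 else "+"
seconds = abs(seconds)
print("%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60))  # -0430
```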
1,581
def convert_mem_str_to_bytes(mem_str): # If there is no suffix, the memory sourced from the request is in bytes if mem_str.isdigit(): return int(mem_str) conversions = { 'Ei': lambda x: x * 2**60, 'E': lambda x: x * 10**18, 'Pi': lambda x: x * 2**50, 'P': lambda x: x * 10**15, 'Ti': lambda x: x * 2**40, 'T': lambda x: x * 10**12, 'Gi': lambda x: x * 2**30, 'G': lambda x: x * 10**9, 'Mi': lambda x: x * 2**20, 'M': lambda x: x * 10**6, 'Ki': lambda x: x * 2**10, 'K': lambda x: x * 10**3, } mem = 0 mem_unit = None for i, char in enumerate(mem_str): if not char.isdigit(): mem_unit = mem_str[i:] mem = int(mem_str[:i]) break if not mem_unit or mem_unit not in conversions.keys(): error = f"Unsupported value for SYSTEM_TASK_ABS_MEM: {mem_str}, memory must be expressed in bytes or with known suffix: {conversions.keys()}. Falling back to 1 byte" logger.warning(error) return 1 return max(1, conversions[mem_unit](mem))
Convert string with suffix indicating units to memory in bytes (base 2). Useful for dealing with memory settings that may be expressed in units compatible with kubernetes. See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory
29
155
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def convert_mem_str_to_bytes(mem_str): # If there is no suffix, the memory sourced from the request is in bytes if mem_str.isdigit(): return int(mem_str) conversions = { 'Ei': lambda x: x * 2**60, 'E': lambda x: x * 10**18, 'Pi': lambda x: x * 2**50, 'P': lambda x: x * 10**15, 'Ti': lambda x: x * 2**40, 'T': lambda x: x * 10**12, 'Gi': lambda x: x * 2**30, 'G': lambda x: x * 10**9, 'Mi': lambda x: x * 2**20, 'M': lambda x: x * 10**6, 'Ki': lambda x: x * 2**10, 'K': lambda x: x * 10**3, } mem = 0 mem_unit = None for i, char in enumerate(mem_str): if not char.isdigit(): mem_unit = mem_str[i:] mem = int(mem_str[:i]) break if not mem_unit or mem_unit not in conversions.keys(): error = f"Unsupported value for SYSTEM_TASK_ABS_MEM: {mem_str}, memory must be expressed in bytes or with known suffix: {conversions.keys()}. Falling back to 1 byte" logger.warning(error) return 1 return max(1, conversions[mem_unit](mem)) ``` ###Assistant : Convert string with suffix indicating units to memory in bytes (base 2). Useful for dealing with memory settings that may be expressed in units compatible with kubernetes. See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory
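A quick usage sketch for the converter above; the expected values follow directly from the conversion table, and the call sites are illustrative:
```Python
# Base-2 suffixes ('Gi') use powers of two; base-10 suffixes ('G') use powers of ten.
assert convert_mem_str_to_bytes("1024") == 1024     # bare digits are already bytes
assert convert_mem_str_to_bytes("2Gi") == 2 * 2**30
assert convert_mem_str_to_bytes("2G") == 2 * 10**9
assert convert_mem_str_to_bytes("5Q") == 1          # unknown suffix: warn and fall back to 1 byte
```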
1,582
def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs): client = self.get_client() jira_issue = client.get_issue(external_issue.key) jira_project = jira_issue["fields"]["project"] try: external_project = IntegrationExternalProject.objects.get( external_id=jira_project["id"], organization_integration_id__in=OrganizationIntegration.objects.filter( organization_id=external_issue.organization_id, integration_id=external_issue.integration_id, ), ) except IntegrationExternalProject.DoesNotExist: return jira_status = ( external_project.resolved_status if is_resolved else external_project.unresolved_status ) # don't bother updating if it's already the status we'd change it to if jira_issue["fields"]["status"]["id"] == jira_status: return try: transitions = client.get_transitions(external_issue.key) except ApiHostError: raise IntegrationError("Could not reach host to get transitions.") try: transition = [t for t in transitions if t.get("to", {}).get("id") == jira_status][0] except IndexError: # TODO(jess): Email for failure logger.warning( "jira.status-sync-fail", extra={ "organization_id": external_issue.organization_id, "integration_id": external_issue.integration_id, "issue_key": external_issue.key, }, ) return client.transition_issue(external_issue.key, transition["id"])
Propagate a sentry issue's status to a linked issue's status.
10
103
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs): client = self.get_client() jira_issue = client.get_issue(external_issue.key) jira_project = jira_issue["fields"]["project"] try: external_project = IntegrationExternalProject.objects.get( external_id=jira_project["id"], organization_integration_id__in=OrganizationIntegration.objects.filter( organization_id=external_issue.organization_id, integration_id=external_issue.integration_id, ), ) except IntegrationExternalProject.DoesNotExist: return jira_status = ( external_project.resolved_status if is_resolved else external_project.unresolved_status ) # don't bother updating if it's already the status we'd change it to if jira_issue["fields"]["status"]["id"] == jira_status: return try: transitions = client.get_transitions(external_issue.key) except ApiHostError: raise IntegrationError("Could not reach host to get transitions.") try: transition = [t for t in transitions if t.get("to", {}).get("id") == jira_status][0] except IndexError: # TODO(jess): Email for failure logger.warning( "jira.status-sync-fail", extra={ "organization_id": external_issue.organization_id, "integration_id": external_issue.integration_id, "issue_key": external_issue.key, }, ) return client.transition_issue(external_issue.key, transition["id"]) ``` ###Assistant : Propagate a sentry issue's status to a linked issue's status.
1,583
def _sci(self, im): _api.check_isinstance( (mpl.contour.ContourSet, mcoll.Collection, mimage.AxesImage), im=im) if isinstance(im, mpl.contour.ContourSet): if im.collections[0] not in self._children: raise ValueError("ContourSet must be in current Axes") elif im not in self._children: raise ValueError("Argument must be an image, collection, or " "ContourSet in this Axes") self._current_image = im
Set the current image. This image will be the target of colormap functions like ``pyplot.viridis``, and other functions such as `~.pyplot.clim`. The current image is an attribute of the current Axes.
31
44
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _sci(self, im): _api.check_isinstance( (mpl.contour.ContourSet, mcoll.Collection, mimage.AxesImage), im=im) if isinstance(im, mpl.contour.ContourSet): if im.collections[0] not in self._children: raise ValueError("ContourSet must be in current Axes") elif im not in self._children: raise ValueError("Argument must be an image, collection, or " "ContourSet in this Axes") self._current_image = im ``` ###Assistant : Set the current image. This image will be the target of colormap functions like ``pyplot.viridis``, and other functions such as `~.pyplot.clim`. The current image is an attribute of the current Axes.
1,584
def __dask_postpersist__(self) -> tuple[PostPersistCallable, tuple]: raise NotImplementedError("Inheriting class must implement this method.")
Rebuilder function and optional arguments to construct a persisted collection. Returns ------- PostPersistCallable Callable that rebuilds the collection. The signature should be ``rebuild(dsk: Mapping, *args: Any, rename: Mapping[str, str] | None)``. The callable should return an equivalent Dask collection with the same keys as `self`, but with results that are computed through a different graph. In the case of :py:func:`dask.persist`, the new graph will have just the output keys and the values already computed. tuple[Any, ...] Optional arguments passed to the rebuild callable. If no additional arguments are to be passed then this must be an empty tuple.
98
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __dask_postpersist__(self) -> tuple[PostPersistCallable, tuple]: raise NotImplementedError("Inheriting class must implement this method.") ``` ###Assistant : Rebuilder function and optional arguments to construct a persisted collection. Returns ------- PostPersistCallable Callable that rebuilds the collection. The signature should be ``rebuild(dsk: Mapping, *args: Any, rename: Mapping[str, str] | None)``. The callable should return an equivalent Dask collection with the same keys as `self`, but with results that are computed through a different graph. In the case of :py:func:`dask.persist`, the new graph will have just the output keys and the values already computed. tuple[Any, ...] Optional arguments passed to the rebuild callable. If no additional arguments are to be passed then this must be an empty tuple.
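Since the method is abstract, a subclass supplies the rebuild callable. A hedged sketch of what an implementation might look like; the collection class and rebuild function here are hypothetical, not part of dask:
```Python
# Hypothetical custom collection implementing the post-persist protocol.
def _rebuild(dsk, keys, rename=None):
    # Return an equivalent collection backed by the already-computed graph.
    return MyCollection(dsk, keys)

class MyCollection:
    def __init__(self, dsk, keys):
        self._dsk, self._keys = dsk, keys

    def __dask_postpersist__(self):
        # Callable plus extra args; dask invokes rebuild(dsk, *args, rename=...).
        return _rebuild, (self._keys,)
```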
1,585
def execute(): for doctype in ("Sales Order Item", "Bin"): frappe.reload_doctype(doctype) repost_for = frappe.db.sql() for item_code, warehouse in repost_for: if not (item_code and warehouse): continue update_bin_qty(item_code, warehouse, { "reserved_qty": get_reserved_qty(item_code, warehouse) }) frappe.db.sql()
select distinct item_code, warehouse from ( ( select distinct item_code, warehouse from `tabSales Order Item` where docstatus=1 ) UNION ( select distinct item_code, warehouse from `tabPacked Item` where docstatus=1 and parenttype='Sales Order' ) ) so_item where exists(select name from tabItem where name=so_item.item_code and ifnull(is_stock_item, 0)=1) delete from tabBin where exists( select name from tabItem where name=tabBin.item_code and ifnull(is_stock_item, 0) = 0 )
62
32
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def execute(): for doctype in ("Sales Order Item", "Bin"): frappe.reload_doctype(doctype) repost_for = frappe.db.sql() for item_code, warehouse in repost_for: if not (item_code and warehouse): continue update_bin_qty(item_code, warehouse, { "reserved_qty": get_reserved_qty(item_code, warehouse) }) frappe.db.sql() ``` ###Assistant : select distinct item_code, warehouse from ( ( select distinct item_code, warehouse from `tabSales Order Item` where docstatus=1 ) UNION ( select distinct item_code, warehouse from `tabPacked Item` where docstatus=1 and parenttype='Sales Order' ) ) so_item where exists(select name from tabItem where name=so_item.item_code and ifnull(is_stock_item, 0)=1) delete from tabBin where exists( select name from tabItem where name=tabBin.item_code and ifnull(is_stock_item, 0) = 0 )
1,586
def rename_group_tables_reverse(apps, schema_editor): Group = apps.get_model("auth", "Group") schema_editor.alter_db_table( Group, "account_group", "auth_group", ) PermissionGroup = Group.permissions.through schema_editor.alter_db_table( PermissionGroup, "account_group_permissions", "auth_group_permissions", ) RENAME_CONSTRAINTS_AND_INDEX_REVERSE = DROP_OLD_CONSTRAINTS_REVERSE_FROM_0072 = DROP_OLD_CONSTRAINTS_REVERSE_FROM_APP_0018 =
ALTER TABLE account_group RENAME CONSTRAINT account_group_pkey TO auth_group_pkey; ALTER TABLE account_group RENAME CONSTRAINT account_group_name_key TO auth_group_name_key; ALTER INDEX IF EXISTS account_group_name_034e9f3f_like RENAME TO auth_group_name_a6ea08ec_like; ALTER TABLE auth_group_permissions ADD CONSTRAINT auth_group_permissions_group_id_permission_id_0cd325b0_uniq UNIQUE (group_id, permission_id); ALTER TABLE auth_group_permissions ADD CONSTRAINT auth_group_permissions_group_id_b120cbf9_fk_auth_group_id FOREIGN KEY (group_id) REFERENCES auth_group (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE auth_group_permissions ADD CONSTRAINT auth_group_permissio_permission_id_84c5c92e_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE account_user_groups ADD CONSTRAINT userprofile_user_groups_group_id_c7eec74e_fk_auth_group_id FOREIGN KEY (group_id) REFERENCES auth_group (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE account_user_user_permissions ADD CONSTRAINT userprofile_user_use_permission_id_1caa8a71_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE app_app_permissions ADD CONSTRAINT account_serviceaccou_permission_id_449791f0_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE app_appextension_permissions ADD CONSTRAINT app_appextension_per_permission_id_cb6c3ce0_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE app_appinstallation_permissions ADD CONSTRAINT app_appinstallation__permission_id_4ee9f6c8_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED;
138
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rename_group_tables_reverse(apps, schema_editor): Group = apps.get_model("auth", "Group") schema_editor.alter_db_table( Group, "account_group", "auth_group", ) PermissionGroup = Group.permissions.through schema_editor.alter_db_table( PermissionGroup, "account_group_permissions", "auth_group_permissions", ) RENAME_CONSTRAINTS_AND_INDEX_REVERSE = DROP_OLD_CONSTRAINTS_REVERSE_FROM_0072 = DROP_OLD_CONSTRAINTS_REVERSE_FROM_APP_0018 = ``` ###Assistant : ALTER TABLE account_group RENAME CONSTRAINT account_group_pkey TO auth_group_pkey; ALTER TABLE account_group RENAME CONSTRAINT account_group_name_key TO auth_group_name_key; ALTER INDEX IF EXISTS account_group_name_034e9f3f_like RENAME TO auth_group_name_a6ea08ec_like; ALTER TABLE auth_group_permissions ADD CONSTRAINT auth_group_permissions_group_id_permission_id_0cd325b0_uniq UNIQUE (group_id, permission_id); ALTER TABLE auth_group_permissions ADD CONSTRAINT auth_group_permissions_group_id_b120cbf9_fk_auth_group_id FOREIGN KEY (group_id) REFERENCES auth_group (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE auth_group_permissions ADD CONSTRAINT auth_group_permissio_permission_id_84c5c92e_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE account_user_groups ADD CONSTRAINT userprofile_user_groups_group_id_c7eec74e_fk_auth_group_id FOREIGN KEY (group_id) REFERENCES auth_group (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE account_user_user_permissions ADD CONSTRAINT userprofile_user_use_permission_id_1caa8a71_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE app_app_permissions ADD CONSTRAINT account_serviceaccou_permission_id_449791f0_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE app_appextension_permissions ADD CONSTRAINT app_appextension_per_permission_id_cb6c3ce0_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE app_appinstallation_permissions ADD CONSTRAINT app_appinstallation__permission_id_4ee9f6c8_fk_auth_perm FOREIGN KEY (permission_id) REFERENCES auth_permission (id) DEFERRABLE INITIALLY DEFERRED;
1,587
def test_session_is_accessed(self): response = self.client.get("/auth_processor_attr_access/") self.assertContains(response, "Session accessed")
The session is accessed if the auth context processor is used and relevant attributes are accessed.
15
8
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_session_is_accessed(self): response = self.client.get("/auth_processor_attr_access/") self.assertContains(response, "Session accessed") ``` ###Assistant : The session is accessed if the auth context processor is used and relevant attributes are accessed.
1,588
def _find_vc2017(): root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles") if not root: return None, None try: path = subprocess.check_output([ os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"), "-latest", "-prerelease", "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", "-property", "installationPath", "-products", "*", ], encoding="mbcs", errors="strict").strip() except (subprocess.CalledProcessError, OSError, UnicodeDecodeError): return None, None path = os.path.join(path, "VC", "Auxiliary", "Build") if os.path.isdir(path): return 15, path return None, None PLAT_SPEC_TO_RUNTIME = { 'x86' : 'x86', 'x86_amd64' : 'x64', 'x86_arm' : 'arm', 'x86_arm64' : 'arm64' }
Returns "15, path" based on the result of invoking vswhere.exe If no install is found, returns "None, None" The version is returned to avoid unnecessarily changing the function result. It may be ignored when the path is not None. If vswhere.exe is not available, by definition, VS 2017 is not installed.
51
71
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _find_vc2017(): root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles") if not root: return None, None try: path = subprocess.check_output([ os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"), "-latest", "-prerelease", "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", "-property", "installationPath", "-products", "*", ], encoding="mbcs", errors="strict").strip() except (subprocess.CalledProcessError, OSError, UnicodeDecodeError): return None, None path = os.path.join(path, "VC", "Auxiliary", "Build") if os.path.isdir(path): return 15, path return None, None PLAT_SPEC_TO_RUNTIME = { 'x86' : 'x86', 'x86_amd64' : 'x64', 'x86_arm' : 'arm', 'x86_arm64' : 'arm64' } ``` ###Assistant : Returns "15, path" based on the result of invoking vswhere.exe. If no install is found, returns "None, None". The version is returned to avoid unnecessarily changing the function result. It may be ignored when the path is not None. If vswhere.exe is not available, by definition, VS 2017 is not installed.
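Typical use, as the docstring implies; a sketch in which the surrounding calling code is illustrative:
```Python
# On a machine with VS 2017 build tools, expect (15, path); otherwise (None, None).
version, vc_path = _find_vc2017()
if vc_path is not None:
    print("Found VC tools (version %s) at: %s" % (version, vc_path))
else:
    print("VS 2017 build tools not found")
```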
1,589
def save_attributes_to_hdf5_group(group, name, data): # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( "The following attributes cannot be saved to HDF5 file because they " f"are larger than {HDF5_OBJECT_HEADER_LIMIT} bytes: {bad_attributes}" ) data_npy = np.asarray(data) num_chunks = 1 chunked_data = np.array_split(data_npy, num_chunks) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data): num_chunks += 1 chunked_data = np.array_split(data_npy, num_chunks) if num_chunks > 1: for chunk_id, chunk_data in enumerate(chunked_data): group.attrs["%s%d" % (name, chunk_id)] = chunk_data else: group.attrs[name] = data
Saves attributes (data) of the specified name into the HDF5 group. This method deals with an inherent problem of the HDF5 file format, which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to save. data: Attributes data to store. Raises: RuntimeError: If any single attribute is too large to be saved.
65
127
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save_attributes_to_hdf5_group(group, name, data): # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( "The following attributes cannot be saved to HDF5 file because they " f"are larger than {HDF5_OBJECT_HEADER_LIMIT} bytes: {bad_attributes}" ) data_npy = np.asarray(data) num_chunks = 1 chunked_data = np.array_split(data_npy, num_chunks) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data): num_chunks += 1 chunked_data = np.array_split(data_npy, num_chunks) if num_chunks > 1: for chunk_id, chunk_data in enumerate(chunked_data): group.attrs["%s%d" % (name, chunk_id)] = chunk_data else: group.attrs[name] = data ``` ###Assistant : Saves attributes (data) of the specified name into the HDF5 group. This method deals with an inherent problem of the HDF5 file format, which is not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Args: group: A pointer to a HDF5 group. name: A name of the attributes to save. data: Attributes data to store. Raises: RuntimeError: If any single attribute is too large to be saved.
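The read side has to undo the chunking. A minimal round-trip sketch with h5py, assuming the helper above and its globals are importable; the file and group names are illustrative:
```Python
import h5py

# Write attributes via the helper, then reassemble possibly-chunked attributes
# ("layer_names0", "layer_names1", ...) when reading them back.
with h5py.File("weights.h5", "w") as f:
    g = f.create_group("model_weights")
    save_attributes_to_hdf5_group(g, "layer_names", [b"dense_1", b"dense_2"])

with h5py.File("weights.h5", "r") as f:
    g = f["model_weights"]
    if "layer_names" in g.attrs:
        names = list(g.attrs["layer_names"])
    else:  # the data was split into numbered chunks on save
        names, i = [], 0
        while "layer_names%d" % i in g.attrs:
            names.extend(g.attrs["layer_names%d" % i])
            i += 1
```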
1,590
def dispatch_hook(cls, _pkt=None, *args, **kargs): if _pkt is not None: plen = len(_pkt) if plen >= 2: byte0, byte1 = struct.unpack("BB", _pkt[:2]) s = kargs.get("tls_session", None) if byte0 not in _tls_type or byte1 != 3: # Unknown type # Check SSLv2: either the session is already SSLv2, # or the packet looks like one. As said above, this # isn't 100% reliable, but Wireshark does the same if s and (s.tls_version == 0x0002 or s.advertised_tls_version == 0x0002) or \ (_ssl_looks_like_sslv2(_pkt) and (not s or s.tls_version is None)): from scapy.layers.tls.record_sslv2 import SSLv2 return SSLv2 # Not SSLv2: continuation return _TLSEncryptedContent # Check TLS 1.3 if s and _tls_version_check(s.tls_version, 0x0304): _has_cipher = lambda x: ( x and not isinstance(x.cipher, Cipher_NULL) ) if (_has_cipher(s.rcs) or _has_cipher(s.prcs)) and \ byte0 == 0x17: from scapy.layers.tls.record_tls13 import TLS13 return TLS13 if plen < 5: # Layer detected as TLS but too small to be # parsed. Scapy should not try to decode them return _TLSEncryptedContent return TLS # Parsing methods
If the TLS class was called on raw SSLv2 data, we want to return an SSLv2 record instance. We acknowledge the risk of SSLv2 packets with a msglen of 0x1403, 0x1503, 0x1603 or 0x1703 which will never be cast as SSLv2 records but TLS ones instead, but hey, we can't be held responsible for low-minded extensibility choices.
57
165
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def dispatch_hook(cls, _pkt=None, *args, **kargs): if _pkt is not None: plen = len(_pkt) if plen >= 2: byte0, byte1 = struct.unpack("BB", _pkt[:2]) s = kargs.get("tls_session", None) if byte0 not in _tls_type or byte1 != 3: # Unknown type # Check SSLv2: either the session is already SSLv2, # or the packet looks like one. As said above, this # isn't 100% reliable, but Wireshark does the same if s and (s.tls_version == 0x0002 or s.advertised_tls_version == 0x0002) or \ (_ssl_looks_like_sslv2(_pkt) and (not s or s.tls_version is None)): from scapy.layers.tls.record_sslv2 import SSLv2 return SSLv2 # Not SSLv2: continuation return _TLSEncryptedContent # Check TLS 1.3 if s and _tls_version_check(s.tls_version, 0x0304): _has_cipher = lambda x: ( x and not isinstance(x.cipher, Cipher_NULL) ) if (_has_cipher(s.rcs) or _has_cipher(s.prcs)) and \ byte0 == 0x17: from scapy.layers.tls.record_tls13 import TLS13 return TLS13 if plen < 5: # Layer detected as TLS but too small to be # parsed. Scapy should not try to decode them return _TLSEncryptedContent return TLS # Parsing methods ``` ###Assistant : If the TLS class was called on raw SSLv2 data, we want to return an SSLv2 record instance. We acknowledge the risk of SSLv2 packets with a msglen of 0x1403, 0x1503, 0x1603 or 0x1703 which will never be cast as SSLv2 records but TLS ones instead, but hey, we can't be held responsible for low-minded extensibility choices.
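The dispatch mechanism itself generalizes beyond TLS: scapy consults `dispatch_hook` before instantiating a packet, so the classmethod can pick a subclass from the raw bytes. A toy sketch under that assumption; the classes here are hypothetical:
```Python
from scapy.packet import Packet
from scapy.fields import ByteField

class Variant1(Packet):
    fields_desc = [ByteField("type", 1)]

class Variant2(Packet):
    fields_desc = [ByteField("type", 2)]

class Chooser(Packet):
    @classmethod
    def dispatch_hook(cls, _pkt=None, *args, **kargs):
        # Inspect the first raw byte and return the class to build.
        if _pkt and _pkt[0] == 2:
            return Variant2
        return Variant1

pkt = Chooser(b"\x02")  # should build a Variant2 instance
```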
1,591
def get_leave_period(from_date, to_date, company): leave_period = frappe.db.sql( , {"from_date": from_date, "to_date": to_date, "company": company}, as_dict=1, ) if leave_period: return leave_period
select name, from_date, to_date from `tabLeave Period` where company=%(company)s and is_active=1 and (from_date between %(from_date)s and %(to_date)s or to_date between %(from_date)s and %(to_date)s or (from_date < %(from_date)s and to_date > %(to_date)s))
31
20
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_leave_period(from_date, to_date, company): leave_period = frappe.db.sql( , {"from_date": from_date, "to_date": to_date, "company": company}, as_dict=1, ) if leave_period: return leave_period ``` ###Assistant : select name, from_date, to_date from `tabLeave Period` where company=%(company)s and is_active=1 and (from_date between %(from_date)s and %(to_date)s or to_date between %(from_date)s and %(to_date)s or (from_date < %(from_date)s and to_date > %(to_date)s))
1,592
def rows(self) -> Iterator[Dict[str, TensorType]]: # Do we add seq_lens=[1] to each row? seq_lens = None if self.get(SampleBatch.SEQ_LENS) is None else np.array([1]) self_as_dict = {k: v for k, v in self.items()} for i in range(self.count): yield tree.map_structure_with_path( lambda p, v: v[i] if p[0] != self.SEQ_LENS else seq_lens, self_as_dict, )
Returns an iterator over data rows, i.e. dicts with column values. Note that if `seq_lens` is set in self, we set it to [1] in the rows. Yields: The column values of the row in this iteration. Examples: >>> batch = SampleBatch({ ... "a": [1, 2, 3], ... "b": [4, 5, 6], ... "seq_lens": [1, 2] ... }) >>> for row in batch.rows(): print(row) {"a": 1, "b": 4, "seq_lens": [1]} {"a": 2, "b": 5, "seq_lens": [1]} {"a": 3, "b": 6, "seq_lens": [1]}
82
49
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def rows(self) -> Iterator[Dict[str, TensorType]]: # Do we add seq_lens=[1] to each row? seq_lens = None if self.get(SampleBatch.SEQ_LENS) is None else np.array([1]) self_as_dict = {k: v for k, v in self.items()} for i in range(self.count): yield tree.map_structure_with_path( lambda p, v: v[i] if p[0] != self.SEQ_LENS else seq_lens, self_as_dict, ) ``` ###Assistant : Returns an iterator over data rows, i.e. dicts with column values. Note that if `seq_lens` is set in self, we set it to [1] in the rows. Yields: The column values of the row in this iteration. Examples: >>> batch = SampleBatch({ ... "a": [1, 2, 3], ... "b": [4, 5, 6], ... "seq_lens": [1, 2] ... }) >>> for row in batch.rows(): print(row) {"a": 1, "b": 4, "seq_lens": [1]} {"a": 2, "b": 5, "seq_lens": [1]} {"a": 3, "b": 6, "seq_lens": [1]}
1,593
def _dedup_weights(self, weights): output, seen_ids = [], set() for w in weights: if id(w) not in seen_ids: output.append(w) # Track the Variable's identity to avoid __eq__ issues. seen_ids.add(id(w)) return output # SavedModel properties. Please see keras/saving/saved_model for details.
Dedupe weights while maintaining order as much as possible.
9
38
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _dedup_weights(self, weights): output, seen_ids = [], set() for w in weights: if id(w) not in seen_ids: output.append(w) # Track the Variable's identity to avoid __eq__ issues. seen_ids.add(id(w)) return output # SavedModel properties. Please see keras/saving/saved_model for details. ``` ###Assistant : Dedupe weights while maintaining order as much as possible.
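To see why the helper keys on `id()` rather than equality: equal-but-distinct tensors must both be kept, and array-like objects are awkward in equality-based sets anyway. A small standalone demonstration, with numpy arrays standing in for variables:
```Python
import numpy as np

a = np.zeros(3)
b = np.zeros(3)      # element-wise equal to `a`, but a distinct object
weights = [a, b, a]

seen, output = set(), []
for w in weights:
    if id(w) not in seen:
        output.append(w)
        seen.add(id(w))

assert len(output) == 2      # `a` kept once, `b` kept; order preserved
assert output[0] is a and output[1] is b
```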
1,594
def ancestors_with_self(self) -> list[DOMNode]: nodes: list[MessagePump | None] = [] add_node = nodes.append node: MessagePump | None = self while node is not None: add_node(node) node = node._parent return cast("list[DOMNode]", nodes)
list[DOMNode]: A list of nodes, obtained by tracing a path all the way back to the App. Note: This is inclusive of ``self``.
21
31
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def ancestors_with_self(self) -> list[DOMNode]: nodes: list[MessagePump | None] = [] add_node = nodes.append node: MessagePump | None = self while node is not None: add_node(node) node = node._parent return cast("list[DOMNode]", nodes) ``` ###Assistant : list[DOMNode]: A list of nodes, obtained by tracing a path all the way back to the App. Note: This is inclusive of ``self``.
1,595
def function_converter(self) -> Mapping[str, fields.MetricsFunction]: resolve_metric_id = { "name": "metric_id", "fn": lambda args: self.resolve_metric(args["column"]), } function_converter = { function.name: function for function in [ # Note while the discover version of apdex, count_miserable, user_misery # accepts arguments, because this is precomputed with tags no parameters # are available fields.MetricsFunction( "apdex", optional_args=[fields.NullableNumberRange("satisfaction", 0, None)], snql_distribution=self._resolve_apdex_function, default_result_type="number", ), fields.MetricsFunction( "avg", required_args=[ fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS, ) ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "avgIf", [ Column("value"), Function( "equals", [ Column("metric_id"), args["metric_id"], ], ), ], alias, ), result_type_fn=self.reflective_result_type(), default_result_type="integer", ), fields.MetricsFunction( "count_miserable", required_args=[ fields.MetricArg( "column", allowed_columns=["user"], allow_custom_measurements=False ) ], optional_args=[fields.NullableNumberRange("satisfaction", 0, None)], calculated_args=[resolve_metric_id], snql_set=self._resolve_count_miserable_function, default_result_type="integer", ), fields.MetricsFunction( "count_unparameterized_transactions", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "equals", [ self.builder.column("transaction"), self.builder.resolve_tag_value("<< unparameterized >>"), ], ), ], ), ], alias, ), # Not yet exposed, need to add far more validation around tag&value private=True, default_result_type="integer", ), fields.MetricsFunction( "count_null_transactions", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "equals", [ self.builder.column("transaction"), "" if self.builder.tag_values_are_strings else 0, ], ), ], ), ], alias, ), private=True, ), fields.MetricsFunction( "count_has_transaction_name", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "and", [ Function( "notEquals", [ self.builder.column("transaction"), "" if self.builder.tag_values_are_strings else 0, ], ), Function( "notEquals", [ self.builder.column("transaction"), self.builder.resolve_tag_value( "<< unparameterized >>" ), ], ), ], ), ], ), ], alias, ), private=True, default_result_type="integer", ), fields.MetricsFunction( "user_misery", optional_args=[ fields.NullableNumberRange("satisfaction", 0, None), fields.with_default( constants.MISERY_ALPHA, fields.NumberRange("alpha", 0, None) ), fields.with_default( constants.MISERY_BETA, fields.NumberRange("beta", 0, None) ), ], calculated_args=[], snql_set=self._resolve_user_misery_function, default_result_type="number", ), fields.MetricsFunction( "p50", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.5 ), result_type_fn=self.reflective_result_type(), 
default_result_type="duration", ), fields.MetricsFunction( "p75", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.75 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p90", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.90 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p95", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.95 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p99", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.99 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p100", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile(args, alias, 1), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "max", required_args=[ fields.MetricArg("column"), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "maxIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( "min", required_args=[ fields.MetricArg("column"), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "minIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( "sum", required_args=[ fields.MetricArg("column"), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "sumIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( "sumIf", required_args=[ fields.ColumnTagArg("if_col"), fields.FunctionArg("if_val"), ], calculated_args=[ { "name": "resolved_val", "fn": lambda args: self.builder.resolve_tag_value(args["if_val"]), } ], snql_counter=lambda args, alias: Function( "sumIf", [ Column("value"), Function("equals", [args["if_col"], args["resolved_val"]]), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "percentile", required_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), fields.NumberRange("percentile", 0, 1), ], 
calculated_args=[resolve_metric_id], snql_distribution=self._resolve_percentile, result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "count_unique", required_args=[ fields.MetricArg( "column", allowed_columns=["user"], allow_custom_measurements=False ) ], calculated_args=[resolve_metric_id], snql_set=lambda args, alias: Function( "uniqIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "uniq", snql_set=lambda args, alias: Function( "uniq", [Column("value")], alias, ), ), fields.MetricsFunction( "uniqIf", required_args=[ fields.ColumnTagArg("if_col"), fields.FunctionArg("if_val"), ], calculated_args=[ { "name": "resolved_val", "fn": lambda args: self.builder.resolve_tag_value(args["if_val"]), } ], snql_set=lambda args, alias: Function( "uniqIf", [ Column("value"), Function("equals", [args["if_col"], args["resolved_val"]]), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "count", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "count_web_vitals", required_args=[ fields.MetricArg( "column", allowed_columns=[ "measurements.fp", "measurements.fcp", "measurements.lcp", "measurements.fid", "measurements.cls", ], allow_custom_measurements=False, ), fields.SnQLStringArg( "quality", allowed_strings=["good", "meh", "poor", "any"] ), ], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_web_vital_function, default_result_type="integer", ), fields.MetricsFunction( "epm", snql_distribution=lambda args, alias: Function( "divide", [ Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], ), Function("divide", [args["interval"], 60]), ], alias, ), optional_args=[fields.IntervalDefault("interval", 1, None)], default_result_type="number", ), fields.MetricsFunction( "eps", snql_distribution=lambda args, alias: Function( "divide", [ Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], ), args["interval"], ], alias, ), optional_args=[fields.IntervalDefault("interval", 1, None)], default_result_type="number", ), fields.MetricsFunction( "failure_count", snql_distribution=self._resolve_failure_count, default_result_type="integer", ), fields.MetricsFunction( "failure_rate", snql_distribution=lambda args, alias: Function( "divide", [ self._resolve_failure_count(args), Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], ), ], alias, ), default_result_type="percentage", ), fields.MetricsFunction( "histogram", required_args=[fields.MetricArg("column")], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_histogram_function, default_result_type="number", private=True, ), ] } for alias, name in constants.FUNCTION_ALIASES.items(): if name in function_converter: function_converter[alias] = function_converter[name].alias_as(alias) return function_converter # Field Aliases
While the final functions in clickhouse must have their -Merge combinators in order to function, we don't need to add them here since snuba has a FunctionMapper that will add it for us. Basically it turns expressions like quantiles(0.9)(value) into quantilesMerge(0.9)(percentiles). Make sure to update METRIC_FUNCTION_LIST_BY_TYPE when adding functions here; this can't be a dynamic list since the Metric Layer will actually handle which dataset each function goes to.
68
747
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def function_converter(self) -> Mapping[str, fields.MetricsFunction]: resolve_metric_id = { "name": "metric_id", "fn": lambda args: self.resolve_metric(args["column"]), } function_converter = { function.name: function for function in [ # Note while the discover version of apdex, count_miserable, user_misery # accepts arguments, because this is precomputed with tags no parameters # are available fields.MetricsFunction( "apdex", optional_args=[fields.NullableNumberRange("satisfaction", 0, None)], snql_distribution=self._resolve_apdex_function, default_result_type="number", ), fields.MetricsFunction( "avg", required_args=[ fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS, ) ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "avgIf", [ Column("value"), Function( "equals", [ Column("metric_id"), args["metric_id"], ], ), ], alias, ), result_type_fn=self.reflective_result_type(), default_result_type="integer", ), fields.MetricsFunction( "count_miserable", required_args=[ fields.MetricArg( "column", allowed_columns=["user"], allow_custom_measurements=False ) ], optional_args=[fields.NullableNumberRange("satisfaction", 0, None)], calculated_args=[resolve_metric_id], snql_set=self._resolve_count_miserable_function, default_result_type="integer", ), fields.MetricsFunction( "count_unparameterized_transactions", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "equals", [ self.builder.column("transaction"), self.builder.resolve_tag_value("<< unparameterized >>"), ], ), ], ), ], alias, ), # Not yet exposed, need to add far more validation around tag&value private=True, default_result_type="integer", ), fields.MetricsFunction( "count_null_transactions", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "equals", [ self.builder.column("transaction"), "" if self.builder.tag_values_are_strings else 0, ], ), ], ), ], alias, ), private=True, ), fields.MetricsFunction( "count_has_transaction_name", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "and", [ Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), Function( "and", [ Function( "notEquals", [ self.builder.column("transaction"), "" if self.builder.tag_values_are_strings else 0, ], ), Function( "notEquals", [ self.builder.column("transaction"), self.builder.resolve_tag_value( "<< unparameterized >>" ), ], ), ], ), ], ), ], alias, ), private=True, default_result_type="integer", ), fields.MetricsFunction( "user_misery", optional_args=[ fields.NullableNumberRange("satisfaction", 0, None), fields.with_default( constants.MISERY_ALPHA, fields.NumberRange("alpha", 0, None) ), fields.with_default( constants.MISERY_BETA, fields.NumberRange("beta", 0, None) ), ], calculated_args=[], snql_set=self._resolve_user_misery_function, default_result_type="number", ), fields.MetricsFunction( "p50", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: 
self._resolve_percentile( args, alias, 0.5 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p75", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.75 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p90", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.90 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p95", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.95 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p99", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile( args, alias, 0.99 ), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "p100", optional_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: self._resolve_percentile(args, alias, 1), result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "max", required_args=[ fields.MetricArg("column"), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "maxIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( "min", required_args=[ fields.MetricArg("column"), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "minIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( "sum", required_args=[ fields.MetricArg("column"), ], calculated_args=[resolve_metric_id], snql_distribution=lambda args, alias: Function( "sumIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), result_type_fn=self.reflective_result_type(), ), fields.MetricsFunction( "sumIf", required_args=[ fields.ColumnTagArg("if_col"), fields.FunctionArg("if_val"), ], calculated_args=[ { "name": "resolved_val", "fn": lambda args: self.builder.resolve_tag_value(args["if_val"]), } ], snql_counter=lambda args, alias: Function( "sumIf", [ Column("value"), Function("equals", [args["if_col"], args["resolved_val"]]), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "percentile", required_args=[ fields.with_default( "transaction.duration", fields.MetricArg( "column", 
allowed_columns=constants.METRIC_DURATION_COLUMNS ), ), fields.NumberRange("percentile", 0, 1), ], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_percentile, result_type_fn=self.reflective_result_type(), default_result_type="duration", ), fields.MetricsFunction( "count_unique", required_args=[ fields.MetricArg( "column", allowed_columns=["user"], allow_custom_measurements=False ) ], calculated_args=[resolve_metric_id], snql_set=lambda args, alias: Function( "uniqIf", [ Column("value"), Function("equals", [Column("metric_id"), args["metric_id"]]), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "uniq", snql_set=lambda args, alias: Function( "uniq", [Column("value")], alias, ), ), fields.MetricsFunction( "uniqIf", required_args=[ fields.ColumnTagArg("if_col"), fields.FunctionArg("if_val"), ], calculated_args=[ { "name": "resolved_val", "fn": lambda args: self.builder.resolve_tag_value(args["if_val"]), } ], snql_set=lambda args, alias: Function( "uniqIf", [ Column("value"), Function("equals", [args["if_col"], args["resolved_val"]]), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "count", snql_distribution=lambda args, alias: Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], alias, ), default_result_type="integer", ), fields.MetricsFunction( "count_web_vitals", required_args=[ fields.MetricArg( "column", allowed_columns=[ "measurements.fp", "measurements.fcp", "measurements.lcp", "measurements.fid", "measurements.cls", ], allow_custom_measurements=False, ), fields.SnQLStringArg( "quality", allowed_strings=["good", "meh", "poor", "any"] ), ], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_web_vital_function, default_result_type="integer", ), fields.MetricsFunction( "epm", snql_distribution=lambda args, alias: Function( "divide", [ Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], ), Function("divide", [args["interval"], 60]), ], alias, ), optional_args=[fields.IntervalDefault("interval", 1, None)], default_result_type="number", ), fields.MetricsFunction( "eps", snql_distribution=lambda args, alias: Function( "divide", [ Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], ), args["interval"], ], alias, ), optional_args=[fields.IntervalDefault("interval", 1, None)], default_result_type="number", ), fields.MetricsFunction( "failure_count", snql_distribution=self._resolve_failure_count, default_result_type="integer", ), fields.MetricsFunction( "failure_rate", snql_distribution=lambda args, alias: Function( "divide", [ self._resolve_failure_count(args), Function( "countIf", [ Column("value"), Function( "equals", [ Column("metric_id"), self.resolve_metric("transaction.duration"), ], ), ], ), ], alias, ), default_result_type="percentage", ), fields.MetricsFunction( "histogram", required_args=[fields.MetricArg("column")], calculated_args=[resolve_metric_id], snql_distribution=self._resolve_histogram_function, default_result_type="number", private=True, ), ] } for alias, name in constants.FUNCTION_ALIASES.items(): if name in function_converter: function_converter[alias] = function_converter[name].alias_as(alias) return function_converter # Field Aliases ``` ###Assistant : While the final functions in clickhouse must have their -Merge combinators in order to 
function, we don't need to add them here since snuba has a FunctionMapper that will add it for us. Basically it turns expressions like quantiles(0.9)(value) into quantilesMerge(0.9)(percentiles). Make sure to update METRIC_FUNCTION_LIST_BY_TYPE when adding functions here; this can't be a dynamic list since the Metric Layer will actually handle which dataset each function goes to.
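The alias loop at the end of `function_converter` is a common registry pattern: an alias entry is a renamed copy of an existing function object. A minimal sketch with stand-in types; the registry contents and `alias_as` here are illustrative, not the real sentry API:
```Python
class Fn:
    def __init__(self, name):
        self.name = name

    def alias_as(self, alias):
        # Return a renamed copy so the canonical registration is untouched.
        return Fn(alias)

registry = {"percentile": Fn("percentile")}
aliases = {"pXX": "percentile"}  # hypothetical alias -> canonical name

for alias, name in aliases.items():
    if name in registry:
        registry[alias] = registry[name].alias_as(alias)

assert registry["pXX"].name == "pXX"
```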
1,596
def copyUsedDLLs(source_dir, dist_dir, standalone_entry_points): # This is terribly complex, because we check the list of used DLLs # trying to avoid duplicates, and detecting errors with them not # being binary identical, so we can report them. And then of course # we also need to handle OS specifics. # pylint: disable=too-many-branches,too-many-locals,too-many-statements used_dlls = detectUsedDLLs( source_dir=source_dir, standalone_entry_points=standalone_entry_points, use_cache=not Options.shallNotUseDependsExeCachedResults() and not Options.getWindowsDependencyTool() == "depends.exe", update_cache=not Options.shallNotStoreDependsExeCachedResults() and not Options.getWindowsDependencyTool() == "depends.exe", ) removed_dlls = set() warned_about = set() # First make checks and remove some. for dll_filename1, sources1 in tuple(iterItems(used_dlls)): if dll_filename1 in removed_dlls: continue for dll_filename2, sources2 in tuple(iterItems(used_dlls)): if dll_filename1 == dll_filename2: continue if dll_filename2 in removed_dlls: continue # Colliding basenames are an issue to us. if os.path.basename(dll_filename1) != os.path.basename(dll_filename2): continue # May already have been removed earlier if dll_filename1 not in used_dlls: continue if dll_filename2 not in used_dlls: continue dll_name = os.path.basename(dll_filename1) if Options.isShowInclusion(): inclusion_logger.info( % (dll_name, dll_filename1, dll_filename2) ) # Check that if a DLL has the same name, if it's identical, then it's easy. if haveSameFileContents(dll_filename1, dll_filename2): del used_dlls[dll_filename2] removed_dlls.add(dll_filename2) continue # For Win32 we can check out file versions. if Utils.isWin32Windows(): dll_version1 = getWindowsDLLVersion(dll_filename1) dll_version2 = getWindowsDLLVersion(dll_filename2) if dll_version2 < dll_version1: del used_dlls[dll_filename2] removed_dlls.add(dll_filename2) solved = True elif dll_version1 < dll_version2: del used_dlls[dll_filename1] removed_dlls.add(dll_filename1) solved = True else: solved = False if solved: if dll_name not in warned_about and dll_name not in ms_runtime_dlls: warned_about.add(dll_name) inclusion_logger.warning( "Conflicting DLLs for '%s' in your installation, newest file version used, hoping for the best." % dll_name ) continue # So we have conflicting DLLs, in which case we do report the fact. inclusion_logger.warning( % ( dll_name, dll_filename1, "\n ".join(sources1), dll_filename2, "\n ".join(sources2), ) ) del used_dlls[dll_filename2] removed_dlls.add(dll_filename2) dll_map = [] for dll_filename, sources in iterItems(used_dlls): dll_name = os.path.basename(dll_filename) target_path = os.path.join(dist_dir, dll_name) # Sometimes DLL dependencies were copied there already. if not os.path.exists(target_path): copyFile(dll_filename, target_path) dll_map.append((dll_filename, dll_name)) if Options.isShowInclusion(): inclusion_logger.info( "Included used shared library '%s' (used by %s)." % (dll_filename, ", ".join(sources)) ) if Utils.isMacOS(): # For macOS, the binary and the DLLs need to be changed to reflect # the relative DLL location in the ".dist" folder. 
for standalone_entry_point in standalone_entry_points: fixupBinaryDLLPathsMacOS( binary_filename=standalone_entry_point.dest_path, dll_map=dll_map, original_location=standalone_entry_point.source_path, ) for original_path, dll_filename in dll_map: fixupBinaryDLLPathsMacOS( binary_filename=os.path.join(dist_dir, dll_filename), dll_map=dll_map, original_location=original_path, ) # Remove code signature from CPython installed library candidate = os.path.join( dist_dir, "Python", ) if os.path.exists(candidate): removeMacOSCodeSignature(candidate) # Remove or update rpath settings. if Utils.getOS() in ("Linux", "Darwin"): # For Linux, the "rpath" of libraries may be an issue and must be # removed. if Utils.isMacOS(): start = 0 else: start = 1 for standalone_entry_point in standalone_entry_points[start:]: count = relpath( path=standalone_entry_point.dest_path, start=dist_dir ).count(os.path.sep) rpath = os.path.join("$ORIGIN", *([".."] * count)) setSharedLibraryRPATH(standalone_entry_point.dest_path, rpath) for _original_path, dll_filename in dll_map: setSharedLibraryRPATH(os.path.join(dist_dir, dll_filename), "$ORIGIN") if Utils.isWin32Windows(): if python_version < 0x300: # For Win32, we might have to remove SXS paths for standalone_entry_point in standalone_entry_points[1:]: removeSxsFromDLL(standalone_entry_point.dest_path) for _original_path, dll_filename in dll_map: removeSxsFromDLL(os.path.join(dist_dir, dll_filename))
Colliding DLL names for %s, checking identity of \ '%s' <-> '%s'.\ Ignoring non-identical DLLs for '%s'. %s used by: %s different from %s used by %s
27
477
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def copyUsedDLLs(source_dir, dist_dir, standalone_entry_points): # This is terribly complex, because we check the list of used DLLs # trying to avoid duplicates, and detecting errors with them not # being binary identical, so we can report them. And then of course # we also need to handle OS specifics. # pylint: disable=too-many-branches,too-many-locals,too-many-statements used_dlls = detectUsedDLLs( source_dir=source_dir, standalone_entry_points=standalone_entry_points, use_cache=not Options.shallNotUseDependsExeCachedResults() and not Options.getWindowsDependencyTool() == "depends.exe", update_cache=not Options.shallNotStoreDependsExeCachedResults() and not Options.getWindowsDependencyTool() == "depends.exe", ) removed_dlls = set() warned_about = set() # First make checks and remove some. for dll_filename1, sources1 in tuple(iterItems(used_dlls)): if dll_filename1 in removed_dlls: continue for dll_filename2, sources2 in tuple(iterItems(used_dlls)): if dll_filename1 == dll_filename2: continue if dll_filename2 in removed_dlls: continue # Colliding basenames are an issue to us. if os.path.basename(dll_filename1) != os.path.basename(dll_filename2): continue # May already have been removed earlier if dll_filename1 not in used_dlls: continue if dll_filename2 not in used_dlls: continue dll_name = os.path.basename(dll_filename1) if Options.isShowInclusion(): inclusion_logger.info( % (dll_name, dll_filename1, dll_filename2) ) # Check that if a DLL has the same name, if it's identical, then it's easy. if haveSameFileContents(dll_filename1, dll_filename2): del used_dlls[dll_filename2] removed_dlls.add(dll_filename2) continue # For Win32 we can check out file versions. if Utils.isWin32Windows(): dll_version1 = getWindowsDLLVersion(dll_filename1) dll_version2 = getWindowsDLLVersion(dll_filename2) if dll_version2 < dll_version1: del used_dlls[dll_filename2] removed_dlls.add(dll_filename2) solved = True elif dll_version1 < dll_version2: del used_dlls[dll_filename1] removed_dlls.add(dll_filename1) solved = True else: solved = False if solved: if dll_name not in warned_about and dll_name not in ms_runtime_dlls: warned_about.add(dll_name) inclusion_logger.warning( "Conflicting DLLs for '%s' in your installation, newest file version used, hoping for the best." % dll_name ) continue # So we have conflicting DLLs, in which case we do report the fact. inclusion_logger.warning( % ( dll_name, dll_filename1, "\n ".join(sources1), dll_filename2, "\n ".join(sources2), ) ) del used_dlls[dll_filename2] removed_dlls.add(dll_filename2) dll_map = [] for dll_filename, sources in iterItems(used_dlls): dll_name = os.path.basename(dll_filename) target_path = os.path.join(dist_dir, dll_name) # Sometimes DLL dependencies were copied there already. if not os.path.exists(target_path): copyFile(dll_filename, target_path) dll_map.append((dll_filename, dll_name)) if Options.isShowInclusion(): inclusion_logger.info( "Included used shared library '%s' (used by %s)." % (dll_filename, ", ".join(sources)) ) if Utils.isMacOS(): # For macOS, the binary and the DLLs need to be changed to reflect # the relative DLL location in the ".dist" folder. 
for standalone_entry_point in standalone_entry_points: fixupBinaryDLLPathsMacOS( binary_filename=standalone_entry_point.dest_path, dll_map=dll_map, original_location=standalone_entry_point.source_path, ) for original_path, dll_filename in dll_map: fixupBinaryDLLPathsMacOS( binary_filename=os.path.join(dist_dir, dll_filename), dll_map=dll_map, original_location=original_path, ) # Remove code signature from CPython installed library candidate = os.path.join( dist_dir, "Python", ) if os.path.exists(candidate): removeMacOSCodeSignature(candidate) # Remove or update rpath settings. if Utils.getOS() in ("Linux", "Darwin"): # For Linux, the "rpath" of libraries may be an issue and must be # removed. if Utils.isMacOS(): start = 0 else: start = 1 for standalone_entry_point in standalone_entry_points[start:]: count = relpath( path=standalone_entry_point.dest_path, start=dist_dir ).count(os.path.sep) rpath = os.path.join("$ORIGIN", *([".."] * count)) setSharedLibraryRPATH(standalone_entry_point.dest_path, rpath) for _original_path, dll_filename in dll_map: setSharedLibraryRPATH(os.path.join(dist_dir, dll_filename), "$ORIGIN") if Utils.isWin32Windows(): if python_version < 0x300: # For Win32, we might have to remove SXS paths for standalone_entry_point in standalone_entry_points[1:]: removeSxsFromDLL(standalone_entry_point.dest_path) for _original_path, dll_filename in dll_map: removeSxsFromDLL(os.path.join(dist_dir, dll_filename)) ``` ###Assistant : Colliding DLL names for %s, checking identity of \ '%s' <-> '%s'.\ Ignoring non-identical DLLs for '%s'. %s used by: %s different from %s used by %s
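The row above centres on one policy: two DLLs that collide on basename may be merged only if they are byte-identical, and on Win32 the newer file version otherwise wins. A minimal standalone sketch of that policy, assuming a hash-based identity check in place of Nuitka's `haveSameFileContents`; `file_version` is a hypothetical stand-in for `getWindowsDLLVersion`:

```python
import hashlib

def same_contents(path_a, path_b):
    # Colliding basenames are harmless only when the files are byte-identical;
    # hashing both files is one cheap way to check that.
    def digest(path):
        with open(path, "rb") as f:
            return hashlib.sha256(f.read()).digest()
    return digest(path_a) == digest(path_b)

def pick_colliding_dll(path_a, path_b, file_version):
    # Identical files: keep either one. Otherwise prefer the newer file
    # version, mirroring the Win32 branch of copyUsedDLLs; file_version is
    # an assumed callable returning a comparable version tuple.
    if same_contents(path_a, path_b):
        return path_a
    return path_a if file_version(path_a) >= file_version(path_b) else path_b
```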
1,597
def _laplace_rule_diff(f, t, s, doit=True, **hints): hints.pop('simplify', True) a = Wild('a', exclude=[t]) y = Wild('y') n = Wild('n', exclude=[t]) g = WildFunction('g', nargs=1) ma1 = f.match(a*Derivative(g, (t, n))) if ma1 and ma1[g].args[0] == t and ma1[n].is_integer: debug('_laplace_apply_rules match:') debug(' f: %s'%(f,)) debug(' rule: time derivative (1.11, 1.12)') d = [] for k in range(ma1[n]): if k==0: y = ma1[g].func(t).subs(t, 0) else: y = Derivative(ma1[g].func(t), (t, k)).subs(t, 0) d.append(s**(ma1[n]-k-1)*y) r = s**ma1[n]*_laplace_apply_rules(ma1[g].func(t), t, s, doit=doit, **hints) return ma1[a]*(r - Add(*d)) return None
This internal helper function tries to transform an expression containing a derivative of an undefined function, returning `None` if it cannot.
24
81
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _laplace_rule_diff(f, t, s, doit=True, **hints): hints.pop('simplify', True) a = Wild('a', exclude=[t]) y = Wild('y') n = Wild('n', exclude=[t]) g = WildFunction('g', nargs=1) ma1 = f.match(a*Derivative(g, (t, n))) if ma1 and ma1[g].args[0] == t and ma1[n].is_integer: debug('_laplace_apply_rules match:') debug(' f: %s'%(f,)) debug(' rule: time derivative (1.11, 1.12)') d = [] for k in range(ma1[n]): if k==0: y = ma1[g].func(t).subs(t, 0) else: y = Derivative(ma1[g].func(t), (t, k)).subs(t, 0) d.append(s**(ma1[n]-k-1)*y) r = s**ma1[n]*_laplace_apply_rules(ma1[g].func(t), t, s, doit=doit, **hints) return ma1[a]*(r - Add(*d)) return None ``` ###Assistant : This internal helper function tries to transform an expression containing a derivative of an undefined function and returns `None` if it cannot do it.
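The matcher in `_laplace_rule_diff` implements the standard time-derivative rule that its debug message labels (1.11, 1.12); writing the rule out makes the loop over `d` easy to follow. With F(s) the transform of f(t), the rule (quoted from the standard transform tables, not from this source) is:

```latex
\mathcal{L}\left\{ \frac{d^{n} f}{dt^{n}} \right\}(s)
  = s^{n} F(s) \;-\; \sum_{k=0}^{n-1} s^{\,n-1-k}\, f^{(k)}(0)
```

In the code, `r` supplies the s^n F(s) term, each `d.append(s**(ma1[n]-k-1)*y)` contributes one s^(n-1-k) f^(k)(0) summand, and the prefactor `ma1[a]` carries linearity through, giving `ma1[a]*(r - Add(*d))`.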
1,598
def _check_flag(user, flag, attributes, user_flags_settings): new_flag = False is_role_key = "is_%s_role" % (flag) is_attr_key = "is_%s_attr" % (flag) is_value_key = "is_%s_value" % (flag) remove_setting = "remove_%ss" % (flag) # Check to see if we are respecting a role and, if so, does our user have that role? required_roles = user_flags_settings.get(is_role_key, None) if required_roles: matching_roles = _get_matches(required_roles, attributes.get('Role', [])) # We do a 2 layer check here so that we don't spit out the else message if there is no role defined if matching_roles: logger.debug("User %s has %s role(s) %s" % (user.username, flag, ', '.join(matching_roles))) new_flag = True else: logger.debug("User %s is missing the %s role(s) %s" % (user.username, flag, ', '.join(required_roles))) # Next, check to see if we are respecting an attribute; this will take priority over the role if its defined attr_setting = user_flags_settings.get(is_attr_key, None) if attr_setting and attributes.get(attr_setting, None): # Do we have a required value for the attribute required_value = user_flags_settings.get(is_value_key, None) if required_value: # If so, check and see if the value of the attr matches the required value saml_user_attribute_value = attributes.get(attr_setting, None) matching_values = _get_matches(required_value, saml_user_attribute_value) if matching_values: logger.debug("Giving %s %s from attribute %s with matching values %s" % (user.username, flag, attr_setting, ', '.join(matching_values))) new_flag = True # if they don't match make sure that new_flag is false else: logger.debug( "Refusing %s for %s because attr %s (%s) did not match value(s) %s" % (flag, user.username, attr_setting, ", ".join(saml_user_attribute_value), ', '.join(required_value)) ) new_flag = False # If there was no required value then we can just allow them in because of the attribute else: logger.debug("Giving %s %s from attribute %s" % (user.username, flag, attr_setting)) new_flag = True # Get the users old flag old_value = getattr(user, "is_%s" % (flag)) # If we are not removing the flag and they were a system admin and now we don't want them to be just return remove_flag = user_flags_settings.get(remove_setting, True) if not remove_flag and (old_value and not new_flag): logger.debug("Remove flag %s preventing removal of %s for %s" % (remove_flag, flag, user.username)) return old_value, False # If the user was flagged and we are going to make them not flagged make sure there is a message if old_value and not new_flag: logger.debug("Revoking %s from %s" % (flag, user.username)) return new_flag, old_value != new_flag
Helper function to set the is_superuser and is_system_auditor flags for the SAML adapter. Returns the new flag and whether or not the flag changed.
24
374
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_flag(user, flag, attributes, user_flags_settings): new_flag = False is_role_key = "is_%s_role" % (flag) is_attr_key = "is_%s_attr" % (flag) is_value_key = "is_%s_value" % (flag) remove_setting = "remove_%ss" % (flag) # Check to see if we are respecting a role and, if so, does our user have that role? required_roles = user_flags_settings.get(is_role_key, None) if required_roles: matching_roles = _get_matches(required_roles, attributes.get('Role', [])) # We do a 2 layer check here so that we don't spit out the else message if there is no role defined if matching_roles: logger.debug("User %s has %s role(s) %s" % (user.username, flag, ', '.join(matching_roles))) new_flag = True else: logger.debug("User %s is missing the %s role(s) %s" % (user.username, flag, ', '.join(required_roles))) # Next, check to see if we are respecting an attribute; this will take priority over the role if its defined attr_setting = user_flags_settings.get(is_attr_key, None) if attr_setting and attributes.get(attr_setting, None): # Do we have a required value for the attribute required_value = user_flags_settings.get(is_value_key, None) if required_value: # If so, check and see if the value of the attr matches the required value saml_user_attribute_value = attributes.get(attr_setting, None) matching_values = _get_matches(required_value, saml_user_attribute_value) if matching_values: logger.debug("Giving %s %s from attribute %s with matching values %s" % (user.username, flag, attr_setting, ', '.join(matching_values))) new_flag = True # if they don't match make sure that new_flag is false else: logger.debug( "Refusing %s for %s because attr %s (%s) did not match value(s) %s" % (flag, user.username, attr_setting, ", ".join(saml_user_attribute_value), ', '.join(required_value)) ) new_flag = False # If there was no required value then we can just allow them in because of the attribute else: logger.debug("Giving %s %s from attribute %s" % (user.username, flag, attr_setting)) new_flag = True # Get the users old flag old_value = getattr(user, "is_%s" % (flag)) # If we are not removing the flag and they were a system admin and now we don't want them to be just return remove_flag = user_flags_settings.get(remove_setting, True) if not remove_flag and (old_value and not new_flag): logger.debug("Remove flag %s preventing removal of %s for %s" % (remove_flag, flag, user.username)) return old_value, False # If the user was flagged and we are going to make them not flagged make sure there is a message if old_value and not new_flag: logger.debug("Revoking %s from %s" % (flag, user.username)) return new_flag, old_value != new_flag ``` ###Assistant : Helper function to set the is_superuser is_system_auditor flags for the SAML adapter Returns the new flag and whether or not it changed the flag
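`_check_flag` consults four settings keys per flag, derived from the `is_%s_role` / `is_%s_attr` / `is_%s_value` / `remove_%ss` templates in the code. A hypothetical settings dict for `flag="superuser"` — the key names follow those templates, but every concrete value here is invented for illustration:

```python
# Invented example values; only the key names are dictated by the templates
# in _check_flag ("is_%s_role", "is_%s_attr", "is_%s_value", "remove_%ss").
user_flags_settings = {
    "is_superuser_role": ["awx_admins"],   # grant if the SAML 'Role' attribute matches
    "is_superuser_attr": "entitlements",   # attribute check takes priority over the role
    "is_superuser_value": ["superuser"],   # required value(s) for that attribute
    "remove_superusers": True,             # False would forbid revoking the flag again
}

# new_flag, changed = _check_flag(user, "superuser", attributes, user_flags_settings)
```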
1,599
def test_get_backfill_points_in_room(self): setup_info = self._setup_room_for_backfill_tests() room_id = setup_info.room_id backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertListEqual( backfill_event_ids, ["b6", "b5", "b4", "2", "b3", "b2", "b1"] )
Test to make sure we get the expected backfill points, in the expected order
9
30
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_backfill_points_in_room(self): setup_info = self._setup_room_for_backfill_tests() room_id = setup_info.room_id backfill_points = self.get_success( self.store.get_backfill_points_in_room(room_id) ) backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points] self.assertListEqual( backfill_event_ids, ["b6", "b5", "b4", "2", "b3", "b2", "b1"] ) ``` ###Assistant : Test to make sure we get some backfill points
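The test compares event ids only, not whole rows. A minimal sketch of the unpacking it relies on, assuming `get_backfill_points_in_room` yields `(event_id, depth)`-style tuples — the tuple shape and the depth values below are assumptions inferred from the `backfill_point[0]` indexing, not taken from the source:

```python
# Standalone illustration of the list comprehension in the test above.
backfill_points = [
    ("b6", 6), ("b5", 5), ("b4", 4), ("2", 2), ("b3", 2), ("b2", 2), ("b1", 2),
]
backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
assert backfill_event_ids == ["b6", "b5", "b4", "2", "b3", "b2", "b1"]
```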