Unnamed: 0 (int64, 0-2.93k) | code (string, 101-62.2k chars) | docs (string, 51-10.7k chars) | doc_len (int64, 4-1.74k) | words (int64, 4-4.82k) | lang (1 class: Python) | prompt (string, 320-71.2k chars)
---|---|---|---|---|---|---
2,800 | async def test_missing_tones_list(hass):
siren = MockSirenEntity(SirenEntityFeature.TONES, ["a", "b"])
siren.hass = hass
with pytest.raises(ValueError):
process_turn_on_params(siren, {"tone": "test"})
| Test ValueError when setting a tone that is missing from the available_tones list. | 12 | 16 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def test_missing_tones_list(hass):
siren = MockSirenEntity(SirenEntityFeature.TONES, ["a", "b"])
siren.hass = hass
with pytest.raises(ValueError):
process_turn_on_params(siren, {"tone": "test"})
```
###Assistant : Test ValueError when setting a tone that is missing from the available_tones list.
|
2,801 | def task_failed_deps(args):
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
ti = _get_ti(task, args.execution_date_or_run_id, args.map_index)
dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
# TODO: Do we want to print or log this?
if failed_deps:
print("Task instance dependencies not met:")
for dep in failed_deps:
print(f"{dep.dep_name}: {dep.reason}")
else:
print("Task instance dependencies are all met.")
@cli_utils.action_cli(check_db=False)
@suppress_logs_and_warning |
Returns the unmet dependencies for a task instance from the perspective of the
scheduler (i.e. why a task instance doesn't get scheduled and then queued by the
scheduler, and then run by an executor).
>>> airflow tasks failed-deps tutorial sleep 2015-01-01
Task instance dependencies not met:
Dagrun Running: Task instance's dagrun did not exist: Unknown reason
Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks
to have succeeded, but found 1 non-success(es).
| 73 | 52 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def task_failed_deps(args):
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
ti = _get_ti(task, args.execution_date_or_run_id, args.map_index)
dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
# TODO: Do we want to print or log this?
if failed_deps:
print("Task instance dependencies not met:")
for dep in failed_deps:
print(f"{dep.dep_name}: {dep.reason}")
else:
print("Task instance dependencies are all met.")
@cli_utils.action_cli(check_db=False)
@suppress_logs_and_warning
```
###Assistant :
Returns the unmet dependencies for a task instance from the perspective of the
scheduler (i.e. why a task instance doesn't get scheduled and then queued by the
scheduler, and then run by an executor).
>>> airflow tasks failed-deps tutorial sleep 2015-01-01
Task instance dependencies not met:
Dagrun Running: Task instance's dagrun did not exist: Unknown reason
Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks
to have succeeded, but found 1 non-success(es).
|
2,802 | def load(self, rawdata):
if isinstance(rawdata, str):
self.__parse_string(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for key, value in rawdata.items():
self[key] = value
return
| Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
| 24 | 24 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def load(self, rawdata):
if isinstance(rawdata, str):
self.__parse_string(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for key, value in rawdata.items():
self[key] = value
return
```
###Assistant : Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
|
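For reference, this appears to match the standard library's `http.cookies.BaseCookie.load`; a quick illustration with `SimpleCookie` showing both the string and the dictionary paths:
```Python
from http.cookies import SimpleCookie

cookie = SimpleCookie()
cookie.load("session=abc123; theme=dark")  # parse an HTTP_COOKIE-style string
cookie.load({"lang": "en"})               # dict path: goes through __setitem__
print(cookie["session"].value)            # -> abc123
```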
2,803 | def _reset_major_tick_kw(self, keep_tick_and_label_visibility=False):
backup = {name: value for name, value in self._major_tick_kw.items()
if name in ['tick1On', 'tick2On', 'label1On', 'label2On']}
self._major_tick_kw.clear()
if keep_tick_and_label_visibility:
self._major_tick_kw.update(backup)
self._major_tick_kw['gridOn'] = (
mpl.rcParams['axes.grid'] and
mpl.rcParams['axes.grid.which'] in ('both', 'major'))
|
Reset major tick params to defaults.
Shared subplots pre-configure tick and label visibility. To keep this
beyond an Axis.clear() operation, we may
*keep_tick_and_label_visibility*.
| 23 | 32 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _reset_major_tick_kw(self, keep_tick_and_label_visibility=False):
backup = {name: value for name, value in self._major_tick_kw.items()
if name in ['tick1On', 'tick2On', 'label1On', 'label2On']}
self._major_tick_kw.clear()
if keep_tick_and_label_visibility:
self._major_tick_kw.update(backup)
self._major_tick_kw['gridOn'] = (
mpl.rcParams['axes.grid'] and
mpl.rcParams['axes.grid.which'] in ('both', 'major'))
```
###Assistant :
Reset major tick params to defaults.
Shared subplots pre-configure tick and label visibility. To keep this
beyond an Axis.clear() operation, we may
*keep_tick_and_label_visibility*.
|
2,804 | def test_class_variable_to_instance(self):
logger = self.logger
self.assertEqual(logger._to_exclude, logger._exclude_results)
self.assertEqual(logger._to_system, logger._system_results)
self.assertEqual(logger._to_other, logger._other_results)
self.assertEqual(logger._to_episodes, logger._episode_results)
| Test that class variables get properly assigned to instance
variables.
| 10 | 13 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_class_variable_to_instance(self):
logger = self.logger
self.assertEqual(logger._to_exclude, logger._exclude_results)
self.assertEqual(logger._to_system, logger._system_results)
self.assertEqual(logger._to_other, logger._other_results)
self.assertEqual(logger._to_episodes, logger._episode_results)
```
###Assistant : Test that class variables get properly assigned to instance
variables.
|
2,805 | def detect_all(byte_str, ignore_threshold=False):
if not isinstance(byte_str, bytearray):
if not isinstance(byte_str, bytes):
raise TypeError(
f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
)
byte_str = bytearray(byte_str)
detector = UniversalDetector()
detector.feed(byte_str)
detector.close()
if detector.input_state == InputState.HIGH_BYTE:
results = []
probers = []
for prober in detector.charset_probers:
if hasattr(prober, "probers"):
probers.extend(p for p in prober.probers)
else:
probers.append(prober)
for prober in probers:
if ignore_threshold or prober.get_confidence() > detector.MINIMUM_THRESHOLD:
charset_name = prober.charset_name or ""
lower_charset_name = charset_name.lower()
# Use Windows encoding name instead of ISO-8859 if we saw any
# extra Windows-specific bytes
if lower_charset_name.startswith("iso-8859") and detector.has_win_bytes:
charset_name = detector.ISO_WIN_MAP.get(
lower_charset_name, charset_name
)
results.append(
{
"encoding": charset_name,
"confidence": prober.get_confidence(),
"language": prober.language,
}
)
if len(results) > 0:
return sorted(results, key=lambda result: -result["confidence"])
return [detector.result]
|
Detect all the possible encodings of the given byte string.
:param byte_str: The byte sequence to examine.
:type byte_str: ``bytes`` or ``bytearray``
:param ignore_threshold: Include encodings that are below
``UniversalDetector.MINIMUM_THRESHOLD``
in results.
:type ignore_threshold: ``bool``
| 35 | 120 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def detect_all(byte_str, ignore_threshold=False):
if not isinstance(byte_str, bytearray):
if not isinstance(byte_str, bytes):
raise TypeError(
f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
)
byte_str = bytearray(byte_str)
detector = UniversalDetector()
detector.feed(byte_str)
detector.close()
if detector.input_state == InputState.HIGH_BYTE:
results = []
probers = []
for prober in detector.charset_probers:
if hasattr(prober, "probers"):
probers.extend(p for p in prober.probers)
else:
probers.append(prober)
for prober in probers:
if ignore_threshold or prober.get_confidence() > detector.MINIMUM_THRESHOLD:
charset_name = prober.charset_name or ""
lower_charset_name = charset_name.lower()
# Use Windows encoding name instead of ISO-8859 if we saw any
# extra Windows-specific bytes
if lower_charset_name.startswith("iso-8859") and detector.has_win_bytes:
charset_name = detector.ISO_WIN_MAP.get(
lower_charset_name, charset_name
)
results.append(
{
"encoding": charset_name,
"confidence": prober.get_confidence(),
"language": prober.language,
}
)
if len(results) > 0:
return sorted(results, key=lambda result: -result["confidence"])
return [detector.result]
```
###Assistant :
Detect all the possible encodings of the given byte string.
:param byte_str: The byte sequence to examine.
:type byte_str: ``bytes`` or ``bytearray``
:param ignore_threshold: Include encodings that are below
``UniversalDetector.MINIMUM_THRESHOLD``
in results.
:type ignore_threshold: ``bool``
|
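This function appears to come from the chardet package; a minimal usage sketch, assuming chardet 5.x where the `ignore_threshold` parameter is available:
```Python
import chardet

raw = "Größe".encode("cp1252")  # bytes with high-byte characters
for guess in chardet.detect_all(raw, ignore_threshold=True):
    print(guess["encoding"], guess["confidence"], guess["language"])
```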
2,806 | def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
if names is None: # simple value lookup
return cls.__new__(cls, value)
# otherwise, functional API: we're creating a new Enum type
return cls._create_(
value,
names,
module=module,
qualname=qualname,
type=type,
start=start,
)
|
Either returns an existing member, or creates a new enum class.
This method is used both when an enum class is given a value to match
to an enumeration member (i.e. Color(3)) and for the functional API
(i.e. Color = Enum('Color', names='RED GREEN BLUE')).
When used for the functional API:
`value` will be the name of the new class.
`names` should be either a string of white-space/comma delimited names
(values will start at `start`), or an iterator/mapping of name, value pairs.
`module` should be set to the module this class is being created in;
if it is not set, an attempt to find that module will be made, but if
it fails the class will not be picklable.
`qualname` should be set to the actual location this class can be found
at in its module; by default it is set to the global scope. If this is
not correct, unpickling will fail in some circumstances.
`type`, if set, will be mixed in as the first base class.
| 167 | 39 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
if names is None: # simple value lookup
return cls.__new__(cls, value)
# otherwise, functional API: we're creating a new Enum type
return cls._create_(
value,
names,
module=module,
qualname=qualname,
type=type,
start=start,
)
```
###Assistant :
Either returns an existing member, or creates a new enum class.
This method is used both when an enum class is given a value to match
to an enumeration member (i.e. Color(3)) and for the functional API
(i.e. Color = Enum('Color', names='RED GREEN BLUE')).
When used for the functional API:
`value` will be the name of the new class.
`names` should be either a string of white-space/comma delimited names
(values will start at `start`), or an iterator/mapping of name, value pairs.
`module` should be set to the module this class is being created in;
if it is not set, an attempt to find that module will be made, but if
it fails the class will not be picklable.
`qualname` should be set to the actual location this class can be found
at in its module; by default it is set to the global scope. If this is
not correct, unpickling will fail in some circumstances.
`type`, if set, will be mixed in as the first base class.
|
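Both branches of this metaclass `__call__` can be seen from user code: value lookup on an existing enum, and the functional API that builds a new enum class:
```Python
from enum import Enum

class Color(Enum):
    RED = 1
    GREEN = 2
    BLUE = 3

assert Color(3) is Color.BLUE  # member-lookup branch (names is None)

# Functional-API branch: `value` becomes the class name, `names` the members.
Animal = Enum('Animal', 'CAT DOG BIRD', start=1)
assert Animal.DOG.value == 2
```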
2,807 | def edges2dot(edges, shapes=None, attr=None):
if not shapes:
shapes = dict()
if not attr:
attr = dict()
dot_string = "digraph G {\n"
for pair in attr.items():
dot_string += f"{pair[0]} = {pair[1]};\n"
for edge in edges:
for shape in shapes.items():
for node in range(2):
if shape[0] in repr(edge[node]):
dot_string += f'"{edge[node]}" [shape = {shape[1]}];\n'
dot_string += f'"{edge[0]}" -> "{edge[1]}";\n'
dot_string += "}\n"
return dot_string
|
:param edges: the set (or list) of edges of a directed graph.
:return dot_string: a representation of 'edges' as a string in the DOT
graph language, which can be converted to an image by the 'dot' program
from the Graphviz package, or nltk.parse.dependencygraph.dot2img(dot_string).
:param shapes: dictionary of strings that trigger a specified shape.
:param attr: dictionary with global graph attributes
>>> import nltk
>>> from nltk.util import edges2dot
>>> print(edges2dot([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B')]))
digraph G {
"A" -> "B";
"A" -> "C";
"B" -> "C";
"C" -> "B";
}
<BLANKLINE>
| 94 | 62 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def edges2dot(edges, shapes=None, attr=None):
if not shapes:
shapes = dict()
if not attr:
attr = dict()
dot_string = "digraph G {\n"
for pair in attr.items():
dot_string += f"{pair[0]} = {pair[1]};\n"
for edge in edges:
for shape in shapes.items():
for node in range(2):
if shape[0] in repr(edge[node]):
dot_string += f'"{edge[node]}" [shape = {shape[1]}];\n'
dot_string += f'"{edge[0]}" -> "{edge[1]}";\n'
dot_string += "}\n"
return dot_string
```
###Assistant :
:param edges: the set (or list) of edges of a directed graph.
:return dot_string: a representation of 'edges' as a string in the DOT
graph language, which can be converted to an image by the 'dot' program
from the Graphviz package, or nltk.parse.dependencygraph.dot2img(dot_string).
:param shapes: dictionary of strings that trigger a specified shape.
:param attr: dictionary with global graph attributes
>>> import nltk
>>> from nltk.util import edges2dot
>>> print(edges2dot([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B')]))
digraph G {
"A" -> "B";
"A" -> "C";
"B" -> "C";
"C" -> "B";
}
<BLANKLINE>
|
2,808 | def _draw_line_numbers(self):
if not self.line_numbers:
return
for p in range(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
|
Create drawables for the line numbers.
| 6 | 23 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _draw_line_numbers(self):
if not self.line_numbers:
return
for p in range(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
```
###Assistant :
Create drawables for the line numbers.
|
2,809 | def export(ctx, directory):
directory_path = Path(directory)
if directory_path.absolute == Path.cwd().absolute:
console.print(
,
style="red bold",
end="",
)
proceed = input().lower() == "y"
else:
proceed = True
if proceed:
if not directory_path.is_dir():
console.print(f"Creating folder: {directory}.", style="red bold")
directory_path.mkdir(parents=True)
ctx.invoke(write)
from_path = Path.cwd() / "manim.cfg"
to_path = directory_path / "manim.cfg"
console.print(f"Exported final Config at {from_path} to {to_path}.")
else:
console.print("Aborted...", style="red bold")
| You are reading the config from the same directory you are exporting to.
This means that the exported config will overwrite the config for this directory.
Are you sure you want to continue? (y/n) | 34 | 58 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def export(ctx, directory):
directory_path = Path(directory)
if directory_path.absolute == Path.cwd().absolute:
console.print(
,
style="red bold",
end="",
)
proceed = input().lower() == "y"
else:
proceed = True
if proceed:
if not directory_path.is_dir():
console.print(f"Creating folder: {directory}.", style="red bold")
directory_path.mkdir(parents=True)
ctx.invoke(write)
from_path = Path.cwd() / "manim.cfg"
to_path = directory_path / "manim.cfg"
console.print(f"Exported final Config at {from_path} to {to_path}.")
else:
console.print("Aborted...", style="red bold")
```
###Assistant : You are reading the config from the same directory you are exporting to.
This means that the exported config will overwrite the config for this directory.
Are you sure you want to continue? (y/n)
|
2,810 | def update_keymap(self, name, key):
if name not in self._tools:
raise KeyError(f'{name!r} not in Tools')
self._remove_keys(name)
if isinstance(key, str):
key = [key]
for k in key:
if k in self._keys:
_api.warn_external(
f'Key {k} changed from {self._keys[k]} to {name}')
self._keys[k] = name
|
Set the keymap to associate with the specified tool.
Parameters
----------
name : str
Name of the Tool.
key : str or list of str
Keys to associate with the tool.
| 31 | 40 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def update_keymap(self, name, key):
if name not in self._tools:
raise KeyError(f'{name!r} not in Tools')
self._remove_keys(name)
if isinstance(key, str):
key = [key]
for k in key:
if k in self._keys:
_api.warn_external(
f'Key {k} changed from {self._keys[k]} to {name}')
self._keys[k] = name
```
###Assistant :
Set the keymap to associate with the specified tool.
Parameters
----------
name : str
Name of the Tool.
key : str or list of str
Keys to associate with the tool.
|
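This looks like Matplotlib's `ToolManager.update_keymap`; a sketch of rebinding a key, assuming an interactive backend with the experimental `toolmanager` toolbar enabled:
```Python
import matplotlib
matplotlib.rcParams['toolbar'] = 'toolmanager'  # opt in to the ToolManager
import matplotlib.pyplot as plt

fig = plt.figure()
# Rebind the built-in 'zoom' tool to the 'z' key.
fig.canvas.manager.toolmanager.update_keymap('zoom', 'z')
```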
2,811 | def named_parameters(self, *args, **kwargs):
arch = kwargs.pop('arch', False)
for name, p in super().named_parameters(*args, **kwargs):
if any(name == par_name for par_name in self._arch_parameter_names):
if arch:
yield name, p
else:
if not arch:
yield name, p
| Named parameters excluding architecture parameters. | 5 | 34 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def named_parameters(self, *args, **kwargs):
arch = kwargs.pop('arch', False)
for name, p in super().named_parameters(*args, **kwargs):
if any(name == par_name for par_name in self._arch_parameter_names):
if arch:
yield name, p
else:
if not arch:
yield name, p
```
###Assistant : Named parameters excluding architecture parameters.
|
2,812 | def unzip3(xyzs):
# Note: we deliberately don't use zip(*xyzs) because it is lazily evaluated,
# is too permissive about inputs, and does not guarantee a length-3 output.
xs = []
ys = []
zs = []
for x, y, z in xyzs:
xs.append(x)
ys.append(y)
zs.append(z)
return tuple(xs), tuple(ys), tuple(zs)
| Unzip sequence of length-3 tuples into three tuples. | 8 | 49 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def unzip3(xyzs):
# Note: we deliberately don't use zip(*xyzs) because it is lazily evaluated,
# is too permissive about inputs, and does not guarantee a length-3 output.
xs = []
ys = []
zs = []
for x, y, z in xyzs:
xs.append(x)
ys.append(y)
zs.append(z)
return tuple(xs), tuple(ys), tuple(zs)
```
###Assistant : Unzip sequence of length-3 tuples into three tuples.
|
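Usage is straightforward; reusing `unzip3` from above, note that the result is always a triple of tuples, even for empty input:
```Python
pairs = [(1, 'a', True), (2, 'b', False), (3, 'c', True)]
xs, ys, zs = unzip3(pairs)
assert xs == (1, 2, 3)
assert ys == ('a', 'b', 'c')
assert zs == (True, False, True)
assert unzip3([]) == ((), (), ())  # zip(*[]) would lose the arity here
```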
2,813 | def test_json_get_subscribers_for_guest_user(self) -> None:
guest_user = self.example_user("polonius")
never_subscribed = gather_subscriptions_helper(guest_user, True).never_subscribed
# A guest user can only see never subscribed streams that are web-public.
# For Polonius, the only web-public stream that he is not subscribed to at
# this point is Rome.
self.assert_length(never_subscribed, 1)
web_public_stream_id = never_subscribed[0]["stream_id"]
result = self.client_get(f"/json/streams/{web_public_stream_id}/members")
self.assert_json_success(result)
result_dict = result.json()
self.assertIn("subscribers", result_dict)
self.assertIsInstance(result_dict["subscribers"], list)
self.assertGreater(len(result_dict["subscribers"]), 0)
|
Guest users should have access to subscribers of web-public streams, even
if they aren't subscribed or have never subscribed to that stream.
| 22 | 60 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_json_get_subscribers_for_guest_user(self) -> None:
guest_user = self.example_user("polonius")
never_subscribed = gather_subscriptions_helper(guest_user, True).never_subscribed
# A guest user can only see never subscribed streams that are web-public.
# For Polonius, the only web-public stream that he is not subscribed to at
# this point is Rome.
self.assert_length(never_subscribed, 1)
web_public_stream_id = never_subscribed[0]["stream_id"]
result = self.client_get(f"/json/streams/{web_public_stream_id}/members")
self.assert_json_success(result)
result_dict = result.json()
self.assertIn("subscribers", result_dict)
self.assertIsInstance(result_dict["subscribers"], list)
self.assertGreater(len(result_dict["subscribers"]), 0)
```
###Assistant :
Guest users should have access to subscribers of web-public streams, even
if they aren't subscribed or have never subscribed to that stream.
|
2,814 | def _reconnect(self):
if not self._isconnected():
try:
self._connect()
except http_client.socket.error as e:
# Attempt to reconnect if the connection was refused
if e.errno == 61 or e.errno == 10061:
# errno 61 is the "Connection Refused" error
time.sleep(self._delay)
self._delay += self._delay # exponential backoff: the delay doubles each retry
self._tries += 1
if self._tries < self.maxtries:
self._reconnect()
else:
self._reset_retries()
raise e
else:
# Unknown scenario
raise e
# Reconnect worked - reset _closed
self._closed = False
| Connect if disconnected.
Retry self.maxtries times with delays
| 8 | 70 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _reconnect(self):
if not self._isconnected():
try:
self._connect()
except http_client.socket.error as e:
# Attempt to reconnect if the connection was refused
if e.errno == 61 or e.errno == 10061:
# errno 61 is the "Connection Refused" error
time.sleep(self._delay)
self._delay += self._delay # exponential backoff: the delay doubles each retry
self._tries += 1
if self._tries < self.maxtries:
self._reconnect()
else:
self._reset_retries()
raise e
else:
# Unknown scenario
raise e
# Reconnect worked - reset _closed
self._closed = False
```
###Assistant : Connect if disconnected.
Retry self.maxtries times with delays
|
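A standalone sketch of the same retry idea without the class plumbing; `retry_connect` and its parameters are hypothetical names, and the delay doubles each attempt just as `self._delay += self._delay` does above:
```Python
import time

def retry_connect(connect, maxtries=5, delay=1.0):
    wait = delay
    for attempt in range(maxtries):
        try:
            return connect()
        except ConnectionRefusedError:
            if attempt == maxtries - 1:
                raise                 # out of retries: propagate the error
            time.sleep(wait)
            wait += wait              # 1, 2, 4, ... doubling backoff
```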
2,815 | def list_master(saltenv=None, prefix=""):
if not saltenv:
saltenv = __opts__["saltenv"] or "base"
return _client().file_list(saltenv, prefix)
|
.. versionchanged:: 3005
``saltenv`` will use value from config if not explicitly set
List all of the files stored on the master
CLI Example:
.. code-block:: bash
salt '*' cp.list_master
| 30 | 14 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def list_master(saltenv=None, prefix=""):
if not saltenv:
saltenv = __opts__["saltenv"] or "base"
return _client().file_list(saltenv, prefix)
```
###Assistant :
.. versionchanged:: 3005
``saltenv`` will use value from config if not explicitly set
List all of the files stored on the master
CLI Example:
.. code-block:: bash
salt '*' cp.list_master
|
2,816 | def test_bert_hf_tokenizer_parity(tmpdir, pretrained_model_name_or_path):
from ludwig.utils.tokenizers import get_hf_tokenizer, HFTokenizer
inputs = "Hello, ``I'm'' ónë of 1,205,000 sentences!"
hf_tokenizer = HFTokenizer(pretrained_model_name_or_path)
torchtext_tokenizer = get_hf_tokenizer(pretrained_model_name_or_path)
# Ensure that the tokenizer is scriptable
tokenizer_path = os.path.join(tmpdir, "tokenizer.pt")
torch.jit.script(torchtext_tokenizer).save(tokenizer_path)
torchtext_tokenizer = torch.jit.load(tokenizer_path)
token_ids_expected = hf_tokenizer(inputs)
token_ids = torchtext_tokenizer(inputs)
assert token_ids_expected == token_ids
| Tests the BERTTokenizer implementation.
Asserts both tokens and token IDs are the same by initializing the BERTTokenizer as a standalone tokenizer and as a
HF tokenizer.
| 26 | 47 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_bert_hf_tokenizer_parity(tmpdir, pretrained_model_name_or_path):
from ludwig.utils.tokenizers import get_hf_tokenizer, HFTokenizer
inputs = "Hello, ``I'm'' ónë of 1,205,000 sentences!"
hf_tokenizer = HFTokenizer(pretrained_model_name_or_path)
torchtext_tokenizer = get_hf_tokenizer(pretrained_model_name_or_path)
# Ensure that the tokenizer is scriptable
tokenizer_path = os.path.join(tmpdir, "tokenizer.pt")
torch.jit.script(torchtext_tokenizer).save(tokenizer_path)
torchtext_tokenizer = torch.jit.load(tokenizer_path)
token_ids_expected = hf_tokenizer(inputs)
token_ids = torchtext_tokenizer(inputs)
assert token_ids_expected == token_ids
```
###Assistant : Tests the BERTTokenizer implementation.
Asserts both tokens and token IDs are the same by initializing the BERTTokenizer as a standalone tokenizer and as a
HF tokenizer.
|
2,817 | def get_pe_matching_query(amount_condition, account_from_to, transaction):
# get matching payment entries query
if transaction.deposit > 0:
currency_field = "paid_to_account_currency as currency"
else:
currency_field = "paid_from_account_currency as currency"
return f
|
SELECT
(CASE WHEN reference_no=%(reference_no)s THEN 1 ELSE 0 END
+ CASE WHEN (party_type = %(party_type)s AND party = %(party)s ) THEN 1 ELSE 0 END
+ 1 ) AS rank,
'Payment Entry' as doctype,
name,
paid_amount,
reference_no,
reference_date,
party,
party_type,
posting_date,
{currency_field}
FROM
`tabPayment Entry`
WHERE
paid_amount {amount_condition} %(amount)s
AND docstatus = 1
AND payment_type IN (%(payment_type)s, 'Internal Transfer')
AND ifnull(clearance_date, '') = ""
AND {account_from_to} = %(bank_account)s
| 68 | 27 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_pe_matching_query(amount_condition, account_from_to, transaction):
# get matching payment entries query
if transaction.deposit > 0:
currency_field = "paid_to_account_currency as currency"
else:
currency_field = "paid_from_account_currency as currency"
return f
```
###Assistant :
SELECT
(CASE WHEN reference_no=%(reference_no)s THEN 1 ELSE 0 END
+ CASE WHEN (party_type = %(party_type)s AND party = %(party)s ) THEN 1 ELSE 0 END
+ 1 ) AS rank,
'Payment Entry' as doctype,
name,
paid_amount,
reference_no,
reference_date,
party,
party_type,
posting_date,
{currency_field}
FROM
`tabPayment Entry`
WHERE
paid_amount {amount_condition} %(amount)s
AND docstatus = 1
AND payment_type IN (%(payment_type)s, 'Internal Transfer')
AND ifnull(clearance_date, '') = ""
AND {account_from_to} = %(bank_account)s
|
2,818 | def _check_list_display_links(self, obj):
from django.contrib.admin.options import ModelAdmin
if obj.list_display_links is None:
return []
elif not isinstance(obj.list_display_links, (list, tuple)):
return must_be(
"a list, a tuple, or None",
option="list_display_links",
obj=obj,
id="admin.E110",
)
# Check only if ModelAdmin.get_list_display() isn't overridden.
elif obj.get_list_display.__func__ is ModelAdmin.get_list_display:
return list(
chain.from_iterable(
self._check_list_display_links_item(
obj, field_name, "list_display_links[%d]" % index
)
for index, field_name in enumerate(obj.list_display_links)
)
)
return []
| Check that list_display_links is a unique subset of list_display. | 9 | 60 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _check_list_display_links(self, obj):
from django.contrib.admin.options import ModelAdmin
if obj.list_display_links is None:
return []
elif not isinstance(obj.list_display_links, (list, tuple)):
return must_be(
"a list, a tuple, or None",
option="list_display_links",
obj=obj,
id="admin.E110",
)
# Check only if ModelAdmin.get_list_display() isn't overridden.
elif obj.get_list_display.__func__ is ModelAdmin.get_list_display:
return list(
chain.from_iterable(
self._check_list_display_links_item(
obj, field_name, "list_display_links[%d]" % index
)
for index, field_name in enumerate(obj.list_display_links)
)
)
return []
```
###Assistant : Check that list_display_links is a unique subset of list_display.
|
2,819 | def min_mireds(self) -> int:
min_mireds = self.cluster.get("color_temp_physical_min", self.MIN_MIREDS)
if min_mireds == 0:
self.warning(
"[Min mireds is 0, setting to %s] Please open an issue on the quirks repo to have this device corrected",
self.MIN_MIREDS,
)
min_mireds = self.MIN_MIREDS
return min_mireds
| Return the coldest color_temp that this channel supports. | 8 | 40 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def min_mireds(self) -> int:
min_mireds = self.cluster.get("color_temp_physical_min", self.MIN_MIREDS)
if min_mireds == 0:
self.warning(
"[Min mireds is 0, setting to %s] Please open an issue on the quirks repo to have this device corrected",
self.MIN_MIREDS,
)
min_mireds = self.MIN_MIREDS
return min_mireds
```
###Assistant : Return the coldest color_temp that this channel supports.
|
2,820 | def pop_screen(self) -> Screen:
screen_stack = self._screen_stack
if len(screen_stack) <= 1:
raise ScreenStackError(
"Can't pop screen; there must be at least one screen on the stack"
)
screen = screen_stack.pop()
screen.post_message_no_wait(events.ScreenSuspend(self))
self.screen._screen_resized(self.size)
self.screen.post_message_no_wait(events.ScreenResume(self))
return screen
| Pop the current screen from the stack, and switch to the previous screen.
Returns:
Screen: The screen that was replaced.
| 20 | 35 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def pop_screen(self) -> Screen:
screen_stack = self._screen_stack
if len(screen_stack) <= 1:
raise ScreenStackError(
"Can't pop screen; there must be at least one screen on the stack"
)
screen = screen_stack.pop()
screen.post_message_no_wait(events.ScreenSuspend(self))
self.screen._screen_resized(self.size)
self.screen.post_message_no_wait(events.ScreenResume(self))
return screen
```
###Assistant : Pop the current screen from the stack, and switch to the previous screen.
Returns:
Screen: The screen that was replaced.
|
2,821 | def test_notify_interested_services_ephemeral(self):
interested_service = self._mkservice(is_interested=True)
services = [interested_service]
self.mock_store.get_app_services.return_value = services
self.mock_store.get_type_stream_id_for_appservice.return_value = make_awaitable(
579
)
event = Mock(event_id="event_1")
self.event_source.sources.receipt.get_new_events_as.return_value = (
make_awaitable(([event], None))
)
self.handler.notify_interested_services_ephemeral(
"receipt_key", 580, ["@fakerecipient:example.com"]
)
self.mock_scheduler.enqueue_for_appservice.assert_called_once_with(
interested_service, ephemeral=[event]
)
self.mock_store.set_appservice_stream_type_pos.assert_called_once_with(
interested_service,
"read_receipt",
580,
)
|
Test sending ephemeral events to the appservice handler are scheduled
to be pushed out to interested appservices, and that the stream ID is
updated accordingly.
| 25 | 39 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_notify_interested_services_ephemeral(self):
interested_service = self._mkservice(is_interested=True)
services = [interested_service]
self.mock_store.get_app_services.return_value = services
self.mock_store.get_type_stream_id_for_appservice.return_value = make_awaitable(
579
)
event = Mock(event_id="event_1")
self.event_source.sources.receipt.get_new_events_as.return_value = (
make_awaitable(([event], None))
)
self.handler.notify_interested_services_ephemeral(
"receipt_key", 580, ["@fakerecipient:example.com"]
)
self.mock_scheduler.enqueue_for_appservice.assert_called_once_with(
interested_service, ephemeral=[event]
)
self.mock_store.set_appservice_stream_type_pos.assert_called_once_with(
interested_service,
"read_receipt",
580,
)
```
###Assistant :
Test sending ephemeral events to the appservice handler are scheduled
to be pushed out to interested appservices, and that the stream ID is
updated accordingly.
|
2,822 | def convert(self, value):
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, string_types):
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
|
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
| 33 | 74 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def convert(self, value):
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, string_types):
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
```
###Assistant :
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
|
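The prefix-dispatch at the end is easier to see in miniature. A hypothetical stripped-down configurator (the `env://` prefix, `MiniConfigurator`, and `env_convert` are invented for illustration):
```Python
import os
import re

class MiniConfigurator:
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
    value_converters = {'env': 'env_convert'}  # prefix -> method name

    def env_convert(self, suffix):
        return os.environ.get(suffix, '')

    def convert(self, value):
        if isinstance(value, str):
            m = self.CONVERT_PATTERN.match(value)
            if m:
                converter = self.value_converters.get(m.group('prefix'))
                if converter:
                    return getattr(self, converter)(m.group('suffix'))
        return value

print(MiniConfigurator().convert('env://HOME'))    # resolved via env_convert
print(MiniConfigurator().convert('plain string'))  # returned unchanged
```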
2,823 | def test_parse_transition(duration, parsed_duration):
css = f
stylesheet = Stylesheet()
stylesheet.parse(css)
rule = stylesheet.rules[0].styles
assert len(stylesheet.rules) == 1
assert len(stylesheet.rules[0].errors) == 0
assert rule.transitions == {
"offset": Transition(duration=parsed_duration, easing="in_out_cubic", delay=0.0)
}
| #some-widget {{
transition: offset {duration} in_out_cubic;
}}
| 7 | 30 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_parse_transition(duration, parsed_duration):
css = f
stylesheet = Stylesheet()
stylesheet.parse(css)
rule = stylesheet.rules[0].styles
assert len(stylesheet.rules) == 1
assert len(stylesheet.rules[0].errors) == 0
assert rule.transitions == {
"offset": Transition(duration=parsed_duration, easing="in_out_cubic", delay=0.0)
}
```
###Assistant : #some-widget {{
transition: offset {duration} in_out_cubic;
}}
|
2,824 | def test_meta_charset(self) -> None:
encodings = _get_html_media_encodings(
b,
"text/html",
)
self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"])
# A less well-formed version.
encodings = _get_html_media_encodings(
b,
"text/html",
)
self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"])
| A character encoding is found via the meta tag.
<html>
<head><meta charset="ascii">
</head>
</html>
<html>
<head>< meta charset = ascii>
</head>
</html>
| 22 | 29 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_meta_charset(self) -> None:
encodings = _get_html_media_encodings(
b,
"text/html",
)
self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"])
# A less well-formed version.
encodings = _get_html_media_encodings(
b,
"text/html",
)
self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"])
```
###Assistant : A character encoding is found via the meta tag.
<html>
<head><meta charset="ascii">
</head>
</html>
<html>
<head>< meta charset = ascii>
</head>
</html>
|
2,825 | def _build_amps_list(self, amp_value, processlist):
ret = []
try:
# Search in both cmdline and name (for kernel thread, see #1261)
for p in processlist:
if (re.search(amp_value.regex(), p['name']) is not None) or (
p['cmdline'] is not None
and p['cmdline'] != []
and re.search(amp_value.regex(), ' '.join(p['cmdline'])) is not None
):
ret.append(
{'pid': p['pid'], 'cpu_percent': p['cpu_percent'], 'memory_percent': p['memory_percent']}
)
except (TypeError, KeyError) as e:
logger.debug("Can not build AMPS list ({})".format(e))
return ret
| Return the AMPS process list according to the amp_value
Search application monitored processes by a regular expression
| 17 | 69 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _build_amps_list(self, amp_value, processlist):
ret = []
try:
# Search in both cmdline and name (for kernel thread, see #1261)
for p in processlist:
if (re.search(amp_value.regex(), p['name']) is not None) or (
p['cmdline'] is not None
and p['cmdline'] != []
and re.search(amp_value.regex(), ' '.join(p['cmdline'])) is not None
):
ret.append(
{'pid': p['pid'], 'cpu_percent': p['cpu_percent'], 'memory_percent': p['memory_percent']}
)
except (TypeError, KeyError) as e:
logger.debug("Can not build AMPS list ({})".format(e))
return ret
```
###Assistant : Return the AMPS process list according to the amp_value
Search application monitored processes by a regular expression
|
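At its core the method is a regex filter over process dicts; a self-contained sketch with made-up sample data:
```Python
import re

processlist = [
    {'pid': 1, 'name': 'nginx', 'cmdline': ['nginx', '-g', 'daemon off;'],
     'cpu_percent': 0.5, 'memory_percent': 1.2},
    {'pid': 2, 'name': 'kworker/0:1', 'cmdline': None,  # kernel thread
     'cpu_percent': 0.0, 'memory_percent': 0.0},
]

pattern = r'nginx'
matched = [
    {'pid': p['pid'], 'cpu_percent': p['cpu_percent'],
     'memory_percent': p['memory_percent']}
    for p in processlist
    if re.search(pattern, p['name'])
    or (p['cmdline'] and re.search(pattern, ' '.join(p['cmdline'])))
]
print(matched)  # only the nginx process survives the filter
```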
2,826 | def call(self, inputs, *args, **kwargs): # pylint: disable=unused-argument
return inputs
| This is where the layer's logic lives.
The `call()` method may not create state (except in its first invocation,
wrapping the creation of variables or other resources in `tf.init_scope()`).
It is recommended to create state in `__init__()`, or the `build()` method
that is called automatically before `call()` executes the first time.
Args:
inputs: Input tensor, or dict/list/tuple of input tensors.
The first positional `inputs` argument is subject to special rules:
- `inputs` must be explicitly passed. A layer cannot have zero
arguments, and `inputs` cannot be provided via the default value
of a keyword argument.
- NumPy array or Python scalar values in `inputs` get cast as tensors.
- Keras mask metadata is only collected from `inputs`.
- Layers are built (`build(input_shape)` method)
using shape info from `inputs` only.
- `input_spec` compatibility is only checked against `inputs`.
- Mixed precision input casting is only applied to `inputs`.
If a layer has tensor arguments in `*args` or `**kwargs`, their
casting behavior in mixed precision should be handled manually.
- The SavedModel input specification is generated using `inputs` only.
- Integration with various ecosystem packages like TFMOT, TFLite,
TF.js, etc is only supported for `inputs` and not for tensors in
positional and keyword arguments.
*args: Additional positional arguments. May contain tensors, although
this is not recommended, for the reasons above.
**kwargs: Additional keyword arguments. May contain tensors, although
this is not recommended, for the reasons above.
The following optional keyword arguments are reserved:
- `training`: Boolean scalar tensor of Python boolean indicating
whether the `call` is meant for training or inference.
- `mask`: Boolean input mask. If the layer's `call()` method takes a
`mask` argument, its default value will be set to the mask generated
for `inputs` by the previous layer (if `input` did come from a layer
that generated a corresponding mask, i.e. if it came from a Keras
layer with masking support).
Returns:
A tensor or list/tuple of tensors.
| 319 | 10 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def call(self, inputs, *args, **kwargs): # pylint: disable=unused-argument
return inputs
```
###Assistant : This is where the layer's logic lives.
The `call()` method may not create state (except in its first invocation,
wrapping the creation of variables or other resources in `tf.init_scope()`).
It is recommended to create state in `__init__()`, or the `build()` method
that is called automatically before `call()` executes the first time.
Args:
inputs: Input tensor, or dict/list/tuple of input tensors.
The first positional `inputs` argument is subject to special rules:
- `inputs` must be explicitly passed. A layer cannot have zero
arguments, and `inputs` cannot be provided via the default value
of a keyword argument.
- NumPy array or Python scalar values in `inputs` get cast as tensors.
- Keras mask metadata is only collected from `inputs`.
- Layers are built (`build(input_shape)` method)
using shape info from `inputs` only.
- `input_spec` compatibility is only checked against `inputs`.
- Mixed precision input casting is only applied to `inputs`.
If a layer has tensor arguments in `*args` or `**kwargs`, their
casting behavior in mixed precision should be handled manually.
- The SavedModel input specification is generated using `inputs` only.
- Integration with various ecosystem packages like TFMOT, TFLite,
TF.js, etc is only supported for `inputs` and not for tensors in
positional and keyword arguments.
*args: Additional positional arguments. May contain tensors, although
this is not recommended, for the reasons above.
**kwargs: Additional keyword arguments. May contain tensors, although
this is not recommended, for the reasons above.
The following optional keyword arguments are reserved:
- `training`: Boolean scalar tensor of Python boolean indicating
whether the `call` is meant for training or inference.
- `mask`: Boolean input mask. If the layer's `call()` method takes a
`mask` argument, its default value will be set to the mask generated
for `inputs` by the previous layer (if `input` did come from a layer
that generated a corresponding mask, i.e. if it came from a Keras
layer with masking support).
Returns:
A tensor or list/tuple of tensors.
|
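A minimal `Layer` subclass illustrating the contract the docstring describes: state is created in `build()`, while `call()` stays a pure computation on `inputs`:
```Python
import tensorflow as tf

class Scale(tf.keras.layers.Layer):
    def build(self, input_shape):
        # State belongs here (or in __init__), never in call().
        self.w = self.add_weight(shape=(), initializer='ones', name='w')

    def call(self, inputs):
        return inputs * self.w

layer = Scale()
print(layer(tf.constant([1.0, 2.0, 3.0])))  # built on first call, then scaled
```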
2,827 | def test_use_predictor_with_view(self, mock_handler):
# set integration data
df = pd.DataFrame([
{'a': 1, 'b': 'one'},
{'a': 2, 'b': 'two'},
{'a': 1, 'b': 'three'},
])
self.set_handler(mock_handler, name='pg', tables={'tasks': df})
view_name = 'vtasks'
# --- create view ---
ret = self.command_executor.execute_command(parse_sql(
f'create view {view_name} (select * from pg (select * from tasks))',
dialect='mindsdb')
)
assert ret.error_code is None
# --- use predictor ---
predicted_value = 3.14
predictor = {
'name': 'task_model',
'predict': 'p',
'dtypes': {
'p': dtype.float,
'a': dtype.integer,
'b': dtype.categorical
},
'predicted_value': predicted_value
}
self.set_predictor(predictor)
ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb'))
assert ret.error_code is None
# native query was called
assert mock_handler().native_query.mock_calls[0].args[0] == 'select * from tasks'
# check predictor call
# model was called
assert self.mock_model_interface.predict.mock_calls[0].args[0] == 'task_model'
# input = one row with a == 2
when_data = self.mock_model_interface.predict.mock_calls[0].args[1]
assert len(when_data) == 1
assert when_data[0]['a'] == 2
# check prediction
assert ret.data[0][0] == predicted_value
assert len(ret.data) == 1
|
select task_model.p
from views.{view_name}
join mindsdb.task_model
where {view_name}.a = 2
| 10 | 144 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_use_predictor_with_view(self, mock_handler):
# set integration data
df = pd.DataFrame([
{'a': 1, 'b': 'one'},
{'a': 2, 'b': 'two'},
{'a': 1, 'b': 'three'},
])
self.set_handler(mock_handler, name='pg', tables={'tasks': df})
view_name = 'vtasks'
# --- create view ---
ret = self.command_executor.execute_command(parse_sql(
f'create view {view_name} (select * from pg (select * from tasks))',
dialect='mindsdb')
)
assert ret.error_code is None
# --- use predictor ---
predicted_value = 3.14
predictor = {
'name': 'task_model',
'predict': 'p',
'dtypes': {
'p': dtype.float,
'a': dtype.integer,
'b': dtype.categorical
},
'predicted_value': predicted_value
}
self.set_predictor(predictor)
ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb'))
assert ret.error_code is None
# native query was called
assert mock_handler().native_query.mock_calls[0].args[0] == 'select * from tasks'
# check predictor call
# model was called
assert self.mock_model_interface.predict.mock_calls[0].args[0] == 'task_model'
# input = one row with a == 2
when_data = self.mock_model_interface.predict.mock_calls[0].args[1]
assert len(when_data) == 1
assert when_data[0]['a'] == 2
# check prediction
assert ret.data[0][0] == predicted_value
assert len(ret.data) == 1
```
###Assistant :
select task_model.p
from views.{view_name}
join mindsdb.task_model
where {view_name}.a = 2
|
2,828 | def test_vr_connector_shift_by_one(self):
view_rq_dict = {
"state": ViewRequirement("obs"),
"next_state": ViewRequirement(
"obs", shift=1, used_for_compute_actions=False
),
"prev_state": ViewRequirement("obs", shift=-1),
}
obs_arrs = np.arange(10)[:, None] + 1
config = PPOConfig().to_dict()
ctx = ConnectorContext(
view_requirements=view_rq_dict, config=config, is_policy_recurrent=True
)
c = ViewRequirementAgentConnector(ctx)
# keep a running list of observations
obs_list = []
for t, obs in enumerate(obs_arrs):
# t=0 is the next state of t=-1
data = AgentConnectorDataType(
0, 1, {SampleBatch.NEXT_OBS: obs, SampleBatch.T: t - 1}
)
processed = c([data]) # env.reset() for t == -1 else env.step()
for_action = processed[0].data.for_action
# add cur obs to the list
obs_list.append(obs)
if t == 0:
check(for_action["prev_state"], for_action["state"])
else:
# prev state should be equal to the prev time step obs
check(for_action["prev_state"], obs_list[-2][None])
| Test that the ViewRequirementConnector can handle a shift by one correctly and
can ignore future-referencing view_requirements to respect causality | 19 | 114 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_vr_connector_shift_by_one(self):
view_rq_dict = {
"state": ViewRequirement("obs"),
"next_state": ViewRequirement(
"obs", shift=1, used_for_compute_actions=False
),
"prev_state": ViewRequirement("obs", shift=-1),
}
obs_arrs = np.arange(10)[:, None] + 1
config = PPOConfig().to_dict()
ctx = ConnectorContext(
view_requirements=view_rq_dict, config=config, is_policy_recurrent=True
)
c = ViewRequirementAgentConnector(ctx)
# keep a running list of observations
obs_list = []
for t, obs in enumerate(obs_arrs):
# t=0 is the next state of t=-1
data = AgentConnectorDataType(
0, 1, {SampleBatch.NEXT_OBS: obs, SampleBatch.T: t - 1}
)
processed = c([data]) # env.reset() for t == -1 else env.step()
for_action = processed[0].data.for_action
# add cur obs to the list
obs_list.append(obs)
if t == 0:
check(for_action["prev_state"], for_action["state"])
else:
# prev state should be equal to the prev time step obs
check(for_action["prev_state"], obs_list[-2][None])
```
###Assistant : Test that the ViewRequirementConnector can handle a shift by one correctly and
can ignore future-referencing view_requirements to respect causality
|
2,829 | def adapt_error(cls, e):
return e._replace(message=e.message.format(*e.vars))[:4]
error = namedtuple("error", "lineno col message type vars")
Error = partial(partial, error, message="", type=SentryCheck, vars=())
S001 = Error(
message="S001: Avoid using the {} mock call as it is "
"confusing and prone to causing invalid test "
"behavior."
)
S001.methods = {
"not_called",
"called_once",
"called_once_with",
}
S002 = Error(message="S002: print functions or statements are not allowed.")
S003 = Error(message="S003: Use ``from sentry.utils import json`` instead.")
S003.modules = {"json", "simplejson"}
S003.names = {
"load",
"loads",
"dump",
"dumps",
"JSONEncoder",
"JSONDecodeError",
"_default_encoder",
}
| Adapts the extended error namedtuple to be compatible with Flake8. | 10 | 85 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def adapt_error(cls, e):
return e._replace(message=e.message.format(*e.vars))[:4]
error = namedtuple("error", "lineno col message type vars")
Error = partial(partial, error, message="", type=SentryCheck, vars=())
S001 = Error(
message="S001: Avoid using the {} mock call as it is "
"confusing and prone to causing invalid test "
"behavior."
)
S001.methods = {
"not_called",
"called_once",
"called_once_with",
}
S002 = Error(message="S002: print functions or statements are not allowed.")
S003 = Error(message="S003: Use ``from sentry.utils import json`` instead.")
S003.modules = {"json", "simplejson"}
S003.names = {
"load",
"loads",
"dump",
"dumps",
"JSONEncoder",
"JSONDecodeError",
"_default_encoder",
}
```
###Assistant : Adapts the extended error namedtuple to be compatible with Flake8.
|
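The `partial(partial, ...)` construction is dense; a small demo of how one template expands in two steps (`E001` and its message are hypothetical):
```Python
from collections import namedtuple
from functools import partial

error = namedtuple("error", "lineno col message type vars")
Error = partial(partial, error, message="", type=object, vars=())

# Step 1: bake the message into a partial of the namedtuple.
E001 = Error(message="E001: example issue with {}")
# Step 2: fill in the remaining fields at the reporting site.
record = E001(lineno=3, col=0, vars=("foo",))
print(record.message.format(*record.vars))  # -> E001: example issue with foo
```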
2,830 | def on_train_begin(self, logs=None):
logs = self._process_logs(logs)
for callback in self.callbacks:
callback.on_train_begin(logs)
| Calls the `on_train_begin` methods of its callbacks.
Args:
logs: Dict. Currently, no data is passed via this argument
for this method, but that may change in the future.
| 28 | 11 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def on_train_begin(self, logs=None):
logs = self._process_logs(logs)
for callback in self.callbacks:
callback.on_train_begin(logs)
```
###Assistant : Calls the `on_train_begin` methods of its callbacks.
Args:
logs: Dict. Currently, no data is passed via this argument
for this method, but that may change in the future.
|
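For context, a user-side Keras callback whose `on_train_begin` this dispatcher would invoke:
```Python
import tensorflow as tf

class TrainingBanner(tf.keras.callbacks.Callback):
    def on_train_begin(self, logs=None):
        # Called once by the callback container before the first epoch.
        print("training is starting; logs:", logs)
```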
2,831 | def test_decorate_validated_function():
decorated_function = deprecated()(_func)
with pytest.warns(FutureWarning, match="Function _func is deprecated"):
decorated_function(1, 2, c=3)
# outer decorator does not interfere with validation
with pytest.warns(FutureWarning, match="Function _func is deprecated"):
with pytest.raises(ValueError, match=r"The 'c' parameter of _func must be"):
decorated_function(1, 2, c="wrong")
| Check that validate_params functions can be decorated | 7 | 40 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_decorate_validated_function():
decorated_function = deprecated()(_func)
with pytest.warns(FutureWarning, match="Function _func is deprecated"):
decorated_function(1, 2, c=3)
# outer decorator does not interfere with validation
with pytest.warns(FutureWarning, match="Function _func is deprecated"):
with pytest.raises(ValueError, match=r"The 'c' parameter of _func must be"):
decorated_function(1, 2, c="wrong")
```
###Assistant : Check that validate_params functions can be decorated
|
2,832 | def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
axes.set_xlabel(xlabel), axes.set_ylabel(ylabel)
axes.set_xscale(xscale), axes.set_yscale(yscale)
axes.set_xlim(xlim), axes.set_ylim(ylim)
if legend:
axes.legend(legend)
axes.grid()
| Set the axes for matplotlib.
Defined in :numref:`sec_calculus` | 8 | 19 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
axes.set_xlabel(xlabel), axes.set_ylabel(ylabel)
axes.set_xscale(xscale), axes.set_yscale(yscale)
axes.set_xlim(xlim), axes.set_ylim(ylim)
if legend:
axes.legend(legend)
axes.grid()
```
###Assistant : Set the axes for matplotlib.
Defined in :numref:`sec_calculus`
|
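Typical use against a Matplotlib axes object, reusing `set_axes` from above:
```Python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 4, 9])
set_axes(ax, xlabel='x', ylabel='y', xlim=(1, 3), ylim=(0, 10),
         xscale='linear', yscale='linear', legend=['x squared'])
plt.show()
```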
2,833 | def register(self, addon):
api_changes = {
# mitmproxy 6 -> mitmproxy 7
"clientconnect": "client_connected",
"clientdisconnect": "client_disconnected",
"serverconnect": "server_connect and server_connected",
"serverdisconnect": "server_disconnected",
}
for a in traverse([addon]):
for old, new in api_changes.items():
if hasattr(a, old):
ctx.log.warn(f"The {old} event has been removed, use {new} instead. "
f"For more details, see https://docs.mitmproxy.org/stable/addons-events/.")
name = _get_name(a)
if name in self.lookup:
raise exceptions.AddonManagerError(
"An addon called '%s' already exists." % name
)
l = Loader(self.master)
self.invoke_addon_sync(addon, LoadHook(l))
for a in traverse([addon]):
name = _get_name(a)
self.lookup[name] = a
for a in traverse([addon]):
self.master.commands.collect_commands(a)
self.master.options.process_deferred()
return addon
|
Register an addon, call its load event, and then register all its
sub-addons. This should be used by addons that dynamically manage
addons.
If the calling addon is already running, it should follow with
running and configure events. Must be called within a current
context.
| 45 | 91 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def register(self, addon):
api_changes = {
# mitmproxy 6 -> mitmproxy 7
"clientconnect": "client_connected",
"clientdisconnect": "client_disconnected",
"serverconnect": "server_connect and server_connected",
"serverdisconnect": "server_disconnected",
}
for a in traverse([addon]):
for old, new in api_changes.items():
if hasattr(a, old):
ctx.log.warn(f"The {old} event has been removed, use {new} instead. "
f"For more details, see https://docs.mitmproxy.org/stable/addons-events/.")
name = _get_name(a)
if name in self.lookup:
raise exceptions.AddonManagerError(
"An addon called '%s' already exists." % name
)
l = Loader(self.master)
self.invoke_addon_sync(addon, LoadHook(l))
for a in traverse([addon]):
name = _get_name(a)
self.lookup[name] = a
for a in traverse([addon]):
self.master.commands.collect_commands(a)
self.master.options.process_deferred()
return addon
```
###Assistant :
Register an addon, call its load event, and then register all its
sub-addons. This should be used by addons that dynamically manage
addons.
If the calling addon is already running, it should follow with
running and configure events. Must be called within a current
context.
|
2,834 | def get_context_data(self, **kwargs):
from tickets.models import Ticket
from tickets.const import TICKET_DETAIL_URL
ticket_id = self.request.session.get("auth_ticket_id")
if not ticket_id:
ticket = None
else:
ticket = Ticket.all().filter(pk=ticket_id).first()
context = super().get_context_data(**kwargs)
if ticket:
timestamp_created = datetime.datetime.timestamp(ticket.date_created)
ticket_detail_url = TICKET_DETAIL_URL.format(id=ticket_id, type=ticket.type)
assignees = ticket.current_node.first().ticket_assignees.all()
assignees_display = ', '.join([str(i.assignee) for i in assignees])
msg = _().format(assignees_display)
else:
timestamp_created = 0
ticket_detail_url = ''
msg = _("No ticket found")
context.update({
"msg": msg,
"timestamp": timestamp_created,
"ticket_detail_url": ticket_detail_url
})
return context
@method_decorator(never_cache, name='dispatch') | Wait for <b>{}</b> to confirm. You can also copy the link and send it to them <br/>
Don't close this page | 16 | 74 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_context_data(self, **kwargs):
from tickets.models import Ticket
from tickets.const import TICKET_DETAIL_URL
ticket_id = self.request.session.get("auth_ticket_id")
if not ticket_id:
ticket = None
else:
ticket = Ticket.all().filter(pk=ticket_id).first()
context = super().get_context_data(**kwargs)
if ticket:
timestamp_created = datetime.datetime.timestamp(ticket.date_created)
ticket_detail_url = TICKET_DETAIL_URL.format(id=ticket_id, type=ticket.type)
assignees = ticket.current_node.first().ticket_assignees.all()
assignees_display = ', '.join([str(i.assignee) for i in assignees])
msg = _().format(assignees_display)
else:
timestamp_created = 0
ticket_detail_url = ''
msg = _("No ticket found")
context.update({
"msg": msg,
"timestamp": timestamp_created,
"ticket_detail_url": ticket_detail_url
})
return context
@method_decorator(never_cache, name='dispatch')
```
###Assistant : Wait for <b>{}</b> to confirm. You can also copy the link and send it to them <br/>
Don't close this page
|
2,835 | def tf_shard_checkpoint(weights, max_shard_size="10GB"):
max_shard_size = convert_file_size_to_int(max_shard_size)
sharded_state_dicts = []
current_block = []
current_block_size = 0
total_size = 0
for item in weights:
weight_size = item.numpy().size * dtype_byte_size(item.dtype)
# If this weight would tip the current block over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
sharded_state_dicts.append(current_block)
current_block = []
current_block_size = 0
current_block.append(item)
current_block_size += weight_size
total_size += weight_size
# Add the last block
sharded_state_dicts.append(current_block)
# If we only have one shard, we return it
if len(sharded_state_dicts) == 1:
return {TF2_WEIGHTS_NAME: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
weight_map = {}
shards = {}
for idx, shard in enumerate(sharded_state_dicts):
shard_file = TF2_WEIGHTS_NAME.replace(".h5", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.h5")
shards[shard_file] = shard
for weight in shard:
weight_name = weight.name
weight_map[weight_name] = shard_file
# Add the metadata
metadata = {"total_size": total_size}
index = {"metadata": metadata, "weight_map": weight_map}
return shards, index
|
Splits a model state dictionary into sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
given size.
The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no
optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the
limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],
[6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].
<Tip warning={true}>
If one of the model's weights is bigger than `max_shard_size`, it will end up in its own sub-checkpoint, which will
have a size greater than `max_shard_size`.
</Tip>
Args:
weights (`Dict[str, tf.ResourceVariable]`): The list of tf.ResourceVariable objects of a model to save.
max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
(like `"5MB"`).
| 158 | 137 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def tf_shard_checkpoint(weights, max_shard_size="10GB"):
max_shard_size = convert_file_size_to_int(max_shard_size)
sharded_state_dicts = []
current_block = []
current_block_size = 0
total_size = 0
for item in weights:
weight_size = item.numpy().size * dtype_byte_size(item.dtype)
# If this weight would tip the current block over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
sharded_state_dicts.append(current_block)
current_block = []
current_block_size = 0
current_block.append(item)
current_block_size += weight_size
total_size += weight_size
# Add the last block
sharded_state_dicts.append(current_block)
# If we only have one shard, we return it
if len(sharded_state_dicts) == 1:
return {TF2_WEIGHTS_NAME: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
weight_map = {}
shards = {}
for idx, shard in enumerate(sharded_state_dicts):
shard_file = TF2_WEIGHTS_NAME.replace(".h5", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.h5")
shards[shard_file] = shard
for weight in shard:
weight_name = weight.name
weight_map[weight_name] = shard_file
# Add the metadata
metadata = {"total_size": total_size}
index = {"metadata": metadata, "weight_map": weight_map}
return shards, index
```
###Assistant :
Splits a model state dictionary into sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
given size.
The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no
optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the
limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],
[6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].
<Tip warning={true}>
If one of the model's weights is bigger than `max_shard_size`, it will end up in its own sub-checkpoint, which will
have a size greater than `max_shard_size`.
</Tip>
Args:
weights (`Dict[str, tf.ResourceVariable]`): The list of tf.ResourceVariable objects of a model to save.
max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
(like `"5MB"`).
|
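The greedy packing step above is easy to see in isolation. Below is a minimal, framework-free sketch of the same loop; the weight names and byte sizes are invented stand-ins for real tensors, and the numbers reproduce the [6, 6, 2, 6, 2, 2] example from the docstring:
```Python
def shard_by_size(sizes, max_shard_size):
    # sizes: iterable of (name, byte_size) pairs.
    shards, block, block_size = [], [], 0
    for name, size in sizes:
        # Start a new shard when adding this weight would exceed the limit.
        if block and block_size + size > max_shard_size:
            shards.append(block)
            block, block_size = [], 0
        block.append(name)
        block_size += size
    shards.append(block)  # the last, possibly partial, block
    return shards

sizes = [("w0", 6), ("w1", 6), ("w2", 2), ("w3", 6), ("w4", 2), ("w5", 2)]
print(shard_by_size(sizes, 10))  # [['w0'], ['w1', 'w2'], ['w3', 'w4', 'w5']]
```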
2,836 | def scale(self, x=1, y=1, pt=None):
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
fx, fy = self.functions
return self.func((fx*x, fy*y), self.limits)
| Override GeometryEntity.scale since Curve is not made up of Points.
Returns
=======
Curve :
returns scaled curve.
Examples
========
>>> from sympy import Curve
>>> from sympy.abc import x
>>> Curve((x, x), (x, 0, 1)).scale(2)
Curve((2*x, x), (x, 0, 1))
| 40 | 22 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def scale(self, x=1, y=1, pt=None):
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
fx, fy = self.functions
return self.func((fx*x, fy*y), self.limits)
```
###Assistant : Override GeometryEntity.scale since Curve is not made up of Points.
Returns
=======
Curve :
returns scaled curve.
Examples
========
>>> from sympy import Curve
>>> from sympy.abc import x
>>> Curve((x, x), (x, 0, 1)).scale(2)
Curve((2*x, x), (x, 0, 1))
|
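The `pt` branch of `scale` composes translate → scale → translate. Working that composition through by hand for the docstring's curve (a sketch worked out here, not taken from the SymPy docs) should give:
```Python
>>> from sympy import Curve
>>> from sympy.abc import x
>>> # translate(-1, -1) -> (x - 1, x - 1); scale x by 2 -> (2*x - 2, x - 1); translate back
>>> Curve((x, x), (x, 0, 1)).scale(2, pt=(1, 1))
Curve((2*x - 1, x), (x, 0, 1))
```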
2,837 | def orient_body_fixed(self, parent, angles, rotation_order):
_check_frame(parent)
amounts = list(angles)
for i, v in enumerate(amounts):
if not isinstance(v, Vector):
amounts[i] = sympify(v)
approved_orders = ('123', '231', '312', '132', '213', '321', '121',
'131', '212', '232', '313', '323', '')
# make sure XYZ => 123
rot_order = translate(str(rotation_order), 'XYZxyz', '123123')
if rot_order not in approved_orders:
raise TypeError('The rotation order is not a valid order.')
parent_orient_body = []
if not (len(amounts) == 3 & len(rot_order) == 3):
raise TypeError('Body orientation takes 3 values & 3 orders')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
parent_orient_body = (self._rot(a1, amounts[0]) *
self._rot(a2, amounts[1]) *
self._rot(a3, amounts[2]))
self._dcm(parent, parent_orient_body)
try:
from sympy.polys.polyerrors import CoercionFailed
from sympy.physics.vector.functions import kinematic_equations
q1, q2, q3 = amounts
u1, u2, u3 = symbols('u1, u2, u3', cls=Dummy)
templist = kinematic_equations([u1, u2, u3], [q1, q2, q3],
'body', rot_order)
templist = [expand(i) for i in templist]
td = solve(templist, [u1, u2, u3])
u1 = expand(td[u1])
u2 = expand(td[u2])
u3 = expand(td[u3])
wvec = u1 * self.x + u2 * self.y + u3 * self.z
# NOTE : SymPy 1.7 removed the call to simplify() that occurred
# inside the solve() function, so this restores the pre-1.7
# behavior. See:
# https://github.com/sympy/sympy/issues/23140
# and
# https://github.com/sympy/sympy/issues/23130
wvec = wvec.simplify()
except (CoercionFailed, AssertionError):
wvec = self._w_diff_dcm(parent)
self._ang_vel_dict.update({parent: wvec})
parent._ang_vel_dict.update({self: -wvec})
self._var_dict = {}
| Rotates this reference frame relative to the parent reference frame
by right hand rotating through three successive body fixed simple axis
rotations. Each subsequent axis of rotation is about the "body fixed"
unit vectors of a new intermediate reference frame. This type of
rotation is also referred to as rotating through the `Euler and Tait-Bryan
Angles`_.
.. _Euler and Tait-Bryan Angles: https://en.wikipedia.org/wiki/Euler_angles
Parameters
==========
parent : ReferenceFrame
Reference frame that this reference frame will be rotated relative
to.
angles : 3-tuple of sympifiable
Three angles in radians used for the successive rotations.
rotation_order : 3 character string or 3 digit integer
Order of the rotations about each intermediate reference frames'
unit vectors. The Euler rotation about the X, Z', X'' axes can be
specified by the strings ``'XZX'``, ``'131'``, or the integer
``131``. There are 12 unique valid rotation orders (6 Euler and 6
Tait-Bryan): zxz, xyx, yzy, zyz, xzx, yxy, xyz, yzx, zxy, xzy, zyx,
and yxz.
Warns
======
UserWarning
If the orientation creates a kinematic loop.
Examples
========
Setup variables for the examples:
>>> from sympy import symbols
>>> from sympy.physics.vector import ReferenceFrame
>>> q1, q2, q3 = symbols('q1, q2, q3')
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
>>> B1 = ReferenceFrame('B1')
>>> B2 = ReferenceFrame('B2')
>>> B3 = ReferenceFrame('B3')
For example, a classic Euler Angle rotation can be done by:
>>> B.orient_body_fixed(N, (q1, q2, q3), 'XYX')
>>> B.dcm(N)
Matrix([
[ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)],
[sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)],
[sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]])
This rotates reference frame B relative to reference frame N through
``q1`` about ``N.x``, then rotates B again through ``q2`` about
``B.y``, and finally through ``q3`` about ``B.x``. It is equivalent to
three successive ``orient_axis()`` calls:
>>> B1.orient_axis(N, N.x, q1)
>>> B2.orient_axis(B1, B1.y, q2)
>>> B3.orient_axis(B2, B2.x, q3)
>>> B3.dcm(N)
Matrix([
[ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)],
[sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)],
[sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]])
Acceptable rotation orders are of length 3, expressed as a string
``'XYZ'`` or ``'123'`` or integer ``123``. Rotations about an axis
twice in a row are prohibited.
>>> B.orient_body_fixed(N, (q1, q2, 0), 'ZXZ')
>>> B.orient_body_fixed(N, (q1, q2, 0), '121')
>>> B.orient_body_fixed(N, (q1, q2, q3), 123)
| 365 | 217 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def orient_body_fixed(self, parent, angles, rotation_order):
_check_frame(parent)
amounts = list(angles)
for i, v in enumerate(amounts):
if not isinstance(v, Vector):
amounts[i] = sympify(v)
approved_orders = ('123', '231', '312', '132', '213', '321', '121',
'131', '212', '232', '313', '323', '')
# make sure XYZ => 123
rot_order = translate(str(rotation_order), 'XYZxyz', '123123')
if rot_order not in approved_orders:
raise TypeError('The rotation order is not a valid order.')
parent_orient_body = []
if not (len(amounts) == 3 & len(rot_order) == 3):
raise TypeError('Body orientation takes 3 values & 3 orders')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
parent_orient_body = (self._rot(a1, amounts[0]) *
self._rot(a2, amounts[1]) *
self._rot(a3, amounts[2]))
self._dcm(parent, parent_orient_body)
try:
from sympy.polys.polyerrors import CoercionFailed
from sympy.physics.vector.functions import kinematic_equations
q1, q2, q3 = amounts
u1, u2, u3 = symbols('u1, u2, u3', cls=Dummy)
templist = kinematic_equations([u1, u2, u3], [q1, q2, q3],
'body', rot_order)
templist = [expand(i) for i in templist]
td = solve(templist, [u1, u2, u3])
u1 = expand(td[u1])
u2 = expand(td[u2])
u3 = expand(td[u3])
wvec = u1 * self.x + u2 * self.y + u3 * self.z
# NOTE : SymPy 1.7 removed the call to simplify() that occurred
# inside the solve() function, so this restores the pre-1.7
# behavior. See:
# https://github.com/sympy/sympy/issues/23140
# and
# https://github.com/sympy/sympy/issues/23130
wvec = wvec.simplify()
except (CoercionFailed, AssertionError):
wvec = self._w_diff_dcm(parent)
self._ang_vel_dict.update({parent: wvec})
parent._ang_vel_dict.update({self: -wvec})
self._var_dict = {}
```
###Assistant : Rotates this reference frame relative to the parent reference frame
by right hand rotating through three successive body fixed simple axis
rotations. Each subsequent axis of rotation is about the "body fixed"
unit vectors of a new intermediate reference frame. This type of
rotation is also referred to as rotating through the `Euler and Tait-Bryan
Angles`_.
.. _Euler and Tait-Bryan Angles: https://en.wikipedia.org/wiki/Euler_angles
Parameters
==========
parent : ReferenceFrame
Reference frame that this reference frame will be rotated relative
to.
angles : 3-tuple of sympifiable
Three angles in radians used for the successive rotations.
rotation_order : 3 character string or 3 digit integer
Order of the rotations about each intermediate reference frames'
unit vectors. The Euler rotation about the X, Z', X'' axes can be
specified by the strings ``'XZX'``, ``'131'``, or the integer
``131``. There are 12 unique valid rotation orders (6 Euler and 6
Tait-Bryan): zxz, xyx, yzy, zyz, xzx, yxy, xyz, yzx, zxy, xzy, zyx,
and yxz.
Warns
======
UserWarning
If the orientation creates a kinematic loop.
Examples
========
Setup variables for the examples:
>>> from sympy import symbols
>>> from sympy.physics.vector import ReferenceFrame
>>> q1, q2, q3 = symbols('q1, q2, q3')
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
>>> B1 = ReferenceFrame('B1')
>>> B2 = ReferenceFrame('B2')
>>> B3 = ReferenceFrame('B3')
For example, a classic Euler Angle rotation can be done by:
>>> B.orient_body_fixed(N, (q1, q2, q3), 'XYX')
>>> B.dcm(N)
Matrix([
[ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)],
[sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)],
[sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]])
This rotates reference frame B relative to reference frame N through
``q1`` about ``N.x``, then rotates B again through ``q2`` about
``B.y``, and finally through ``q3`` about ``B.x``. It is equivalent to
three successive ``orient_axis()`` calls:
>>> B1.orient_axis(N, N.x, q1)
>>> B2.orient_axis(B1, B1.y, q2)
>>> B3.orient_axis(B2, B2.x, q3)
>>> B3.dcm(N)
Matrix([
[ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)],
[sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)],
[sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]])
Acceptable rotation orders are of length 3, expressed as a string
``'XYZ'`` or ``'123'`` or integer ``123``. Rotations about an axis
twice in a row are prohibited.
>>> B.orient_body_fixed(N, (q1, q2, 0), 'ZXZ')
>>> B.orient_body_fixed(N, (q1, q2, 0), '121')
>>> B.orient_body_fixed(N, (q1, q2, q3), 123)
|
2,838 | def get_applicable_block_lists(employee=None, company=None, all_lists=False):
block_lists = []
if not employee:
employee = frappe.db.get_value("Employee", {"user_id": frappe.session.user})
if not employee:
return []
if not company:
company = frappe.db.get_value("Employee", employee, "company")
def add_block_list(block_list):
if block_list:
if all_lists or not is_user_in_allow_list(block_list):
block_lists.append(block_list)
# per department
department = frappe.db.get_value("Employee", employee, "department")
if department:
block_list = frappe.db.get_value("Department", department, "leave_block_list")
add_block_list(block_list)
# global
for block_list in frappe.db.sql_list(
,
company,
):
add_block_list(block_list)
return list(set(block_lists))
| select name from `tabLeave Block List`
where applies_to_all_departments=1 and company=%s | 10 | 66 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_applicable_block_lists(employee=None, company=None, all_lists=False):
block_lists = []
if not employee:
employee = frappe.db.get_value("Employee", {"user_id": frappe.session.user})
if not employee:
return []
if not company:
company = frappe.db.get_value("Employee", employee, "company")
def add_block_list(block_list):
if block_list:
if all_lists or not is_user_in_allow_list(block_list):
block_lists.append(block_list)
# per department
department = frappe.db.get_value("Employee", employee, "department")
if department:
block_list = frappe.db.get_value("Department", department, "leave_block_list")
add_block_list(block_list)
# global
for block_list in frappe.db.sql_list(
,
company,
):
add_block_list(block_list)
return list(set(block_lists))
```
###Assistant : select name from `tabLeave Block List`
where applies_to_all_departments=1 and company=%s
|
2,839 | def _setup_connection(self):
# noqa
cur = self.connection.cursor()
if ('store',) not in list(cur.execute("SELECT name FROM sqlite_master WHERE type='table';")):
cur.execute(
)
self.internal_registry.commit()
| Checks that a key-value table exists, otherwise creates it. create table store (key text, value text) | 16 | 20 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _setup_connection(self):
# noqa
cur = self.connection.cursor()
if ('store',) not in list(cur.execute("SELECT name FROM sqlite_master WHERE type='table';")):
cur.execute(
)
self.internal_registry.commit()
```
###Assistant : Checks that a key-value table exists, otherwise creates it. create table store (key text, value text)
|
2,840 | def cull(dsk, keys):
if not isinstance(keys, (list, set)):
keys = [keys]
seen = set()
dependencies = dict()
out = {}
work = list(set(flatten(keys)))
while work:
new_work = []
for k in work:
dependencies_k = get_dependencies(dsk, k, as_list=True) # fuse needs lists
out[k] = dsk[k]
dependencies[k] = dependencies_k
for d in dependencies_k:
if d not in seen:
seen.add(d)
new_work.append(d)
work = new_work
return out, dependencies
| Return new dask with only the tasks required to calculate keys.
In other words, remove unnecessary tasks from dask.
``keys`` may be a single key or list of keys.
Examples
--------
>>> def inc(x):
... return x + 1
>>> def add(x, y):
... return x + y
>>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)}
>>> dsk, dependencies = cull(d, 'out')
>>> dsk # doctest: +ELLIPSIS
{'out': (<function add at ...>, 'x', 10), 'x': 1}
>>> dependencies # doctest: +ELLIPSIS
{'out': ['x'], 'x': []}
Returns
-------
dsk: culled dask graph
dependencies: Dict mapping {key: [deps]}. Useful side effect to accelerate
other optimizations, notably fuse.
| 109 | 64 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def cull(dsk, keys):
if not isinstance(keys, (list, set)):
keys = [keys]
seen = set()
dependencies = dict()
out = {}
work = list(set(flatten(keys)))
while work:
new_work = []
for k in work:
dependencies_k = get_dependencies(dsk, k, as_list=True) # fuse needs lists
out[k] = dsk[k]
dependencies[k] = dependencies_k
for d in dependencies_k:
if d not in seen:
seen.add(d)
new_work.append(d)
work = new_work
return out, dependencies
```
###Assistant : Return new dask with only the tasks required to calculate keys.
In other words, remove unnecessary tasks from dask.
``keys`` may be a single key or list of keys.
Examples
--------
>>> def inc(x):
... return x + 1
>>> def add(x, y):
... return x + y
>>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)}
>>> dsk, dependencies = cull(d, 'out')
>>> dsk # doctest: +ELLIPSIS
{'out': (<function add at ...>, 'x', 10), 'x': 1}
>>> dependencies # doctest: +ELLIPSIS
{'out': ['x'], 'x': []}
Returns
-------
dsk: culled dask graph
dependencies: Dict mapping {key: [deps]}. Useful side effect to accelerate
other optimizations, notably fuse.
|
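The traversal `cull` performs is a plain reachability walk. A dependency-free sketch (with a simplified task format in which any tuple element that is also a key counts as a dependency) shows the same idea:
```Python
def simple_cull(dsk, key):
    # Walk from `key`, keeping only tasks reachable from it.
    out, work = {}, [key]
    while work:
        k = work.pop()
        out[k] = dsk[k]
        deps = [d for d in dsk[k][1:] if d in dsk] if isinstance(dsk[k], tuple) else []
        work.extend(d for d in deps if d not in out)
    return out

def inc(x): return x + 1
def add(x, y): return x + y

d = {"x": 1, "y": (inc, "x"), "out": (add, "x", 10)}
print(sorted(simple_cull(d, "out")))  # ['out', 'x'] -- 'y' is culled
```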
2,841 | def fill_leverage_tiers(self) -> None:
leverage_tiers = self.load_leverage_tiers()
for pair, tiers in leverage_tiers.items():
parsed = []  # collect results separately so the `tiers` list just unpacked isn't clobbered
for tier in tiers:
parsed.append(self.parse_leverage_tier(tier))
self._leverage_tiers[pair] = parsed
|
Assigns property _leverage_tiers to a dictionary of information about the leverage
allowed on each pair
| 15 | 23 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def fill_leverage_tiers(self) -> None:
leverage_tiers = self.load_leverage_tiers()
for pair, tiers in leverage_tiers.items():
parsed = []  # collect results separately so the `tiers` list just unpacked isn't clobbered
for tier in tiers:
parsed.append(self.parse_leverage_tier(tier))
self._leverage_tiers[pair] = parsed
```
###Assistant :
Assigns property _leverage_tiers to a dictionary of information about the leverage
allowed on each pair
|
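The rename to `parsed` above avoids a shadowing bug: the upstream version rebound `tiers` inside the loop, so nothing was ever parsed. A standalone demonstration of that pitfall, with made-up sample data:
```Python
leverage_tiers = {"BTC/USDT:USDT": [{"tier": 1}]}  # invented sample data
for pair, tiers in leverage_tiers.items():
    tiers = []          # rebinding shadows the list just unpacked...
    for tier in tiers:  # ...so this inner loop never executes
        raise AssertionError("never reached")
print("parsed nothing")  # parsed nothing
```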
2,842 | def kronecker_product(*matrices):
if not matrices:
raise TypeError("Empty Kronecker product is undefined")
validate(*matrices)
if len(matrices) == 1:
return matrices[0]
else:
return KroneckerProduct(*matrices).doit()
|
The Kronecker product of two or more arguments.
This computes the explicit Kronecker product for subclasses of
``MatrixBase`` i.e. explicit matrices. Otherwise, a symbolic
``KroneckerProduct`` object is returned.
Examples
========
For ``MatrixSymbol`` arguments a ``KroneckerProduct`` object is returned.
Elements of this matrix can be obtained by indexing, or for MatrixSymbols
with known dimension the explicit matrix can be obtained with
``.as_explicit()``
>>> from sympy import kronecker_product, MatrixSymbol
>>> A = MatrixSymbol('A', 2, 2)
>>> B = MatrixSymbol('B', 2, 2)
>>> kronecker_product(A)
A
>>> kronecker_product(A, B)
KroneckerProduct(A, B)
>>> kronecker_product(A, B)[0, 1]
A[0, 0]*B[0, 1]
>>> kronecker_product(A, B).as_explicit()
Matrix([
[A[0, 0]*B[0, 0], A[0, 0]*B[0, 1], A[0, 1]*B[0, 0], A[0, 1]*B[0, 1]],
[A[0, 0]*B[1, 0], A[0, 0]*B[1, 1], A[0, 1]*B[1, 0], A[0, 1]*B[1, 1]],
[A[1, 0]*B[0, 0], A[1, 0]*B[0, 1], A[1, 1]*B[0, 0], A[1, 1]*B[0, 1]],
[A[1, 0]*B[1, 0], A[1, 0]*B[1, 1], A[1, 1]*B[1, 0], A[1, 1]*B[1, 1]]])
For explicit matrices the Kronecker product is returned as a Matrix
>>> from sympy import Matrix, kronecker_product
>>> sigma_x = Matrix([
... [0, 1],
... [1, 0]])
...
>>> Isigma_y = Matrix([
... [0, 1],
... [-1, 0]])
...
>>> kronecker_product(sigma_x, Isigma_y)
Matrix([
[ 0, 0, 0, 1],
[ 0, 0, -1, 0],
[ 0, 1, 0, 0],
[-1, 0, 0, 0]])
See Also
========
KroneckerProduct
| 212 | 21 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def kronecker_product(*matrices):
if not matrices:
raise TypeError("Empty Kronecker product is undefined")
validate(*matrices)
if len(matrices) == 1:
return matrices[0]
else:
return KroneckerProduct(*matrices).doit()
```
###Assistant :
The Kronecker product of two or more arguments.
This computes the explicit Kronecker product for subclasses of
``MatrixBase`` i.e. explicit matrices. Otherwise, a symbolic
``KroneckerProduct`` object is returned.
Examples
========
For ``MatrixSymbol`` arguments a ``KroneckerProduct`` object is returned.
Elements of this matrix can be obtained by indexing, or for MatrixSymbols
with known dimension the explicit matrix can be obtained with
``.as_explicit()``
>>> from sympy import kronecker_product, MatrixSymbol
>>> A = MatrixSymbol('A', 2, 2)
>>> B = MatrixSymbol('B', 2, 2)
>>> kronecker_product(A)
A
>>> kronecker_product(A, B)
KroneckerProduct(A, B)
>>> kronecker_product(A, B)[0, 1]
A[0, 0]*B[0, 1]
>>> kronecker_product(A, B).as_explicit()
Matrix([
[A[0, 0]*B[0, 0], A[0, 0]*B[0, 1], A[0, 1]*B[0, 0], A[0, 1]*B[0, 1]],
[A[0, 0]*B[1, 0], A[0, 0]*B[1, 1], A[0, 1]*B[1, 0], A[0, 1]*B[1, 1]],
[A[1, 0]*B[0, 0], A[1, 0]*B[0, 1], A[1, 1]*B[0, 0], A[1, 1]*B[0, 1]],
[A[1, 0]*B[1, 0], A[1, 0]*B[1, 1], A[1, 1]*B[1, 0], A[1, 1]*B[1, 1]]])
For explicit matrices the Kronecker product is returned as a Matrix
>>> from sympy import Matrix, kronecker_product
>>> sigma_x = Matrix([
... [0, 1],
... [1, 0]])
...
>>> Isigma_y = Matrix([
... [0, 1],
... [-1, 0]])
...
>>> kronecker_product(sigma_x, Isigma_y)
Matrix([
[ 0, 0, 0, 1],
[ 0, 0, -1, 0],
[ 0, 1, 0, 0],
[-1, 0, 0, 0]])
See Also
========
KroneckerProduct
|
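The explicit 4x4 result in the docstring can be cross-checked numerically with NumPy's `kron` (same matrices, no SymPy involved):
```Python
import numpy as np

sigma_x = np.array([[0, 1], [1, 0]])
i_sigma_y = np.array([[0, 1], [-1, 0]])
print(np.kron(sigma_x, i_sigma_y))
# [[ 0  0  0  1]
#  [ 0  0 -1  0]
#  [ 0  1  0  0]
#  [-1  0  0  0]]
```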
2,843 | def _get_obj_does_not_exist_redirect(self, request, opts, object_id):
msg = _("%(name)s with ID “%(key)s” doesn’t exist. Perhaps it was deleted?") % {
"name": opts.verbose_name,
"key": unquote(object_id),
}
self.message_user(request, msg, messages.WARNING)
url = reverse("admin:index", current_app=self.admin_site.name)
return HttpResponseRedirect(url)
|
Create a message informing the user that the object doesn't exist
and return a redirect to the admin index page.
| 20 | 33 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _get_obj_does_not_exist_redirect(self, request, opts, object_id):
msg = _("%(name)s with ID “%(key)s” doesn’t exist. Perhaps it was deleted?") % {
"name": opts.verbose_name,
"key": unquote(object_id),
}
self.message_user(request, msg, messages.WARNING)
url = reverse("admin:index", current_app=self.admin_site.name)
return HttpResponseRedirect(url)
```
###Assistant :
Create a message informing the user that the object doesn't exist
and return a redirect to the admin index page.
|
2,844 | def compare_total(self, other, context=None):
other = _convert_other(other, raiseit=True)
# if one is negative and the other is positive, it's easy
if self._sign and not other._sign:
return _NegativeOne
if not self._sign and other._sign:
return _One
sign = self._sign
# let's handle both NaN types
self_nan = self._isnan()
other_nan = other._isnan()
if self_nan or other_nan:
if self_nan == other_nan:
# compare payloads as though they're integers
self_key = len(self._int), self._int
other_key = len(other._int), other._int
if self_key < other_key:
if sign:
return _One
else:
return _NegativeOne
if self_key > other_key:
if sign:
return _NegativeOne
else:
return _One
return _Zero
if sign:
if self_nan == 1:
return _NegativeOne
if other_nan == 1:
return _One
if self_nan == 2:
return _NegativeOne
if other_nan == 2:
return _One
else:
if self_nan == 1:
return _One
if other_nan == 1:
return _NegativeOne
if self_nan == 2:
return _One
if other_nan == 2:
return _NegativeOne
if self < other:
return _NegativeOne
if self > other:
return _One
if self._exp < other._exp:
if sign:
return _One
else:
return _NegativeOne
if self._exp > other._exp:
if sign:
return _NegativeOne
else:
return _One
return _Zero
| Compares self to other using the abstract representations.
This is not like the standard compare, which uses the numerical
value. Note that a total ordering is defined for all possible abstract
representations.
| 32 | 183 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def compare_total(self, other, context=None):
other = _convert_other(other, raiseit=True)
# if one is negative and the other is positive, it's easy
if self._sign and not other._sign:
return _NegativeOne
if not self._sign and other._sign:
return _One
sign = self._sign
# let's handle both NaN types
self_nan = self._isnan()
other_nan = other._isnan()
if self_nan or other_nan:
if self_nan == other_nan:
# compare payloads as though they're integers
self_key = len(self._int), self._int
other_key = len(other._int), other._int
if self_key < other_key:
if sign:
return _One
else:
return _NegativeOne
if self_key > other_key:
if sign:
return _NegativeOne
else:
return _One
return _Zero
if sign:
if self_nan == 1:
return _NegativeOne
if other_nan == 1:
return _One
if self_nan == 2:
return _NegativeOne
if other_nan == 2:
return _One
else:
if self_nan == 1:
return _One
if other_nan == 1:
return _NegativeOne
if self_nan == 2:
return _One
if other_nan == 2:
return _NegativeOne
if self < other:
return _NegativeOne
if self > other:
return _One
if self._exp < other._exp:
if sign:
return _One
else:
return _NegativeOne
if self._exp > other._exp:
if sign:
return _NegativeOne
else:
return _One
return _Zero
```
###Assistant : Compares self to other using the abstract representations.
This is not like the standard compare, which uses the numerical
value. Note that a total ordering is defined for all possible abstract
representations.
|
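This helper mirrors the standard library's `decimal.Decimal.compare_total`, so the behaviour can be checked directly; note the exponent tie-break and the NaN ordering:
```Python
from decimal import Decimal

# Equal values: the operand with the lower exponent sorts first.
print(Decimal("12.0").compare_total(Decimal("12")))  # -1
print(Decimal("12").compare_total(Decimal("12.0")))  # 1
# Negative NaNs sort below every finite number in the total ordering.
print(Decimal("-NaN").compare_total(Decimal("12")))  # -1
```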
2,845 | def detect_current_mode(cls) -> _FaultTolerantMode:
env_value = os.getenv("PL_FAULT_TOLERANT_TRAINING", "0").lower()
# the int values are kept for backwards compatibility, but long-term we want to keep only the strings
if env_value in ("0", "disabled"):
return _FaultTolerantMode.DISABLED
elif env_value in ("1", "automatic"):
return _FaultTolerantMode.AUTOMATIC
elif env_value in ("2", "manual"):
return _FaultTolerantMode.MANUAL
raise MisconfigurationException(
"The environment flag `PL_FAULT_TOLERANT_TRAINING` should be either 'disabled', 'automatic', or 'manual'."
)
| This classmethod detects if `Fault Tolerant` is activated and maps its value to `_FaultTolerantMode`. | 14 | 61 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def detect_current_mode(cls) -> _FaultTolerantMode:
env_value = os.getenv("PL_FAULT_TOLERANT_TRAINING", "0").lower()
# the int values are kept for backwards compatibility, but long-term we want to keep only the strings
if env_value in ("0", "disabled"):
return _FaultTolerantMode.DISABLED
elif env_value in ("1", "automatic"):
return _FaultTolerantMode.AUTOMATIC
elif env_value in ("2", "manual"):
return _FaultTolerantMode.MANUAL
raise MisconfigurationException(
"The environment flag `PL_FAULT_TOLERANT_TRAINING` should be either 'disabled', 'automatic', or 'manual'."
)
```
###Assistant : This classmethod detects if `Fault Tolerant` is activated and maps its value to `_FaultTolerantMode`.
|
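The env-var-to-enum pattern generalises; below is a self-contained sketch with invented names and aliases, not Lightning's actual API:
```Python
import os
from enum import Enum

class Mode(Enum):
    DISABLED = "disabled"
    AUTOMATIC = "automatic"
    MANUAL = "manual"

def detect(env_var="PL_FAULT_TOLERANT_TRAINING"):
    value = os.getenv(env_var, "0").lower()
    legacy = {"0": Mode.DISABLED, "1": Mode.AUTOMATIC, "2": Mode.MANUAL}
    if value in legacy:  # int values kept for backwards compatibility
        return legacy[value]
    return Mode(value)   # raises ValueError for unrecognised values

print(detect())  # Mode.DISABLED when the variable is unset
```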
2,846 | def apply_channel_shift(x, intensity, channel_axis=0):
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + intensity, min_x, max_x) for x_channel in x
]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
@keras_export("keras.preprocessing.image.random_channel_shift") | Performs a channel shift.
Args:
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
Returns:
Numpy image tensor.
| 28 | 41 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def apply_channel_shift(x, intensity, channel_axis=0):
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + intensity, min_x, max_x) for x_channel in x
]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
@keras_export("keras.preprocessing.image.random_channel_shift")
```
###Assistant : Performs a channel shift.
Args:
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
Returns:
Numpy image tensor.
|
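A quick demonstration of the clipping behaviour, assuming the function is importable under the `tf.keras` preprocessing path:
```Python
import numpy as np
from tensorflow.keras.preprocessing.image import apply_channel_shift  # assumed export path

x = np.arange(12, dtype="float32").reshape((3, 2, 2))  # values 0..11
shifted = apply_channel_shift(x, intensity=5.0, channel_axis=0)
print(shifted.min(), shifted.max())  # 5.0 11.0 -- shifted values are clipped to [0, 11]
```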
2,847 | def _nested_gather(self, tensors, name=None):
if tensors is None:
return
if self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return tensors
# Copied from Accelerate. |
Gather the values of `tensors` (a tensor or a list/tuple of nested tensors), converting them to numpy before
concatenating them into `gathered`
| 20 | 22 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _nested_gather(self, tensors, name=None):
if tensors is None:
return
if self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return tensors
# Copied from Accelerate.
```
###Assistant :
Gather the values of `tensors` (a tensor or a list/tuple of nested tensors), converting them to numpy before
concatenating them into `gathered`
|
2,848 | def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset):
model, X, y, coef, _, _, _ = glm_dataset
n_samples, n_features = X.shape
alpha = 0 # unpenalized
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
# solver=solver, # only lbfgs available
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
if fit_intercept:
X = X[:, :-1] # remove intercept
intercept = coef[-1]
coef = coef[:-1]
else:
intercept = 0
model.fit(X, y)
# FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails
# for the wide/fat case with n_features > n_samples. Most current GLM solvers do
# NOT return the minimum norm solution with fit_intercept=True.
rtol = 5e-5
if n_samples > n_features:
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef, rtol=rtol)
else:
# As it is an underdetermined problem, prediction = y. The following shows that
# we get a solution, i.e. a (non-unique) minimum of the objective function ...
assert_allclose(model.predict(X), y, rtol=1e-6)
if fit_intercept:
# But it is not the minimum norm solution. Otherwise the norms would be
# equal.
norm_solution = np.linalg.norm(np.r_[intercept, coef])
norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
assert norm_model > (1 + 1e-12) * norm_solution
# See https://github.com/scikit-learn/scikit-learn/issues/23670.
# Note: Even adding a tiny penalty does not give the minimal norm solution.
# XXX: We could have naively expected LBFGS to find the minimal norm
# solution by adding a very small penalty. Even that fails for a reason we
# do not properly understand at this point.
else:
# When `fit_intercept=False`, LBFGS naturally converges to the minimum norm
# solution on this problem.
# XXX: Do we have any theoretical guarantees why this should be the case?
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef, rtol=rtol)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False]) | Test that unpenalized GLM converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
Note: This checks the minimum norm solution for wide X, i.e.
n_samples < n_features:
min ||w||_2 subject to w = argmin deviance(X, y, w)
| 46 | 269 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset):
model, X, y, coef, _, _, _ = glm_dataset
n_samples, n_features = X.shape
alpha = 0 # unpenalized
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
# solver=solver, # only lbfgs available
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
if fit_intercept:
X = X[:, :-1] # remove intercept
intercept = coef[-1]
coef = coef[:-1]
else:
intercept = 0
model.fit(X, y)
# FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails
# for the wide/fat case with n_features > n_samples. Most current GLM solvers do
# NOT return the minimum norm solution with fit_intercept=True.
rtol = 5e-5
if n_samples > n_features:
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef, rtol=rtol)
else:
# As it is an underdetermined problem, prediction = y. The following shows that
# we get a solution, i.e. a (non-unique) minimum of the objective function ...
assert_allclose(model.predict(X), y, rtol=1e-6)
if fit_intercept:
# But it is not the minimum norm solution. Otherwise the norms would be
# equal.
norm_solution = np.linalg.norm(np.r_[intercept, coef])
norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
assert norm_model > (1 + 1e-12) * norm_solution
# See https://github.com/scikit-learn/scikit-learn/issues/23670.
# Note: Even adding a tiny penalty does not give the minimal norm solution.
# XXX: We could have naively expected LBFGS to find the minimal norm
# solution by adding a very small penalty. Even that fails for a reason we
# do not properly understand at this point.
else:
# When `fit_intercept=False`, LBFGS naturally converges to the minimum norm
# solution on this problem.
# XXX: Do we have any theoretical guarantees why this should be the case?
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef, rtol=rtol)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
```
###Assistant : Test that unpenalized GLM converges for all solvers to the correct solution.
We work with a simple constructed data set with known solution.
Note: This checks the minimum norm solution for wide X, i.e.
n_samples < n_features:
min ||w||_2 subject to w = argmin deviance(X, y, w)
|
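The "minimum norm solution" the test reasons about can be seen directly with NumPy, whose `lstsq` returns it for underdetermined systems (toy random data here, not the test's `glm_dataset`):
```Python
import numpy as np
from scipy.linalg import null_space

rng = np.random.default_rng(0)
X = rng.standard_normal((3, 5))  # n_samples < n_features: underdetermined
y = rng.standard_normal(3)

w, *_ = np.linalg.lstsq(X, y, rcond=None)  # the minimum-norm interpolator
print(np.allclose(X @ w, y))               # True

# Any null-space direction gives another exact interpolator with a larger norm.
w2 = w + null_space(X)[:, 0]
print(np.allclose(X @ w2, y), np.linalg.norm(w2) > np.linalg.norm(w))  # True True
```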
2,849 | def _node_redundancy(G, v):
n = len(G[v])
overlap = sum(
1 for (u, w) in combinations(G[v], 2) if (G[u].keys() & G[w].keys()) - {v}
)
return (2 * overlap) / (n * (n - 1))
| Returns the redundancy of the node `v` in the bipartite graph `G`.
If `G` is a graph with `n` nodes, the redundancy of a node is the ratio
of the "overlap" of `v` to the maximum possible overlap of `v`
according to its degree. The overlap of `v` is the number of pairs of
neighbors that have mutual neighbors themselves, other than `v`.
`v` must have at least two neighbors in `G`.
| 72 | 33 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _node_redundancy(G, v):
n = len(G[v])
overlap = sum(
1 for (u, w) in combinations(G[v], 2) if (G[u].keys() & G[w].keys()) - {v}
)
return (2 * overlap) / (n * (n - 1))
```
###Assistant : Returns the redundancy of the node `v` in the bipartite graph `G`.
If `G` is a graph with `n` nodes, the redundancy of a node is the ratio
of the "overlap" of `v` to the maximum possible overlap of `v`
according to its degree. The overlap of `v` is the number of pairs of
neighbors that have mutual neighbors themselves, other than `v`.
`v` must have at least two neighbors in `G`.
|
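The public wrapper around this helper is `networkx.algorithms.bipartite.node_redundancy`; on a 4-cycle, every node's single neighbour pair shares another common neighbour:
```Python
import networkx as nx
from networkx.algorithms import bipartite

G = nx.cycle_graph(4)  # bipartite: {0, 2} versus {1, 3}
rc = bipartite.node_redundancy(G)
print(rc[0])  # 1.0 -- neighbours 1 and 3 of node 0 also share neighbour 2
```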
2,850 | def solve(self):
count_reaction_loads = 0
for node in self._nodes:
if node[0] in list(self._supports):  # nodes are (label, x, y) tuples; supports are keyed by label
if self._supports[node[0]]=='pinned':
count_reaction_loads += 2
elif self._supports[node[0]]=='roller':
count_reaction_loads += 1
coefficients_matrix = [[0 for i in range(2*len(self._nodes))] for j in range(2*len(self._nodes))]
load_matrix = zeros(2*len(self.nodes), 1)
load_matrix_row = 0
for node in self._nodes:
if node[0] in list(self._loads):
for load in self._loads[node[0]]:
if load[0]!=Symbol('R_'+str(node[0])+'_x') and load[0]!=Symbol('R_'+str(node[0])+'_y'):
load_matrix[load_matrix_row] -= load[0]*math.cos(pi*load[1]/180)
load_matrix[load_matrix_row + 1] -= load[0]*math.sin(pi*load[1]/180)
load_matrix_row += 2
cols = 0
row = 0
for node in self._nodes:
if node[0] in list(self._supports):
if self._supports[node[0]]=='pinned':
coefficients_matrix[row][cols] += 1
coefficients_matrix[row+1][cols+1] += 1
cols += 2
elif self._supports[node[0]]=='roller':
coefficients_matrix[row+1][cols] += 1
cols += 1
row += 2
for member in list(self._members):
start = self._members[member][0]
end = self._members[member][1]
length = sqrt((self._node_coordinates[start][0]-self._node_coordinates[end][0])**2 + (self._node_coordinates[start][1]-self._node_coordinates[end][1])**2)
start_index = self._node_labels.index(start)
end_index = self._node_labels.index(end)
horizontal_component_start = (self._node_coordinates[end][0]-self._node_coordinates[start][0])/length
vertical_component_start = (self._node_coordinates[end][1]-self._node_coordinates[start][1])/length
horizontal_component_end = (self._node_coordinates[start][0]-self._node_coordinates[end][0])/length
vertical_component_end = (self._node_coordinates[start][1]-self._node_coordinates[end][1])/length
coefficients_matrix[start_index*2][cols] += horizontal_component_start
coefficients_matrix[start_index*2+1][cols] += vertical_component_start
coefficients_matrix[end_index*2][cols] += horizontal_component_end
coefficients_matrix[end_index*2+1][cols] += vertical_component_end
cols += 1
forces_matrix = (Matrix(coefficients_matrix)**-1)*load_matrix
self._reaction_loads = {}
i = 0
for node in self._nodes:
if node[0] in list(self._supports):
if self._supports[node[0]]=='pinned':
self._reaction_loads['R_'+str(node[0])+'_x'] = forces_matrix[i]
self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i+1]
i += 2
elif self._supports[node[0]]=='roller':
self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i]
i += 1
for member in list(self._members):
self._internal_forces[member] = forces_matrix[i]
i += 1
return
|
This method solves for all reaction forces of all supports and all internal forces
of all the members in the truss, provided the Truss is solvable.
A Truss is solvable if the following condition is met,
2n >= r + m
Where n is the number of nodes, r is the number of reaction forces, where each pinned
support has 2 reaction forces and each roller has 1, and m is the number of members.
The given condition is derived from the fact that a system of equations is solvable
only when the number of variables is less than or equal to the number of equations.
Equilibrium equations in the x and y directions give two equations per node, i.e. 2n
equations in total. The number of variables is simply the sum of the number of reaction forces and
member forces.
Examples
========
>>> from sympy.physics.continuum_mechanics.truss import Truss
>>> t = Truss()
>>> t.add_node("node_1", 0, 0)
>>> t.add_node("node_2", 6, 0)
>>> t.add_node("node_3", 2, 2)
>>> t.add_node("node_4", 2, 0)
>>> t.add_member("member_1", "node_1", "node_4")
>>> t.add_member("member_2", "node_2", "node_4")
>>> t.add_member("member_3", "node_1", "node_3")
>>> t.add_member("member_4", "node_2", "node_3")
>>> t.add_member("member_5", "node_3", "node_4")
>>> t.apply_load("node_4", magnitude=10, direction=270)
>>> t.apply_support("node_1", type="pinned")
>>> t.apply_support("node_2", type="roller")
>>> t.solve()
>>> t.reaction_loads
{'R_node_1_x': 1.83697019872103e-15, 'R_node_1_y': 6.66666666666667, 'R_node_2_y': 3.33333333333333}
>>> t.internal_forces
{'member_1': 6.66666666666666, 'member_2': 6.66666666666667, 'member_3': -6.66666666666667*sqrt(2), 'member_4': -3.33333333333333*sqrt(5), 'member_5': 10.0}
| 218 | 199 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def solve(self):
count_reaction_loads = 0
for node in self._nodes:
if node[0] in list(self._supports):  # nodes are (label, x, y) tuples; supports are keyed by label
if self._supports[node[0]]=='pinned':
count_reaction_loads += 2
elif self._supports[node[0]]=='roller':
count_reaction_loads += 1
coefficients_matrix = [[0 for i in range(2*len(self._nodes))] for j in range(2*len(self._nodes))]
load_matrix = zeros(2*len(self.nodes), 1)
load_matrix_row = 0
for node in self._nodes:
if node[0] in list(self._loads):
for load in self._loads[node[0]]:
if load[0]!=Symbol('R_'+str(node[0])+'_x') and load[0]!=Symbol('R_'+str(node[0])+'_y'):
load_matrix[load_matrix_row] -= load[0]*math.cos(pi*load[1]/180)
load_matrix[load_matrix_row + 1] -= load[0]*math.sin(pi*load[1]/180)
load_matrix_row += 2
cols = 0
row = 0
for node in self._nodes:
if node[0] in list(self._supports):
if self._supports[node[0]]=='pinned':
coefficients_matrix[row][cols] += 1
coefficients_matrix[row+1][cols+1] += 1
cols += 2
elif self._supports[node[0]]=='roller':
coefficients_matrix[row+1][cols] += 1
cols += 1
row += 2
for member in list(self._members):
start = self._members[member][0]
end = self._members[member][1]
length = sqrt((self._node_coordinates[start][0]-self._node_coordinates[end][0])**2 + (self._node_coordinates[start][1]-self._node_coordinates[end][1])**2)
start_index = self._node_labels.index(start)
end_index = self._node_labels.index(end)
horizontal_component_start = (self._node_coordinates[end][0]-self._node_coordinates[start][0])/length
vertical_component_start = (self._node_coordinates[end][1]-self._node_coordinates[start][1])/length
horizontal_component_end = (self._node_coordinates[start][0]-self._node_coordinates[end][0])/length
vertical_component_end = (self._node_coordinates[start][1]-self._node_coordinates[end][1])/length
coefficients_matrix[start_index*2][cols] += horizontal_component_start
coefficients_matrix[start_index*2+1][cols] += vertical_component_start
coefficients_matrix[end_index*2][cols] += horizontal_component_end
coefficients_matrix[end_index*2+1][cols] += vertical_component_end
cols += 1
forces_matrix = (Matrix(coefficients_matrix)**-1)*load_matrix
self._reaction_loads = {}
i = 0
for node in self._nodes:
if node[0] in list(self._supports):
if self._supports[node[0]]=='pinned':
self._reaction_loads['R_'+str(node[0])+'_x'] = forces_matrix[i]
self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i+1]
i += 2
elif self._supports[node[0]]=='roller':
self._reaction_loads['R_'+str(node[0])+'_y'] = forces_matrix[i]
i += 1
for member in list(self._members):
self._internal_forces[member] = forces_matrix[i]
i += 1
return
```
###Assistant :
This method solves for all reaction forces of all supports and all internal forces
of all the members in the truss, provided the Truss is solvable.
A Truss is solvable if the following condition is met,
2n >= r + m
Where n is the number of nodes, r is the number of reaction forces, where each pinned
support has 2 reaction forces and each roller has 1, and m is the number of members.
The given condition is derived from the fact that a system of equations is solvable
only when the number of variables is lesser than or equal to the number of equations.
Equilibrium Equations in x and y directions give two equations per node giving 2n number
equations. The number of variables is simply the sum of the number of reaction forces and
member forces.
Examples
========
>>> from sympy.physics.continuum_mechanics.truss import Truss
>>> t = Truss()
>>> t.add_node("node_1", 0, 0)
>>> t.add_node("node_2", 6, 0)
>>> t.add_node("node_3", 2, 2)
>>> t.add_node("node_4", 2, 0)
>>> t.add_member("member_1", "node_1", "node_4")
>>> t.add_member("member_2", "node_2", "node_4")
>>> t.add_member("member_3", "node_1", "node_3")
>>> t.add_member("member_4", "node_2", "node_3")
>>> t.add_member("member_5", "node_3", "node_4")
>>> t.apply_load("node_4", magnitude=10, direction=270)
>>> t.apply_support("node_1", type="pinned")
>>> t.apply_support("node_2", type="roller")
>>> t.solve()
>>> t.reaction_loads
{'R_node_1_x': 1.83697019872103e-15, 'R_node_1_y': 6.66666666666667, 'R_node_2_y': 3.33333333333333}
>>> t.internal_forces
{'member_1': 6.66666666666666, 'member_2': 6.66666666666667, 'member_3': -6.66666666666667*sqrt(2), 'member_4': -3.33333333333333*sqrt(5), 'member_5': 10.0}
|
2,851 | def gf_edf_zassenhaus(f, n, p, K):
factors = [f]
if gf_degree(f) <= n:
return factors
N = gf_degree(f) // n
if p != 2:
b = gf_frobenius_monomial_base(f, p, K)
t = [K.one, K.zero]
while len(factors) < N:
if p == 2:
h = r = t
for i in range(n - 1):
r = gf_pow_mod(r, 2, f, p, K)
h = gf_add(h, r, p, K)
g = gf_gcd(f, h, p, K)
t += [K.zero, K.zero]
else:
r = gf_random(2 * n - 1, p, K)
h = _gf_pow_pnm1d2(r, n, f, b, p, K)
g = gf_gcd(f, gf_sub_ground(h, K.one, p, K), p, K)
if g != [K.one] and g != f:
factors = gf_edf_zassenhaus(g, n, p, K) \
+ gf_edf_zassenhaus(gf_quo(f, g, p, K), n, p, K)
return _sort_factors(factors, multiple=False)
|
Cantor-Zassenhaus: Probabilistic Equal Degree Factorization
Given a monic square-free polynomial ``f`` in ``GF(p)[x]`` and
an integer ``n``, such that ``n`` divides ``deg(f)``, returns all
irreducible factors ``f_1,...,f_d`` of ``f``, each of degree ``n``.
EDF procedure gives complete factorization over Galois fields.
Consider the square-free polynomial ``f = x**3 + x**2 + x + 1`` in
``GF(5)[x]``. Let's compute its irreducible factors of degree one::
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_edf_zassenhaus
>>> gf_edf_zassenhaus([1,1,1,1], 1, 5, ZZ)
[[1, 1], [1, 2], [1, 3]]
References
==========
.. [1] [Gathen99]_
.. [2] [Geddes92]_
.. [3] [Cohen93]_
| 96 | 127 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def gf_edf_zassenhaus(f, n, p, K):
factors = [f]
if gf_degree(f) <= n:
return factors
N = gf_degree(f) // n
if p != 2:
b = gf_frobenius_monomial_base(f, p, K)
t = [K.one, K.zero]
while len(factors) < N:
if p == 2:
h = r = t
for i in range(n - 1):
r = gf_pow_mod(r, 2, f, p, K)
h = gf_add(h, r, p, K)
g = gf_gcd(f, h, p, K)
t += [K.zero, K.zero]
else:
r = gf_random(2 * n - 1, p, K)
h = _gf_pow_pnm1d2(r, n, f, b, p, K)
g = gf_gcd(f, gf_sub_ground(h, K.one, p, K), p, K)
if g != [K.one] and g != f:
factors = gf_edf_zassenhaus(g, n, p, K) \
+ gf_edf_zassenhaus(gf_quo(f, g, p, K), n, p, K)
return _sort_factors(factors, multiple=False)
```
###Assistant :
Cantor-Zassenhaus: Probabilistic Equal Degree Factorization
Given a monic square-free polynomial ``f`` in ``GF(p)[x]`` and
an integer ``n``, such that ``n`` divides ``deg(f)``, returns all
irreducible factors ``f_1,...,f_d`` of ``f``, each of degree ``n``.
EDF procedure gives complete factorization over Galois fields.
Consider the square-free polynomial ``f = x**3 + x**2 + x + 1`` in
``GF(5)[x]``. Let's compute its irreducible factors of degree one::
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.galoistools import gf_edf_zassenhaus
>>> gf_edf_zassenhaus([1,1,1,1], 1, 5, ZZ)
[[1, 1], [1, 2], [1, 3]]
References
==========
.. [1] [Gathen99]_
.. [2] [Geddes92]_
.. [3] [Cohen93]_
|
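A quick sanity check of the docstring example: the degree-one factors should multiply back to `f` modulo 5, using `gf_mul` from the same module:
```Python
from sympy.polys.domains import ZZ
from sympy.polys.galoistools import gf_edf_zassenhaus, gf_mul

factors = gf_edf_zassenhaus([1, 1, 1, 1], 1, 5, ZZ)
prod = [ZZ(1)]
for fac in factors:
    prod = gf_mul(prod, fac, 5, ZZ)
print(factors)  # [[1, 1], [1, 2], [1, 3]]
print(prod)     # [1, 1, 1, 1] -- (x + 1)(x + 2)(x + 3) == x**3 + x**2 + x + 1 (mod 5)
```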
2,852 | def _tracemin_fiedler(L, X, normalized, tol, method):
import numpy as np
import scipy as sp
import scipy.linalg # call as sp.linalg
import scipy.linalg.blas # call as sp.linalg.blas
import scipy.sparse # call as sp.sparse
n = X.shape[0]
if normalized:
# Form the normalized Laplacian matrix and determine the eigenvector of
# its nullspace.
e = np.sqrt(L.diagonal())
# TODO: rm csr_array wrapper when spdiags array creation becomes available
D = sp.sparse.csr_array(sp.sparse.spdiags(1 / e, 0, n, n, format="csr"))
L = D @ L @ D
e *= 1.0 / np.linalg.norm(e, 2)
if normalized:
| Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.
The Fiedler vector of a connected undirected graph is the eigenvector
corresponding to the second smallest eigenvalue of the Laplacian matrix
of the graph. This function starts with the Laplacian L, not the Graph.
Parameters
----------
L : Laplacian of a possibly weighted or normalized, but undirected graph
X : Initial guess for a solution. Usually a matrix of random numbers.
This function allows more than one column in X to identify more than
one eigenvector if desired.
normalized : bool
Whether the normalized Laplacian matrix is used.
tol : float
Tolerance of relative residual in eigenvalue computation.
Warning: There is no limit on number of iterations.
method : string
Should be 'tracemin_pcg' or 'tracemin_lu'.
Otherwise exception is raised.
Returns
-------
sigma, X : Two NumPy arrays of floats.
The lowest eigenvalues and corresponding eigenvectors of L.
The size of input X determines the size of these outputs.
As this is for Fiedler vectors, the zero eigenvalue (and
constant eigenvector) are avoided.
| 172 | 89 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _tracemin_fiedler(L, X, normalized, tol, method):
import numpy as np
import scipy as sp
import scipy.linalg # call as sp.linalg
import scipy.linalg.blas # call as sp.linalg.blas
import scipy.sparse # call as sp.sparse
n = X.shape[0]
if normalized:
# Form the normalized Laplacian matrix and determine the eigenvector of
# its nullspace.
e = np.sqrt(L.diagonal())
# TODO: rm csr_array wrapper when spdiags array creation becomes available
D = sp.sparse.csr_array(sp.sparse.spdiags(1 / e, 0, n, n, format="csr"))
L = D @ L @ D
e *= 1.0 / np.linalg.norm(e, 2)
if normalized:
```
###Assistant : Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.
The Fiedler vector of a connected undirected graph is the eigenvector
corresponding to the second smallest eigenvalue of the Laplacian matrix
of the graph. This function starts with the Laplacian L, not the Graph.
Parameters
----------
L : Laplacian of a possibly weighted or normalized, but undirected graph
X : Initial guess for a solution. Usually a matrix of random numbers.
This function allows more than one column in X to identify more than
one eigenvector if desired.
normalized : bool
Whether the normalized Laplacian matrix is used.
tol : float
Tolerance of relative residual in eigenvalue computation.
Warning: There is no limit on number of iterations.
method : string
Should be 'tracemin_pcg' or 'tracemin_lu'.
Otherwise exception is raised.
Returns
-------
sigma, X : Two NumPy arrays of floats.
The lowest eigenvalues and corresponding eigenvectors of L.
The size of input X determines the size of these outputs.
As this is for Fiedler vectors, the zero eigenvalue (and
constant eigenvector) are avoided.
|
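This private helper is normally reached through `networkx.fiedler_vector`, whose `method` argument selects the TraceMIN variant:
```Python
import networkx as nx

G = nx.path_graph(4)
v = nx.fiedler_vector(G, method="tracemin_pcg")  # 'tracemin_lu' also routes to this helper
print(v)  # entries change sign exactly once along the path, splitting it in two
```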
2,853 | def test_complex_pipeline_with_shared_prompt_model_and_prompt_template_yaml(tmp_path):
with open(tmp_path / "tmp_config_with_prompt_template.yml", "w") as tmp_file:
tmp_file.write(
f
)
pipeline = Pipeline.load_from_yaml(path=tmp_path / "tmp_config_with_prompt_template.yml")
result = pipeline.run(query="not relevant", documents=[Document("Berlin is an amazing city.")])
assert "Berlin" in result["results"][0]
assert len(result["meta"]["invocation_context"]) > 0
@pytest.mark.skipif(
not os.environ.get("OPENAI_API_KEY", None),
reason="Please export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
) |
version: ignore
components:
- name: pmodel
type: PromptModel
params:
model_name_or_path: google/flan-t5-small
model_kwargs:
torch_dtype: torch.bfloat16
- name: question_generation_template
type: PromptTemplate
params:
name: question-generation-new
prompt_text: "Given the context please generate a question. Context: $documents; Question:"
- name: p1
params:
model_name_or_path: pmodel
default_prompt_template: question_generation_template
output_variable: questions
type: PromptNode
- name: p2
params:
model_name_or_path: pmodel
default_prompt_template: question-answering
type: PromptNode
pipelines:
- name: query
nodes:
- name: p1
inputs:
- Query
- name: p2
inputs:
- p1
| 72 | 55 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_complex_pipeline_with_shared_prompt_model_and_prompt_template_yaml(tmp_path):
with open(tmp_path / "tmp_config_with_prompt_template.yml", "w") as tmp_file:
tmp_file.write(
f
)
pipeline = Pipeline.load_from_yaml(path=tmp_path / "tmp_config_with_prompt_template.yml")
result = pipeline.run(query="not relevant", documents=[Document("Berlin is an amazing city.")])
assert "Berlin" in result["results"][0]
assert len(result["meta"]["invocation_context"]) > 0
@pytest.mark.skipif(
not os.environ.get("OPENAI_API_KEY", None),
reason="Please export an env var called OPENAI_API_KEY containing the OpenAI API key to run this test.",
)
```
###Assistant :
version: ignore
components:
- name: pmodel
type: PromptModel
params:
model_name_or_path: google/flan-t5-small
model_kwargs:
torch_dtype: torch.bfloat16
- name: question_generation_template
type: PromptTemplate
params:
name: question-generation-new
prompt_text: "Given the context please generate a question. Context: $documents; Question:"
- name: p1
params:
model_name_or_path: pmodel
default_prompt_template: question_generation_template
output_variable: questions
type: PromptNode
- name: p2
params:
model_name_or_path: pmodel
default_prompt_template: question-answering
type: PromptNode
pipelines:
- name: query
nodes:
- name: p1
inputs:
- Query
- name: p2
inputs:
- p1
|
2,854 | def _download_model(self):
self.logger.info("Downloading model: '%s' from: %s", self._model_name, self._url_download)
for attempt in range(self._retries):
try:
downloaded_size = self._url_partial_size
req = urllib.request.Request(self._url_download)
if downloaded_size != 0:
req.add_header("Range", f"bytes={downloaded_size}-")
with urllib.request.urlopen(req, timeout=10) as response:
self.logger.debug("header info: {%s}", response.info())
self.logger.debug("Return Code: %s", response.getcode())
self._write_zipfile(response, downloaded_size)
break
except (socket_error, socket_timeout,
urllib.error.HTTPError, urllib.error.URLError) as err:
if attempt + 1 < self._retries:
self.logger.warning("Error downloading model (%s). Retrying %s of %s...",
str(err), attempt + 2, self._retries)
else:
self.logger.error("Failed to download model. Exiting. (Error: '%s', URL: "
"'%s')", str(err), self._url_download)
self.logger.info("You can try running again to resume the download.")
self.logger.info("Alternatively, you can manually download the model from: %s "
"and unzip the contents to: %s",
self._url_download, self._cache_dir)
sys.exit(1)
| Download the model zip from GitHub to the cache folder. | 10 | 109 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _download_model(self):
self.logger.info("Downloading model: '%s' from: %s", self._model_name, self._url_download)
for attempt in range(self._retries):
try:
downloaded_size = self._url_partial_size
req = urllib.request.Request(self._url_download)
if downloaded_size != 0:
req.add_header("Range", f"bytes={downloaded_size}-")
with urllib.request.urlopen(req, timeout=10) as response:
self.logger.debug("header info: {%s}", response.info())
self.logger.debug("Return Code: %s", response.getcode())
self._write_zipfile(response, downloaded_size)
break
except (socket_error, socket_timeout,
urllib.error.HTTPError, urllib.error.URLError) as err:
if attempt + 1 < self._retries:
self.logger.warning("Error downloading model (%s). Retrying %s of %s...",
str(err), attempt + 2, self._retries)
else:
self.logger.error("Failed to download model. Exiting. (Error: '%s', URL: "
"'%s')", str(err), self._url_download)
self.logger.info("You can try running again to resume the download.")
self.logger.info("Alternatively, you can manually download the model from: %s "
"and unzip the contents to: %s",
self._url_download, self._cache_dir)
sys.exit(1)
```
###Assistant : Download the model zip from github to the cache folder.
|
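The record above shows a retrying, resumable download. Below is a minimal, self-contained sketch of the same resume-via-Range pattern; the URL and destination path are hypothetical, and it assumes the server honours HTTP Range requests.
```Python
import os
import urllib.request

def resume_download(url, dest, chunk_size=8192):
    # Resume from however many bytes are already on disk.
    downloaded = os.path.getsize(dest) if os.path.exists(dest) else 0
    req = urllib.request.Request(url)
    if downloaded:
        # Ask the server to send only the remaining bytes.
        req.add_header("Range", f"bytes={downloaded}-")
    with urllib.request.urlopen(req, timeout=10) as response, open(dest, "ab") as fh:
        while True:
            chunk = response.read(chunk_size)
            if not chunk:
                break
            fh.write(chunk)

# resume_download("https://example.com/model.zip", "model.zip")  # URL is illustrative
```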
2,855 | def fit_transform(self, X, y=None):
self._validate_params()
self._fit_transform(X)
return self.embedding_
| Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Returns the instance itself.
| 45 | 8 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def fit_transform(self, X, y=None):
self._validate_params()
self._fit_transform(X)
return self.embedding_
```
###Assistant : Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Returns the instance itself.
|
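Since the docstring above follows the scikit-learn estimator API, a short usage sketch may help; it assumes a manifold learner such as `LocallyLinearEmbedding`, which exposes the same `fit_transform`/`embedding_` pair.
```Python
import numpy as np
from sklearn.manifold import LocallyLinearEmbedding

X = np.random.RandomState(0).rand(100, 10)  # toy high-dimensional data
embedder = LocallyLinearEmbedding(n_components=2)
X_new = embedder.fit_transform(X)  # equivalent to fit(X) then reading embedder.embedding_
print(X_new.shape)  # (100, 2)
```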
2,856 | def test_song_from_data_dump():
# Loads from str
song = Song.from_data_dump(
)
assert song.name == "Ropes"
assert song.artists == ["Dirty Palm", "Chandler Jewels"]
assert song.album_name == "Ropes"
assert song.album_artist == "Dirty Palm"
assert song.genres == ["gaming edm", "melbourne bounce international"]
assert song.disc_number == 1
assert song.duration == 188
assert song.year == 2021
assert song.date == "2021-10-28"
assert song.track_number == 1
assert song.tracks_count == 1
assert song.isrc == "GB2LD2110301"
assert song.song_id == "1t2qKa8K72IBC8yQlhD9bU"
assert (
song.cover_url
== "https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332"
)
assert song.explicit == False
assert song.download_url == None
|
Tests if Song.from_data_dump() works correctly.
{
"name": "Ropes",
"artists": ["Dirty Palm", "Chandler Jewels"],
"album_name": "Ropes",
"album_artist": "Dirty Palm",
"genres": ["gaming edm", "melbourne bounce international"],
"disc_number": 1,
"duration": 188,
"year": 2021,
"date": "2021-10-28",
"track_number": 1,
"tracks_count": 1,
"isrc": "GB2LD2110301",
"song_id": "1t2qKa8K72IBC8yQlhD9bU",
"cover_url": "https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332",
"explicit": false,
"download_url": null,
"artist" : "Dirty Palm",
"disc_count": 1,
"copyright": "",
"publisher": "",
"url": "https://open.spotify.com/track/1t2qKa8K72IBC8yQlhD9bU"
}
| 59 | 84 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_song_from_data_dump():
# Loads from str
song = Song.from_data_dump(
)
assert song.name == "Ropes"
assert song.artists == ["Dirty Palm", "Chandler Jewels"]
assert song.album_name == "Ropes"
assert song.album_artist == "Dirty Palm"
assert song.genres == ["gaming edm", "melbourne bounce international"]
assert song.disc_number == 1
assert song.duration == 188
assert song.year == 2021
assert song.date == "2021-10-28"
assert song.track_number == 1
assert song.tracks_count == 1
assert song.isrc == "GB2LD2110301"
assert song.song_id == "1t2qKa8K72IBC8yQlhD9bU"
assert (
song.cover_url
== "https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332"
)
assert song.explicit == False
assert song.download_url == None
```
###Assistant :
Tests if Song.from_data_dump() works correctly.
{
"name": "Ropes",
"artists": ["Dirty Palm", "Chandler Jewels"],
"album_name": "Ropes",
"album_artist": "Dirty Palm",
"genres": ["gaming edm", "melbourne bounce international"],
"disc_number": 1,
"duration": 188,
"year": 2021,
"date": "2021-10-28",
"track_number": 1,
"tracks_count": 1,
"isrc": "GB2LD2110301",
"song_id": "1t2qKa8K72IBC8yQlhD9bU",
"cover_url": "https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332",
"explicit": false,
"download_url": null,
"artist" : "Dirty Palm",
"disc_count": 1,
"copyright": "",
"publisher": "",
"url": "https://open.spotify.com/track/1t2qKa8K72IBC8yQlhD9bU"
}
|
2,857 | def test_run(self) -> Tuple[Dict, Dict]:
print(" | > Synthesizing test sentences.")
test_audios = {}
test_figures = {}
test_sentences = self.config.test_sentences
for idx, s_info in enumerate(test_sentences):
try:
aux_inputs = self.get_aux_input_from_test_sentences(s_info)
wav, alignment, _, _ = synthesis(
self,
aux_inputs["text"],
self.config,
"cuda" in str(next(self.parameters()).device),
ap,
speaker_id=aux_inputs["speaker_id"],
d_vector=aux_inputs["d_vector"],
style_wav=aux_inputs["style_wav"],
language_id=aux_inputs["language_id"],
language_name=aux_inputs["language_name"],
enable_eos_bos_chars=self.config.enable_eos_bos_chars,
use_griffin_lim=True,
do_trim_silence=False,
).values()
test_audios["{}-audio".format(idx)] = wav
test_figures["{}-alignment".format(idx)] = plot_alignment(alignment.T, output_fig=False)
except: # pylint: disable=bare-except
print(" !! Error creating Test Sentence -", idx)
return test_figures, test_audios
| Generic test run for `tts` models used by `Trainer`.
You can override this for a different behaviour.
Returns:
Tuple[Dict, Dict]: Test figures and audios to be projected to Tensorboard.
| 29 | 73 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_run(self) -> Tuple[Dict, Dict]:
print(" | > Synthesizing test sentences.")
test_audios = {}
test_figures = {}
test_sentences = self.config.test_sentences
for idx, s_info in enumerate(test_sentences):
try:
aux_inputs = self.get_aux_input_from_test_sentences(s_info)
wav, alignment, _, _ = synthesis(
self,
aux_inputs["text"],
self.config,
"cuda" in str(next(self.parameters()).device),
ap,
speaker_id=aux_inputs["speaker_id"],
d_vector=aux_inputs["d_vector"],
style_wav=aux_inputs["style_wav"],
language_id=aux_inputs["language_id"],
language_name=aux_inputs["language_name"],
enable_eos_bos_chars=self.config.enable_eos_bos_chars,
use_griffin_lim=True,
do_trim_silence=False,
).values()
test_audios["{}-audio".format(idx)] = wav
test_figures["{}-alignment".format(idx)] = plot_alignment(alignment.T, output_fig=False)
except: # pylint: disable=bare-except
print(" !! Error creating Test Sentence -", idx)
return test_figures, test_audios
```
###Assistant : Generic test run for `tts` models used by `Trainer`.
You can override this for a different behaviour.
Returns:
Tuple[Dict, Dict]: Test figures and audios to be projected to Tensorboard.
|
2,858 | def read_dict(self, dictionary, source='<dict>'):
elements_added = set()
for section, keys in dictionary.items():
section = str(section)
try:
self.add_section(section)
except (DuplicateSectionError, ValueError):
if self._strict and section in elements_added:
raise
elements_added.add(section)
for key, value in keys.items():
key = self.optionxform(str(key))
if value is not None:
value = str(value)
if self._strict and (section, key) in elements_added:
raise DuplicateOptionError(section, key, source)
elements_added.add((section, key))
self.set(section, key, value)
| Read configuration from a dictionary.
Keys are section names, values are dictionaries with keys and values
that should be present in the section. If the used dictionary type
preserves order, sections and their keys will be added in order.
All types held in the dictionary are converted to strings during
reading, including section names, option names and keys.
Optional second argument is the `source' specifying the name of the
dictionary being read.
| 72 | 60 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def read_dict(self, dictionary, source='<dict>'):
elements_added = set()
for section, keys in dictionary.items():
section = str(section)
try:
self.add_section(section)
except (DuplicateSectionError, ValueError):
if self._strict and section in elements_added:
raise
elements_added.add(section)
for key, value in keys.items():
key = self.optionxform(str(key))
if value is not None:
value = str(value)
if self._strict and (section, key) in elements_added:
raise DuplicateOptionError(section, key, source)
elements_added.add((section, key))
self.set(section, key, value)
```
###Assistant : Read configuration from a dictionary.
Keys are section names, values are dictionaries with keys and values
that should be present in the section. If the used dictionary type
preserves order, sections and their keys will be added in order.
All types held in the dictionary are converted to strings during
reading, including section names, option names and keys.
Optional second argument is the `source' specifying the name of the
dictionary being read.
|
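A quick demonstration of the `read_dict` behaviour described above, using the standard-library `ConfigParser`; note that the integer value is stored as a string and converted back on read.
```Python
from configparser import ConfigParser

parser = ConfigParser()
parser.read_dict({
    "server": {"host": "localhost", "port": 8080},  # 8080 becomes the string "8080"
    "logging": {"level": "INFO"},
})
print(parser.get("server", "host"))     # localhost
print(parser.getint("server", "port"))  # 8080, converted back to int
```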
2,859 | def send(self, load, tries=3, timeout=60, raw=False):
_try = 1
while True:
try:
if self.crypt == "clear":
log.trace("ReqChannel send clear load=%r", load)
ret = yield self._uncrypted_transfer(load, timeout=timeout)
else:
log.trace("ReqChannel send crypt load=%r", load)
ret = yield self._crypted_transfer(
load, timeout=timeout, raw=raw
)
break
except Exception as exc:
log.error("Failed to send msg %r", dir(exc))
if _try == tries:
raise #salt.exceptions.SaltClientError("Connection to master lost")
else:
_try += 1
continue
raise salt.ext.tornado.gen.Return(ret)
|
Send a request, return a future which will complete when we send the message
:param dict load: A load to send across the wire
:param int tries: The number of attempts to make before failure
:param int timeout: The number of seconds to wait for a response before failing
| 47 | 67 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def send(self, load, tries=3, timeout=60, raw=False):
_try = 1
while True:
try:
if self.crypt == "clear":
log.trace("ReqChannel send clear load=%r", load)
ret = yield self._uncrypted_transfer(load, timeout=timeout)
else:
log.trace("ReqChannel send crypt load=%r", load)
ret = yield self._crypted_transfer(
load, timeout=timeout, raw=raw
)
break
except Exception as exc:
log.error("Failed to send msg %r", dir(exc))
if _try == tries:
raise #salt.exceptions.SaltClientError("Connection to master lost")
else:
_try += 1
continue
raise salt.ext.tornado.gen.Return(ret)
```
###Assistant :
Send a request, return a future which will complete when we send the message
:param dict load: A load to send across the wire
:param int tries: The number of attempts to make before failure
:param int timeout: The number of seconds to wait for a response before failing
|
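The method above is a Tornado coroutine; a synchronous sketch of the same bounded-retry pattern (the `transfer` callable and names are placeholders) looks like this:
```Python
import logging

log = logging.getLogger(__name__)

def send_with_retries(transfer, load, tries=3):
    # Re-raise only once the final attempt has failed.
    for attempt in range(1, tries + 1):
        try:
            return transfer(load)
        except Exception as exc:
            log.error("Failed to send msg (attempt %s/%s): %s", attempt, tries, exc)
            if attempt == tries:
                raise
```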
2,860 | def get_fields_from_path(model, path):
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field(piece))
return fields
| Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
| 27 | 25 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_fields_from_path(model, path):
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field(piece))
return fields
```
###Assistant : Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
|
2,861 | def find_config_files(self):
files = []
check_environ()
# Where to look for the system-wide Distutils config file
sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
# Look for the system config file
sys_file = os.path.join(sys_dir, "distutils.cfg")
if os.path.isfile(sys_file):
files.append(sys_file)
# What to call the per-user config file
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
# And look for the user config file
if self.want_user_cfg:
user_file = os.path.join(os.path.expanduser('~'), user_filename)
if os.path.isfile(user_file):
files.append(user_file)
# All platforms support local setup.cfg
local_file = "setup.cfg"
if os.path.isfile(local_file):
files.append(local_file)
if DEBUG:
self.announce("using config files: %s" % ', '.join(files))
return files
| Find as many configuration files as should be processed for this
platform, and return a list of filenames in the order in which they
should be parsed. The filenames returned are guaranteed to exist
(modulo nasty race conditions).
There are three possible config files: distutils.cfg in the
Distutils installation directory (ie. where the top-level
Distutils __inst__.py file lives), a file in the user's home
directory named .pydistutils.cfg on Unix and pydistutils.cfg
on Windows/Mac; and setup.cfg in the current directory.
The file in the user's home directory can be disabled with the
--no-user-cfg option.
| 93 | 92 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def find_config_files(self):
files = []
check_environ()
# Where to look for the system-wide Distutils config file
sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
# Look for the system config file
sys_file = os.path.join(sys_dir, "distutils.cfg")
if os.path.isfile(sys_file):
files.append(sys_file)
# What to call the per-user config file
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
# And look for the user config file
if self.want_user_cfg:
user_file = os.path.join(os.path.expanduser('~'), user_filename)
if os.path.isfile(user_file):
files.append(user_file)
# All platforms support local setup.cfg
local_file = "setup.cfg"
if os.path.isfile(local_file):
files.append(local_file)
if DEBUG:
self.announce("using config files: %s" % ', '.join(files))
return files
```
###Assistant : Find as many configuration files as should be processed for this
platform, and return a list of filenames in the order in which they
should be parsed. The filenames returned are guaranteed to exist
(modulo nasty race conditions).
There are three possible config files: distutils.cfg in the
Distutils installation directory (ie. where the top-level
Distutils __inst__.py file lives), a file in the user's home
directory named .pydistutils.cfg on Unix and pydistutils.cfg
on Windows/Mac; and setup.cfg in the current directory.
The file in the user's home directory can be disabled with the
--no-user-cfg option.
|
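A stripped-down sketch of the same lookup order (the system-wide file is omitted, since it depends on where distutils is installed); the file names follow the docstring above.
```Python
import os

def candidate_config_files():
    # The per-user file name differs between POSIX and Windows.
    user_name = ".pydistutils.cfg" if os.name == "posix" else "pydistutils.cfg"
    candidates = [
        os.path.join(os.path.expanduser("~"), user_name),
        "setup.cfg",  # local setup.cfg is checked on all platforms
    ]
    return [path for path in candidates if os.path.isfile(path)]

print(candidate_config_files())
```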
2,862 | def get_save_image_name(org_im_path, output_dir, num):
# name prefix of orginal image
org_im_name = os.path.split(org_im_path)[-1]
im_prefix = os.path.splitext(org_im_name)[0]
ext = '.png'
# save image path
save_im_path = os.path.join(output_dir, im_prefix + ext)
if os.path.exists(save_im_path):
save_im_path = os.path.join(
output_dir, im_prefix + str(num) + ext)
return save_im_path
|
Get save image name from source image path.
| 8 | 42 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_save_image_name(org_im_path, output_dir, num):
# name prefix of orginal image
org_im_name = os.path.split(org_im_path)[-1]
im_prefix = os.path.splitext(org_im_name)[0]
ext = '.png'
# save image path
save_im_path = os.path.join(output_dir, im_prefix + ext)
if os.path.exists(save_im_path):
save_im_path = os.path.join(
output_dir, im_prefix + str(num) + ext)
return save_im_path
```
###Assistant :
Get save image name from source image path.
|
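The collision-avoidance logic above can be exercised standalone; this sketch reuses the same `splitext`/`exists` approach, with hypothetical paths.
```Python
import os

def unique_save_path(org_im_path, output_dir, num, ext=".png"):
    prefix = os.path.splitext(os.path.basename(org_im_path))[0]
    candidate = os.path.join(output_dir, prefix + ext)
    if os.path.exists(candidate):
        # Fall back to a numbered name when the plain one is taken.
        candidate = os.path.join(output_dir, prefix + str(num) + ext)
    return candidate

print(unique_save_path("photos/cat.jpg", "/tmp", 2))  # /tmp/cat.png (or /tmp/cat2.png)
```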
2,863 | def rgb_to_hsv(arr):
arr = np.asarray(arr)
# check length of the last dimension, should be _some_ sort of rgb
if arr.shape[-1] != 3:
raise ValueError("Last dimension of input array must be 3; "
"shape {} was found.".format(arr.shape))
in_shape = arr.shape
arr = np.array(
arr, copy=False,
dtype=np.promote_types(arr.dtype, np.float32), # Don't work on ints.
ndmin=2, # In case input was 1D.
)
out = np.zeros_like(arr)
arr_max = arr.max(-1)
ipos = arr_max > 0
delta = arr.ptp(-1)
s = np.zeros_like(delta)
s[ipos] = delta[ipos] / arr_max[ipos]
ipos = delta > 0
# red is max
idx = (arr[..., 0] == arr_max) & ipos
out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
# green is max
idx = (arr[..., 1] == arr_max) & ipos
out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
# blue is max
idx = (arr[..., 2] == arr_max) & ipos
out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]
out[..., 0] = (out[..., 0] / 6.0) % 1.0
out[..., 1] = s
out[..., 2] = arr_max
return out.reshape(in_shape)
|
Convert float RGB values (in the range [0, 1]), in a numpy array to HSV
values.
Parameters
----------
arr : (..., 3) array-like
All values must be in the range [0, 1]
Returns
-------
(..., 3) ndarray
Colors converted to HSV values in range [0, 1]
| 46 | 175 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def rgb_to_hsv(arr):
arr = np.asarray(arr)
# check length of the last dimension, should be _some_ sort of rgb
if arr.shape[-1] != 3:
raise ValueError("Last dimension of input array must be 3; "
"shape {} was found.".format(arr.shape))
in_shape = arr.shape
arr = np.array(
arr, copy=False,
dtype=np.promote_types(arr.dtype, np.float32), # Don't work on ints.
ndmin=2, # In case input was 1D.
)
out = np.zeros_like(arr)
arr_max = arr.max(-1)
ipos = arr_max > 0
delta = arr.ptp(-1)
s = np.zeros_like(delta)
s[ipos] = delta[ipos] / arr_max[ipos]
ipos = delta > 0
# red is max
idx = (arr[..., 0] == arr_max) & ipos
out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
# green is max
idx = (arr[..., 1] == arr_max) & ipos
out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
# blue is max
idx = (arr[..., 2] == arr_max) & ipos
out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]
out[..., 0] = (out[..., 0] / 6.0) % 1.0
out[..., 1] = s
out[..., 2] = arr_max
return out.reshape(in_shape)
```
###Assistant :
Convert float RGB values (in the range [0, 1]), in a numpy array to HSV
values.
Parameters
----------
arr : (..., 3) array-like
All values must be in the range [0, 1]
Returns
-------
(..., 3) ndarray
Colors converted to HSV values in range [0, 1]
|
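The function above appears to be matplotlib's `matplotlib.colors.rgb_to_hsv` (an assumption based on the signature and docstring), so it can be exercised via matplotlib directly:
```Python
import numpy as np
from matplotlib.colors import rgb_to_hsv

rgb = np.array([[1.0, 0.0, 0.0],    # pure red
                [0.0, 1.0, 0.0],    # pure green
                [0.5, 0.5, 0.5]])   # grey
print(rgb_to_hsv(rgb))
# red -> hue 0.0, green -> hue 1/3; grey has saturation 0 and value 0.5
```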
2,864 | def test_scheduler_verify_pool_full(self, dag_maker, configs):
with conf_vars(configs):
with dag_maker(dag_id='test_scheduler_verify_pool_full'):
BashOperator(
task_id='dummy',
pool='test_scheduler_verify_pool_full',
bash_command='echo hi',
)
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
session.flush()
self.scheduler_job = SchedulerJob(executor=self.null_exec)
self.scheduler_job.processor_agent = mock.MagicMock()
# Create 2 dagruns, which will create 2 task instances.
dr = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
)
self.scheduler_job._schedule_dag_run(dr, session)
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.RUNNING)
self.scheduler_job._schedule_dag_run(dr, session)
session.flush()
task_instances_list = self.scheduler_job._executable_task_instances_to_queued(
max_tis=32, session=session
)
assert len(task_instances_list) == 1
|
Test task instances not queued when pool is full
| 9 | 64 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_scheduler_verify_pool_full(self, dag_maker, configs):
with conf_vars(configs):
with dag_maker(dag_id='test_scheduler_verify_pool_full'):
BashOperator(
task_id='dummy',
pool='test_scheduler_verify_pool_full',
bash_command='echo hi',
)
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
session.flush()
self.scheduler_job = SchedulerJob(executor=self.null_exec)
self.scheduler_job.processor_agent = mock.MagicMock()
# Create 2 dagruns, which will create 2 task instances.
dr = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
)
self.scheduler_job._schedule_dag_run(dr, session)
dr = dag_maker.create_dagrun_after(dr, run_type=DagRunType.SCHEDULED, state=State.RUNNING)
self.scheduler_job._schedule_dag_run(dr, session)
session.flush()
task_instances_list = self.scheduler_job._executable_task_instances_to_queued(
max_tis=32, session=session
)
assert len(task_instances_list) == 1
```
###Assistant :
Test task instances not queued when pool is full
|
2,865 | def ms_word():
# TODO : Take location input from the user
try:
speak("Enter the document's location - ")
location = input("Enter the document's location - ")
file_loc = doubleslash(location)
doc = docx.Document(file_loc)
fullText = []
for para in doc.paragraphs:
fullText.append(para.text)
#print(fullText)
doc_file = '\n'.join(fullText)
print(doc_file)
speak(doc_file)
except Exception as exp:
#print(exp)
print(f"ERROR - {exp}")
print(Fore.YELLOW + "I could'nt locate the file!\nIf you didn't specify the extension of the file, please specify it.")
return "None"
| [Print and speak out an MS Word (.docx) file at the specified path]
| 13 | 74 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def ms_word():
# TODO : Take location input from the user
try:
speak("Enter the document's location - ")
location = input("Enter the document's location - ")
file_loc = doubleslash(location)
doc = docx.Document(file_loc)
fullText = []
for para in doc.paragraphs:
fullText.append(para.text)
#print(fullText)
doc_file = '\n'.join(fullText)
print(doc_file)
speak(doc_file)
except Exception as exp:
#print(exp)
print(f"ERROR - {exp}")
print(Fore.YELLOW + "I could'nt locate the file!\nIf you didn't specify the extension of the file, please specify it.")
return "None"
```
###Assistant : [Print and speak out an MS Word (.docx) file at the specified path]
|
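The core of the routine above is the python-docx read loop; a minimal, non-interactive sketch (the file path is hypothetical):
```Python
import docx  # pip install python-docx

def read_docx_text(path):
    document = docx.Document(path)
    # Concatenate every paragraph's text into one string.
    return "\n".join(para.text for para in document.paragraphs)

# print(read_docx_text("notes.docx"))  # path is illustrative
```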
2,866 | def tree_data(G, root, attrs=None, ident="id", children="children"):
if G.number_of_nodes() != G.number_of_edges() + 1:
raise TypeError("G is not a tree.")
if not G.is_directed():
raise TypeError("G is not directed.")
if not nx.is_weakly_connected(G):
raise TypeError("G is not weakly connected.")
# NOTE: to be removed in 3.0
if attrs is not None:
import warnings
msg = (
"\nThe `attrs` keyword argument of tree_data is deprecated\n"
"and will be removed in networkx 3.0.\n"
"It is replaced with explicit `ident` and `children` "
"keyword arguments.\n"
"To make this warning go away and ensure usage is forward\n"
"compatible, replace `attrs` with `ident` and `children,\n"
"for example:\n\n"
" >>> tree_data(G, root, attrs={'id': 'foo', 'children': 'bar'})\n\n"
"should instead be written as\n\n"
" >>> tree_data(G, root, ident='foo', children='bar')\n\n"
"The default values of 'id' and 'children' will not change."
)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
ident = attrs["id"]
children = attrs["children"]
if ident == children:
raise nx.NetworkXError("The values for `id` and `children` must be different.")
| Returns data in tree format that is suitable for JSON serialization
and use in Javascript documents.
Parameters
----------
G : NetworkX graph
G must be an oriented tree
root : node
The root of the tree
attrs : dict
A dictionary that contains two keys 'id' and 'children'. The
corresponding values provide the attribute names for storing
NetworkX-internal graph data. The values should be unique. Default
value: :samp:`dict(id='id', children='children')`.
If some user-defined graph data use these attribute names as data keys,
they may be silently dropped.
.. deprecated:: 2.6
The `attrs` keyword argument is replaced by `ident` and `children`
and will be removed in networkx 3.0
ident : string
Attribute name for storing NetworkX-internal graph data. `ident` must
have a different value than `children`. The default is 'id'.
children : string
Attribute name for storing NetworkX-internal graph data. `children`
must have a different value than `ident`. The default is 'children'.
Returns
-------
data : dict
A dictionary with node-link formatted data.
Raises
------
NetworkXError
If `children` and `ident` attributes are identical.
Examples
--------
>>> from networkx.readwrite import json_graph
>>> G = nx.DiGraph([(1, 2)])
>>> data = json_graph.tree_data(G, root=1)
To serialize with json
>>> import json
>>> s = json.dumps(data)
Notes
-----
Node attributes are stored in this format but keys
for attributes must be strings if you want to serialize with JSON.
Graph and edge attributes are not stored.
See Also
--------
tree_graph, node_link_data, adjacency_data
| 235 | 151 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def tree_data(G, root, attrs=None, ident="id", children="children"):
if G.number_of_nodes() != G.number_of_edges() + 1:
raise TypeError("G is not a tree.")
if not G.is_directed():
raise TypeError("G is not directed.")
if not nx.is_weakly_connected(G):
raise TypeError("G is not weakly connected.")
# NOTE: to be removed in 3.0
if attrs is not None:
import warnings
msg = (
"\nThe `attrs` keyword argument of tree_data is deprecated\n"
"and will be removed in networkx 3.0.\n"
"It is replaced with explicit `ident` and `children` "
"keyword arguments.\n"
"To make this warning go away and ensure usage is forward\n"
"compatible, replace `attrs` with `ident` and `children,\n"
"for example:\n\n"
" >>> tree_data(G, root, attrs={'id': 'foo', 'children': 'bar'})\n\n"
"should instead be written as\n\n"
" >>> tree_data(G, root, ident='foo', children='bar')\n\n"
"The default values of 'id' and 'children' will not change."
)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
ident = attrs["id"]
children = attrs["children"]
if ident == children:
raise nx.NetworkXError("The values for `id` and `children` must be different.")
```
###Assistant : Returns data in tree format that is suitable for JSON serialization
and use in Javascript documents.
Parameters
----------
G : NetworkX graph
G must be an oriented tree
root : node
The root of the tree
attrs : dict
A dictionary that contains two keys 'id' and 'children'. The
corresponding values provide the attribute names for storing
NetworkX-internal graph data. The values should be unique. Default
value: :samp:`dict(id='id', children='children')`.
If some user-defined graph data use these attribute names as data keys,
they may be silently dropped.
.. deprecated:: 2.6
The `attrs` keyword argument is replaced by `ident` and `children`
and will be removed in networkx 3.0
ident : string
Attribute name for storing NetworkX-internal graph data. `ident` must
have a different value than `children`. The default is 'id'.
children : string
Attribute name for storing NetworkX-internal graph data. `children`
must have a different value than `ident`. The default is 'children'.
Returns
-------
data : dict
A dictionary with node-link formatted data.
Raises
------
NetworkXError
If `children` and `ident` attributes are identical.
Examples
--------
>>> from networkx.readwrite import json_graph
>>> G = nx.DiGraph([(1, 2)])
>>> data = json_graph.tree_data(G, root=1)
To serialize with json
>>> import json
>>> s = json.dumps(data)
Notes
-----
Node attributes are stored in this format but keys
for attributes must be strings if you want to serialize with JSON.
Graph and edge attributes are not stored.
See Also
--------
tree_graph, node_link_data, adjacency_data
|
2,867 | def test_mutating_input_arrays_y_and_z(fig_test, fig_ref):
ax1 = fig_test.add_subplot(111, projection='3d')
x = [1, 2, 3]
y = [0.0, 0.0, 0.0]
z = [0.0, 0.0, 0.0]
ax1.plot(x, y, z, 'o-')
ax1.set_ylim([0, 4])
ax1.set_zlim([0, 4])
fig_test.draw_without_rendering()
# mutate y,z to get a nontrivial line
y[:] = [1, 2, 3]
z[:] = [1, 2, 3]
# draw the same plot without mutating x and y
ax2 = fig_ref.add_subplot(111, projection='3d')
x = [1, 2, 3]
y = [0.0, 0.0, 0.0]
z = [0.0, 0.0, 0.0]
ax2.plot(x, y, z, 'o-')
ax2.set_ylim([0, 4])
ax2.set_zlim([0, 4])
fig_test.draw_without_rendering()
|
Test that the `z` axis does not get mutated
after a call to `Axes3D.plot`
test cases came from GH#8990
| 21 | 87 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_mutating_input_arrays_y_and_z(fig_test, fig_ref):
ax1 = fig_test.add_subplot(111, projection='3d')
x = [1, 2, 3]
y = [0.0, 0.0, 0.0]
z = [0.0, 0.0, 0.0]
ax1.plot(x, y, z, 'o-')
ax1.set_ylim([0, 4])
ax1.set_zlim([0, 4])
fig_test.draw_without_rendering()
# mutate y,z to get a nontrivial line
y[:] = [1, 2, 3]
z[:] = [1, 2, 3]
# draw the same plot without mutating x and y
ax2 = fig_ref.add_subplot(111, projection='3d')
x = [1, 2, 3]
y = [0.0, 0.0, 0.0]
z = [0.0, 0.0, 0.0]
ax2.plot(x, y, z, 'o-')
ax2.set_ylim([0, 4])
ax2.set_zlim([0, 4])
fig_test.draw_without_rendering()
```
###Assistant :
Test that the `z` axis does not get mutated
after a call to `Axes3D.plot`
test cases came from GH#8990
|
2,868 | def test_search_with_hyphen(self):
book = models.Book.objects.create(
title="Harry Potter and the Half-Blood Prince",
publication_date=date(2009, 7, 15),
number_of_pages=607,
)
index = self.backend.get_index_for_model(models.Book)
index.add_item(book)
index.refresh()
results = self.backend.search("Half-Blood", models.Book)
self.assertUnsortedListEqual(
[r.title for r in results],
[
"Harry Potter and the Half-Blood Prince",
],
)
|
This tests that punctuation characters are treated the same
way in both indexing and querying.
See: https://github.com/wagtail/wagtail/issues/937
| 17 | 40 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_search_with_hyphen(self):
book = models.Book.objects.create(
title="Harry Potter and the Half-Blood Prince",
publication_date=date(2009, 7, 15),
number_of_pages=607,
)
index = self.backend.get_index_for_model(models.Book)
index.add_item(book)
index.refresh()
results = self.backend.search("Half-Blood", models.Book)
self.assertUnsortedListEqual(
[r.title for r in results],
[
"Harry Potter and the Half-Blood Prince",
],
)
```
###Assistant :
This tests that punctuation characters are treated the same
way in both indexing and querying.
See: https://github.com/wagtail/wagtail/issues/937
|
2,869 | def resolve(self, s):
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
|
Resolve strings to objects using standard import and attribute
syntax.
| 10 | 58 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def resolve(self, s):
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
```
###Assistant :
Resolve strings to objects using standard import and attribute
syntax.
|
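A self-contained version of the same import-then-getattr resolution strategy, using `importlib` in place of the instance's importer:
```Python
import importlib

def resolve(dotted):
    parts = dotted.split(".")
    used = parts.pop(0)
    found = importlib.import_module(used)
    for frag in parts:
        used += "." + frag
        try:
            found = getattr(found, frag)
        except AttributeError:
            # The fragment may be a submodule rather than an attribute.
            importlib.import_module(used)
            found = getattr(found, frag)
    return found

print(resolve("os.path.join"))  # <function join ...>
```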
2,870 | def heldout_score(clf, X_test, y_test):
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = binomial_deviance(y_test, y_pred.ravel())
return score
| compute deviance scores on ``X_test`` and ``y_test``. | 7 | 19 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def heldout_score(clf, X_test, y_test):
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = binomial_deviance(y_test, y_pred.ravel())
return score
```
###Assistant : compute deviance scores on ``X_test`` and ``y_test``.
|
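The snippet above comes from a gradient-boosting example; here is a runnable variant that scores every boosting stage on held-out data, using log-loss as a stand-in for the example's `binomial_deviance` helper (which is not shown in the record):
```Python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=400, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = GradientBoostingClassifier(n_estimators=50, random_state=0).fit(X_train, y_train)

# One score per boosting stage, mirroring staged_decision_function above.
score = np.zeros(clf.n_estimators_, dtype=np.float64)
for i, proba in enumerate(clf.staged_predict_proba(X_test)):
    score[i] = log_loss(y_test, proba)
print(score[:3], score[-1])  # deviance typically drops as stages accumulate
```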
2,871 | def test_product_types_query_ids_not_exists(user_api_client, category):
query = NOT_EXISTS_IDS_COLLECTIONS_QUERY
variables = {"filter": {"ids": ["fTEJRuFHU6fd2RU=", "2XwnQNNhwCdEjhP="]}}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response, ignore_errors=True)
message_error = '{"ids": [{"message": "Invalid ID specified.", "code": ""}]}'
assert len(content["errors"]) == 1
assert content["errors"][0]["message"] == message_error
assert content["data"]["productTypes"] is None
QUERY_FILTER_PRODUCT_TYPES =
@pytest.mark.parametrize(
"search, expected_names",
(
("", ["The best juices", "The best beers", "The worst beers"]),
("best", ["The best juices", "The best beers"]),
("worst", ["The worst beers"]),
("average", []),
),
) |
query($filters: ProductTypeFilterInput) {
productTypes(first: 10, filter: $filters) {
edges {
node {
name
}
}
}
}
| 17 | 72 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_product_types_query_ids_not_exists(user_api_client, category):
query = NOT_EXISTS_IDS_COLLECTIONS_QUERY
variables = {"filter": {"ids": ["fTEJRuFHU6fd2RU=", "2XwnQNNhwCdEjhP="]}}
response = user_api_client.post_graphql(query, variables)
content = get_graphql_content(response, ignore_errors=True)
message_error = '{"ids": [{"message": "Invalid ID specified.", "code": ""}]}'
assert len(content["errors"]) == 1
assert content["errors"][0]["message"] == message_error
assert content["data"]["productTypes"] is None
QUERY_FILTER_PRODUCT_TYPES =
@pytest.mark.parametrize(
"search, expected_names",
(
("", ["The best juices", "The best beers", "The worst beers"]),
("best", ["The best juices", "The best beers"]),
("worst", ["The worst beers"]),
("average", []),
),
)
```
###Assistant :
query($filters: ProductTypeFilterInput) {
productTypes(first: 10, filter: $filters) {
edges {
node {
name
}
}
}
}
|
2,872 | def _generate_examples(self, filepath):
for idx, line in enumerate(open(filepath, "r", encoding="utf-8")):
if idx == 0:
continue # skip header
line = line.strip()
split_line = line.split("\t")
# Examples not marked with a three out of five consensus are marked with
# "-" and should not be used in standard evaluations.
if split_line[0] == "-":
continue
# Works for both splits even though dev has some extra human labels.
yield idx, {
"premise": split_line[5],
"hypothesis": split_line[6],
"label": split_line[0],
"binary_parse_premise": split_line[1],
"binary_parse_hypothesis": split_line[2],
"parse_premise": split_line[3],
"parse_hypothesis": split_line[4],
"heuristic": split_line[8],
"subcase": split_line[9],
"template": split_line[10],
}
| Generate hans examples.
Args:
filepath: a string
Yields:
dictionaries containing "premise", "hypothesis" and "label" strings
| 15 | 90 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _generate_examples(self, filepath):
for idx, line in enumerate(open(filepath, "r", encoding="utf-8")):
if idx == 0:
continue # skip header
line = line.strip()
split_line = line.split("\t")
# Examples not marked with a three out of five consensus are marked with
# "-" and should not be used in standard evaluations.
if split_line[0] == "-":
continue
# Works for both splits even though dev has some extra human labels.
yield idx, {
"premise": split_line[5],
"hypothesis": split_line[6],
"label": split_line[0],
"binary_parse_premise": split_line[1],
"binary_parse_hypothesis": split_line[2],
"parse_premise": split_line[3],
"parse_hypothesis": split_line[4],
"heuristic": split_line[8],
"subcase": split_line[9],
"template": split_line[10],
}
```
###Assistant : Generate hans examples.
Args:
filepath: a string
Yields:
dictionaries containing "premise", "hypothesis" and "label" strings
|
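The same skip-header / skip-"-" pattern can be shown on an in-memory TSV; the field positions here are simplified to three columns rather than the eleven used above.
```Python
import csv
import io

TSV = (
    "label\tpremise\thypothesis\n"
    "-\tno consensus\tskipped\n"
    "entailment\tA dog runs.\tAn animal moves.\n"
)

def generate_examples(text):
    reader = csv.reader(io.StringIO(text), delimiter="\t")
    next(reader)  # skip header
    for idx, row in enumerate(reader, start=1):
        if row[0] == "-":  # unlabelled examples are dropped
            continue
        yield idx, {"label": row[0], "premise": row[1], "hypothesis": row[2]}

print(list(generate_examples(TSV)))
```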
2,873 | def _light_internal_color_mode(self) -> str:
if (color_mode := self.color_mode) is None:
# Backwards compatibility for color_mode added in 2021.4
# Add warning in 2021.6, remove in 2021.10
supported = self._light_internal_supported_color_modes
if ColorMode.HS in supported and self.hs_color is not None:
return ColorMode.HS
if ColorMode.COLOR_TEMP in supported and self.color_temp_kelvin is not None:
return ColorMode.COLOR_TEMP
if ColorMode.BRIGHTNESS in supported and self.brightness is not None:
return ColorMode.BRIGHTNESS
if ColorMode.ONOFF in supported:
return ColorMode.ONOFF
return ColorMode.UNKNOWN
return color_mode
| Return the color mode of the light with backwards compatibility. | 10 | 72 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _light_internal_color_mode(self) -> str:
if (color_mode := self.color_mode) is None:
# Backwards compatibility for color_mode added in 2021.4
# Add warning in 2021.6, remove in 2021.10
supported = self._light_internal_supported_color_modes
if ColorMode.HS in supported and self.hs_color is not None:
return ColorMode.HS
if ColorMode.COLOR_TEMP in supported and self.color_temp_kelvin is not None:
return ColorMode.COLOR_TEMP
if ColorMode.BRIGHTNESS in supported and self.brightness is not None:
return ColorMode.BRIGHTNESS
if ColorMode.ONOFF in supported:
return ColorMode.ONOFF
return ColorMode.UNKNOWN
return color_mode
```
###Assistant : Return the color mode of the light with backwards compatibility.
|
2,874 | def kde(self, bw_method=None, ind=None, **kwargs):
return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)
density = kde
|
Generate Kernel Density Estimate plot using Gaussian kernels.
In statistics, `kernel density estimation`_ (KDE) is a non-parametric
way to estimate the probability density function (PDF) of a random
variable. This function uses Gaussian kernels and includes automatic
bandwidth determination.
.. _kernel density estimation:
https://en.wikipedia.org/wiki/Kernel_density_estimation
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable.
If None (default), 'scott' is used.
See :class:`scipy.stats.gaussian_kde` for more information.
ind : NumPy array or int, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray of them
See Also
--------
scipy.stats.gaussian_kde : Representation of a kernel-density
estimate using Gaussian kernels. This is the function used
internally to estimate the PDF.
Examples
--------
Given a Series of points randomly sampled from an unknown
distribution, estimate its PDF using KDE with automatic
bandwidth determination and plot the results, evaluating them at
1000 equally spaced points (default):
.. plot::
:context: close-figs
>>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5])
>>> ax = s.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5])
For DataFrame, it works in the same way:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],
... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],
... })
>>> ax = df.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6])
| 399 | 13 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def kde(self, bw_method=None, ind=None, **kwargs):
return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)
density = kde
```
###Assistant :
Generate Kernel Density Estimate plot using Gaussian kernels.
In statistics, `kernel density estimation`_ (KDE) is a non-parametric
way to estimate the probability density function (PDF) of a random
variable. This function uses Gaussian kernels and includes automatic
bandwidth determination.
.. _kernel density estimation:
https://en.wikipedia.org/wiki/Kernel_density_estimation
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable.
If None (default), 'scott' is used.
See :class:`scipy.stats.gaussian_kde` for more information.
ind : NumPy array or int, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray of them
See Also
--------
scipy.stats.gaussian_kde : Representation of a kernel-density
estimate using Gaussian kernels. This is the function used
internally to estimate the PDF.
Examples
--------
Given a Series of points randomly sampled from an unknown
distribution, estimate its PDF using KDE with automatic
bandwidth determination and plot the results, evaluating them at
1000 equally spaced points (default):
.. plot::
:context: close-figs
>>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5])
>>> ax = s.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5])
For DataFrame, it works in the same way:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],
... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],
... })
>>> ax = df.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6])
|
2,875 | def subscription_invoice_requested_webhook(subscription_webhook):
return subscription_webhook(
INVOICE_REQUESTED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.INVOICE_REQUESTED
)
INVOICE_DELETED_SUBSCRIPTION_QUERY =
@pytest.fixture |
subscription{
event{
...on InvoiceDeleted{
invoice{
id
}
}
}
}
| 10 | 10 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def subscription_invoice_requested_webhook(subscription_webhook):
return subscription_webhook(
INVOICE_REQUESTED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.INVOICE_REQUESTED
)
INVOICE_DELETED_SUBSCRIPTION_QUERY =
@pytest.fixture
```
###Assistant :
subscription{
event{
...on InvoiceDeleted{
invoice{
id
}
}
}
}
|
2,876 | def test_prompt_from_examples_valid() -> None:
template =
input_variables = ["question"]
example_separator = "\n\n"
prefix =
suffix =
examples = [
,
,
]
prompt_from_examples = PromptTemplate.from_examples(
examples,
suffix,
input_variables,
example_separator=example_separator,
prefix=prefix,
)
prompt_from_template = PromptTemplate(
input_variables=input_variables, template=template
)
assert prompt_from_examples.template == prompt_from_template.template
assert prompt_from_examples.input_variables == prompt_from_template.input_variables
| Test prompt can be successfully constructed from examples.Test Prompt:
Question: who are you?
Answer: foo
Question: what are you?
Answer: bar
Question: {question}
Answer:Test Prompt:Question: {question}\nAnswer:Question: who are you?\nAnswer: fooQuestion: what are you?\nAnswer: bar | 34 | 45 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_prompt_from_examples_valid() -> None:
template =
input_variables = ["question"]
example_separator = "\n\n"
prefix =
suffix =
examples = [
,
,
]
prompt_from_examples = PromptTemplate.from_examples(
examples,
suffix,
input_variables,
example_separator=example_separator,
prefix=prefix,
)
prompt_from_template = PromptTemplate(
input_variables=input_variables, template=template
)
assert prompt_from_examples.template == prompt_from_template.template
assert prompt_from_examples.input_variables == prompt_from_template.input_variables
```
###Assistant : Test prompt can be successfully constructed from examples.Test Prompt:
Question: who are you?
Answer: foo
Question: what are you?
Answer: bar
Question: {question}
Answer:Test Prompt:Question: {question}\nAnswer:Question: who are you?\nAnswer: fooQuestion: what are you?\nAnswer: bar
|
2,877 | def compatible_platforms(provided, required):
if provided is None or required is None or provided == required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
| Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
| 33 | 163 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def compatible_platforms(provided, required):
if provided is None or required is None or provided == required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
```
###Assistant : Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
|
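A simplified, self-contained version of the macOS branch; the regular expression is an assumption standing in for `macosVersionString`, which is not shown in the record.
```Python
import re

# Assumed shape of the platform tag, e.g. "macosx-10.4-ppc".
_MACOS = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")

def mac_compatible(provided, required):
    p, r = _MACOS.match(provided), _MACOS.match(required)
    if not (p and r):
        return provided == required
    # Same major version and machine type, and the required minor
    # update must be >= the provided one.
    return (p.group(1) == r.group(1)
            and p.group(3) == r.group(3)
            and int(p.group(2)) <= int(r.group(2)))

print(mac_compatible("macosx-10.3-ppc", "macosx-10.4-ppc"))  # True
print(mac_compatible("macosx-10.5-ppc", "macosx-10.4-ppc"))  # False
```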
2,878 | def find_negative_cycle(G, source, weight="weight"):
weight = _weight_function(G, weight)
pred = {source: []}
v = _inner_bellman_ford(G, [source], weight, pred=pred)
if v is None:
raise nx.NetworkXError("No negative cycles detected.")
# negative cycle detected... find it
neg_cycle = []
stack = [(v, list(pred[v]))]
seen = {v}
while stack:
node, preds = stack[-1]
if v in preds:
# found the cycle
neg_cycle.extend([node, v])
neg_cycle = list(reversed(neg_cycle))
return neg_cycle
if preds:
nbr = preds.pop()
if nbr not in seen:
stack.append((nbr, list(pred[nbr])))
neg_cycle.append(node)
seen.add(nbr)
else:
stack.pop()
if neg_cycle:
neg_cycle.pop()
else:
if v in G[v] and weight(G, v, v) < 0:
return [v, v]
# should not reach here
raise nx.NetworkXError("Negative cycle is detected but not found")
# should not get here...
msg = "negative cycle detected but not identified"
raise nx.NetworkXUnbounded(msg)
| Returns a cycle with negative total weight if it exists.
Bellman-Ford is used to find shortest_paths. That algorithm
stops if there exists a negative cycle. This algorithm
picks up from there and returns the found negative cycle.
The cycle consists of a list of nodes in the cycle order. The last
node equals the first to make it a cycle.
You can look up the edge weights in the original graph. In the case
of multigraphs the relevant edge is the minimal weight edge between
the nodes in the 2-tuple.
If the graph has no negative cycle, a NetworkXError is raised.
Parameters
----------
G : NetworkX graph
source: node label
The search for the negative cycle will start from this node.
weight : string or function
If this is a string, then edge weights will be accessed via the
edge attribute with this key (that is, the weight of the edge
joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
such edge attribute exists, the weight of the edge is assumed to
be one.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly three
positional arguments: the two endpoints of an edge and the
dictionary of edge attributes for that edge. The function must
return a number.
Examples
--------
>>> G = nx.DiGraph()
>>> G.add_weighted_edges_from([(0, 1, 2), (1, 2, 2), (2, 0, 1), (1, 4, 2), (4, 0, -5)])
>>> nx.find_negative_cycle(G, 0)
[4, 0, 1, 4]
Returns
-------
cycle : list
A list of nodes in the order of the cycle found. The last node
equals the first to indicate a cycle.
Raises
------
NetworkXError
If no negative cycle is found.
| 285 | 125 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def find_negative_cycle(G, source, weight="weight"):
weight = _weight_function(G, weight)
pred = {source: []}
v = _inner_bellman_ford(G, [source], weight, pred=pred)
if v is None:
raise nx.NetworkXError("No negative cycles detected.")
# negative cycle detected... find it
neg_cycle = []
stack = [(v, list(pred[v]))]
seen = {v}
while stack:
node, preds = stack[-1]
if v in preds:
# found the cycle
neg_cycle.extend([node, v])
neg_cycle = list(reversed(neg_cycle))
return neg_cycle
if preds:
nbr = preds.pop()
if nbr not in seen:
stack.append((nbr, list(pred[nbr])))
neg_cycle.append(node)
seen.add(nbr)
else:
stack.pop()
if neg_cycle:
neg_cycle.pop()
else:
if v in G[v] and weight(G, v, v) < 0:
return [v, v]
# should not reach here
raise nx.NetworkXError("Negative cycle is detected but not found")
# should not get here...
msg = "negative cycle detected but not identified"
raise nx.NetworkXUnbounded(msg)
```
###Assistant : Returns a cycle with negative total weight if it exists.
Bellman-Ford is used to find shortest_paths. That algorithm
stops if there exists a negative cycle. This algorithm
picks up from there and returns the found negative cycle.
The cycle consists of a list of nodes in the cycle order. The last
node equals the first to make it a cycle.
You can look up the edge weights in the original graph. In the case
of multigraphs the relevant edge is the minimal weight edge between
the nodes in the 2-tuple.
If the graph has no negative cycle, a NetworkXError is raised.
Parameters
----------
G : NetworkX graph
source: node label
The search for the negative cycle will start from this node.
weight : string or function
If this is a string, then edge weights will be accessed via the
edge attribute with this key (that is, the weight of the edge
joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
such edge attribute exists, the weight of the edge is assumed to
be one.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly three
positional arguments: the two endpoints of an edge and the
dictionary of edge attributes for that edge. The function must
return a number.
Examples
--------
>>> G = nx.DiGraph()
>>> G.add_weighted_edges_from([(0, 1, 2), (1, 2, 2), (2, 0, 1), (1, 4, 2), (4, 0, -5)])
>>> nx.find_negative_cycle(G, 0)
[4, 0, 1, 4]
Returns
-------
cycle : list
A list of nodes in the order of the cycle found. The last node
equals the first to indicate a cycle.
Raises
------
NetworkXError
If no negative cycle is found.
|
2,879 | def expand(image, border=0, fill=0):
left, top, right, bottom = _border(border)
width = left + image.size[0] + right
height = top + image.size[1] + bottom
color = _color(fill, image.mode)
if image.mode == "P" and image.palette:
palette = ImagePalette.ImagePalette(palette=image.getpalette())
if isinstance(color, tuple):
color = palette.getcolor(color)
else:
palette = None
out = Image.new(image.mode, (width, height), color)
if palette:
out.putpalette(palette.palette)
out.paste(image, (left, top))
return out
|
Add border to the image
:param image: The image to expand.
:param border: Border width, in pixels.
:param fill: Pixel fill value (a color value). Default is 0 (black).
:return: An image.
| 32 | 61 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def expand(image, border=0, fill=0):
left, top, right, bottom = _border(border)
width = left + image.size[0] + right
height = top + image.size[1] + bottom
color = _color(fill, image.mode)
if image.mode == "P" and image.palette:
palette = ImagePalette.ImagePalette(palette=image.getpalette())
if isinstance(color, tuple):
color = palette.getcolor(color)
else:
palette = None
out = Image.new(image.mode, (width, height), color)
if palette:
out.putpalette(palette.palette)
out.paste(image, (left, top))
return out
```
###Assistant :
Add border to the image
:param image: The image to expand.
:param border: Border width, in pixels.
:param fill: Pixel fill value (a color value). Default is 0 (black).
:return: An image.
|
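The function above matches Pillow's `ImageOps.expand` (an assumption based on the signature), so it can be exercised via Pillow directly:
```Python
from PIL import Image, ImageOps

im = Image.new("RGB", (64, 64), "white")
bordered = ImageOps.expand(im, border=10, fill="red")  # 10 px red border all round
print(bordered.size)  # (84, 84)
```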
2,880 | def test_digest_enabled(self, digests, mock_func):
backend = RedisBackend()
digests.digest = backend.digest
digests.enabled.return_value = True
rule = Rule.objects.create(project=self.project, label="my rule")
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
key = f"mail:p:{self.project.id}"
backend.add(key, event_to_record(event, [rule]), increment_delay=0, maximum_delay=0)
with self.tasks():
deliver_digest(key)
attachment, text = get_attachment()
assert attachment["title"] == "Hello world"
assert attachment["text"] == ""
|
Test that with digests enabled and Slack notification settings
(but not email settings) configured, we send a Slack notification
| 18 | 54 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_digest_enabled(self, digests, mock_func):
backend = RedisBackend()
digests.digest = backend.digest
digests.enabled.return_value = True
rule = Rule.objects.create(project=self.project, label="my rule")
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
key = f"mail:p:{self.project.id}"
backend.add(key, event_to_record(event, [rule]), increment_delay=0, maximum_delay=0)
with self.tasks():
deliver_digest(key)
attachment, text = get_attachment()
assert attachment["title"] == "Hello world"
assert attachment["text"] == ""
```
###Assistant :
Test that with digests enabled, but Slack notification settings
(and not email settings), we send a Slack notification
|
2,881 | def pole_zero_numerical_data(system):
_check_system(system)
system = system.doit() # Get the equivalent TransferFunction object.
num_poly = Poly(system.num, system.var).all_coeffs()
den_poly = Poly(system.den, system.var).all_coeffs()
num_poly = np.array(num_poly, dtype=np.complex128)
den_poly = np.array(den_poly, dtype=np.complex128)
zeros = np.roots(num_poly)
poles = np.roots(den_poly)
return zeros, poles
|
Returns the numerical data of poles and zeros of the system.
It is internally used by ``pole_zero_plot`` to get the data
for plotting poles and zeros. Users can use this data to further
analyse the dynamics of the system or plot using a different
backend/plotting-module.
Parameters
==========
system : SISOLinearTimeInvariant
The system for which the pole-zero data is to be computed.
Returns
=======
tuple : (zeros, poles)
zeros = Zeros of the system. NumPy array of complex numbers.
poles = Poles of the system. NumPy array of complex numbers.
Raises
======
NotImplementedError
When a SISO LTI system is not passed.
When time delay terms are present in the system.
ValueError
When more than one free symbol is present in the system.
The only variable in the transfer function should be
the variable of the Laplace transform.
Examples
========
>>> from sympy.abc import s
>>> from sympy.physics.control.lti import TransferFunction
>>> from sympy.physics.control.control_plots import pole_zero_numerical_data
>>> tf1 = TransferFunction(s**2 + 1, s**4 + 4*s**3 + 6*s**2 + 5*s + 2, s)
>>> pole_zero_numerical_data(tf1) # doctest: +SKIP
([-0.+1.j 0.-1.j], [-2. +0.j -0.5+0.8660254j -0.5-0.8660254j -1. +0.j ])
See Also
========
pole_zero_plot
| 187 | 37 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def pole_zero_numerical_data(system):
_check_system(system)
system = system.doit() # Get the equivalent TransferFunction object.
num_poly = Poly(system.num, system.var).all_coeffs()
den_poly = Poly(system.den, system.var).all_coeffs()
num_poly = np.array(num_poly, dtype=np.complex128)
den_poly = np.array(den_poly, dtype=np.complex128)
zeros = np.roots(num_poly)
poles = np.roots(den_poly)
return zeros, poles
```
###Assistant :
Returns the numerical data of poles and zeros of the system.
It is internally used by ``pole_zero_plot`` to get the data
for plotting poles and zeros. Users can use this data to further
analyse the dynamics of the system or plot using a different
backend/plotting-module.
Parameters
==========
system : SISOLinearTimeInvariant
The system for which the pole-zero data is to be computed.
Returns
=======
tuple : (zeros, poles)
zeros = Zeros of the system. NumPy array of complex numbers.
poles = Poles of the system. NumPy array of complex numbers.
Raises
======
NotImplementedError
When a SISO LTI system is not passed.
When time delay terms are present in the system.
ValueError
When more than one free symbol is present in the system.
The only variable in the transfer function should be
the variable of the Laplace transform.
Examples
========
>>> from sympy.abc import s
>>> from sympy.physics.control.lti import TransferFunction
>>> from sympy.physics.control.control_plots import pole_zero_numerical_data
>>> tf1 = TransferFunction(s**2 + 1, s**4 + 4*s**3 + 6*s**2 + 5*s + 2, s)
>>> pole_zero_numerical_data(tf1) # doctest: +SKIP
([-0.+1.j 0.-1.j], [-2. +0.j -0.5+0.8660254j -0.5-0.8660254j -1. +0.j ])
See Also
========
pole_zero_plot
|
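The docstring above notes that the returned arrays can be plotted with a different backend. A hedged sketch doing exactly that with matplotlib (assumed installed; SymPy's own `pole_zero_plot` does this for you):
```Python
import matplotlib.pyplot as plt
from sympy.abc import s
from sympy.physics.control.lti import TransferFunction
from sympy.physics.control.control_plots import pole_zero_numerical_data

tf = TransferFunction(s**2 + 1, s**4 + 4*s**3 + 6*s**2 + 5*s + 2, s)
zeros, poles = pole_zero_numerical_data(tf)
plt.scatter(poles.real, poles.imag, marker="x", label="poles")
plt.scatter(zeros.real, zeros.imag, marker="o", label="zeros")
plt.legend()
plt.show()
```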
2,882 | def get_versions():
commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version']
return tuple([_find_exe_version(cmd) for cmd in commands])
| Try to find out the versions of gcc, ld and dllwrap.
If not possible it returns None for it.
| 19 | 16 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_versions():
commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version']
return tuple([_find_exe_version(cmd) for cmd in commands])
```
###Assistant : Try to find out the versions of gcc, ld and dllwrap.
If not possible it returns None for it.
|
2,883 | def __virtual__():
if salt.utils.napalm.virtual(__opts__, __virtualname__, __file__):
return __virtualname__
else:
err_msg = "NAPALM is not installed."
log.error("Unable to load %s beacon: %s", __virtualname__, err_msg)
return False, err_msg
|
This beacon can only work when running under a regular or a proxy minion, managed through napalm.
| 17 | 26 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def __virtual__():
if salt.utils.napalm.virtual(__opts__, __virtualname__, __file__):
return __virtualname__
else:
err_msg = "NAPALM is not installed."
log.error("Unable to load %s beacon: %s", __virtualname__, err_msg)
return False, err_msg
```
###Assistant :
This beacon can only work when running under a regular or a proxy minion, managed through napalm.
|
2,884 | def parse_prompt_attention(text):
res = []
round_brackets = []
square_brackets = []
round_bracket_multiplier = 1.1
square_bracket_multiplier = 1 / 1.1
|
Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
Accepted tokens are:
(abc) - increases attention to abc by a multiplier of 1.1
(abc:3.12) - increases attention to abc by a multiplier of 3.12
[abc] - decreases attention to abc by a multiplier of 1.1
\( - literal character '('
\[ - literal character '['
\) - literal character ')'
\] - literal character ']'
\\ - literal character '\'
anything else - just text
>>> parse_prompt_attention('normal text')
[['normal text', 1.0]]
>>> parse_prompt_attention('an (important) word')
[['an ', 1.0], ['important', 1.1], [' word', 1.0]]
>>> parse_prompt_attention('(unbalanced')
[['unbalanced', 1.1]]
>>> parse_prompt_attention('\(literal\]')
[['(literal]', 1.0]]
>>> parse_prompt_attention('(unnecessary)(parens)')
[['unnecessaryparens', 1.1]]
>>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
[['a ', 1.0],
['house', 1.5730000000000004],
[' ', 1.1],
['on', 1.0],
[' a ', 1.1],
['hill', 0.55],
[', sun, ', 1.1],
['sky', 1.4641000000000006],
['.', 1.1]]
| 145 | 19 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def parse_prompt_attention(text):
res = []
round_brackets = []
square_brackets = []
round_bracket_multiplier = 1.1
square_bracket_multiplier = 1 / 1.1
```
###Assistant :
Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
Accepted tokens are:
(abc) - increases attention to abc by a multiplier of 1.1
(abc:3.12) - increases attention to abc by a multiplier of 3.12
[abc] - decreases attention to abc by a multiplier of 1.1
\( - literal character '('
\[ - literal character '['
\) - literal character ')'
\] - literal character ']'
\\ - literal character '\'
anything else - just text
>>> parse_prompt_attention('normal text')
[['normal text', 1.0]]
>>> parse_prompt_attention('an (important) word')
[['an ', 1.0], ['important', 1.1], [' word', 1.0]]
>>> parse_prompt_attention('(unbalanced')
[['unbalanced', 1.1]]
>>> parse_prompt_attention('\(literal\]')
[['(literal]', 1.0]]
>>> parse_prompt_attention('(unnecessary)(parens)')
[['unnecessaryparens', 1.1]]
>>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
[['a ', 1.0],
['house', 1.5730000000000004],
[' ', 1.1],
['on', 1.0],
[' a ', 1.1],
['hill', 0.55],
[', sun, ', 1.1],
['sky', 1.4641000000000006],
['.', 1.1]]
|
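The `code` field in the row above is truncated to the setup lines. As a rough illustration of the bracket-weighting idea only, here is a simplified, hypothetical re-implementation that ignores escapes, the `(abc:3.12)` syntax, and the merging of adjacent chunks:
```Python
def attention_weights(text, up=1.1, down=1 / 1.1):
    # Walk the string once, scaling the running weight at each bracket.
    res, buf, weight = [], "", 1.0
    for ch in text:
        if ch in "()[]":
            if buf:
                res.append([buf, weight])
                buf = ""
            if ch == "(":
                weight *= up
            elif ch == "[":
                weight *= down
            elif ch == ")":
                weight /= up
            else:  # "]"
                weight /= down
        else:
            buf += ch
    if buf:
        res.append([buf, weight])
    return res

print(attention_weights("an (important) word"))
# [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
```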
2,885 | def get_dependencies(self, candidate):
# type: (Candidate) -> list[Candidate]
r
# FIXME: If there's several galaxy servers set, there may be a
# FIXME: situation when the metadata of the same collection
# FIXME: differs. So how do we resolve this case? Priority?
# FIXME: Taking into account a pinned hash? Exploding on
# FIXME: any differences?
# NOTE: The underlying implementation currently uses first found
req_map = self._api_proxy.get_collection_dependencies(candidate)
# NOTE: This guard expression MUST perform an early exit only
# NOTE: after the `get_collection_dependencies()` call because
# NOTE: internally it populates the artifact URL of the candidate,
# NOTE: its SHA hash and the Galaxy API token. These are still
# NOTE: necessary with `--no-deps` because even with the disabled
# NOTE: dependency resolution the outer layer will still need to
# NOTE: know how to download and validate the artifact.
#
# NOTE: Virtual candidates should always return dependencies
# NOTE: because they are ephemeral and non-installable.
if not self._with_deps and not candidate.is_virtual:
return []
return [
self._make_req_from_dict({'name': dep_name, 'version': dep_req})
for dep_name, dep_req in req_map.items()
]
| Get direct dependencies of a candidate.
:returns: A collection of requirements that `candidate` \
specifies as its dependencies.
| 18 | 178 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_dependencies(self, candidate):
# type: (Candidate) -> list[Candidate]
r
# FIXME: If there's several galaxy servers set, there may be a
# FIXME: situation when the metadata of the same collection
# FIXME: differs. So how do we resolve this case? Priority?
# FIXME: Taking into account a pinned hash? Exploding on
# FIXME: any differences?
# NOTE: The underlying implementation currently uses first found
req_map = self._api_proxy.get_collection_dependencies(candidate)
# NOTE: This guard expression MUST perform an early exit only
# NOTE: after the `get_collection_dependencies()` call because
# NOTE: internally it populates the artifact URL of the candidate,
# NOTE: its SHA hash and the Galaxy API token. These are still
# NOTE: necessary with `--no-deps` because even with the disabled
# NOTE: dependency resolution the outer layer will still need to
# NOTE: know how to download and validate the artifact.
#
# NOTE: Virtual candidates should always return dependencies
# NOTE: because they are ephemeral and non-installable.
if not self._with_deps and not candidate.is_virtual:
return []
return [
self._make_req_from_dict({'name': dep_name, 'version': dep_req})
for dep_name, dep_req in req_map.items()
]
```
###Assistant : Get direct dependencies of a candidate.
:returns: A collection of requirements that `candidate` \
specifies as its dependencies.
|
2,886 | def test_set_displayname(self) -> None:
# Modify user
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content={"displayname": "foobar"},
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("foobar", channel.json_body["displayname"])
# Get user
channel = self.make_request(
"GET",
self.url_other_user,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("foobar", channel.json_body["displayname"])
|
Test setting the displayname of another user.
| 7 | 40 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_set_displayname(self) -> None:
# Modify user
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content={"displayname": "foobar"},
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("foobar", channel.json_body["displayname"])
# Get user
channel = self.make_request(
"GET",
self.url_other_user,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("foobar", channel.json_body["displayname"])
```
###Assistant :
Test setting the displayname of another user.
|
2,887 | def on_ssl_errors(self, reply, qt_errors):
errors = certificateerror.CertificateErrorWrapper(reply, qt_errors)
log.network.debug("Certificate errors: {!r}".format(errors))
try:
host_tpl: Optional[urlutils.HostTupleType] = urlutils.host_tuple(
reply.url())
except ValueError:
host_tpl = None
is_accepted = False
is_rejected = False
else:
assert host_tpl is not None
is_accepted = errors in self._accepted_ssl_errors[host_tpl]
is_rejected = errors in self._rejected_ssl_errors[host_tpl]
log.network.debug("Already accepted: {} / "
"rejected {}".format(is_accepted, is_rejected))
if is_rejected:
return
elif is_accepted:
reply.ignoreSslErrors()
return
abort_on = self._get_abort_signals(reply)
tab = self._get_tab()
first_party_url = QUrl() if tab is None else tab.data.last_navigation.url
shared.handle_certificate_error(
request_url=reply.url(),
first_party_url=first_party_url,
error=errors,
abort_on=abort_on,
)
if errors.certificate_was_accepted():
if host_tpl is not None:
self._accepted_ssl_errors[host_tpl].add(errors)
elif host_tpl is not None:
self._rejected_ssl_errors[host_tpl].add(errors)
| Decide if SSL errors should be ignored or not.
This slot is called on SSL/TLS errors by the self.sslErrors signal.
Args:
reply: The QNetworkReply that is encountering the errors.
qt_errors: A list of errors.
| 34 | 94 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def on_ssl_errors(self, reply, qt_errors):
errors = certificateerror.CertificateErrorWrapper(reply, qt_errors)
log.network.debug("Certificate errors: {!r}".format(errors))
try:
host_tpl: Optional[urlutils.HostTupleType] = urlutils.host_tuple(
reply.url())
except ValueError:
host_tpl = None
is_accepted = False
is_rejected = False
else:
assert host_tpl is not None
is_accepted = errors in self._accepted_ssl_errors[host_tpl]
is_rejected = errors in self._rejected_ssl_errors[host_tpl]
log.network.debug("Already accepted: {} / "
"rejected {}".format(is_accepted, is_rejected))
if is_rejected:
return
elif is_accepted:
reply.ignoreSslErrors()
return
abort_on = self._get_abort_signals(reply)
tab = self._get_tab()
first_party_url = QUrl() if tab is None else tab.data.last_navigation.url
shared.handle_certificate_error(
request_url=reply.url(),
first_party_url=first_party_url,
error=errors,
abort_on=abort_on,
)
if errors.certificate_was_accepted():
if host_tpl is not None:
self._accepted_ssl_errors[host_tpl].add(errors)
elif host_tpl is not None:
self._rejected_ssl_errors[host_tpl].add(errors)
```
###Assistant : Decide if SSL errors should be ignored or not.
This slot is called on SSL/TLS errors by the self.sslErrors signal.
Args:
reply: The QNetworkReply that is encountering the errors.
qt_errors: A list of errors.
|
2,888 | def render_pep440(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += f"{pieces['distance']}.g{pieces['short']}"
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = f"0+untagged.{pieces['distance']}.g{pieces['short']}"
if pieces["dirty"]:
rendered += ".dirty"
return rendered
| Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
| 37 | 36 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def render_pep440(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += f"{pieces['distance']}.g{pieces['short']}"
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = f"0+untagged.{pieces['distance']}.g{pieces['short']}"
if pieces["dirty"]:
rendered += ".dirty"
return rendered
```
###Assistant : Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
|
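A hedged demo with hand-built `pieces` dicts in Versioneer's shape; it assumes `render_pep440` and its `plus_or_dot` helper are in scope, and the values are made up:
```Python
pieces = {"closest-tag": "1.2.3", "distance": 4, "short": "abc1234", "dirty": True}
print(render_pep440(pieces))   # 1.2.3+4.gabc1234.dirty

pieces = {"closest-tag": None, "distance": 7, "short": "deadbee", "dirty": False}
print(render_pep440(pieces))   # 0+untagged.7.gdeadbee
```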
2,889 | async def test_aset_many(self):
self.assertEqual(await cache.aset_many({"a": 1, "b": 2}), [])
self.assertEqual(
await cache.aset_many({"a": 1, "b": 2}, timeout=2, version="1"),
[],
)
| aset_many() does nothing for the dummy cache backend. | 8 | 19 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def test_aset_many(self):
self.assertEqual(await cache.aset_many({"a": 1, "b": 2}), [])
self.assertEqual(
await cache.aset_many({"a": 1, "b": 2}, timeout=2, version="1"),
[],
)
```
###Assistant : aset_many() does nothing for the dummy cache backend.
|
2,890 | def _user_input(self, input_str):
os.environ["COMP_WORDS"] = input_str
idx = len(input_str.split(" ")) - 1 # Index of the last word
comp_cword = idx + 1 if input_str.endswith(" ") else idx
os.environ["COMP_CWORD"] = str(comp_cword)
sys.argv = input_str.split()
|
Set the environment and the list of command line arguments.
This sets the bash variables $COMP_WORDS and $COMP_CWORD. The former is
an array consisting of the individual words in the current command
line, the latter is the index of the current cursor position, so in
case a word is completed and the cursor is placed after a whitespace,
$COMP_CWORD must be incremented by 1:
* 'django-admin start' -> COMP_CWORD=1
* 'django-admin startproject' -> COMP_CWORD=1
* 'django-admin startproject ' -> COMP_CWORD=2
| 80 | 34 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _user_input(self, input_str):
os.environ["COMP_WORDS"] = input_str
idx = len(input_str.split(" ")) - 1 # Index of the last word
comp_cword = idx + 1 if input_str.endswith(" ") else idx
os.environ["COMP_CWORD"] = str(comp_cword)
sys.argv = input_str.split()
```
###Assistant :
Set the environment and the list of command line arguments.
This sets the bash variables $COMP_WORDS and $COMP_CWORD. The former is
an array consisting of the individual words in the current command
line, the latter is the index of the current cursor position, so in
case a word is completed and the cursor is placed after a whitespace,
$COMP_CWORD must be incremented by 1:
* 'django-admin start' -> COMP_CWORD=1
* 'django-admin startproject' -> COMP_CWORD=1
* 'django-admin startproject ' -> COMP_CWORD=2
|
2,891 | def _check_conflicts() -> None:
# Node-related conflicts
# When using the Node server, we must always connect to 8501 (this is
# hard-coded in JS). Otherwise, the browser would decide what port to
# connect to based on window.location.port, which in dev is going to
# be (3000)
# Import logger locally to prevent circular references
from streamlit.logger import get_logger
LOGGER = get_logger(__name__)
if get_option("global.developmentMode"):
assert _is_unset(
"server.port"
), "server.port does not work when global.developmentMode is true."
assert _is_unset("browser.serverPort"), (
"browser.serverPort does not work when global.developmentMode is " "true."
)
# XSRF conflicts
if get_option("server.enableXsrfProtection"):
if not get_option("server.enableCORS") or get_option("global.developmentMode"):
LOGGER.warning(
)
|
Warning: the config option 'server.enableCORS=false' is not compatible with 'server.enableXsrfProtection=true'.
As a result, 'server.enableCORS' is being overridden to 'true'.
More information:
In order to protect against CSRF attacks, we send a cookie with each request.
To do so, we must specify allowable origins, which places a restriction on
cross-origin resource sharing.
If cross origin resource sharing is required, please disable server.enableXsrfProtection.
| 61 | 102 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _check_conflicts() -> None:
# Node-related conflicts
# When using the Node server, we must always connect to 8501 (this is
# hard-coded in JS). Otherwise, the browser would decide what port to
# connect to based on window.location.port, which in dev is going to
# be (3000)
# Import logger locally to prevent circular references
from streamlit.logger import get_logger
LOGGER = get_logger(__name__)
if get_option("global.developmentMode"):
assert _is_unset(
"server.port"
), "server.port does not work when global.developmentMode is true."
assert _is_unset("browser.serverPort"), (
"browser.serverPort does not work when global.developmentMode is " "true."
)
# XSRF conflicts
if get_option("server.enableXsrfProtection"):
if not get_option("server.enableCORS") or get_option("global.developmentMode"):
LOGGER.warning(
)
```
###Assistant :
Warning: the config option 'server.enableCORS=false' is not compatible with 'server.enableXsrfProtection=true'.
As a result, 'server.enableCORS' is being overridden to 'true'.
More information:
In order to protect against CSRF attacks, we send a cookie with each request.
To do so, we must specify allowable origins, which places a restriction on
cross-origin resource sharing.
If cross origin resource sharing is required, please disable server.enableXsrfProtection.
|
2,892 | def num_columns(self) -> Dict[str, int]:
self._check_values_type()
return {k: dataset.num_columns for k, dataset in self.items()}
| Number of columns in each split of the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.num_columns
{'test': 2, 'train': 2, 'validation': 2}
```
| 29 | 14 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def num_columns(self) -> Dict[str, int]:
self._check_values_type()
return {k: dataset.num_columns for k, dataset in self.items()}
```
###Assistant : Number of columns in each split of the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.num_columns
{'test': 2, 'train': 2, 'validation': 2}
```
|
2,893 | def _sample_with_replacement_map_partitions(population, k):
stream = iter(population)
e = next(stream)
reservoir, stream_length = [e for _ in range(k)], 1
w = [rnd.random() for _ in range(k)]
nxt = [_geometric(wi) for wi in w]
min_nxt = min(nxt)
for i, e in enumerate(stream, 1):
if i == min_nxt:
for j, n in enumerate(nxt):
if n == min_nxt:
reservoir[j] = e
w[j] *= rnd.random()
nxt[j] += _geometric(w[j])
min_nxt = min(nxt)
stream_length += 1
return reservoir, stream_length
|
Reservoir sampling with replacement; the main idea is to use k reservoirs of size 1.
See Section Applications in http://utopia.duth.gr/~pefraimi/research/data/2007EncOfAlg.pdf
| 20 | 72 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _sample_with_replacement_map_partitions(population, k):
stream = iter(population)
e = next(stream)
reservoir, stream_length = [e for _ in range(k)], 1
w = [rnd.random() for _ in range(k)]
nxt = [_geometric(wi) for wi in w]
min_nxt = min(nxt)
for i, e in enumerate(stream, 1):
if i == min_nxt:
for j, n in enumerate(nxt):
if n == min_nxt:
reservoir[j] = e
w[j] *= rnd.random()
nxt[j] += _geometric(w[j])
min_nxt = min(nxt)
stream_length += 1
return reservoir, stream_length
```
###Assistant :
Reservoir sampling with replacement; the main idea is to use k reservoirs of size 1.
See Section Applications in http://utopia.duth.gr/~pefraimi/research/data/2007EncOfAlg.pdf
|
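The row above calls a `_geometric` helper and an `rnd` module that are not shown. A self-contained demo under the assumption that `_geometric(w)` draws the gap to the next replacement from a geometric-like law (the definition below is a guess at its intent, not the original code):
```Python
import math
import random as rnd  # matches the `rnd` name the function above expects

def _geometric(w):
    # Skip length with "success probability" w; clamped to avoid log(0).
    w = min(max(w, 1e-12), 1 - 1e-12)
    return int(math.log(1.0 - rnd.random()) / math.log(1.0 - w)) + 1

sample, n = _sample_with_replacement_map_partitions(range(10_000), k=5)
print(sample)  # 5 elements drawn with replacement from the stream
print(n)       # 10000 -- the observed stream length
```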
2,894 | def __setattr__(cls, name, value):
member_map = cls.__dict__.get('_member_map_', {})
if name in member_map:
raise AttributeError('cannot reassign member %r' % (name, ))
super().__setattr__(name, value)
|
Block attempts to reassign Enum members.
A simple assignment to the class namespace only changes one of the
several possible ways to get an Enum member from the Enum class,
resulting in an inconsistent Enumeration.
| 35 | 22 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def __setattr__(cls, name, value):
member_map = cls.__dict__.get('_member_map_', {})
if name in member_map:
raise AttributeError('cannot reassign member %r' % (name, ))
super().__setattr__(name, value)
```
###Assistant :
Block attempts to reassign Enum members.
A simple assignment to the class namespace only changes one of the
several possible ways to get an Enum member from the Enum class,
resulting in an inconsistent Enumeration.
|
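A quick demonstration using the stdlib `Enum`, whose metaclass ships essentially this guard (the exact error wording varies across Python versions):
```Python
from enum import Enum

class Color(Enum):
    RED = 1

try:
    Color.RED = 2              # blocked by the metaclass __setattr__
except AttributeError as exc:
    print(exc)                 # e.g. "cannot reassign member 'RED'"
```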
2,895 | def read_file(self, f, source=None):
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
| Like read() but the argument must be a file-like object.
The `f' argument must be iterable, returning one line at a time.
Optional second argument is the `source' specifying the name of the
file being read. If not given, it is taken from f.name. If `f' has no
`name' attribute, `<???>' is used.
| 53 | 19 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def read_file(self, f, source=None):
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
```
###Assistant : Like read() but the argument must be a file-like object.
The `f' argument must be iterable, returning one line at a time.
Optional second argument is the `source' specifying the name of the
file being read. If not given, it is taken from f.name. If `f' has no
`name' attribute, `<???>' is used.
|
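A small usage sketch with the stdlib `ConfigParser`, which exposes this method; the inline INI payload and source label are made up:
```Python
import io
from configparser import ConfigParser

cp = ConfigParser()
cp.read_file(io.StringIO("[server]\nport = 8080\n"), source="inline-config")
print(cp["server"]["port"])    # 8080
```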
2,896 | def get_newsletters() -> pd.DataFrame:
urls = [
"https://defiweekly.substack.com/archive",
"https://newsletter.thedefiant.io/archive",
"https://thedailygwei.substack.com/archive",
"https://todayindefi.substack.com/archive",
"https://newsletter.banklesshq.com/archive",
"https://defislate.substack.com/archive",
]
threads = len(urls)
newsletters = []
with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
for newsletter in executor.map(scrape_substack, urls):
try:
newsletters.append(pd.DataFrame(newsletter))
except KeyError as e:
console.print(e, "\n")
continue
df = pd.concat(newsletters, ignore_index=True)
df.columns = ["Title", "Link", "Date"]
df["Title"] = df["Title"].apply(lambda x: "".join(i for i in x if ord(i) < 128))
df["Date"] = df["Date"].apply(
lambda x: parser.parse(x).strftime("%Y-%m-%d %H:%M:%S")
)
df["Title"] = df["Title"].apply(
lambda x: "\n".join(textwrap.wrap(x, width=50)) if isinstance(x, str) else x
)
return (
df[["Title", "Date", "Link"]]
.sort_values(by="Date", ascending=False)
.reset_index(drop="index")
)
| Scrape all substack newsletters from url list.
[Source: substack.com]
Returns
-------
pd.DataFrame
DataFrame with recent news from most popular DeFi related newsletters.
| 22 | 90 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_newsletters() -> pd.DataFrame:
urls = [
"https://defiweekly.substack.com/archive",
"https://newsletter.thedefiant.io/archive",
"https://thedailygwei.substack.com/archive",
"https://todayindefi.substack.com/archive",
"https://newsletter.banklesshq.com/archive",
"https://defislate.substack.com/archive",
]
threads = len(urls)
newsletters = []
with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
for newsletter in executor.map(scrape_substack, urls):
try:
newsletters.append(pd.DataFrame(newsletter))
except KeyError as e:
console.print(e, "\n")
continue
df = pd.concat(newsletters, ignore_index=True)
df.columns = ["Title", "Link", "Date"]
df["Title"] = df["Title"].apply(lambda x: "".join(i for i in x if ord(i) < 128))
df["Date"] = df["Date"].apply(
lambda x: parser.parse(x).strftime("%Y-%m-%d %H:%M:%S")
)
df["Title"] = df["Title"].apply(
lambda x: "\n".join(textwrap.wrap(x, width=50)) if isinstance(x, str) else x
)
return (
df[["Title", "Date", "Link"]]
.sort_values(by="Date", ascending=False)
.reset_index(drop="index")
)
```
###Assistant : Scrape all substack newsletters from url list.
[Source: substack.com]
Returns
-------
pd.DataFrame
DataFrame with recent news from most popular DeFi related newsletters.
|
2,897 | def _object2proto(self) -> GetEnumAttributeAction_PB:
return GetEnumAttributeAction_PB(
path=self.path,
id_at_location=serialize(self.id_at_location),
address=serialize(self.address),
msg_id=serialize(self.id),
)
| Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
:rtype: GetOrSetPropertyAction_PB
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
| 68 | 11 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _object2proto(self) -> GetEnumAttributeAction_PB:
return GetEnumAttributeAction_PB(
path=self.path,
id_at_location=serialize(self.id_at_location),
address=serialize(self.address),
msg_id=serialize(self.id),
)
```
###Assistant : Returns a protobuf serialization of self.
As a requirement of all objects which inherit from Serializable,
this method transforms the current object into the corresponding
Protobuf object so that it can be further serialized.
:return: returns a protobuf object
:rtype: GetOrSetPropertyAction_PB
.. note::
This method is purely an internal method. Please use serialize(object) or one of
the other public serialization methods if you wish to serialize an
object.
|
2,898 | async def test_becomes_delivered_not_shown_notification(hass):
package = Package(
tracking_number="456",
destination_country=206,
friendly_name="friendly name 1",
info_text="info text 1",
location="location 1",
timestamp="2020-08-10 10:32",
origin_country=206,
package_type=2,
)
ProfileMock.package_list = [package]
await _setup_seventeentrack(hass, VALID_CONFIG_FULL_NO_DELIVERED)
assert hass.states.get("sensor.seventeentrack_package_456") is not None
assert len(hass.states.async_entity_ids()) == 1
package_delivered = Package(
tracking_number="456",
destination_country=206,
friendly_name="friendly name 1",
info_text="info text 1",
location="location 1",
timestamp="2020-08-10 10:32",
origin_country=206,
package_type=2,
status=40,
)
ProfileMock.package_list = [package_delivered]
with patch(
"homeassistant.components.seventeentrack.sensor.persistent_notification"
) as persistent_notification_mock:
await _goto_future(hass)
persistent_notification_mock.create.assert_called()
assert not hass.states.async_entity_ids()
| Ensure notification is triggered when package becomes delivered. | 8 | 70 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def test_becomes_delivered_not_shown_notification(hass):
package = Package(
tracking_number="456",
destination_country=206,
friendly_name="friendly name 1",
info_text="info text 1",
location="location 1",
timestamp="2020-08-10 10:32",
origin_country=206,
package_type=2,
)
ProfileMock.package_list = [package]
await _setup_seventeentrack(hass, VALID_CONFIG_FULL_NO_DELIVERED)
assert hass.states.get("sensor.seventeentrack_package_456") is not None
assert len(hass.states.async_entity_ids()) == 1
package_delivered = Package(
tracking_number="456",
destination_country=206,
friendly_name="friendly name 1",
info_text="info text 1",
location="location 1",
timestamp="2020-08-10 10:32",
origin_country=206,
package_type=2,
status=40,
)
ProfileMock.package_list = [package_delivered]
with patch(
"homeassistant.components.seventeentrack.sensor.persistent_notification"
) as persistent_notification_mock:
await _goto_future(hass)
persistent_notification_mock.create.assert_called()
assert not hass.states.async_entity_ids()
```
###Assistant : Ensure notification is triggered when package becomes delivered.
|
2,899 | def fit(self, X, y=None, sample_weight=None):
X = self._validate_data(X, accept_sparse=["csc"])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
# ensure that max_sample is in [1, n_samples]:
n_samples = X.shape[0]
if self.contamination != "auto":
if not (0.0 < self.contamination <= 0.5):
raise ValueError(
"contamination must be in (0, 0.5], got: %f" % self.contamination
)
if isinstance(self.max_samples, str):
if self.max_samples == "auto":
max_samples = min(256, n_samples)
else:
raise ValueError(
"max_samples (%s) is not supported."
'Valid choices are: "auto", int or'
"float"
% self.max_samples
)
elif isinstance(self.max_samples, numbers.Integral):
if self.max_samples > n_samples:
warn(
"max_samples (%s) is greater than the "
"total number of samples (%s). max_samples "
"will be set to n_samples for estimation."
% (self.max_samples, n_samples)
)
max_samples = n_samples
else:
max_samples = self.max_samples
else: # float
if not 0.0 < self.max_samples <= 1.0:
raise ValueError(
"max_samples must be in (0, 1], got %r" % self.max_samples
)
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
super()._fit(
X,
y,
max_samples,
max_depth=max_depth,
sample_weight=sample_weight,
check_input=False,
)
if self.contamination == "auto":
# 0.5 plays a special role as described in the original paper.
# we take the opposite as we consider the opposite of their score.
self.offset_ = -0.5
return self
# else, define offset_ wrt contamination parameter
self.offset_ = np.percentile(self.score_samples(X), 100.0 * self.contamination)
return self
|
Fit estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Fitted estimator.
| 66 | 230 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def fit(self, X, y=None, sample_weight=None):
X = self._validate_data(X, accept_sparse=["csc"])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
# ensure that max_sample is in [1, n_samples]:
n_samples = X.shape[0]
if self.contamination != "auto":
if not (0.0 < self.contamination <= 0.5):
raise ValueError(
"contamination must be in (0, 0.5], got: %f" % self.contamination
)
if isinstance(self.max_samples, str):
if self.max_samples == "auto":
max_samples = min(256, n_samples)
else:
raise ValueError(
"max_samples (%s) is not supported."
'Valid choices are: "auto", int or'
"float"
% self.max_samples
)
elif isinstance(self.max_samples, numbers.Integral):
if self.max_samples > n_samples:
warn(
"max_samples (%s) is greater than the "
"total number of samples (%s). max_samples "
"will be set to n_samples for estimation."
% (self.max_samples, n_samples)
)
max_samples = n_samples
else:
max_samples = self.max_samples
else: # float
if not 0.0 < self.max_samples <= 1.0:
raise ValueError(
"max_samples must be in (0, 1], got %r" % self.max_samples
)
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
super()._fit(
X,
y,
max_samples,
max_depth=max_depth,
sample_weight=sample_weight,
check_input=False,
)
if self.contamination == "auto":
# 0.5 plays a special role as described in the original paper.
# we take the opposite as we consider the opposite of their score.
self.offset_ = -0.5
return self
# else, define offset_ wrt contamination parameter
self.offset_ = np.percentile(self.score_samples(X), 100.0 * self.contamination)
return self
```
###Assistant :
Fit estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Fitted estimator.
|
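A hedged usage sketch, assuming the method belongs to scikit-learn's `IsolationForest` (the toy data is made up; exact predictions depend on the data and seed):
```Python
import numpy as np
from sklearn.ensemble import IsolationForest

X = np.array([[0.0], [0.1], [0.2], [10.0]])   # one obvious outlier
clf = IsolationForest(contamination="auto", random_state=0).fit(X)
print(clf.predict(X))                          # 1 = inlier, -1 = outlier
```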