Unnamed: 0 (int64, 0 to 2.93k) | code (stringlengths 101 to 62.2k) | docs (stringlengths 51 to 10.7k) | doc_len (int64, 4 to 1.74k) | words (int64, 4 to 4.82k) | lang (stringclasses 1 value) | prompt (stringlengths 320 to 71.2k) |
---|---|---|---|---|---|---|
1,700 | def progress(self, msg):
if self.paras.verbose:
sys.stdout.write("\033[K") # Clear line
print('[{}] {}'.format(human_format(self.step), msg), end='\r')
| Verbose function for updating progress on stdout (do not include newline) | 11 | 13 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def progress(self, msg):
if self.paras.verbose:
sys.stdout.write("\033[K") # Clear line
print('[{}] {}'.format(human_format(self.step), msg), end='\r')
```
###Assistant : Verbose function for updating progress on stdout (do not include newline)
|
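As a side note on the row above: the clear-line escape code plus a carriage-return ending is all that is needed to redraw a single status line in a terminal. A minimal standalone sketch (the original's `human_format` helper and `self.paras.verbose` flag are replaced with plain arguments, so this is an illustration rather than the project's code):
```Python
import sys
import time

def progress(step, msg, verbose=True):
    # Clear the current terminal line, then rewrite it in place by ending
    # the print with a carriage return instead of a newline.
    if verbose:
        sys.stdout.write("\033[K")  # ANSI "erase to end of line"
        print("[{}] {}".format(step, msg), end="\r")

for i in range(3):
    progress(i, "processing batch {}".format(i))
    time.sleep(0.1)
print()  # move to a fresh line once the loop is done
```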
1,701 | def test_customize_compiler_before_get_config_vars(self):
# Issue #21923: test that a Distribution compiler
# instance can be called without an explicit call to
# get_config_vars().
with open(TESTFN, 'w') as f:
f.writelines(textwrap.dedent())
p = subprocess.Popen([str(sys.executable), TESTFN],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
outs, errs = p.communicate()
self.assertEqual(0, p.returncode, "Subprocess failed: " + outs)
| \
from distutils.core import Distribution
config = Distribution().get_command_obj('config')
# try_compile may pass or it may fail if no compiler
# is found but it should not raise an exception.
rc = config.try_compile('int x;')
| 33 | 46 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_customize_compiler_before_get_config_vars(self):
# Issue #21923: test that a Distribution compiler
# instance can be called without an explicit call to
# get_config_vars().
with open(TESTFN, 'w') as f:
f.writelines(textwrap.dedent())
p = subprocess.Popen([str(sys.executable), TESTFN],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
outs, errs = p.communicate()
self.assertEqual(0, p.returncode, "Subprocess failed: " + outs)
```
###Assistant : \
from distutils.core import Distribution
config = Distribution().get_command_obj('config')
# try_compile may pass or it may fail if no compiler
# is found but it should not raise an exception.
rc = config.try_compile('int x;')
|
1,702 | def siren_platform_only():
with patch(
"homeassistant.components.zha.PLATFORMS",
(
Platform.DEVICE_TRACKER,
Platform.NUMBER,
Platform.SENSOR,
Platform.SELECT,
Platform.SIREN,
),
):
yield
@pytest.fixture | Only setup the siren and required base platforms to speed up tests. | 12 | 15 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def siren_platform_only():
with patch(
"homeassistant.components.zha.PLATFORMS",
(
Platform.DEVICE_TRACKER,
Platform.NUMBER,
Platform.SENSOR,
Platform.SELECT,
Platform.SIREN,
),
):
yield
@pytest.fixture
```
###Assistant : Only setup the siren and required base platforms to speed up tests.
|
1,703 | def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked(self):
self._rlsn._auth.check_auth_blocking = Mock(
return_value=make_awaitable(None),
side_effect=ResourceLimitError(
403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER
),
)
self._rlsn._server_notices_manager.__is_room_currently_blocked = Mock(
return_value=make_awaitable((True, []))
)
mock_event = Mock(
type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType}
)
self._rlsn._store.get_events = Mock(
return_value=make_awaitable({"123": mock_event})
)
self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
self._send_notice.assert_called_once()
|
When the room is already in a blocked state, test that, when alerting
is suppressed, the room is returned to an unblocked state.
| 24 | 33 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked(self):
self._rlsn._auth.check_auth_blocking = Mock(
return_value=make_awaitable(None),
side_effect=ResourceLimitError(
403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER
),
)
self._rlsn._server_notices_manager.__is_room_currently_blocked = Mock(
return_value=make_awaitable((True, []))
)
mock_event = Mock(
type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType}
)
self._rlsn._store.get_events = Mock(
return_value=make_awaitable({"123": mock_event})
)
self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
self._send_notice.assert_called_once()
```
###Assistant :
When the room is already in a blocked state, test that, when alerting
is suppressed, the room is returned to an unblocked state.
|
1,704 | def test_send(event_listener, salt_master, salt_minion, salt_call_cli):
event_tag = random_string("salt/test/event/")
data = {"event.fire": "just test it!!!!"}
start_time = time.time()
ret = salt_call_cli.run(
"event.send",
event_tag,
data=data,
with_grains=True,
with_pillar=True,
preload={"foo": "bar"},
)
assert ret.exitcode == 0
assert ret.json
assert ret.json is True
event_pattern = (salt_master.id, event_tag)
matched_events = event_listener.wait_for_events(
[event_pattern], after_time=start_time, timeout=30
)
assert matched_events.found_all_events
for event in matched_events:
assert event.data["id"] == salt_minion.id
assert event.data["cmd"] == "_minion_event"
assert "event.fire" in event.data["data"]
assert event.data["foo"] == "bar"
assert event.data["data"]["grains"]["test_grain"] == "cheese"
assert event.data["data"]["pillar"]["ext_spam"] == "eggs"
|
Test sending an event to the master event bus
| 9 | 79 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_send(event_listener, salt_master, salt_minion, salt_call_cli):
event_tag = random_string("salt/test/event/")
data = {"event.fire": "just test it!!!!"}
start_time = time.time()
ret = salt_call_cli.run(
"event.send",
event_tag,
data=data,
with_grains=True,
with_pillar=True,
preload={"foo": "bar"},
)
assert ret.exitcode == 0
assert ret.json
assert ret.json is True
event_pattern = (salt_master.id, event_tag)
matched_events = event_listener.wait_for_events(
[event_pattern], after_time=start_time, timeout=30
)
assert matched_events.found_all_events
for event in matched_events:
assert event.data["id"] == salt_minion.id
assert event.data["cmd"] == "_minion_event"
assert "event.fire" in event.data["data"]
assert event.data["foo"] == "bar"
assert event.data["data"]["grains"]["test_grain"] == "cheese"
assert event.data["data"]["pillar"]["ext_spam"] == "eggs"
```
###Assistant :
Test sending an event to the master event bus
|
1,705 | def _check_tree_and_avals(what, tree1, avals1, tree2, avals2):
if tree1 != tree2:
raise TypeError(
f"{what} must have same type structure, got {tree1} and {tree2}.")
if not all(_map(core.typematch, avals1, avals2)):
diff = tree_map(_show_diff, tree_unflatten(tree1, avals1),
tree_unflatten(tree2, avals2))
raise TypeError(f"{what} must have identical types, got\n{diff}.")
| Raises TypeError if (tree1, avals1) does not match (tree2, avals2).
Corresponding `tree` and `avals` must match in the sense that the number of
leaves in `tree` must be equal to the length of `avals`. `what` will be
prepended to details of the mismatch in TypeError.
| 45 | 41 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _check_tree_and_avals(what, tree1, avals1, tree2, avals2):
if tree1 != tree2:
raise TypeError(
f"{what} must have same type structure, got {tree1} and {tree2}.")
if not all(_map(core.typematch, avals1, avals2)):
diff = tree_map(_show_diff, tree_unflatten(tree1, avals1),
tree_unflatten(tree2, avals2))
raise TypeError(f"{what} must have identical types, got\n{diff}.")
```
###Assistant : Raises TypeError if (tree1, avals1) does not match (tree2, avals2).
Corresponding `tree` and `avals` must match in the sense that the number of
leaves in `tree` must be equal to the length of `avals`. `what` will be
prepended to details of the mismatch in TypeError.
|
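For the row above, the `tree1 != tree2` check compares pytree structures, not leaf values. A small illustration of that structural equality, assuming a recent JAX where `tree_structure` lives in `jax.tree_util`:
```Python
import jax

# Two pytrees "match" structurally when their PyTreeDefs compare equal,
# regardless of what the leaves contain.
t1 = jax.tree_util.tree_structure({"a": 1, "b": (2, 3)})
t2 = jax.tree_util.tree_structure({"a": 9, "b": (8, 7)})
t3 = jax.tree_util.tree_structure({"a": 9, "b": [8, 7]})  # list instead of tuple

print(t1 == t2)  # True: same structure, different leaf values
print(t1 == t3)  # False: container types are part of the structure
```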
1,706 | def test_load_global_local_flag_config(self):
global_config =
local_config =
global_config_path = "/mock/home/folder/.streamlit/config.toml"
local_config_path = os.path.join(os.getcwd(), ".streamlit/config.toml")
global_open = mock_open(read_data=global_config)
local_open = mock_open(read_data=local_config)
open = mock_open()
open.side_effect = [global_open.return_value, local_open.return_value]
open_patch = patch("streamlit.config.open", open)
# patch streamlit.*.os.* instead of os.* for py35 compat
makedirs_patch = patch("streamlit.config.os.makedirs")
makedirs_patch.return_value = True
pathexists_patch = patch("streamlit.config.os.path.exists")
pathexists_patch.side_effect = lambda path: path in [
global_config_path,
local_config_path,
]
with open_patch, makedirs_patch, pathexists_patch:
config.get_config_options(options_from_flags={"theme.font": "monospace"})
self.assertEqual("light", config.get_option("theme.base"))
self.assertEqual("#FFFFFF", config.get_option("theme.textColor"))
self.assertEqual("monospace", config.get_option("theme.font"))
| Test that CLI flags have higher priority than both
~/.streamlit/config.toml and $CWD/.streamlit/config.toml at parse time.
[theme]
base = "dark"
font = "sans serif"
textColor = "#FFFFFF"
[theme]
base = "light"
font = "serif"
| 33 | 70 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_load_global_local_flag_config(self):
global_config =
local_config =
global_config_path = "/mock/home/folder/.streamlit/config.toml"
local_config_path = os.path.join(os.getcwd(), ".streamlit/config.toml")
global_open = mock_open(read_data=global_config)
local_open = mock_open(read_data=local_config)
open = mock_open()
open.side_effect = [global_open.return_value, local_open.return_value]
open_patch = patch("streamlit.config.open", open)
# patch streamlit.*.os.* instead of os.* for py35 compat
makedirs_patch = patch("streamlit.config.os.makedirs")
makedirs_patch.return_value = True
pathexists_patch = patch("streamlit.config.os.path.exists")
pathexists_patch.side_effect = lambda path: path in [
global_config_path,
local_config_path,
]
with open_patch, makedirs_patch, pathexists_patch:
config.get_config_options(options_from_flags={"theme.font": "monospace"})
self.assertEqual("light", config.get_option("theme.base"))
self.assertEqual("#FFFFFF", config.get_option("theme.textColor"))
self.assertEqual("monospace", config.get_option("theme.font"))
```
###Assistant : Test that CLI flags have higher priority than both
~/.streamlit/config.toml and $CWD/.streamlit/config.toml at parse time.
[theme]
base = "dark"
font = "sans serif"
textColor = "#FFFFFF"
[theme]
base = "light"
font = "serif"
|
1,707 | def get_valid_filename(name):
s = str(name).strip().replace(" ", "_")
s = re.sub(r"(?u)[^-\w.]", "", s)
if s in {"", ".", ".."}:
raise SuspiciousFileOperation("Could not derive file name from '%s'" % name)
return s
@keep_lazy_text |
Return the given string converted to a string that can be used for a clean
filename. Remove leading and trailing spaces; convert other spaces to
underscores; and remove anything that is not an alphanumeric, dash,
underscore, or dot.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
| 44 | 31 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_valid_filename(name):
s = str(name).strip().replace(" ", "_")
s = re.sub(r"(?u)[^-\w.]", "", s)
if s in {"", ".", ".."}:
raise SuspiciousFileOperation("Could not derive file name from '%s'" % name)
return s
@keep_lazy_text
```
###Assistant :
Return the given string converted to a string that can be used for a clean
filename. Remove leading and trailing spaces; convert other spaces to
underscores; and remove anything that is not an alphanumeric, dash,
underscore, or dot.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
|
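The sanitisation in the row above is two simple steps on the input string. A self-contained sketch under a hypothetical `valid_filename` name (it drops the `SuspiciousFileOperation` guard and the `@keep_lazy_text` decorator from the original):
```Python
import re

def valid_filename(name: str) -> str:
    # Trim surrounding whitespace, turn inner spaces into underscores,
    # then drop everything that is not alphanumeric, "-", "_" or ".".
    s = str(name).strip().replace(" ", "_")
    return re.sub(r"(?u)[^-\w.]", "", s)

print(valid_filename("john's portrait in 2004.jpg"))  # johns_portrait_in_2004.jpg
```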
1,708 | def test_no_duplicates_for_m2m_in_list_filter(self):
blues = Genre.objects.create(name="Blues")
band = Band.objects.create(name="B.B. King Review", nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, custom_site)
request = self.factory.get("/band/", data={"genres": blues.pk})
request.user = self.superuser
cl = m.get_changelist_instance(request)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
# Queryset must be deletable.
self.assertIs(cl.queryset.query.distinct, False)
cl.queryset.delete()
self.assertEqual(cl.queryset.count(), 0)
|
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Basic ManyToMany.
| 18 | 47 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_no_duplicates_for_m2m_in_list_filter(self):
blues = Genre.objects.create(name="Blues")
band = Band.objects.create(name="B.B. King Review", nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, custom_site)
request = self.factory.get("/band/", data={"genres": blues.pk})
request.user = self.superuser
cl = m.get_changelist_instance(request)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
# Queryset must be deletable.
self.assertIs(cl.queryset.query.distinct, False)
cl.queryset.delete()
self.assertEqual(cl.queryset.count(), 0)
```
###Assistant :
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Basic ManyToMany.
|
1,709 | def add_flex_arithmetic_methods(cls) -> None:
flex_arith_method, flex_comp_method = _get_method_wrappers(cls)
new_methods = _create_methods(cls, flex_arith_method, flex_comp_method)
new_methods.update(
{
"multiply": new_methods["mul"],
"subtract": new_methods["sub"],
"divide": new_methods["div"],
}
)
# opt out of bool flex methods for now
assert not any(kname in new_methods for kname in ("ror_", "rxor", "rand_"))
_add_methods(cls, new_methods=new_methods)
|
Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
to the class.
Parameters
----------
cls : class
flex methods will be defined and pinned to this class
| 29 | 45 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def add_flex_arithmetic_methods(cls) -> None:
flex_arith_method, flex_comp_method = _get_method_wrappers(cls)
new_methods = _create_methods(cls, flex_arith_method, flex_comp_method)
new_methods.update(
{
"multiply": new_methods["mul"],
"subtract": new_methods["sub"],
"divide": new_methods["div"],
}
)
# opt out of bool flex methods for now
assert not any(kname in new_methods for kname in ("ror_", "rxor", "rand_"))
_add_methods(cls, new_methods=new_methods)
```
###Assistant :
Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
to the class.
Parameters
----------
cls : class
flex methods will be defined and pinned to this class
|
1,710 | def get_collected_keypoint(self):
output = []
for tracker_id in self.id_to_pop:
output.append([tracker_id, self.keypoint_saver[tracker_id]])
del (self.keypoint_saver[tracker_id])
self.flag_to_pop = False
self.id_to_pop.clear()
return output
|
Output (List): List of keypoint results for Action Recognition task, where
the format of each element is [tracker_id, KeyPointSequence of tracker_id]
| 21 | 19 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_collected_keypoint(self):
output = []
for tracker_id in self.id_to_pop:
output.append([tracker_id, self.keypoint_saver[tracker_id]])
del (self.keypoint_saver[tracker_id])
self.flag_to_pop = False
self.id_to_pop.clear()
return output
```
###Assistant :
Output (List): List of keypoint results for Action Recognition task, where
the format of each element is [tracker_id, KeyPointSequence of tracker_id]
|
1,711 | def get_course_schedule_events(start, end, filters=None):
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Course Schedule", filters)
data = frappe.db.sql(.format(conditions=conditions), {
"start": start,
"end": end
}, as_dict=True, update={"allDay": 0})
return data
@frappe.whitelist() | Returns events for Course Schedule Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters (JSON).
select name, course, color,
timestamp(schedule_date, from_time) as from_time,
timestamp(schedule_date, to_time) as to_time,
room, student_group, 0 as 'allDay'
from `tabCourse Schedule`
where ( schedule_date between %(start)s and %(end)s )
{conditions} | 49 | 28 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_course_schedule_events(start, end, filters=None):
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Course Schedule", filters)
data = frappe.db.sql(.format(conditions=conditions), {
"start": start,
"end": end
}, as_dict=True, update={"allDay": 0})
return data
@frappe.whitelist()
```
###Assistant : Returns events for Course Schedule Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters (JSON).
select name, course, color,
timestamp(schedule_date, from_time) as from_time,
timestamp(schedule_date, to_time) as to_time,
room, student_group, 0 as 'allDay'
from `tabCourse Schedule`
where ( schedule_date between %(start)s and %(end)s )
{conditions}
|
1,712 | def check_keys_split(self, decoded) -> None:
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys_joined = ", ".join(bad_keys)
raise ValueError(f"JSON data had unexpected key(s): {bad_keys_joined}")
|
Checks that dict has only the appropriate keys for orient='split'.
| 10 | 21 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def check_keys_split(self, decoded) -> None:
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys_joined = ", ".join(bad_keys)
raise ValueError(f"JSON data had unexpected key(s): {bad_keys_joined}")
```
###Assistant :
Checks that dict has only the appropriate keys for orient='split'.
|
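The validation in the row above is just a set difference against the keys allowed for `orient='split'`. A standalone sketch, assuming the frame-style key set (`columns`, `index`, `data`); the exact `_split_keys` tuple depends on which pandas parser is in use:
```Python
ALLOWED_SPLIT_KEYS = {"columns", "index", "data"}  # assumed frame-style keys

def check_keys_split(decoded: dict) -> None:
    # Anything outside the allowed set is reported in one error message.
    bad_keys = set(decoded) - ALLOWED_SPLIT_KEYS
    if bad_keys:
        raise ValueError(f"JSON data had unexpected key(s): {', '.join(sorted(bad_keys))}")

check_keys_split({"columns": [], "index": [], "data": []})  # passes silently
# check_keys_split({"columns": [], "rows": []})             # would raise ValueError
```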
1,713 | def adapt_datetimefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, "resolve_expression"):
return value
# cx_Oracle doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError(
"Oracle backend does not support timezone-aware datetimes when USE_TZ is False."
)
return Oracle_datetime.from_datetime(value)
|
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
If a naive datetime is passed, it is assumed to be in UTC. Normally Django's
models.DateTimeField makes sure that the passed datetime is timezone aware
when USE_TZ is True.
| 44 | 53 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def adapt_datetimefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, "resolve_expression"):
return value
# cx_Oracle doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError(
"Oracle backend does not support timezone-aware datetimes when USE_TZ is False."
)
return Oracle_datetime.from_datetime(value)
```
###Assistant :
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
If a naive datetime is passed, it is assumed to be in UTC. Normally Django's
models.DateTimeField makes sure that the passed datetime is timezone aware
when USE_TZ is True.
|
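The key step in the row above is "make the aware datetime naive in the connection's timezone". A standard-library illustration of that conversion (this mirrors what `django.utils.timezone.make_naive` does; the Berlin zone is just a stand-in for `self.connection.timezone`):
```Python
from datetime import datetime, timezone
from zoneinfo import ZoneInfo

aware = datetime(2024, 1, 1, 12, 0, tzinfo=timezone.utc)
target_tz = ZoneInfo("Europe/Berlin")  # stand-in for the connection's timezone

# Convert into the target zone, then strip tzinfo to get a naive value.
naive = aware.astimezone(target_tz).replace(tzinfo=None)
print(naive)  # 2024-01-01 13:00:00
```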
1,714 | def __pow__(a, b):
if isinstance(b, numbers.Rational):
if b.denominator == 1:
power = b.numerator
if power >= 0:
return Fraction(a._numerator ** power,
a._denominator ** power,
_normalize=False)
elif a._numerator >= 0:
return Fraction(a._denominator ** -power,
a._numerator ** -power,
_normalize=False)
else:
return Fraction((-a._denominator) ** -power,
(-a._numerator) ** -power,
_normalize=False)
else:
# A fractional power will generally produce an
# irrational number.
return float(a) ** float(b)
else:
return float(a) ** b
| a ** b
If b is not an integer, the result will be a float or complex
since roots are generally irrational. If b is an integer, the
result will be rational.
| 32 | 67 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def __pow__(a, b):
if isinstance(b, numbers.Rational):
if b.denominator == 1:
power = b.numerator
if power >= 0:
return Fraction(a._numerator ** power,
a._denominator ** power,
_normalize=False)
elif a._numerator >= 0:
return Fraction(a._denominator ** -power,
a._numerator ** -power,
_normalize=False)
else:
return Fraction((-a._denominator) ** -power,
(-a._numerator) ** -power,
_normalize=False)
else:
# A fractional power will generally produce an
# irrational number.
return float(a) ** float(b)
else:
return float(a) ** b
```
###Assistant : a ** b
If b is not an integer, the result will be a float or complex
since roots are generally irrational. If b is an integer, the
result will be rational.
|
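The method in the row above is essentially the standard library's `fractions.Fraction.__pow__`, so its documented behaviour can be checked directly against the stdlib class:
```Python
from fractions import Fraction

print(Fraction(2, 3) ** 2)               # 4/9   (exact, rational result)
print(Fraction(2, 3) ** -2)              # 9/4   (negative integer exponent)
print(Fraction(9, 4) ** Fraction(1, 2))  # 1.5   (non-integer exponent falls back to float)
```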
1,715 | def fit(self, X, y, sample_weight=None):
self._validate_params()
super().fit(X, y, sample_weight=sample_weight)
return self
| Fit Ridge regression model with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data. If using GCV, will be cast to float64
if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Fitted estimator.
Notes
-----
When sample_weight is provided, the selected hyperparameter may depend
on whether we use leave-one-out cross-validation (cv=None or cv='auto')
or another form of cross-validation, because only leave-one-out
cross-validation takes the sample weights into account when computing
the validation score.
| 118 | 11 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def fit(self, X, y, sample_weight=None):
self._validate_params()
super().fit(X, y, sample_weight=sample_weight)
return self
```
###Assistant : Fit Ridge regression model with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data. If using GCV, will be cast to float64
if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Fitted estimator.
Notes
-----
When sample_weight is provided, the selected hyperparameter may depend
on whether we use leave-one-out cross-validation (cv=None or cv='auto')
or another form of cross-validation, because only leave-one-out
cross-validation takes the sample weights into account when computing
the validation score.
|
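A minimal usage sketch for the documented `fit()` on scikit-learn's `RidgeCV` (synthetic data; the alphas and weights are arbitrary choices for illustration):
```Python
import numpy as np
from sklearn.linear_model import RidgeCV

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + rng.normal(scale=0.1, size=50)

# Leave-one-out CV over the listed alphas, with optional per-sample weights.
model = RidgeCV(alphas=[0.1, 1.0, 10.0]).fit(X, y, sample_weight=np.ones(50))
print(model.alpha_)  # regularisation strength selected by cross-validation
```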
1,716 | def calc_position(self, x):
if x < self.x[0]:
return None
elif x > self.x[-1]:
return None
i = self.__search_index(x)
dx = x - self.x[i]
position = self.a[i] + self.b[i] * dx + \
self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0
return position
|
Calc `y` position for given `x`.
If `x` is outside the data point's `x` range, return None.
Returns
-------
y : float
y position for given x.
| 27 | 45 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def calc_position(self, x):
if x < self.x[0]:
return None
elif x > self.x[-1]:
return None
i = self.__search_index(x)
dx = x - self.x[i]
position = self.a[i] + self.b[i] * dx + \
self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0
return position
```
###Assistant :
Calc `y` position for given `x`.
If `x` is outside the data point's `x` range, return None.
Returns
-------
y : float
y position for given x.
|
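The evaluation in the row above is plain per-segment cubic arithmetic: a + b*dx + c*dx**2 + d*dx**3 with dx measured from the segment's left knot. A self-contained sketch with made-up coefficients (not a real spline fit), using `bisect` in place of the original's private `__search_index`:
```Python
import bisect

xs = [0.0, 1.0, 2.0]                                          # knot positions
a, b, c, d = [0.0, 1.0], [1.0, 0.5], [0.0, 0.2], [0.0, -0.1]  # per-segment coefficients

def calc_position(x):
    if x < xs[0] or x > xs[-1]:
        return None                        # outside the data range
    i = min(bisect.bisect_right(xs, x) - 1, len(a) - 1)
    dx = x - xs[i]
    return a[i] + b[i] * dx + c[i] * dx ** 2 + d[i] * dx ** 3

print(calc_position(1.5))  # 1.2875
print(calc_position(3.0))  # None
```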
1,717 | def get_daily_sector_prices(start_date, end_date) -> dict:
# sector ticker information
sp500_tickers = {
"S&P 500 Materials (Sector)": "^SP500-15",
"S&P 500 Industrials (Sector)": "^SP500-20",
"S&P 500 Consumer Discretionary (Sector)": "^SP500-25",
"S&P 500 Consumer Staples (Sector)": "^SP500-30",
"S&P 500 Health Care (Sector)": "^SP500-35",
"S&P 500 Financials (Sector)": "^SP500-40",
"S&P 500 Information Technology (Sector)": "^SP500-45",
"S&P 500 Telecommunication Services (Sector)": "^SP500-50",
"S&P 500 Utilities (Sector)": "^SP500-55",
"S&P 500 Real Estate (Sector)": "^SP500-60",
"S&P 500 Energy (Sector)": "^GSPE",
}
sp500_tickers_data = {} # to store data
for (
sector,
sector_ticker,
) in sp500_tickers.items(): # iterate thru the sectors
# load the data required from yfinance
sp500_tickers_data[
sector
] = { # builds a dictionary entry for the sector with adj close data
"sector_data": yf.download(
sector_ticker,
start=start_date,
end=end_date,
progress=False,
)["Adj Close"]
} # stores the data here
return sp500_tickers_data
|
fetches daily sector prices for S&P500 for a fixed time period
Parameters
----------
start_date : str ('yyyy-mm-dd') or datetime.date
start date for fetching data
end_date : str ('yyyy-mm-dd') or datetime.date
end date for fetching data
Returns
-------
sp500_tickers_data : dict
dictionary of dataframes with SPY daily sector prices
| 48 | 133 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_daily_sector_prices(start_date, end_date) -> dict:
# sector ticker information
sp500_tickers = {
"S&P 500 Materials (Sector)": "^SP500-15",
"S&P 500 Industrials (Sector)": "^SP500-20",
"S&P 500 Consumer Discretionary (Sector)": "^SP500-25",
"S&P 500 Consumer Staples (Sector)": "^SP500-30",
"S&P 500 Health Care (Sector)": "^SP500-35",
"S&P 500 Financials (Sector)": "^SP500-40",
"S&P 500 Information Technology (Sector)": "^SP500-45",
"S&P 500 Telecommunication Services (Sector)": "^SP500-50",
"S&P 500 Utilities (Sector)": "^SP500-55",
"S&P 500 Real Estate (Sector)": "^SP500-60",
"S&P 500 Energy (Sector)": "^GSPE",
}
sp500_tickers_data = {} # to store data
for (
sector,
sector_ticker,
) in sp500_tickers.items(): # iterate thru the sectors
# load the data required from yfinance
sp500_tickers_data[
sector
] = { # builds a dictionary entry for the sector with adj close data
"sector_data": yf.download(
sector_ticker,
start=start_date,
end=end_date,
progress=False,
)["Adj Close"]
} # stores the data here
return sp500_tickers_data
```
###Assistant :
fetches daily sector prices for S&P500 for a fixed time period
Parameters
----------
start_date : str ('yyyy-mm-dd') or datetime.date
start date for fetching data
end_date : str ('yyyy-mm-dd') or datetime.date
end date for fetching data
Returns
-------
sp500_tickers_data : dict
dictionary of dataframes with SPY daily sector prices
|
1,718 | def get_unpositioned_tip(self, tip_shape=None, tip_length=None):
from manim.mobject.geometry.tips import ArrowTriangleFilledTip
if tip_shape is None:
tip_shape = ArrowTriangleFilledTip
if tip_length is None:
tip_length = self.get_default_tip_length()
color = self.get_color()
style = {"fill_color": color, "stroke_color": color}
style.update(self.tip_style)
tip = tip_shape(length=tip_length, **style)
return tip
|
Returns a tip that has been stylistically configured,
but has not yet been given a position in space.
| 18 | 38 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_unpositioned_tip(self, tip_shape=None, tip_length=None):
from manim.mobject.geometry.tips import ArrowTriangleFilledTip
if tip_shape is None:
tip_shape = ArrowTriangleFilledTip
if tip_length is None:
tip_length = self.get_default_tip_length()
color = self.get_color()
style = {"fill_color": color, "stroke_color": color}
style.update(self.tip_style)
tip = tip_shape(length=tip_length, **style)
return tip
```
###Assistant :
Returns a tip that has been stylistically configured,
but has not yet been given a position in space.
|
1,719 | def test_parse_due_date_without_timezone_uses_offset():
data: DueDate = {
"date": "2022-02-02T14:00:00",
"is_recurring": False,
"lang": "en",
"string": "Feb 2 2:00 PM",
"timezone": None,
}
actual = _parse_due_date(data, timezone_offset=-8)
assert datetime(2022, 2, 2, 22, 0, 0, tzinfo=dt.UTC) == actual
| Test due date uses user local timezone offset when it has no timezone. | 13 | 34 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_parse_due_date_without_timezone_uses_offset():
data: DueDate = {
"date": "2022-02-02T14:00:00",
"is_recurring": False,
"lang": "en",
"string": "Feb 2 2:00 PM",
"timezone": None,
}
actual = _parse_due_date(data, timezone_offset=-8)
assert datetime(2022, 2, 2, 22, 0, 0, tzinfo=dt.UTC) == actual
```
###Assistant : Test due date uses user local timezone offset when it has no timezone.
|
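What the test in the row above asserts can be reproduced with the standard library alone: attach the user's fixed offset (UTC-8) to the naive due date, then convert to UTC:
```Python
from datetime import datetime, timedelta, timezone

naive = datetime.fromisoformat("2022-02-02T14:00:00")
local = naive.replace(tzinfo=timezone(timedelta(hours=-8)))  # user's local offset
print(local.astimezone(timezone.utc))  # 2022-02-02 22:00:00+00:00
```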
1,720 | def serving_output(self, output):
hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(
last_hidden_state=output.last_hidden_state, hidden_states=hidden_states, attentions=attentions
)
@add_start_docstrings(
,
HUBERT_START_DOCSTRING,
) | TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC). | 14 | 27 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def serving_output(self, output):
hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(
last_hidden_state=output.last_hidden_state, hidden_states=hidden_states, attentions=attentions
)
@add_start_docstrings(
,
HUBERT_START_DOCSTRING,
)
```
###Assistant : TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
|
1,721 | def _unique_np(values, return_inverse=False, return_counts=False):
uniques = np.unique(
values, return_inverse=return_inverse, return_counts=return_counts
)
inverse, counts = None, None
if return_counts:
*uniques, counts = uniques
if return_inverse:
*uniques, inverse = uniques
if return_counts or return_inverse:
uniques = uniques[0]
# np.unique will have duplicate missing values at the end of `uniques`
# here we clip the nans and remove it from uniques
if uniques.size and is_scalar_nan(uniques[-1]):
nan_idx = np.searchsorted(uniques, np.nan)
uniques = uniques[: nan_idx + 1]
if return_inverse:
inverse[inverse > nan_idx] = nan_idx
if return_counts:
counts[nan_idx] = np.sum(counts[nan_idx:])
counts = counts[: nan_idx + 1]
ret = (uniques,)
if return_inverse:
ret += (inverse,)
if return_counts:
ret += (counts,)
return ret[0] if len(ret) == 1 else ret
| Helper function to find unique values for numpy arrays that correctly
accounts for nans. See `_unique` documentation for details. | 19 | 111 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _unique_np(values, return_inverse=False, return_counts=False):
uniques = np.unique(
values, return_inverse=return_inverse, return_counts=return_counts
)
inverse, counts = None, None
if return_counts:
*uniques, counts = uniques
if return_inverse:
*uniques, inverse = uniques
if return_counts or return_inverse:
uniques = uniques[0]
# np.unique will have duplicate missing values at the end of `uniques`
# here we clip the nans and remove it from uniques
if uniques.size and is_scalar_nan(uniques[-1]):
nan_idx = np.searchsorted(uniques, np.nan)
uniques = uniques[: nan_idx + 1]
if return_inverse:
inverse[inverse > nan_idx] = nan_idx
if return_counts:
counts[nan_idx] = np.sum(counts[nan_idx:])
counts = counts[: nan_idx + 1]
ret = (uniques,)
if return_inverse:
ret += (inverse,)
if return_counts:
ret += (counts,)
return ret[0] if len(ret) == 1 else ret
```
###Assistant : Helper function to find unique values for numpy arrays that correctly
accounts for nans. See `_unique` documentation for details.
|
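Why the clipping step in the row above is needed: `np.unique` treats every NaN as distinct, so the sorted uniques can end with a run of NaNs. `searchsorted` locates where that run starts, and slicing keeps exactly one of them:
```Python
import numpy as np

values = np.array([1.0, np.nan, 2.0, np.nan])
uniques = np.unique(values)
print(uniques)                       # [ 1.  2. nan nan]

nan_idx = np.searchsorted(uniques, np.nan)
print(uniques[: nan_idx + 1])        # [ 1.  2. nan] -- a single NaN kept
```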
1,722 | def switch_to_live(self) -> None:
self.logbook_run.event_cache.clear()
self.logbook_run.context_lookup.clear()
| Switch to live stream.
Clear caches so we can reduce memory pressure.
| 12 | 6 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def switch_to_live(self) -> None:
self.logbook_run.event_cache.clear()
self.logbook_run.context_lookup.clear()
```
###Assistant : Switch to live stream.
Clear caches so we can reduce memory pressure.
|
1,723 | def require_comet_ml(test_case):
return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
|
Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml isn't installed
| 16 | 7 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def require_comet_ml(test_case):
return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
```
###Assistant :
Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml isn't installed
|
1,724 | def triggered_id(self):
component_id = None
if self.triggered:
prop_id = self.triggered_prop_ids.first()
component_id = self.triggered_prop_ids[prop_id]
return component_id
|
Returns the component id (str or dict) of the Input component that triggered the callback.
Note - use `triggered_prop_ids` if you need both the component id and the prop that triggered the callback or if
multiple Inputs triggered the callback.
Example usage:
`if "btn-1" == ctx.triggered_id:
do_something()`
| 47 | 15 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def triggered_id(self):
component_id = None
if self.triggered:
prop_id = self.triggered_prop_ids.first()
component_id = self.triggered_prop_ids[prop_id]
return component_id
```
###Assistant :
Returns the component id (str or dict) of the Input component that triggered the callback.
Note - use `triggered_prop_ids` if you need both the component id and the prop that triggered the callback or if
multiple Inputs triggered the callback.
Example usage:
`if "btn-1" == ctx.triggered_id:
do_something()`
|
1,725 | def _solve_W(self, X, H, max_iter):
avg = np.sqrt(X.mean() / self._n_components)
W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype)
W_buffer = W.copy()
# Get scaled regularization terms. Done for each minibatch to take into account
# variable sizes of minibatches.
l1_reg_W, _, l2_reg_W, _ = self._scale_regularization(X)
for _ in range(max_iter):
W, *_ = _multiplicative_update_w(
X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma
)
W_diff = linalg.norm(W - W_buffer) / linalg.norm(W)
if self.tol > 0 and W_diff <= self.tol:
break
W_buffer[:] = W
return W
| Minimize the objective function w.r.t W.
Update W with H being fixed, until convergence. This is the heart
of `transform` but it's also used during `fit` when doing fresh restarts.
| 30 | 80 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _solve_W(self, X, H, max_iter):
avg = np.sqrt(X.mean() / self._n_components)
W = np.full((X.shape[0], self._n_components), avg, dtype=X.dtype)
W_buffer = W.copy()
# Get scaled regularization terms. Done for each minibatch to take into account
# variable sizes of minibatches.
l1_reg_W, _, l2_reg_W, _ = self._scale_regularization(X)
for _ in range(max_iter):
W, *_ = _multiplicative_update_w(
X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma
)
W_diff = linalg.norm(W - W_buffer) / linalg.norm(W)
if self.tol > 0 and W_diff <= self.tol:
break
W_buffer[:] = W
return W
```
###Assistant : Minimize the objective function w.r.t W.
Update W with H being fixed, until convergence. This is the heart
of `transform` but it's also used during `fit` when doing fresh restarts.
|
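A bare-bones version of the inner loop that `_solve_W` wraps: the classic multiplicative update for W with H fixed under the Frobenius loss, with the same relative-change stopping rule. This is only a sketch of the idea; the scikit-learn code additionally handles general beta-losses and regularisation:
```Python
import numpy as np

rng = np.random.default_rng(0)
X = np.abs(rng.normal(size=(20, 8)))
H = np.abs(rng.normal(size=(3, 8)))
W = np.full((20, 3), np.sqrt(X.mean() / 3))  # same flat initialisation idea

tol, eps = 1e-4, 1e-12
for _ in range(200):
    W_prev = W.copy()
    W *= (X @ H.T) / (W @ H @ H.T + eps)     # multiplicative update rule
    if np.linalg.norm(W - W_prev) / np.linalg.norm(W) < tol:
        break

print(np.linalg.norm(X - W @ H))  # reconstruction error after the updates
```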
1,726 | def convert_bbox_to_z(bbox):
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
x = bbox[0] + w / 2.
y = bbox[1] + h / 2.
s = w * h # scale is just area
r = w / float(h + 1e-6)
return np.array([x, y, s, r]).reshape((4, 1))
|
Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
[x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
the aspect ratio
| 34 | 49 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def convert_bbox_to_z(bbox):
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
x = bbox[0] + w / 2.
y = bbox[1] + h / 2.
s = w * h # scale is just area
r = w / float(h + 1e-6)
return np.array([x, y, s, r]).reshape((4, 1))
```
###Assistant :
Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
[x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
the aspect ratio
|
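A quick worked example of the conversion documented above, with a copy of the function so the snippet runs on its own:
```Python
import numpy as np

def convert_bbox_to_z(bbox):
    # [x1, y1, x2, y2] -> centre (x, y), area s = w*h, aspect ratio r = w/h
    w = bbox[2] - bbox[0]
    h = bbox[3] - bbox[1]
    x = bbox[0] + w / 2.
    y = bbox[1] + h / 2.
    return np.array([x, y, w * h, w / float(h + 1e-6)]).reshape((4, 1))

print(convert_bbox_to_z([0, 0, 4, 2]).ravel())  # centre (2, 1), area 8, ratio ~2
```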
1,727 | def remove_whitespace(string, leading=False, trailing=False):
# Remove any leading new line characters along with any surrounding white space
if leading:
string = re.sub(r'^\s*\n+\s*', '', string)
# Remove any trailing new line characters along with any surrounding white space
if trailing:
string = re.sub(r'\s*\n+\s*$', '', string)
# Replace new line characters and absorb any surrounding space.
string = re.sub(r'\s*\n\s*', ' ', string)
# TODO need some way to get rid of extra spaces in e.g. text <span> </span> text
return re.sub(r'\s+', ' ', string)
| Remove white space from a string.
Args:
string(str): The string to remove white space from.
leading(bool, optional): Remove leading new lines when True.
trailing(bool, optional): Remove trailing new lines when True.
Returns:
str: The input string with new line characters removed and white space squashed.
Examples:
Single or multiple new line characters are replaced with space.
>>> remove_whitespace("abc\\ndef")
'abc def'
>>> remove_whitespace("abc\\n\\n\\ndef")
'abc def'
New line characters surrounded by white space are replaced with a single space.
>>> remove_whitespace("abc \\n \\n \\n def")
'abc def'
>>> remove_whitespace("abc \\n \\n \\n def")
'abc def'
Leading and trailing new lines are replaced with a single space.
>>> remove_whitespace("\\nabc")
' abc'
>>> remove_whitespace(" \\n abc")
' abc'
>>> remove_whitespace("abc\\n")
'abc '
>>> remove_whitespace("abc \\n ")
'abc '
Use ``leading=True`` to remove leading new line characters, including any surrounding
white space:
>>> remove_whitespace("\\nabc", leading=True)
'abc'
>>> remove_whitespace(" \\n abc", leading=True)
'abc'
Use ``trailing=True`` to remove trailing new line characters, including any surrounding
white space:
>>> remove_whitespace("abc \\n ", trailing=True)
'abc'
| 166 | 82 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def remove_whitespace(string, leading=False, trailing=False):
# Remove any leading new line characters along with any surrounding white space
if leading:
string = re.sub(r'^\s*\n+\s*', '', string)
# Remove any trailing new line characters along with any surrounding white space
if trailing:
string = re.sub(r'\s*\n+\s*$', '', string)
# Replace new line characters and absorb any surrounding space.
string = re.sub(r'\s*\n\s*', ' ', string)
# TODO need some way to get rid of extra spaces in e.g. text <span> </span> text
return re.sub(r'\s+', ' ', string)
```
###Assistant : Remove white space from a string.
Args:
string(str): The string to remove white space from.
leading(bool, optional): Remove leading new lines when True.
trailing(bool, optional): Remove trailing new lines when True.
Returns:
str: The input string with new line characters removed and white space squashed.
Examples:
Single or multiple new line characters are replaced with space.
>>> remove_whitespace("abc\\ndef")
'abc def'
>>> remove_whitespace("abc\\n\\n\\ndef")
'abc def'
New line characters surrounded by white space are replaced with a single space.
>>> remove_whitespace("abc \\n \\n \\n def")
'abc def'
>>> remove_whitespace("abc \\n \\n \\n def")
'abc def'
Leading and trailing new lines are replaced with a single space.
>>> remove_whitespace("\\nabc")
' abc'
>>> remove_whitespace(" \\n abc")
' abc'
>>> remove_whitespace("abc\\n")
'abc '
>>> remove_whitespace("abc \\n ")
'abc '
Use ``leading=True`` to remove leading new line characters, including any surrounding
white space:
>>> remove_whitespace("\\nabc", leading=True)
'abc'
>>> remove_whitespace(" \\n abc", leading=True)
'abc'
Use ``trailing=True`` to remove trailing new line characters, including any surrounding
white space:
>>> remove_whitespace("abc \\n ", trailing=True)
'abc'
|
1,728 | def selectionChanged(self, selected, deselected):
if not self._active:
return
super().selectionChanged(selected, deselected)
indexes = selected.indexes()
if not indexes:
return
data = str(self._model().data(indexes[0]))
self.selection_changed.emit(data)
| Extend selectionChanged to call completers selection_changed. | 6 | 21 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def selectionChanged(self, selected, deselected):
if not self._active:
return
super().selectionChanged(selected, deselected)
indexes = selected.indexes()
if not indexes:
return
data = str(self._model().data(indexes[0]))
self.selection_changed.emit(data)
```
###Assistant : Extend selectionChanged to call completers selection_changed.
|
1,729 | def __mul__(self, other):
newlist = [v for v in self.args]
other = sympify(other)
for i, v in enumerate(newlist):
newlist[i] = (other * newlist[i][0], newlist[i][1])
return Vector(newlist)
| Multiplies the Vector by a sympifyable expression.
Parameters
==========
other : Sympifyable
The scalar to multiply this Vector with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> from sympy import Symbol
>>> N = ReferenceFrame('N')
>>> b = Symbol('b')
>>> V = 10 * b * N.x
>>> print(V)
10*b*N.x
| 50 | 26 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def __mul__(self, other):
newlist = [v for v in self.args]
other = sympify(other)
for i, v in enumerate(newlist):
newlist[i] = (other * newlist[i][0], newlist[i][1])
return Vector(newlist)
```
###Assistant : Multiplies the Vector by a sympifyable expression.
Parameters
==========
other : Sympifyable
The scalar to multiply this Vector with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> from sympy import Symbol
>>> N = ReferenceFrame('N')
>>> b = Symbol('b')
>>> V = 10 * b * N.x
>>> print(V)
10*b*N.x
|
1,730 | def to_dict(self) -> Dict:
return serve_application_to_schema(self._deployments.values()).dict()
| Returns this Application's deployments as a dictionary.
This dictionary adheres to the Serve REST API schema. It can be deployed
via the Serve REST API.
Returns:
Dict: The Application's deployments formatted in a dictionary.
| 34 | 6 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def to_dict(self) -> Dict:
return serve_application_to_schema(self._deployments.values()).dict()
```
###Assistant : Returns this Application's deployments as a dictionary.
This dictionary adheres to the Serve REST API schema. It can be deployed
via the Serve REST API.
Returns:
Dict: The Application's deployments formatted in a dictionary.
|
1,731 | def get_shift_details(shift_type_name, for_timestamp=None):
if not shift_type_name:
return None
if not for_timestamp:
for_timestamp = now_datetime()
shift_type = frappe.get_doc('Shift Type', shift_type_name)
shift_actual_start = shift_type.start_time - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time)
if shift_type.start_time > shift_type.end_time:
# shift spans across 2 different days
if get_time(for_timestamp.time()) >= get_time(shift_actual_start):
# if for_timestamp is greater than start time, it's in the first day
start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time
for_timestamp = for_timestamp + timedelta(days=1)
end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time
elif get_time(for_timestamp.time()) < get_time(shift_actual_start):
# if for_timestamp is less than start time, it's in the second day
end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time
for_timestamp = for_timestamp + timedelta(days=-1)
start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time
else:
# start and end times fall on the same day
start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time
end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time
actual_start = start_datetime - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time)
actual_end = end_datetime + timedelta(minutes=shift_type.allow_check_out_after_shift_end_time)
return frappe._dict({
'shift_type': shift_type,
'start_datetime': start_datetime,
'end_datetime': end_datetime,
'actual_start': actual_start,
'actual_end': actual_end
})
| Returns Shift Details which contain some additional information as described below.
'shift_details' contains the following keys:
'shift_type' - Object of DocType Shift Type,
'start_datetime' - Date and Time of shift start on given date,
'end_datetime' - Date and Time of shift end on given date,
'actual_start' - datetime of shift start after adding 'begin_check_in_before_shift_start_time',
'actual_end' - datetime of shift end after adding 'allow_check_out_after_shift_end_time'(None is returned if this is zero)
:param shift_type_name: shift type name for which shift_details is required.
:param for_timestamp: DateTime value on which shift_details are required
| 88 | 149 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_shift_details(shift_type_name, for_timestamp=None):
if not shift_type_name:
return None
if not for_timestamp:
for_timestamp = now_datetime()
shift_type = frappe.get_doc('Shift Type', shift_type_name)
shift_actual_start = shift_type.start_time - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time)
if shift_type.start_time > shift_type.end_time:
# shift spans across 2 different days
if get_time(for_timestamp.time()) >= get_time(shift_actual_start):
# if for_timestamp is greater than start time, it's in the first day
start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time
for_timestamp = for_timestamp + timedelta(days=1)
end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time
elif get_time(for_timestamp.time()) < get_time(shift_actual_start):
# if for_timestamp is less than start time, its in the second day
end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time
for_timestamp = for_timestamp + timedelta(days=-1)
start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time
else:
# start and end times fall on the same day
start_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.start_time
end_datetime = datetime.combine(for_timestamp, datetime.min.time()) + shift_type.end_time
actual_start = start_datetime - timedelta(minutes=shift_type.begin_check_in_before_shift_start_time)
actual_end = end_datetime + timedelta(minutes=shift_type.allow_check_out_after_shift_end_time)
return frappe._dict({
'shift_type': shift_type,
'start_datetime': start_datetime,
'end_datetime': end_datetime,
'actual_start': actual_start,
'actual_end': actual_end
})
```
###Assistant : Returns Shift Details which contain some additional information as described below.
'shift_details' contains the following keys:
'shift_type' - Object of DocType Shift Type,
'start_datetime' - Date and Time of shift start on given date,
'end_datetime' - Date and Time of shift end on given date,
'actual_start' - datetime of shift start after adding 'begin_check_in_before_shift_start_time',
'actual_end' - datetime of shift end after adding 'allow_check_out_after_shift_end_time'(None is returned if this is zero)
:param shift_type_name: shift type name for which shift_details is required.
:param for_timestamp: DateTime value on which shift_details are required
|
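The overnight branch described above boils down to simple date arithmetic: when `start_time > end_time` the shift crosses midnight, so the end lands on the next day. A standalone sketch (times are `timedelta`s added to midnight, matching how the original combines them):
```Python
from datetime import datetime, time, timedelta

day = datetime(2024, 5, 1)
start_time = timedelta(hours=22)   # shift starts 22:00
end_time = timedelta(hours=6)      # shift ends 06:00 the next day

start_datetime = datetime.combine(day, time.min) + start_time
end_datetime = datetime.combine(day + timedelta(days=1), time.min) + end_time
print(start_datetime, "->", end_datetime)  # 2024-05-01 22:00:00 -> 2024-05-02 06:00:00
```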
1,732 | def subprocess_run_helper(func, *args, timeout, extra_env=None):
target = func.__name__
module = func.__module__
proc = subprocess.run(
[sys.executable,
"-c",
f"from {module} import {target}; {target}()",
*args],
env={**os.environ, "SOURCE_DATE_EPOCH": "0", **(extra_env or {})},
timeout=timeout, check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
return proc
|
Run a function in a sub-process.
Parameters
----------
func : function
The function to be run. It must be in a module that is importable.
*args : str
Any additional command line arguments to be passed in
the first argument to ``subprocess.run``.
extra_env : dict[str, str]
Any additional environment variables to be set for the subprocess.
| 56 | 35 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def subprocess_run_helper(func, *args, timeout, extra_env=None):
target = func.__name__
module = func.__module__
proc = subprocess.run(
[sys.executable,
"-c",
f"from {module} import {target}; {target}()",
*args],
env={**os.environ, "SOURCE_DATE_EPOCH": "0", **(extra_env or {})},
timeout=timeout, check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
return proc
```
###Assistant :
Run a function in a sub-process.
Parameters
----------
func : function
The function to be run. It must be in a module that is importable.
*args : str
Any additional command line arguments to be passed in
the first argument to ``subprocess.run``.
extra_env : dict[str, str]
Any additional environment variables to be set for the subprocess.
|
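The same `-c` trick as the helper above, demonstrated on a standard-library function so the snippet needs no project imports: spawn a fresh interpreter, import a target by module and name, and call it:
```Python
import subprocess
import sys

module, target = "platform", "python_version"
proc = subprocess.run(
    [sys.executable, "-c", f"from {module} import {target}; print({target}())"],
    capture_output=True, text=True, timeout=30, check=True,
)
print(proc.stdout.strip())  # e.g. "3.11.4"
```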
1,733 | def add_preheated_app_session(self) -> None:
session = self._create_or_reuse_app_session(ws=None)
session.handle_rerun_script_request(is_preheat=True)
| Register a fake browser with the server and run the script.
This is used to start running the user's script even before the first
browser connects.
| 26 | 8 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def add_preheated_app_session(self) -> None:
session = self._create_or_reuse_app_session(ws=None)
session.handle_rerun_script_request(is_preheat=True)
```
###Assistant : Register a fake browser with the server and run the script.
This is used to start running the user's script even before the first
browser connects.
|
1,734 | def score(self, testing_features, testing_target):
if self.fitted_pipeline_ is None:
raise RuntimeError(
"A pipeline has not yet been optimized. Please call fit() first."
)
testing_features, testing_target = self._check_dataset(
testing_features, testing_target, sample_weight=None
)
# If the scoring function is a string, we must adjust to use the sklearn
# scoring interface
if isinstance(self.scoring_function, str):
scorer = SCORERS[self.scoring_function]
elif callable(self.scoring_function):
scorer = self.scoring_function
else:
raise RuntimeError(
"The scoring function should either be the name of a scikit-learn scorer or a scorer object"
)
score = scorer(
self.fitted_pipeline_,
testing_features.astype(np.float64),
testing_target.astype(np.float64),
)
return score
| Return the score on the given testing data using the user-specified scoring function.
Parameters
----------
testing_features: array-like {n_samples, n_features}
Feature matrix of the testing set
testing_target: array-like {n_samples}
List of class labels for prediction in the testing set
Returns
-------
accuracy_score: float
The estimated test set accuracy
| 47 | 88 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def score(self, testing_features, testing_target):
if self.fitted_pipeline_ is None:
raise RuntimeError(
"A pipeline has not yet been optimized. Please call fit() first."
)
testing_features, testing_target = self._check_dataset(
testing_features, testing_target, sample_weight=None
)
# If the scoring function is a string, we must adjust to use the sklearn
# scoring interface
if isinstance(self.scoring_function, str):
scorer = SCORERS[self.scoring_function]
elif callable(self.scoring_function):
scorer = self.scoring_function
else:
raise RuntimeError(
"The scoring function should either be the name of a scikit-learn scorer or a scorer object"
)
score = scorer(
self.fitted_pipeline_,
testing_features.astype(np.float64),
testing_target.astype(np.float64),
)
return score
```
###Assistant : Return the score on the given testing data using the user-specified scoring function.
Parameters
----------
testing_features: array-like {n_samples, n_features}
Feature matrix of the testing set
testing_target: array-like {n_samples}
List of class labels for prediction in the testing set
Returns
-------
accuracy_score: float
The estimated test set accuracy
|
1,735 | def test_as_dict():
expected = {
LENGTH: UnitOfLength.KILOMETERS,
WIND_SPEED: UnitOfSpeed.METERS_PER_SECOND,
TEMPERATURE: UnitOfTemperature.CELSIUS,
VOLUME: UnitOfVolume.LITERS,
MASS: UnitOfMass.GRAMS,
PRESSURE: UnitOfPressure.PA,
ACCUMULATED_PRECIPITATION: UnitOfLength.MILLIMETERS,
}
assert expected == METRIC_SYSTEM.as_dict()
| Test that the as_dict() method returns the expected dictionary. | 9 | 24 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_as_dict():
expected = {
LENGTH: UnitOfLength.KILOMETERS,
WIND_SPEED: UnitOfSpeed.METERS_PER_SECOND,
TEMPERATURE: UnitOfTemperature.CELSIUS,
VOLUME: UnitOfVolume.LITERS,
MASS: UnitOfMass.GRAMS,
PRESSURE: UnitOfPressure.PA,
ACCUMULATED_PRECIPITATION: UnitOfLength.MILLIMETERS,
}
assert expected == METRIC_SYSTEM.as_dict()
```
###Assistant : Test that the as_dict() method returns the expected dictionary.
|
1,736 | def _get_inputs(self):
logger.debug("Getting inputs")
if len(self.input_shape) == 3:
input_shapes = [self.input_shape, self.input_shape]
else:
input_shapes = self.input_shape
inputs = [Input(shape=shape, name=f"face_in_{side}")
for side, shape in zip(("a", "b"), input_shapes)]
logger.debug("inputs: %s", inputs)
return inputs
| Obtain the standardized inputs for the model.
The inputs will be returned for the "A" and "B" sides in the shape as defined by
:attr:`input_shape`.
Returns
-------
list
A list of :class:`keras.layers.Input` tensors. This will be a list of 2 tensors (one
for each side) each of shapes :attr:`input_shape`.
| 49 | 32 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _get_inputs(self):
logger.debug("Getting inputs")
if len(self.input_shape) == 3:
input_shapes = [self.input_shape, self.input_shape]
else:
input_shapes = self.input_shape
inputs = [Input(shape=shape, name=f"face_in_{side}")
for side, shape in zip(("a", "b"), input_shapes)]
logger.debug("inputs: %s", inputs)
return inputs
```
###Assistant : Obtain the standardized inputs for the model.
The inputs will be returned for the "A" and "B" sides in the shape as defined by
:attr:`input_shape`.
Returns
-------
list
A list of :class:`keras.layers.Input` tensors. This will be a list of 2 tensors (one
for each side) each of shapes :attr:`input_shape`.
|
1,737 | def collect_units_install() -> t.List[PipInstall]:
requirements_paths = [] # type: t.List[t.Tuple[str, str]]
constraints_paths = [] # type: t.List[t.Tuple[str, str]]
path = os.path.join(data_context().content.unit_path, 'requirements.txt')
requirements_paths.append((data_context().content.root, path))
path = os.path.join(data_context().content.unit_path, 'constraints.txt')
constraints_paths.append((data_context().content.root, path))
return collect_install(requirements_paths, constraints_paths)
| Return details necessary for the specified units pip install(s). | 9 | 33 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def collect_units_install() -> t.List[PipInstall]:
requirements_paths = [] # type: t.List[t.Tuple[str, str]]
constraints_paths = [] # type: t.List[t.Tuple[str, str]]
path = os.path.join(data_context().content.unit_path, 'requirements.txt')
requirements_paths.append((data_context().content.root, path))
path = os.path.join(data_context().content.unit_path, 'constraints.txt')
constraints_paths.append((data_context().content.root, path))
return collect_install(requirements_paths, constraints_paths)
```
###Assistant : Return details necessary for the specified units pip install(s).
|
1,738 | def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = ACT2FN[self.config.hidden_act](x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
,
YOSO_START_DOCSTRING,
) | YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. | 23 | 37 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = ACT2FN[self.config.hidden_act](x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
,
YOSO_START_DOCSTRING,
)
```
###Assistant : YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks.
|
1,739 | def delete_batch(self, pk_list, using):
# number of objects deleted
num_deleted = 0
field = self.get_meta().pk
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.clear_where()
self.add_filter(
f"{field.attname}__in",
pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE],
)
num_deleted += self.do_query(
self.get_meta().db_table, self.where, using=using
)
return num_deleted
|
Set up and execute delete queries for all the objects in pk_list.
More than one physical query may be executed if there are a
lot of values in pk_list.
| 29 | 39 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def delete_batch(self, pk_list, using):
# number of objects deleted
num_deleted = 0
field = self.get_meta().pk
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.clear_where()
self.add_filter(
f"{field.attname}__in",
pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE],
)
num_deleted += self.do_query(
self.get_meta().db_table, self.where, using=using
)
return num_deleted
```
###Assistant :
Set up and execute delete queries for all the objects in pk_list.
More than one physical query may be executed if there are a
lot of values in pk_list.
|
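As an aside, a minimal, framework-free sketch of the chunked-deletion pattern described above; the chunk size and the `delete_fn` callback are illustrative assumptions, not Django internals:
```python
GET_ITERATOR_CHUNK_SIZE = 100  # illustrative chunk size only


def delete_in_chunks(pk_list, delete_fn):
    """Delete primary keys in fixed-size chunks; return the total number deleted."""
    num_deleted = 0
    for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
        chunk = pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]
        num_deleted += delete_fn(chunk)  # one physical query per chunk
    return num_deleted


# e.g. delete_in_chunks(ids, lambda c: MyModel.objects.filter(pk__in=c).delete()[0])
```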
1,740 | def generate_ansible_coverage_config() -> str:
coverage_config =
return coverage_config
| Generate code coverage configuration for Ansible tests.
[run]
branch = True
concurrency = multiprocessing
parallel = True
omit =
*/python*/dist-packages/*
*/python*/site-packages/*
*/python*/distutils/*
*/pyshared/*
*/pytest
*/AnsiballZ_*.py
*/test/results/*
| 26 | 8 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def generate_ansible_coverage_config() -> str:
coverage_config =
return coverage_config
```
###Assistant : Generate code coverage configuration for Ansible tests.
[run]
branch = True
concurrency = multiprocessing
parallel = True
omit =
*/python*/dist-packages/*
*/python*/site-packages/*
*/python*/distutils/*
*/pyshared/*
*/pytest
*/AnsiballZ_*.py
*/test/results/*
|
1,741 | def get_allowed_roles_to_invite(self):
return [
r
for r in organization_roles.get_all()
if r.priority <= organization_roles.get(self.role).priority
]
|
Return a list of roles which that member could invite
Must check if the member has member:admin first before checking
| 20 | 14 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_allowed_roles_to_invite(self):
return [
r
for r in organization_roles.get_all()
if r.priority <= organization_roles.get(self.role).priority
]
```
###Assistant :
Return a list of roles which that member could invite
Must check if the member has member:admin first before checking
|
1,742 | def search_space_spec(self) -> Dict[str, ParameterSpec]:
raise NotImplementedError()
|
Space specification (sample points).
Mapping from spec name to ParameterSpec. The names in choices should be in the same format of export.
For example: ::
{"layer1": ParameterSpec(values=["conv", "pool"])}
| 28 | 7 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def search_space_spec(self) -> Dict[str, ParameterSpec]:
raise NotImplementedError()
```
###Assistant :
Space specification (sample points).
Mapping from spec name to ParameterSpec. The names in choices should be in the same format of export.
For example: ::
{"layer1": ParameterSpec(values=["conv", "pool"])}
|
1,743 | def test_context_as_admin(self) -> None:
# Create a room. We're not part of it.
user_id = self.register_user("test", "test")
user_tok = self.login("test", "test")
room_id = self.helper.create_room_as(user_id, tok=user_tok)
# Populate the room with events.
events = []
for i in range(30):
events.append(
self.helper.send_event(
room_id, "com.example.test", content={"index": i}, tok=user_tok
)
)
# Now let's fetch the context for this room.
midway = (len(events) - 1) // 2
channel = self.make_request(
"GET",
"/_synapse/admin/v1/rooms/%s/context/%s"
% (room_id, events[midway]["event_id"]),
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual(
channel.json_body["event"]["event_id"], events[midway]["event_id"]
)
for found_event in channel.json_body["events_before"]:
for j, posted_event in enumerate(events):
if found_event["event_id"] == posted_event["event_id"]:
self.assertTrue(j < midway)
break
else:
self.fail("Event %s from events_before not found" % j)
for found_event in channel.json_body["events_after"]:
for j, posted_event in enumerate(events):
if found_event["event_id"] == posted_event["event_id"]:
self.assertTrue(j > midway)
break
else:
self.fail("Event %s from events_after not found" % j)
|
Test that, as admin, we can find the context of an event without having joined the room.
| 17 | 132 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_context_as_admin(self) -> None:
# Create a room. We're not part of it.
user_id = self.register_user("test", "test")
user_tok = self.login("test", "test")
room_id = self.helper.create_room_as(user_id, tok=user_tok)
# Populate the room with events.
events = []
for i in range(30):
events.append(
self.helper.send_event(
room_id, "com.example.test", content={"index": i}, tok=user_tok
)
)
# Now let's fetch the context for this room.
midway = (len(events) - 1) // 2
channel = self.make_request(
"GET",
"/_synapse/admin/v1/rooms/%s/context/%s"
% (room_id, events[midway]["event_id"]),
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual(
channel.json_body["event"]["event_id"], events[midway]["event_id"]
)
for found_event in channel.json_body["events_before"]:
for j, posted_event in enumerate(events):
if found_event["event_id"] == posted_event["event_id"]:
self.assertTrue(j < midway)
break
else:
self.fail("Event %s from events_before not found" % j)
for found_event in channel.json_body["events_after"]:
for j, posted_event in enumerate(events):
if found_event["event_id"] == posted_event["event_id"]:
self.assertTrue(j > midway)
break
else:
self.fail("Event %s from events_after not found" % j)
```
###Assistant :
Test that, as admin, we can find the context of an event without having joined the room.
|
1,744 | def marginal_std(self, t):
return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
|
Compute sigma_t of a given continuous-time label t in [0, T].
| 11 | 9 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def marginal_std(self, t):
return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
```
###Assistant :
Compute sigma_t of a given continuous-time label t in [0, T].
|
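A hedged numeric sketch of the formula in that docstring, sigma_t = sqrt(1 - exp(2 * log alpha_t)); the stand-in `marginal_log_mean_coeff` below uses an illustrative linear (VP-style) schedule, not the project's actual defaults:
```python
import torch


def marginal_log_mean_coeff(t, beta_0=0.1, beta_1=20.0):
    # log alpha_t for a linear noise schedule (parameters chosen for illustration)
    return -0.25 * t ** 2 * (beta_1 - beta_0) - 0.5 * t * beta_0


def marginal_std(t):
    # sigma_t = sqrt(1 - exp(2 * log alpha_t)), mirroring the method above
    return torch.sqrt(1.0 - torch.exp(2.0 * marginal_log_mean_coeff(t)))


print(marginal_std(torch.tensor([0.01, 0.5, 1.0])))  # grows from ~0 toward 1 as t -> T
```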
1,745 | def for_request(cls, request):
attr_name = cls.get_cache_attr_name()
if hasattr(request, attr_name):
return getattr(request, attr_name)
site = Site.find_for_request(request)
site_settings = cls.for_site(site)
# to allow more efficient page url generation
site_settings._request = request
setattr(request, attr_name, site_settings)
return site_settings
|
Get or create an instance of this model for the request,
and cache the result on the request for faster repeat access.
| 22 | 34 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def for_request(cls, request):
attr_name = cls.get_cache_attr_name()
if hasattr(request, attr_name):
return getattr(request, attr_name)
site = Site.find_for_request(request)
site_settings = cls.for_site(site)
# to allow more efficient page url generation
site_settings._request = request
setattr(request, attr_name, site_settings)
return site_settings
```
###Assistant :
Get or create an instance of this model for the request,
and cache the result on the request for faster repeat access.
|
1,746 | def _enable_ocsp_stapling(self, ssl_vhost, unused_options):
min_apache_ver = (2, 3, 3)
if self.get_version() < min_apache_ver:
raise errors.PluginError(
"Unable to set OCSP directives.\n"
"Apache version is below 2.3.3.")
if "socache_shmcb_module" not in self.parser.modules:
self.enable_mod("socache_shmcb")
# Check if there's an existing SSLUseStapling directive on.
use_stapling_aug_path = self.parser.find_dir("SSLUseStapling",
"on", start=ssl_vhost.path)
if not use_stapling_aug_path:
self.parser.add_dir(ssl_vhost.path, "SSLUseStapling", "on")
ssl_vhost_aug_path = self._escape(parser.get_aug_path(ssl_vhost.filep))
# Check if there's an existing SSLStaplingCache directive.
stapling_cache_aug_path = self.parser.find_dir('SSLStaplingCache',
None, ssl_vhost_aug_path)
# We'll simply delete the directive, so that we'll have a
# consistent OCSP cache path.
if stapling_cache_aug_path:
self.parser.aug.remove(
re.sub(r"/\w*$", "", stapling_cache_aug_path[0]))
self.parser.add_dir_to_ifmodssl(ssl_vhost_aug_path,
"SSLStaplingCache",
["shmcb:/var/run/apache2/stapling_cache(128000)"])
msg = "OCSP Stapling was enabled on SSL Vhost: %s.\n"%(
ssl_vhost.filep)
self.save_notes += msg
self.save()
logger.info(msg)
| Enables OCSP Stapling
In OCSP, each client (e.g. browser) would have to query the
OCSP Responder to validate that the site certificate was not revoked.
Enabling OCSP Stapling would allow the web server to query the OCSP
Responder and staple its response to the offered certificate during
TLS, i.e. clients would not have to query the OCSP responder.
OCSP Stapling enablement on Apache implicitly depends on
SSLCertificateChainFile being set by other code.
.. note:: This function saves the configuration
:param ssl_vhost: Destination of traffic, an ssl enabled vhost
:type ssl_vhost: :class:`~certbot_apache._internal.obj.VirtualHost`
:param unused_options: Not currently used
:type unused_options: Not Available
:returns: Success, general_vhost (HTTP vhost)
:rtype: (bool, :class:`~certbot_apache._internal.obj.VirtualHost`)
| 107 | 108 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _enable_ocsp_stapling(self, ssl_vhost, unused_options):
min_apache_ver = (2, 3, 3)
if self.get_version() < min_apache_ver:
raise errors.PluginError(
"Unable to set OCSP directives.\n"
"Apache version is below 2.3.3.")
if "socache_shmcb_module" not in self.parser.modules:
self.enable_mod("socache_shmcb")
# Check if there's an existing SSLUseStapling directive on.
use_stapling_aug_path = self.parser.find_dir("SSLUseStapling",
"on", start=ssl_vhost.path)
if not use_stapling_aug_path:
self.parser.add_dir(ssl_vhost.path, "SSLUseStapling", "on")
ssl_vhost_aug_path = self._escape(parser.get_aug_path(ssl_vhost.filep))
# Check if there's an existing SSLStaplingCache directive.
stapling_cache_aug_path = self.parser.find_dir('SSLStaplingCache',
None, ssl_vhost_aug_path)
# We'll simply delete the directive, so that we'll have a
# consistent OCSP cache path.
if stapling_cache_aug_path:
self.parser.aug.remove(
re.sub(r"/\w*$", "", stapling_cache_aug_path[0]))
self.parser.add_dir_to_ifmodssl(ssl_vhost_aug_path,
"SSLStaplingCache",
["shmcb:/var/run/apache2/stapling_cache(128000)"])
msg = "OCSP Stapling was enabled on SSL Vhost: %s.\n"%(
ssl_vhost.filep)
self.save_notes += msg
self.save()
logger.info(msg)
```
###Assistant : Enables OCSP Stapling
In OCSP, each client (e.g. browser) would have to query the
OCSP Responder to validate that the site certificate was not revoked.
Enabling OCSP Stapling, would allow the web-server to query the OCSP
Responder, and staple its response to the offered certificate during
TLS. i.e. clients would not have to query the OCSP responder.
OCSP Stapling enablement on Apache implicitly depends on
SSLCertificateChainFile being set by other code.
.. note:: This function saves the configuration
:param ssl_vhost: Destination of traffic, an ssl enabled vhost
:type ssl_vhost: :class:`~certbot_apache._internal.obj.VirtualHost`
:param unused_options: Not currently used
:type unused_options: Not Available
:returns: Success, general_vhost (HTTP vhost)
:rtype: (bool, :class:`~certbot_apache._internal.obj.VirtualHost`)
|
1,747 | def make_regional_gl_entries(gl_entries, doc):
country = frappe.get_cached_value("Company", doc.company, "country")
if country != "United Arab Emirates":
return gl_entries
if doc.reverse_charge == "Y":
tax_accounts = get_tax_accounts(doc.company)
for tax in doc.get("taxes"):
if tax.category not in ("Total", "Valuation and Total"):
continue
gl_entries = make_gl_entry(tax, gl_entries, doc, tax_accounts)
return gl_entries
| Hooked to make_regional_gl_entries in Purchase Invoice.It appends the region specific general ledger entries to the list of GL Entries. | 19 | 44 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def make_regional_gl_entries(gl_entries, doc):
country = frappe.get_cached_value("Company", doc.company, "country")
if country != "United Arab Emirates":
return gl_entries
if doc.reverse_charge == "Y":
tax_accounts = get_tax_accounts(doc.company)
for tax in doc.get("taxes"):
if tax.category not in ("Total", "Valuation and Total"):
continue
gl_entries = make_gl_entry(tax, gl_entries, doc, tax_accounts)
return gl_entries
```
###Assistant : Hooked to make_regional_gl_entries in Purchase Invoice.It appends the region specific general ledger entries to the list of GL Entries.
|
1,748 | def private_param(param):
return pytest.param(
*param,
marks=pytest.mark.skipif(
not _run_private_tests,
reason="Skipping: this test is marked private, set RUN_PRIVATE=1 in your environment to run",
),
)
| Wrap param to mark it as private, meaning it requires credentials to run.
Private tests are skipped by default. Set the RUN_PRIVATE environment variable to a truth value to run them.
| 31 | 23 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def private_param(param):
return pytest.param(
*param,
marks=pytest.mark.skipif(
not _run_private_tests,
reason="Skipping: this test is marked private, set RUN_PRIVATE=1 in your environment to run",
),
)
```
###Assistant : Wrap param to mark it as private, meaning it requires credentials to run.
Private tests are skipped by default. Set the RUN_PRIVATE environment variable to a truth value to run them.
|
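A hedged usage sketch of how such a wrapper typically plugs into `pytest.mark.parametrize`; the test and parameter values are invented for illustration:
```python
import pytest


@pytest.mark.parametrize(
    "model_name, expected_dim",
    [
        ("public-model", 16),                  # always collected
        private_param(("private-model", 32)),  # skipped unless RUN_PRIVATE=1
    ],
)
def test_model_dim(model_name, expected_dim):
    assert isinstance(expected_dim, int)
```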
1,749 | def minimal_d_separator(G, u, v):
if not nx.is_directed_acyclic_graph(G):
raise nx.NetworkXError("graph should be directed acyclic")
union_uv = {u, v}
if any(n not in G.nodes for n in union_uv):
raise nx.NodeNotFound("one or more specified nodes not found in the graph")
# first construct the set of ancestors of X and Y
x_anc = nx.ancestors(G, u)
y_anc = nx.ancestors(G, v)
D_anc_xy = x_anc.union(y_anc)
D_anc_xy.update((u, v))
# second, construct the moralization of the subgraph of Anc(X,Y)
moral_G = nx.moral_graph(G.subgraph(D_anc_xy))
# find a separating set Z' in moral_G
Z_prime = set(G.predecessors(u)).union(set(G.predecessors(v)))
# perform BFS on the graph from 'x' to mark
Z_dprime = _bfs_with_marks(moral_G, u, Z_prime)
Z = _bfs_with_marks(moral_G, v, Z_dprime)
return Z
@not_implemented_for("undirected") | Compute a minimal d-separating set between 'u' and 'v'.
A d-separating set in a DAG is a set of nodes that blocks all paths
between the two nodes, 'u' and 'v'. This function
constructs a d-separating set that is "minimal", meaning it is the smallest
d-separating set for 'u' and 'v'. This is not necessarily
unique. For more details, see Notes.
Parameters
----------
G : graph
A networkx DAG.
u : node
A node in the graph, G.
v : node
A node in the graph, G.
Raises
------
NetworkXError
Raises a :exc:`NetworkXError` if the input graph is not a DAG.
NodeNotFound
If any of the input nodes are not found in the graph,
a :exc:`NodeNotFound` exception is raised.
References
----------
.. [1] Tian, J., & Paz, A. (1998). Finding Minimal D-separators.
Notes
-----
This function only finds ``a`` minimal d-separator. It does not guarantee
uniqueness, since in a DAG there may be more than one minimal d-separator
between two nodes. Moreover, this only checks for minimal separators
between two nodes, not two sets. Finding minimal d-separators between
two sets of nodes is not supported.
Uses the algorithm presented in [1]_. The complexity of the algorithm
is :math:`O(|E_{An}^m|)`, where :math:`|E_{An}^m|` stands for the
number of edges in the moralized graph of the sub-graph consisting
of only the ancestors of 'u' and 'v'. For full details, see [1]_.
The algorithm works by constructing the moral graph consisting of just
the ancestors of `u` and `v`. Then it constructs a candidate for
a separating set ``Z'`` from the predecessors of `u` and `v`.
Then BFS is run starting from `u` and marking nodes
found from ``Z'`` and calling those nodes ``Z''``.
Then BFS is run again starting from `v` and marking nodes if they are
present in ``Z''``. Those marked nodes are the returned minimal
d-separating set.
https://en.wikipedia.org/wiki/Bayesian_network#d-separation
| 306 | 108 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def minimal_d_separator(G, u, v):
if not nx.is_directed_acyclic_graph(G):
raise nx.NetworkXError("graph should be directed acyclic")
union_uv = {u, v}
if any(n not in G.nodes for n in union_uv):
raise nx.NodeNotFound("one or more specified nodes not found in the graph")
# first construct the set of ancestors of X and Y
x_anc = nx.ancestors(G, u)
y_anc = nx.ancestors(G, v)
D_anc_xy = x_anc.union(y_anc)
D_anc_xy.update((u, v))
# second, construct the moralization of the subgraph of Anc(X,Y)
moral_G = nx.moral_graph(G.subgraph(D_anc_xy))
# find a separating set Z' in moral_G
Z_prime = set(G.predecessors(u)).union(set(G.predecessors(v)))
# perform BFS on the graph from 'x' to mark
Z_dprime = _bfs_with_marks(moral_G, u, Z_prime)
Z = _bfs_with_marks(moral_G, v, Z_dprime)
return Z
@not_implemented_for("undirected")
```
###Assistant : Compute a minimal d-separating set between 'u' and 'v'.
A d-separating set in a DAG is a set of nodes that blocks all paths
between the two nodes, 'u' and 'v'. This function
constructs a d-separating set that is "minimal", meaning it is the smallest
d-separating set for 'u' and 'v'. This is not necessarily
unique. For more details, see Notes.
Parameters
----------
G : graph
A networkx DAG.
u : node
A node in the graph, G.
v : node
A node in the graph, G.
Raises
------
NetworkXError
Raises a :exc:`NetworkXError` if the input graph is not a DAG.
NodeNotFound
If any of the input nodes are not found in the graph,
a :exc:`NodeNotFound` exception is raised.
References
----------
.. [1] Tian, J., & Paz, A. (1998). Finding Minimal D-separators.
Notes
-----
This function only finds ``a`` minimal d-separator. It does not guarantee
uniqueness, since in a DAG there may be more than one minimal d-separator
between two nodes. Moreover, this only checks for minimal separators
between two nodes, not two sets. Finding minimal d-separators between
two sets of nodes is not supported.
Uses the algorithm presented in [1]_. The complexity of the algorithm
is :math:`O(|E_{An}^m|)`, where :math:`|E_{An}^m|` stands for the
number of edges in the moralized graph of the sub-graph consisting
of only the ancestors of 'u' and 'v'. For full details, see [1]_.
The algorithm works by constructing the moral graph consisting of just
the ancestors of `u` and `v`. Then it constructs a candidate for
a separating set ``Z'`` from the predecessors of `u` and `v`.
Then BFS is run starting from `u` and marking nodes
found from ``Z'`` and calling those nodes ``Z''``.
Then BFS is run again starting from `v` and marking nodes if they are
present in ``Z''``. Those marked nodes are the returned minimal
d-separating set.
https://en.wikipedia.org/wiki/Bayesian_network#d-separation
|
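A small usage sketch on a three-node chain, assuming a NetworkX release that exposes `minimal_d_separator` at the top level (newer versions rename it `find_minimal_d_separator`):
```python
import networkx as nx

G = nx.DiGraph([("x", "z"), ("z", "y")])  # chain x -> z -> y
print(nx.minimal_d_separator(G, "x", "y"))  # {'z'}: conditioning on z blocks the only path
```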
1,750 | def _change_alignment_for_a_line(self, alignment, line_no):
self.lines[1][line_no] = alignment
if self.lines[1][line_no] == "center":
self[line_no].move_to(
np.array([self.get_center()[0], self[line_no].get_center()[1], 0]),
)
elif self.lines[1][line_no] == "right":
self[line_no].move_to(
np.array(
[
self.get_right()[0] - self[line_no].width / 2,
self[line_no].get_center()[1],
0,
],
),
)
elif self.lines[1][line_no] == "left":
self[line_no].move_to(
np.array(
[
self.get_left()[0] + self[line_no].width / 2,
self[line_no].get_center()[1],
0,
],
),
)
| Function to change one line's alignment to a specific value.
Parameters
----------
alignment : :class:`str`
Defines the alignment of the paragraph. Possible values are "left", "right", "center".
line_no : :class:`int`
Defines the line number for which we want to set the given alignment.
| 41 | 50 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _change_alignment_for_a_line(self, alignment, line_no):
self.lines[1][line_no] = alignment
if self.lines[1][line_no] == "center":
self[line_no].move_to(
np.array([self.get_center()[0], self[line_no].get_center()[1], 0]),
)
elif self.lines[1][line_no] == "right":
self[line_no].move_to(
np.array(
[
self.get_right()[0] - self[line_no].width / 2,
self[line_no].get_center()[1],
0,
],
),
)
elif self.lines[1][line_no] == "left":
self[line_no].move_to(
np.array(
[
self.get_left()[0] + self[line_no].width / 2,
self[line_no].get_center()[1],
0,
],
),
)
```
###Assistant : Function to change one line's alignment to a specific value.
Parameters
----------
alignment : :class:`str`
Defines the alignment of paragraph. Possible values are "left", "right", "center".
line_no : :class:`int`
Defines the line number for which we want to set given alignment.
|
1,751 | def test_rejoin_forgotten_by_user(self) -> None:
self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)
self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
self.get_success(self.handler.forget(self.alice_ID, self.room_id))
self.assertTrue(
self.get_success(self.store.did_forget(self.alice, self.room_id))
)
# the server has not forgotten the room
self.assertFalse(
self.get_success(self.store.is_locally_forgotten_room(self.room_id))
)
self.helper.join(self.room_id, user=self.alice, tok=self.alice_token)
# TODO: A join to a room does not invalidate the forgotten cache
# see https://github.com/matrix-org/synapse/issues/13262
self.store.did_forget.invalidate_all()
self.assertFalse(
self.get_success(self.store.did_forget(self.alice, self.room_id))
)
| Test that a user that has forgotten a room can do a re-join.
The room was not forgotten from the local server.
One local user is still a member of the room. | 31 | 51 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_rejoin_forgotten_by_user(self) -> None:
self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)
self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
self.get_success(self.handler.forget(self.alice_ID, self.room_id))
self.assertTrue(
self.get_success(self.store.did_forget(self.alice, self.room_id))
)
# the server has not forgotten the room
self.assertFalse(
self.get_success(self.store.is_locally_forgotten_room(self.room_id))
)
self.helper.join(self.room_id, user=self.alice, tok=self.alice_token)
# TODO: A join to a room does not invalidate the forgotten cache
# see https://github.com/matrix-org/synapse/issues/13262
self.store.did_forget.invalidate_all()
self.assertFalse(
self.get_success(self.store.did_forget(self.alice, self.room_id))
)
```
###Assistant : Test that a user that has forgotten a room can do a re-join.
The room was not forgotten from the local server.
One local user is still a member of the room.
|
1,752 | def std_call(func):
if os.name == "nt":
return lwingdal[func]
else:
return lgdal[func]
# #### Version-information functions. ####
# Return GDAL library version information with the given key.
_version_info = std_call("GDALVersionInfo")
_version_info.argtypes = [c_char_p]
_version_info.restype = c_char_p
|
Return the correct STDCALL function for certain OSR routines on Win32
platforms.
| 12 | 35 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def std_call(func):
if os.name == "nt":
return lwingdal[func]
else:
return lgdal[func]
# #### Version-information functions. ####
# Return GDAL library version information with the given key.
_version_info = std_call("GDALVersionInfo")
_version_info.argtypes = [c_char_p]
_version_info.restype = c_char_p
```
###Assistant :
Return the correct STDCALL function for certain OSR routines on Win32
platforms.
|
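An illustrative sketch of the same ctypes prototyping pattern applied to another routine; `GDALGetDriverCount` is a real GDAL symbol, but exposing it through this particular wrapper is an assumption made only for the example:
```python
from ctypes import c_int

# Resolve the symbol through std_call() so the platform-appropriate calling
# convention is used, then declare its C prototype.
_get_driver_count = std_call("GDALGetDriverCount")
_get_driver_count.argtypes = []
_get_driver_count.restype = c_int
```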
1,753 | def __getitem__(self, key):
use_func = key.startswith(self.prefix)
if use_func:
key = key[len(self.prefix) :]
value = super().__getitem__(key)
if use_func:
return self.func(value)
return value
|
Retrieve the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
| 28 | 21 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def __getitem__(self, key):
use_func = key.startswith(self.prefix)
if use_func:
key = key[len(self.prefix) :]
value = super().__getitem__(key)
if use_func:
return self.func(value)
return value
```
###Assistant :
Retrieve the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
|
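A self-contained sketch of the prefix-lookup pattern this `__getitem__` implements; the prefix and transform below are hypothetical values chosen for illustration:
```python
class PrefixedDict(dict):
    """Apply `func` to a value when its key is requested with `prefix`."""

    def __init__(self, data, prefix="qn_", func=str.upper):
        super().__init__(data)
        self.prefix = prefix
        self.func = func

    def __getitem__(self, key):
        use_func = key.startswith(self.prefix)
        if use_func:
            key = key[len(self.prefix):]
        value = super().__getitem__(key)
        return self.func(value) if use_func else value


d = PrefixedDict({"name": "users"})
print(d["name"])     # 'users'  (raw value)
print(d["qn_name"])  # 'USERS'  (value passed through func)
```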
1,754 | def not_enough_open_files() -> bool:
try:
import resource
except ImportError:
# resource limits is not a concept on all systems, notably Windows
return False
soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
return soft_limit < 512 or hard_limit < 512
|
The current process does not currently allow enough open files for this test.
You can increase the number of open files with `ulimit -n 512`.
| 25 | 35 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def not_enough_open_files() -> bool:
try:
import resource
except ImportError:
# resource limits is not a concept on all systems, notably Windows
return False
soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
return soft_limit < 512 or hard_limit < 512
```
###Assistant :
The current process does not currently allow enough open files for this test.
You can increase the number of open files with `ulimit -n 512`.
|
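A hedged sketch of how such a helper is typically consumed as a skip marker; the test body is invented for illustration:
```python
import pytest


@pytest.mark.skipif(
    not_enough_open_files(),
    reason="needs a file-descriptor limit of at least 512 (try `ulimit -n 512`)",
)
def test_opens_many_files(tmp_path):
    handles = [open(tmp_path / f"f{i}.txt", "w") for i in range(300)]
    for handle in handles:
        handle.close()
```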
1,755 | def configure(self, request):
# Save ordering preference
if request.user.is_authenticated:
table_name = self.__class__.__name__
if self.prefixed_order_by_field in request.GET:
# If an ordering has been specified as a query parameter, save it as the
# user's preferred ordering for this table.
ordering = request.GET.getlist(self.prefixed_order_by_field)
request.user.config.set(f'tables.{table_name}.ordering', ordering, commit=True)
elif ordering := request.user.config.get(f'tables.{table_name}.ordering'):
# If no ordering has been specified, set the preferred ordering (if any).
self.order_by = ordering
# Paginate the table results
paginate = {
'paginator_class': EnhancedPaginator,
'per_page': get_paginate_count(request)
}
tables.RequestConfig(request, paginate).configure(self)
|
Configure the table for a specific request context. This performs pagination and records
the user's preferred ordering logic.
| 18 | 79 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def configure(self, request):
# Save ordering preference
if request.user.is_authenticated:
table_name = self.__class__.__name__
if self.prefixed_order_by_field in request.GET:
# If an ordering has been specified as a query parameter, save it as the
# user's preferred ordering for this table.
ordering = request.GET.getlist(self.prefixed_order_by_field)
request.user.config.set(f'tables.{table_name}.ordering', ordering, commit=True)
elif ordering := request.user.config.get(f'tables.{table_name}.ordering'):
# If no ordering has been specified, set the preferred ordering (if any).
self.order_by = ordering
# Paginate the table results
paginate = {
'paginator_class': EnhancedPaginator,
'per_page': get_paginate_count(request)
}
tables.RequestConfig(request, paginate).configure(self)
```
###Assistant :
Configure the table for a specific request context. This performs pagination and records
the user's preferred ordering logic.
|
1,756 | def _shard_arg(arg, devices, arg_indices):
if isinstance(arg, ShardedDeviceArray) and arg_indices == arg.indices:
# The shard_arg_handlers allow an extensible set of types to be sharded, but
# inline handling for ShardedDeviceArray as a special case for performance
# NOTE: we compare indices instead of sharding_spec because
# pmap_benchmark.pmap_shard_args_benchmark indicates this is faster.
return [
buf if buf.device() == d else buf.copy_to_device(d)
for d, buf in zip(devices, arg.device_buffers)
]
else:
arg = xla.canonicalize_dtype(arg)
return shard_arg_handlers[type(arg)](arg, devices, arg_indices)
@profiler.annotate_function | Returns a list of size len(devices) containing per-device buffers.
For the C++ pmap path, we fall back to Python (this function) to shard
arguments that are not supported by the C++ `ShardArg`.
Args:
arg: The Python argument.
devices: The list of devices to shard over.
arg_indices: A list of `len(devices)` indices to use to shard the argument.
| 56 | 75 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _shard_arg(arg, devices, arg_indices):
if isinstance(arg, ShardedDeviceArray) and arg_indices == arg.indices:
# The shard_arg_handlers allow an extensible set of types to be sharded, but
# inline handling for ShardedDeviceArray as a special case for performance
# NOTE: we compare indices instead of sharding_spec because
# pmap_benchmark.pmap_shard_args_benchmark indicates this is faster.
return [
buf if buf.device() == d else buf.copy_to_device(d)
for d, buf in zip(devices, arg.device_buffers)
]
else:
arg = xla.canonicalize_dtype(arg)
return shard_arg_handlers[type(arg)](arg, devices, arg_indices)
@profiler.annotate_function
```
###Assistant : Returns a list of size len(devices) containing per-device buffers.
For the C++ pmap path, we fall back to Python (this function) to shard
arguments that are not supported by the C++ `ShardArg`.
Args:
arg: The Python argument.
devices: The list of devices to shard over.
arg_indices: A list of `len(devices)` indices to use to shard the argument.
|
1,757 | def test_rect(self):
n3x3 = coord_net_spec(ks=3, stride=1, pad=0)
n5x5 = coord_net_spec(ks=5, stride=2, pad=10)
n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10])
ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data)
ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data)
ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.data)
self.assertTrue(ax_3x3 == ax_5x5 == ax_3x5)
self.assertEquals(a_3x3, a_3x5[0])
self.assertEquals(b_3x3, b_3x5[0])
self.assertEquals(a_5x5, a_3x5[1])
self.assertEquals(b_5x5, b_3x5[1])
|
Anisotropic mapping is equivalent to its isotropic parts.
| 8 | 51 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_rect(self):
n3x3 = coord_net_spec(ks=3, stride=1, pad=0)
n5x5 = coord_net_spec(ks=5, stride=2, pad=10)
n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10])
ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data)
ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data)
ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.data)
self.assertTrue(ax_3x3 == ax_5x5 == ax_3x5)
self.assertEquals(a_3x3, a_3x5[0])
self.assertEquals(b_3x3, b_3x5[0])
self.assertEquals(a_5x5, a_3x5[1])
self.assertEquals(b_5x5, b_3x5[1])
```
###Assistant :
Anisotropic mapping is equivalent to its isotropic parts.
|
1,758 | def test_get_page_url_when_for_settings_fetched_via_for_site(self):
self._create_importantpages_object()
settings = ImportantPages.for_site(self.default_site)
# Force site root paths query beforehand
self.default_site.root_page._get_site_root_paths()
for page_fk_field, expected_result in (
("sign_up_page", "http://localhost/"),
("general_terms_page", "http://localhost/"),
("privacy_policy_page", "http://other/"),
):
with self.subTest(page_fk_field=page_fk_field):
# only the first request for each URL will trigger queries.
# 2 are triggered instead of 1 here, because tests use the
# database cache backed, and the cache is queried each time
# to fetch site root paths (because there's no 'request' to
# store them on)
with self.assertNumQueries(2):
self.assertEqual(
settings.get_page_url(page_fk_field), expected_result
)
# when called directly
self.assertEqual(
settings.get_page_url(page_fk_field), expected_result
)
# when called indirectly via shortcut
self.assertEqual(
getattr(settings.page_url, page_fk_field), expected_result
)
| ImportantPages.for_site() cannot make the settings object
request-aware, so things are a little less efficient, and the
URLs returned will not be site-relative | 22 | 102 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_get_page_url_when_for_settings_fetched_via_for_site(self):
self._create_importantpages_object()
settings = ImportantPages.for_site(self.default_site)
# Force site root paths query beforehand
self.default_site.root_page._get_site_root_paths()
for page_fk_field, expected_result in (
("sign_up_page", "http://localhost/"),
("general_terms_page", "http://localhost/"),
("privacy_policy_page", "http://other/"),
):
with self.subTest(page_fk_field=page_fk_field):
# only the first request for each URL will trigger queries.
# 2 are triggered instead of 1 here, because tests use the
# database cache backend, and the cache is queried each time
# to fetch site root paths (because there's no 'request' to
# store them on)
with self.assertNumQueries(2):
self.assertEqual(
settings.get_page_url(page_fk_field), expected_result
)
# when called directly
self.assertEqual(
settings.get_page_url(page_fk_field), expected_result
)
# when called indirectly via shortcut
self.assertEqual(
getattr(settings.page_url, page_fk_field), expected_result
)
```
###Assistant : ImportantPages.for_site() cannot make the settings object
request-aware, so things are a little less efficient, and the
URLs returned will not be site-relative
|
1,759 | def test_send_receipts_with_backoff(self):
mock_send_transaction = (
self.hs.get_federation_transport_client().send_transaction
)
mock_send_transaction.return_value = make_awaitable({})
sender = self.hs.get_federation_sender()
receipt = ReadReceipt(
"room_id", "m.read", "user_id", ["event_id"], {"ts": 1234}
)
self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
self.pump()
# expect a call to send_transaction
mock_send_transaction.assert_called_once()
json_cb = mock_send_transaction.call_args[0][1]
data = json_cb()
self.assertEqual(
data["edus"],
[
{
"edu_type": EduTypes.RECEIPT,
"content": {
"room_id": {
"m.read": {
"user_id": {
"event_ids": ["event_id"],
"data": {"ts": 1234},
}
}
}
},
}
],
)
mock_send_transaction.reset_mock()
# send the second RR
receipt = ReadReceipt(
"room_id", "m.read", "user_id", ["other_id"], {"ts": 1234}
)
self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
self.pump()
mock_send_transaction.assert_not_called()
self.reactor.advance(19)
mock_send_transaction.assert_not_called()
self.reactor.advance(10)
mock_send_transaction.assert_called_once()
json_cb = mock_send_transaction.call_args[0][1]
data = json_cb()
self.assertEqual(
data["edus"],
[
{
"edu_type": EduTypes.RECEIPT,
"content": {
"room_id": {
"m.read": {
"user_id": {
"event_ids": ["other_id"],
"data": {"ts": 1234},
}
}
}
},
}
],
)
| Send two receipts in quick succession; the second should be flushed, but
only after 20ms | 15 | 119 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_send_receipts_with_backoff(self):
mock_send_transaction = (
self.hs.get_federation_transport_client().send_transaction
)
mock_send_transaction.return_value = make_awaitable({})
sender = self.hs.get_federation_sender()
receipt = ReadReceipt(
"room_id", "m.read", "user_id", ["event_id"], {"ts": 1234}
)
self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
self.pump()
# expect a call to send_transaction
mock_send_transaction.assert_called_once()
json_cb = mock_send_transaction.call_args[0][1]
data = json_cb()
self.assertEqual(
data["edus"],
[
{
"edu_type": EduTypes.RECEIPT,
"content": {
"room_id": {
"m.read": {
"user_id": {
"event_ids": ["event_id"],
"data": {"ts": 1234},
}
}
}
},
}
],
)
mock_send_transaction.reset_mock()
# send the second RR
receipt = ReadReceipt(
"room_id", "m.read", "user_id", ["other_id"], {"ts": 1234}
)
self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
self.pump()
mock_send_transaction.assert_not_called()
self.reactor.advance(19)
mock_send_transaction.assert_not_called()
self.reactor.advance(10)
mock_send_transaction.assert_called_once()
json_cb = mock_send_transaction.call_args[0][1]
data = json_cb()
self.assertEqual(
data["edus"],
[
{
"edu_type": EduTypes.RECEIPT,
"content": {
"room_id": {
"m.read": {
"user_id": {
"event_ids": ["other_id"],
"data": {"ts": 1234},
}
}
}
},
}
],
)
```
###Assistant : Send two receipts in quick succession; the second should be flushed, but
only after 20ms
|
1,760 | def process_frame(self, processable_frame, processing_task):
frame = processable_frame.frame
token = None
cache = self.cache
sourcemaps = self.sourcemaps
all_errors = []
sourcemap_applied = False
# can't demangle if there's no filename or line number present
if not frame.get("abs_path") or not frame.get("lineno"):
return
# also can't demangle node's internal modules
# therefore we only process user-land frames (starting with /)
# or those created by bundle/webpack internals
if self.data.get("platform") == "node" and not frame.get("abs_path").startswith(
("/", "app:", "webpack:")
):
return
errors = cache.get_errors(frame["abs_path"])
if errors:
all_errors.extend(errors)
# This might fail but that's okay, we try with a different path a
# bit later down the road.
source = self.get_sourceview(frame["abs_path"])
in_app = None
new_frame = dict(frame)
raw_frame = dict(frame)
sourcemap_url, sourcemap_view = sourcemaps.get_link(frame["abs_path"])
self.sourcemaps_touched.add(sourcemap_url)
if sourcemap_view and frame.get("colno") is None:
all_errors.append(
{"type": EventError.JS_NO_COLUMN, "url": http.expose_url(frame["abs_path"])}
)
elif sourcemap_view:
if is_data_uri(sourcemap_url):
sourcemap_label = frame["abs_path"]
else:
sourcemap_label = sourcemap_url
sourcemap_label = http.expose_url(sourcemap_label)
if frame.get("function"):
minified_function_name = frame["function"]
minified_source = self.get_sourceview(frame["abs_path"])
else:
minified_function_name = minified_source = None
try:
# Errors are 1-indexed in the frames, so we need to -1 to get
# zero-indexed value from tokens.
assert frame["lineno"] > 0, "line numbers are 1-indexed"
token = sourcemap_view.lookup(
frame["lineno"] - 1, frame["colno"] - 1, minified_function_name, minified_source
)
except Exception:
token = None
all_errors.append(
{
"type": EventError.JS_INVALID_SOURCEMAP_LOCATION,
"column": frame.get("colno"),
"row": frame.get("lineno"),
"source": frame["abs_path"],
"sourcemap": sourcemap_label,
}
)
# persist the token so that we can find it later
processable_frame.data["token"] = token
# Store original data in annotation
new_frame["data"] = dict(frame.get("data") or {}, sourcemap=sourcemap_label)
sourcemap_applied = True
if token is not None:
abs_path = non_standard_url_join(sourcemap_url, token.src)
logger.debug(
"Mapping compressed source %r to mapping in %r", frame["abs_path"], abs_path
)
source = self.get_sourceview(abs_path)
if source is None:
errors = cache.get_errors(abs_path)
if errors:
all_errors.extend(errors)
else:
all_errors.append(
{"type": EventError.JS_MISSING_SOURCE, "url": http.expose_url(abs_path)}
)
# the tokens are zero indexed, so offset correctly
new_frame["lineno"] = token.src_line + 1
new_frame["colno"] = token.src_col + 1
# Try to use the function name we got from symbolic
original_function_name = token.function_name
# In the ideal case we can use the function name from the
# frame and the location to resolve the original name
# through the heuristics in our sourcemap library.
if original_function_name is None:
last_token = None
# Find the previous token for function name handling as a
# fallback.
if (
processable_frame.previous_frame
and processable_frame.previous_frame.processor is self
):
last_token = processable_frame.previous_frame.data.get("token")
if last_token:
original_function_name = last_token.name
if original_function_name is not None:
new_frame["function"] = original_function_name
filename = token.src
# special case webpack support
# abs_path will always be the full path with webpack:/// prefix.
# filename will be relative to that
if abs_path.startswith("webpack:"):
filename = abs_path
# webpack seems to use ~ to imply "relative to resolver root"
# which is generally seen for third party deps
# (i.e. node_modules)
if "/~/" in filename:
filename = "~/" + abs_path.split("/~/", 1)[-1]
elif WEBPACK_NAMESPACE_RE.match(filename):
filename = re.sub(WEBPACK_NAMESPACE_RE, "./", abs_path)
else:
filename = filename.split("webpack:///", 1)[-1]
# As noted above:
# * [js/node] '~/' means they're coming from node_modules, so these are not app dependencies
# * [node] same goes for `./node_modules/` and '../node_modules/', which is used when bundling node apps
# * [node] and webpack, which includes its own code to bootstrap all modules and its internals
# eg. webpack:///webpack/bootstrap, webpack:///external
if (
filename.startswith("~/")
or "/node_modules/" in filename
or not filename.startswith("./")
):
in_app = False
# And conversely, local dependencies start with './'
elif filename.startswith("./"):
in_app = True
# We want to explicitly generate a webpack module name
new_frame["module"] = generate_module(filename)
# while you could technically use a subpath of 'node_modules' for your libraries,
# it would be an extremely complicated decision and we've not seen anyone do it
# so instead we assume if node_modules is in the path its part of the vendored code
elif "/node_modules/" in abs_path:
in_app = False
if abs_path.startswith("app:"):
if filename and NODE_MODULES_RE.search(filename):
in_app = False
else:
in_app = True
new_frame["abs_path"] = abs_path
new_frame["filename"] = filename
if not frame.get("module") and abs_path.startswith(
("http:", "https:", "webpack:", "app:")
):
new_frame["module"] = generate_module(abs_path)
elif sourcemap_url:
new_frame["data"] = dict(
new_frame.get("data") or {}, sourcemap=http.expose_url(sourcemap_url)
)
# TODO: theoretically a minified source could point to
# another mapped, minified source
changed_frame = self.expand_frame(new_frame, source=source)
# If we did not manage to match but we do have a line or column
# we want to report an error here.
if not new_frame.get("context_line") and source and new_frame.get("colno") is not None:
all_errors.append(
{
"type": EventError.JS_INVALID_SOURCEMAP_LOCATION,
"column": new_frame["colno"],
"row": new_frame["lineno"],
"source": new_frame["abs_path"],
}
)
changed_raw = sourcemap_applied and self.expand_frame(raw_frame)
if sourcemap_applied or all_errors or changed_frame or changed_raw:
# In case we are done processing, we iterate over all errors that we got
# and we filter out all `JS_MISSING_SOURCE` errors since we consider if we have
# a `context_line` we have a symbolicated frame and we don't need to show the error
has_context_line = bool(new_frame.get("context_line"))
if has_context_line:
all_errors[:] = [
x for x in all_errors if x.get("type") is not EventError.JS_MISSING_SOURCE
]
if in_app is not None:
new_frame["in_app"] = in_app
raw_frame["in_app"] = in_app
# Run new processor only for frames that were actually modified in any way.
if should_run_smcache(self) and new_frame != raw_frame:
smcache_rv = self.smcache_processor.process_frame(processable_frame, None)
set_path(new_frame, "data", "smcache_frame", value=smcache_rv[0][0])
new_frames = [new_frame]
raw_frames = [raw_frame] if changed_raw else None
return new_frames, raw_frames, all_errors
|
Attempt to demangle the given frame.
| 6 | 857 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def process_frame(self, processable_frame, processing_task):
frame = processable_frame.frame
token = None
cache = self.cache
sourcemaps = self.sourcemaps
all_errors = []
sourcemap_applied = False
# can't demangle if there's no filename or line number present
if not frame.get("abs_path") or not frame.get("lineno"):
return
# also can't demangle node's internal modules
# therefore we only process user-land frames (starting with /)
# or those created by bundle/webpack internals
if self.data.get("platform") == "node" and not frame.get("abs_path").startswith(
("/", "app:", "webpack:")
):
return
errors = cache.get_errors(frame["abs_path"])
if errors:
all_errors.extend(errors)
# This might fail but that's okay, we try with a different path a
# bit later down the road.
source = self.get_sourceview(frame["abs_path"])
in_app = None
new_frame = dict(frame)
raw_frame = dict(frame)
sourcemap_url, sourcemap_view = sourcemaps.get_link(frame["abs_path"])
self.sourcemaps_touched.add(sourcemap_url)
if sourcemap_view and frame.get("colno") is None:
all_errors.append(
{"type": EventError.JS_NO_COLUMN, "url": http.expose_url(frame["abs_path"])}
)
elif sourcemap_view:
if is_data_uri(sourcemap_url):
sourcemap_label = frame["abs_path"]
else:
sourcemap_label = sourcemap_url
sourcemap_label = http.expose_url(sourcemap_label)
if frame.get("function"):
minified_function_name = frame["function"]
minified_source = self.get_sourceview(frame["abs_path"])
else:
minified_function_name = minified_source = None
try:
# Errors are 1-indexed in the frames, so we need to -1 to get
# zero-indexed value from tokens.
assert frame["lineno"] > 0, "line numbers are 1-indexed"
token = sourcemap_view.lookup(
frame["lineno"] - 1, frame["colno"] - 1, minified_function_name, minified_source
)
except Exception:
token = None
all_errors.append(
{
"type": EventError.JS_INVALID_SOURCEMAP_LOCATION,
"column": frame.get("colno"),
"row": frame.get("lineno"),
"source": frame["abs_path"],
"sourcemap": sourcemap_label,
}
)
# persist the token so that we can find it later
processable_frame.data["token"] = token
# Store original data in annotation
new_frame["data"] = dict(frame.get("data") or {}, sourcemap=sourcemap_label)
sourcemap_applied = True
if token is not None:
abs_path = non_standard_url_join(sourcemap_url, token.src)
logger.debug(
"Mapping compressed source %r to mapping in %r", frame["abs_path"], abs_path
)
source = self.get_sourceview(abs_path)
if source is None:
errors = cache.get_errors(abs_path)
if errors:
all_errors.extend(errors)
else:
all_errors.append(
{"type": EventError.JS_MISSING_SOURCE, "url": http.expose_url(abs_path)}
)
# the tokens are zero indexed, so offset correctly
new_frame["lineno"] = token.src_line + 1
new_frame["colno"] = token.src_col + 1
# Try to use the function name we got from symbolic
original_function_name = token.function_name
# In the ideal case we can use the function name from the
# frame and the location to resolve the original name
# through the heuristics in our sourcemap library.
if original_function_name is None:
last_token = None
# Find the previous token for function name handling as a
# fallback.
if (
processable_frame.previous_frame
and processable_frame.previous_frame.processor is self
):
last_token = processable_frame.previous_frame.data.get("token")
if last_token:
original_function_name = last_token.name
if original_function_name is not None:
new_frame["function"] = original_function_name
filename = token.src
# special case webpack support
# abs_path will always be the full path with webpack:/// prefix.
# filename will be relative to that
if abs_path.startswith("webpack:"):
filename = abs_path
# webpack seems to use ~ to imply "relative to resolver root"
# which is generally seen for third party deps
# (i.e. node_modules)
if "/~/" in filename:
filename = "~/" + abs_path.split("/~/", 1)[-1]
elif WEBPACK_NAMESPACE_RE.match(filename):
filename = re.sub(WEBPACK_NAMESPACE_RE, "./", abs_path)
else:
filename = filename.split("webpack:///", 1)[-1]
# As noted above:
# * [js/node] '~/' means they're coming from node_modules, so these are not app dependencies
# * [node] same goes for `./node_modules/` and '../node_modules/', which is used when bundling node apps
# * [node] and webpack, which includes its own code to bootstrap all modules and its internals
# eg. webpack:///webpack/bootstrap, webpack:///external
if (
filename.startswith("~/")
or "/node_modules/" in filename
or not filename.startswith("./")
):
in_app = False
# And conversely, local dependencies start with './'
elif filename.startswith("./"):
in_app = True
# We want to explicitly generate a webpack module name
new_frame["module"] = generate_module(filename)
# while you could technically use a subpath of 'node_modules' for your libraries,
# it would be an extremely complicated decision and we've not seen anyone do it
# so instead we assume if node_modules is in the path its part of the vendored code
elif "/node_modules/" in abs_path:
in_app = False
if abs_path.startswith("app:"):
if filename and NODE_MODULES_RE.search(filename):
in_app = False
else:
in_app = True
new_frame["abs_path"] = abs_path
new_frame["filename"] = filename
if not frame.get("module") and abs_path.startswith(
("http:", "https:", "webpack:", "app:")
):
new_frame["module"] = generate_module(abs_path)
elif sourcemap_url:
new_frame["data"] = dict(
new_frame.get("data") or {}, sourcemap=http.expose_url(sourcemap_url)
)
# TODO: theoretically a minified source could point to
# another mapped, minified source
changed_frame = self.expand_frame(new_frame, source=source)
# If we did not manage to match but we do have a line or column
# we want to report an error here.
if not new_frame.get("context_line") and source and new_frame.get("colno") is not None:
all_errors.append(
{
"type": EventError.JS_INVALID_SOURCEMAP_LOCATION,
"column": new_frame["colno"],
"row": new_frame["lineno"],
"source": new_frame["abs_path"],
}
)
changed_raw = sourcemap_applied and self.expand_frame(raw_frame)
if sourcemap_applied or all_errors or changed_frame or changed_raw:
# In case we are done processing, we iterate over all errors that we got
# and we filter out all `JS_MISSING_SOURCE` errors since we consider if we have
# a `context_line` we have a symbolicated frame and we don't need to show the error
has_context_line = bool(new_frame.get("context_line"))
if has_context_line:
all_errors[:] = [
x for x in all_errors if x.get("type") is not EventError.JS_MISSING_SOURCE
]
if in_app is not None:
new_frame["in_app"] = in_app
raw_frame["in_app"] = in_app
# Run new processor only for frames that were actually modified in any way.
if should_run_smcache(self) and new_frame != raw_frame:
smcache_rv = self.smcache_processor.process_frame(processable_frame, None)
set_path(new_frame, "data", "smcache_frame", value=smcache_rv[0][0])
new_frames = [new_frame]
raw_frames = [raw_frame] if changed_raw else None
return new_frames, raw_frames, all_errors
```
###Assistant :
Attempt to demangle the given frame.
|
1,761 | def validate_csv(headers, fields, required_fields):
# Validate provided column headers
is_update = False
for field, to_field in headers.items():
if field == "id":
is_update = True
continue
if field not in fields:
raise forms.ValidationError(f'Unexpected column header "{field}" found.')
if to_field and not hasattr(fields[field], 'to_field_name'):
raise forms.ValidationError(f'Column "{field}" is not a related object; cannot use dots')
if to_field and not hasattr(fields[field].queryset.model, to_field):
raise forms.ValidationError(f'Invalid related object attribute for column "{field}": {to_field}')
# Validate required fields (if not an update)
if not is_update:
for f in required_fields:
if f not in headers:
raise forms.ValidationError(f'Required column header "{f}" not found.')
|
Validate that parsed csv data conforms to the object's available fields. Raise validation errors
if parsed csv data contains invalid headers or does not contain required headers.
| 27 | 95 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def validate_csv(headers, fields, required_fields):
# Validate provided column headers
is_update = False
for field, to_field in headers.items():
if field == "id":
is_update = True
continue
if field not in fields:
raise forms.ValidationError(f'Unexpected column header "{field}" found.')
if to_field and not hasattr(fields[field], 'to_field_name'):
raise forms.ValidationError(f'Column "{field}" is not a related object; cannot use dots')
if to_field and not hasattr(fields[field].queryset.model, to_field):
raise forms.ValidationError(f'Invalid related object attribute for column "{field}": {to_field}')
# Validate required fields (if not an update)
if not is_update:
for f in required_fields:
if f not in headers:
raise forms.ValidationError(f'Required column header "{f}" not found.')
```
###Assistant :
Validate that parsed csv data conforms to the object's available fields. Raise validation errors
if parsed csv data contains invalid headers or does not contain required headers.
|
1,762 | def bernoulli_poly(n, x=None, polys=False):
return appell_poly(n, [[1], [1, QQ(-1,2)]], QQ(1,2),
lambda p, i: p * QQ(1<<(i-1), 1-(1<<i)), QQ, x, polys)
@public | Generates the Bernoulli polynomial of degree `n` in `x`.
Parameters
==========
n : int
Degree of the polynomial.
x : optional
polys : bool, optional
If True, return a Poly, otherwise (default) return an expression.
| 35 | 21 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def bernoulli_poly(n, x=None, polys=False):
return appell_poly(n, [[1], [1, QQ(-1,2)]], QQ(1,2),
lambda p, i: p * QQ(1<<(i-1), 1-(1<<i)), QQ, x, polys)
@public
```
###Assistant : Generates the Bernoulli polynomial of degree `n` in `x`.
Parameters
==========
n : int
Degree of the polynomial.
x : optional
polys : bool, optional
If True, return a Poly, otherwise (default) return an expression.
|
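A quick check for the `bernoulli_poly` entry above: the public `sympy.bernoulli(n, x)` function yields the classical Bernoulli polynomials, so it can be used to sanity-check the lower-level Appell-sequence generator. This is a hedged usage sketch, not part of the original row.
```Python
from sympy import bernoulli, symbols

x = symbols("x")
# Classical Bernoulli polynomials; bernoulli_poly(n, x) is expected to agree with these.
print(bernoulli(2, x))   # x**2 - x + 1/6
print(bernoulli(3, x))   # x**3 - 3*x**2/2 + x/2
```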
1,763 | def test_calculate_max_drawdown_abs(values, relative, result, result_rel):
dates = [Arrow(2020, 1, 1).shift(days=i) for i in range(len(values))]
df = DataFrame(zip(values, dates), columns=['profit_abs', 'open_date'])
# sort by profit and reset index
df = df.sort_values('profit_abs').reset_index(drop=True)
df1 = df.copy()
drawdown, hdate, ldate, hval, lval, drawdown_rel = calculate_max_drawdown(
df, date_col='open_date', starting_balance=1000, relative=relative)
# Ensure df has not been altered.
assert df.equals(df1)
assert isinstance(drawdown, float)
assert isinstance(drawdown_rel, float)
# High must be before low
assert hdate < ldate
# High value must be higher than low value
assert hval > lval
assert drawdown == result
assert pytest.approx(drawdown_rel) == result_rel
|
Test case from issue https://github.com/freqtrade/freqtrade/issues/6655
[1000, 500, 1000, 11000, 10000] # absolute results
[1000, 50%, 0%, 0%, ~9%] # Relative drawdowns
| 21 | 91 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_calculate_max_drawdown_abs(values, relative, result, result_rel):
dates = [Arrow(2020, 1, 1).shift(days=i) for i in range(len(values))]
df = DataFrame(zip(values, dates), columns=['profit_abs', 'open_date'])
# sort by profit and reset index
df = df.sort_values('profit_abs').reset_index(drop=True)
df1 = df.copy()
drawdown, hdate, ldate, hval, lval, drawdown_rel = calculate_max_drawdown(
df, date_col='open_date', starting_balance=1000, relative=relative)
# Ensure df has not been altered.
assert df.equals(df1)
assert isinstance(drawdown, float)
assert isinstance(drawdown_rel, float)
# High must be before low
assert hdate < ldate
# High value must be higher than low value
assert hval > lval
assert drawdown == result
assert pytest.approx(drawdown_rel) == result_rel
```
###Assistant :
Test case from issue https://github.com/freqtrade/freqtrade/issues/6655
[1000, 500, 1000, 11000, 10000] # absolute results
[1000, 50%, 0%, 0%, ~9%] # Relative drawdowns
|
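For context on the drawdown test above, one common way to derive absolute and relative drawdown from a profit series looks like the sketch below. The numbers are made up for illustration and this is not freqtrade's `calculate_max_drawdown` implementation.
```Python
import pandas as pd

profits = pd.Series([1000, -500, 500, 10000, -1000], dtype=float)  # illustrative values
starting_balance = 1000.0

equity = starting_balance + profits.cumsum()
running_high = equity.cummax()
abs_drawdown = running_high - equity            # drop from the running equity high
rel_drawdown = abs_drawdown / running_high      # same drop as a fraction of that high

print(abs_drawdown.max(), rel_drawdown.max())
```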
1,764 | def testOnCheckpointUnavailableAttribute(self):
checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num=1)
no_attr_checkpoint = Checkpoint(Checkpoint.PERSISTENT, 0, {})
with patch.object(logger, "error") as log_error_mock:
checkpoint_manager.on_checkpoint(no_attr_checkpoint)
log_error_mock.assert_called_once()
# The newest checkpoint should still be set despite this error.
self.assertEqual(
checkpoint_manager.newest_persistent_checkpoint, no_attr_checkpoint
)
|
Tests that an error is logged when the associated result of the
checkpoint has no checkpoint score attribute.
| 18 | 32 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def testOnCheckpointUnavailableAttribute(self):
checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num=1)
no_attr_checkpoint = Checkpoint(Checkpoint.PERSISTENT, 0, {})
with patch.object(logger, "error") as log_error_mock:
checkpoint_manager.on_checkpoint(no_attr_checkpoint)
log_error_mock.assert_called_once()
# The newest checkpoint should still be set despite this error.
self.assertEqual(
checkpoint_manager.newest_persistent_checkpoint, no_attr_checkpoint
)
```
###Assistant :
Tests that an error is logged when the associated result of the
checkpoint has no checkpoint score attribute.
|
1,765 | def subscription_app_status_changed_webhook(subscription_webhook):
return subscription_webhook(
APP_STATUS_CHANGED_SUBSCRIPTION_QUERY,
WebhookEventAsyncType.APP_STATUS_CHANGED,
)
CATEGORY_CREATED_SUBSCRIPTION_QUERY =
@pytest.fixture |
subscription{
event{
...on CategoryCreated{
category{
id
}
}
}
}
| 10 | 10 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def subscription_app_status_changed_webhook(subscription_webhook):
return subscription_webhook(
APP_STATUS_CHANGED_SUBSCRIPTION_QUERY,
WebhookEventAsyncType.APP_STATUS_CHANGED,
)
CATEGORY_CREATED_SUBSCRIPTION_QUERY =
@pytest.fixture
```
###Assistant :
subscription{
event{
...on CategoryCreated{
category{
id
}
}
}
}
|
1,766 | def binary_op(self, op, right_frame, join_type="outer"):
left_parts, right_parts, joined_index, row_lengths = self._copartition(
0, right_frame, join_type, sort=True
)
# unwrap list returned by `copartition`.
right_parts = right_parts[0]
new_frame = self._partition_mgr_cls.binary_operation(
1, left_parts, lambda l, r: op(l, r), right_parts
)
new_columns = self.columns.join(right_frame.columns, how=join_type)
return self.__constructor__(
new_frame,
joined_index,
new_columns,
row_lengths,
column_widths=self._column_widths_cache,
)
|
Perform an operation that requires joining with another Modin DataFrame.
Parameters
----------
op : callable
Function to apply after the join.
right_frame : PandasDataframe
Modin DataFrame to join with.
join_type : str, default: "outer"
Type of join to apply.
Returns
-------
PandasDataframe
New Modin DataFrame.
| 45 | 49 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def binary_op(self, op, right_frame, join_type="outer"):
left_parts, right_parts, joined_index, row_lengths = self._copartition(
0, right_frame, join_type, sort=True
)
# unwrap list returned by `copartition`.
right_parts = right_parts[0]
new_frame = self._partition_mgr_cls.binary_operation(
1, left_parts, lambda l, r: op(l, r), right_parts
)
new_columns = self.columns.join(right_frame.columns, how=join_type)
return self.__constructor__(
new_frame,
joined_index,
new_columns,
row_lengths,
column_widths=self._column_widths_cache,
)
```
###Assistant :
Perform an operation that requires joining with another Modin DataFrame.
Parameters
----------
op : callable
Function to apply after the join.
right_frame : PandasDataframe
Modin DataFrame to join with.
join_type : str, default: "outer"
Type of join to apply.
Returns
-------
PandasDataframe
New Modin DataFrame.
|
1,767 | def rc_file(fname, *, use_default_template=True):
# Deprecation warnings were already handled in rc_params_from_file, no need
# to reemit them here.
with _api.suppress_matplotlib_deprecation_warning():
from .style.core import STYLE_BLACKLIST
rc_from_file = rc_params_from_file(
fname, use_default_template=use_default_template)
rcParams.update({k: rc_from_file[k] for k in rc_from_file
if k not in STYLE_BLACKLIST})
@contextlib.contextmanager |
Update `.rcParams` from file.
Style-blacklisted `.rcParams` (defined in
``matplotlib.style.core.STYLE_BLACKLIST``) are not updated.
Parameters
----------
fname : str or path-like
A file with Matplotlib rc settings.
use_default_template : bool
If True, initialize with default parameters before updating with those
in the given file. If False, the current configuration persists
and only the parameters specified in the file are updated.
| 58 | 42 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def rc_file(fname, *, use_default_template=True):
# Deprecation warnings were already handled in rc_params_from_file, no need
# to reemit them here.
with _api.suppress_matplotlib_deprecation_warning():
from .style.core import STYLE_BLACKLIST
rc_from_file = rc_params_from_file(
fname, use_default_template=use_default_template)
rcParams.update({k: rc_from_file[k] for k in rc_from_file
if k not in STYLE_BLACKLIST})
@contextlib.contextmanager
```
###Assistant :
Update `.rcParams` from file.
Style-blacklisted `.rcParams` (defined in
``matplotlib.style.core.STYLE_BLACKLIST``) are not updated.
Parameters
----------
fname : str or path-like
A file with Matplotlib rc settings.
use_default_template : bool
If True, initialize with default parameters before updating with those
in the given file. If False, the current configuration persists
and only the parameters specified in the file are updated.
|
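Typical use of the `rc_file` entry above goes through the public `matplotlib.rc_file` function; the file path below is a placeholder for any valid matplotlibrc file.
```Python
import matplotlib

# Reset to defaults, then apply the file's settings (use_default_template=True).
matplotlib.rc_file("my_matplotlibrc")
# Keep the current configuration and only layer the file's entries on top of it.
matplotlib.rc_file("my_matplotlibrc", use_default_template=False)
```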
1,768 | def create_gloo_context(rank, world_size):
context = pygloo.rendezvous.Context(rank, world_size)
return context
| Create a GLOO context using GLOO APIs.
Args:
rank: the rank of this process.
world_size: the number of processes of this collective group.
Returns:
context (pygloo.Context): a GLOO context.
| 29 | 9 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def create_gloo_context(rank, world_size):
context = pygloo.rendezvous.Context(rank, world_size)
return context
```
###Assistant : Create a GLOO context using GLOO APIs.
Args:
rank: the rank of this process.
world_size: the number of processes of this collective group.
Returns:
context (pygloo.Context): a GLOO context.
|
1,769 | def require_bitsandbytes(test_case):
if not is_bitsandbytes_available():
return unittest.skip("test requires bnb")(test_case)
else:
return test_case
|
Decorator for bits and bytes (bnb) dependency
| 7 | 12 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def require_bitsandbytes(test_case):
if not is_bitsandbytes_available():
return unittest.skip("test requires bnb")(test_case)
else:
return test_case
```
###Assistant :
Decorator for bits and bytes (bnb) dependency
|
1,770 | def copy_func(f) -> Callable:
g = types.FunctionType(
f.__code__,
f.__globals__,
name=f.__name__,
argdefs=f.__defaults__,
closure=f.__closure__,
)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__
return g
| Copies the contents and attributes of the entered function. Based on https://stackoverflow.com/a/13503277
Parameters
----------
f: Callable
Function to be copied
Returns
-------
g: Callable
New function
| 26 | 22 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def copy_func(f) -> Callable:
g = types.FunctionType(
f.__code__,
f.__globals__,
name=f.__name__,
argdefs=f.__defaults__,
closure=f.__closure__,
)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__
return g
```
###Assistant : Copies the contents and attributes of the entered function. Based on https://stackoverflow.com/a/13503277
Parameters
----------
f: Callable
Function to be copied
Returns
-------
g: Callable
New function
|
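A small demonstration of the `copy_func` entry above: the copy gets its own `__defaults__`, so changing them does not affect the original. The helper body is repeated so the snippet runs standalone.
```Python
import functools
import types

def copy_func(f):
    g = types.FunctionType(
        f.__code__, f.__globals__, name=f.__name__,
        argdefs=f.__defaults__, closure=f.__closure__,
    )
    g = functools.update_wrapper(g, f)
    g.__kwdefaults__ = f.__kwdefaults__
    return g

def greet(name="world"):
    return f"hello {name}"

greet_copy = copy_func(greet)
greet_copy.__defaults__ = ("copy",)   # only the copy's default changes
print(greet())        # hello world
print(greet_copy())   # hello copy
```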
1,771 | def evaluate(self, expr, context):
if isinstance(expr, string_types):
if expr[0] in '\'"':
result = expr[1:-1]
else:
if expr not in context:
raise SyntaxError('unknown variable: %s' % expr)
result = context[expr]
else:
assert isinstance(expr, dict)
op = expr['op']
if op not in self.operations:
raise NotImplementedError('op not implemented: %s' % op)
elhs = expr['lhs']
erhs = expr['rhs']
if _is_literal(expr['lhs']) and _is_literal(expr['rhs']):
raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs))
lhs = self.evaluate(elhs, context)
rhs = self.evaluate(erhs, context)
if ((elhs == 'python_version' or erhs == 'python_version') and
op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')):
lhs = NV(lhs)
rhs = NV(rhs)
elif elhs == 'python_version' and op in ('in', 'not in'):
lhs = NV(lhs)
rhs = _get_versions(rhs)
result = self.operations[op](lhs, rhs)
return result
|
Evaluate a marker expression returned by the :func:`parse_requirement`
function in the specified context.
| 13 | 123 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def evaluate(self, expr, context):
if isinstance(expr, string_types):
if expr[0] in '\'"':
result = expr[1:-1]
else:
if expr not in context:
raise SyntaxError('unknown variable: %s' % expr)
result = context[expr]
else:
assert isinstance(expr, dict)
op = expr['op']
if op not in self.operations:
raise NotImplementedError('op not implemented: %s' % op)
elhs = expr['lhs']
erhs = expr['rhs']
if _is_literal(expr['lhs']) and _is_literal(expr['rhs']):
raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs))
lhs = self.evaluate(elhs, context)
rhs = self.evaluate(erhs, context)
if ((elhs == 'python_version' or erhs == 'python_version') and
op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')):
lhs = NV(lhs)
rhs = NV(rhs)
elif elhs == 'python_version' and op in ('in', 'not in'):
lhs = NV(lhs)
rhs = _get_versions(rhs)
result = self.operations[op](lhs, rhs)
return result
```
###Assistant :
Evaluate a marker expression returned by the :func:`parse_requirement`
function in the specified context.
|
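The `evaluate` entry above walks an already-parsed PEP 508 marker expression. For a quick, self-contained way to evaluate the same marker language against the running interpreter, the separate `packaging` project can be used; note this is an alternative library, not the distlib code shown in the row.
```Python
from packaging.markers import Marker

print(Marker('python_version >= "3.8"').evaluate())
print(Marker('os_name == "posix" and python_version < "4"').evaluate())
```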
1,772 | def track_tf_optimizer(tf_optimizer):
if tf.executing_eagerly():
return
optimizers = _GRAPH_TF_OPTIMIZERS[None]
optimizers.add(tf_optimizer)
@keras_export("keras.__internal__.backend.track_variable", v1=[]) | Tracks the given TF optimizer for initialization of its variables. | 10 | 11 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def track_tf_optimizer(tf_optimizer):
if tf.executing_eagerly():
return
optimizers = _GRAPH_TF_OPTIMIZERS[None]
optimizers.add(tf_optimizer)
@keras_export("keras.__internal__.backend.track_variable", v1=[])
```
###Assistant : Tracks the given TF optimizer for initialization of its variables.
|
1,773 | def _galois_group_degree_5(T, max_tries=30, randomize=False):
r
from sympy.combinatorics.permutations import Permutation
from sympy.combinatorics.named_groups import (
CyclicGroup, DihedralGroup, AlternatingGroup, SymmetricGroup
)
# The ideas here are all the same as in the degree-4 method.
# The specific resolvents we use, and how we interpret the results, are
# adapted to the degree-5 case.
X = symbols('X0 X1 X2 X3 X4')
# For the first resolvent, we have G = S5,
# and stabilizer H = M20 = < (01234), (1234) >.
F1 = (X[0]**2*(X[1]*X[4] + X[2]*X[3])
+ X[1]**2*(X[2]*X[0] + X[3]*X[4])
+ X[2]**2*(X[3]*X[1] + X[4]*X[0])
+ X[3]**2*(X[4]*X[2] + X[0]*X[1])
+ X[4]**2*(X[0]*X[3] + X[1]*X[2]))
s1 = [
Permutation(4),
Permutation(4)(0, 1),
Permutation(4)(0, 2),
Permutation(4)(0, 3),
Permutation(4)(0, 4),
Permutation(4)(1, 4)
]
R1 = Resolvent(F1, X, s1)
# For the second resolvent, we'll have G = D5, H = C5.
F2_pre = X[0]*X[1]**2 + X[1]*X[2]**2 + X[2]*X[3]**2 + X[3]*X[4]**2 + X[4]*X[0]**2
s2_pre = [
Permutation(4),
Permutation(4)(0, 1)(2, 4)
]
history = set()
for i in range(max_tries):
if i > 0:
_, T = tschirnhausen_transformation(T, max_tries=max_tries, history=history, fixed_order=not randomize)
R_dup, _, i0 = R1.eval_for_poly(T, find_integer_root=True)
if not dup_sqf_p(R_dup, ZZ):
continue
sq_disc = has_square_disc(T)
if i0 is None:
return (AlternatingGroup(5), True) if sq_disc else (SymmetricGroup(5), False)
if not sq_disc:
return (M20(), False)
sigma = s1[i0]
F2 = F2_pre.subs(zip(X, sigma(X)), simultaneous=True)
s2 = [sigma*tau*sigma for tau in s2_pre]
R2 = Resolvent(F2, X, s2)
R_dup, _, _ = R2.eval_for_poly(T)
d = dup_discriminant(R_dup, ZZ)
if d == 0:
continue
if is_square(d):
return (CyclicGroup(5), True)
else:
return (DihedralGroup(5), True)
raise MaxTriesException
|
Compute the Galois group of a polynomial of degree 5, following Alg 6.3.9
of Cohen.
References
==========
.. [1] Cohen, H. *A Course in Computational Algebraic Number Theory*.
| 28 | 247 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _galois_group_degree_5(T, max_tries=30, randomize=False):
r
from sympy.combinatorics.permutations import Permutation
from sympy.combinatorics.named_groups import (
CyclicGroup, DihedralGroup, AlternatingGroup, SymmetricGroup
)
# The ideas here are all the same as in the degree-4 method.
# The specific resolvents we use, and how we interpret the results, are
# adapted to the degree-5 case.
X = symbols('X0 X1 X2 X3 X4')
# For the first resolvent, we have G = S5,
# and stabilizer H = M20 = < (01234), (1234) >.
F1 = (X[0]**2*(X[1]*X[4] + X[2]*X[3])
+ X[1]**2*(X[2]*X[0] + X[3]*X[4])
+ X[2]**2*(X[3]*X[1] + X[4]*X[0])
+ X[3]**2*(X[4]*X[2] + X[0]*X[1])
+ X[4]**2*(X[0]*X[3] + X[1]*X[2]))
s1 = [
Permutation(4),
Permutation(4)(0, 1),
Permutation(4)(0, 2),
Permutation(4)(0, 3),
Permutation(4)(0, 4),
Permutation(4)(1, 4)
]
R1 = Resolvent(F1, X, s1)
# For the second resolvent, we'll have G = D5, H = C5.
F2_pre = X[0]*X[1]**2 + X[1]*X[2]**2 + X[2]*X[3]**2 + X[3]*X[4]**2 + X[4]*X[0]**2
s2_pre = [
Permutation(4),
Permutation(4)(0, 1)(2, 4)
]
history = set()
for i in range(max_tries):
if i > 0:
_, T = tschirnhausen_transformation(T, max_tries=max_tries, history=history, fixed_order=not randomize)
R_dup, _, i0 = R1.eval_for_poly(T, find_integer_root=True)
if not dup_sqf_p(R_dup, ZZ):
continue
sq_disc = has_square_disc(T)
if i0 is None:
return (AlternatingGroup(5), True) if sq_disc else (SymmetricGroup(5), False)
if not sq_disc:
return (M20(), False)
sigma = s1[i0]
F2 = F2_pre.subs(zip(X, sigma(X)), simultaneous=True)
s2 = [sigma*tau*sigma for tau in s2_pre]
R2 = Resolvent(F2, X, s2)
R_dup, _, _ = R2.eval_for_poly(T)
d = dup_discriminant(R_dup, ZZ)
if d == 0:
continue
if is_square(d):
return (CyclicGroup(5), True)
else:
return (DihedralGroup(5), True)
raise MaxTriesException
```
###Assistant :
Compute the Galois group of a polynomial of degree 5, following Alg 6.3.9
of Cohen.
References
==========
.. [1] Cohen, H. *A Course in Computational Algebraic Number Theory*.
|
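The degree-5 algorithm above repeatedly checks whether a discriminant is a perfect square (a square discriminant keeps the Galois group inside A5). A minimal, hedged sketch of that single check using sympy and the standard library:
```Python
import math
from sympy import discriminant, symbols

x = symbols("x")
d = int(discriminant(x**5 - x - 1, x))       # an arbitrary quintic used for illustration
square_disc = d >= 0 and math.isqrt(d) ** 2 == d
print(d, square_disc)                        # non-square discriminant -> group not inside A5
```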
1,774 | def error(self, message):
self.print_usage(_sys.stderr)
args = {'prog': self.prog, 'message': message}
self.exit(2, _('%(prog)s: error: %(message)s\n') % args)
| error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
| 33 | 16 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def error(self, message):
self.print_usage(_sys.stderr)
args = {'prog': self.prog, 'message': message}
self.exit(2, _('%(prog)s: error: %(message)s\n') % args)
```
###Assistant : error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
|
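The `error` docstring above notes that an override must not return. A common pattern, sketched here, is to raise an exception instead of exiting so callers (for example tests) can catch parse failures:
```Python
import argparse

class RaisingParser(argparse.ArgumentParser):
    def error(self, message):
        # Do not return: raise instead of calling self.exit().
        raise ValueError(f"{self.prog}: error: {message}")

parser = RaisingParser(prog="demo")
parser.add_argument("--count", type=int, required=True)
try:
    parser.parse_args([])
except ValueError as exc:
    print(exc)   # demo: error: the following arguments are required: --count
```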
1,775 | def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
return x
@keras_export("keras.applications.regnet.decode_predictions") | A placeholder method for backward compatibility.
The preprocessing logic has been included in the regnet model
implementation. Users are no longer required to call this method to normalize
the input data. This method does nothing and is only kept as a placeholder to
the input data. This method does nothing and is only kept as a placeholder to
align the API surface between old and new version of model.
Args:
x: A floating point `numpy.array` or a `tf.Tensor`.
data_format: Optional data format of the image tensor/array. Defaults to
None, in which case the global setting
`tf.keras.backend.image_data_format()` is used (unless you changed it, it
defaults to "channels_last").{mode}
Returns:
Unchanged `numpy.array` or `tf.Tensor`.
| 95 | 9 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
return x
@keras_export("keras.applications.regnet.decode_predictions")
```
###Assistant : A placeholder method for backward compatibility.
The preprocessing logic has been included in the regnet model
implementation. Users are no longer required to call this method to normalize
the input data. This method does nothing and is only kept as a placeholder to
align the API surface between old and new version of model.
Args:
x: A floating point `numpy.array` or a `tf.Tensor`.
data_format: Optional data format of the image tensor/array. Defaults to
None, in which case the global setting
`tf.keras.backend.image_data_format()` is used (unless you changed it, it
defaults to "channels_last").{mode}
Returns:
Unchanged `numpy.array` or `tf.Tensor`.
|
1,776 | def test_edit_post(self):
# Send request
response = self.client.post(
reverse("wagtaildocs:edit_multiple", args=(self.doc.id,)),
{
"doc-%d-%s" % (self.doc.id, field): data
for field, data in self.edit_post_data.items()
},
)
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response["Content-Type"], "application/json")
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn("doc_id", response_json)
self.assertNotIn("form", response_json)
self.assertIn("success", response_json)
self.assertEqual(response_json["doc_id"], self.doc.id)
self.assertTrue(response_json["success"])
self.check_doc_after_edit()
|
This tests that a POST request to the edit view edits the document
| 13 | 46 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_edit_post(self):
# Send request
response = self.client.post(
reverse("wagtaildocs:edit_multiple", args=(self.doc.id,)),
{
"doc-%d-%s" % (self.doc.id, field): data
for field, data in self.edit_post_data.items()
},
)
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response["Content-Type"], "application/json")
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn("doc_id", response_json)
self.assertNotIn("form", response_json)
self.assertIn("success", response_json)
self.assertEqual(response_json["doc_id"], self.doc.id)
self.assertTrue(response_json["success"])
self.check_doc_after_edit()
```
###Assistant :
This tests that a POST request to the edit view edits the document
|
1,777 | def test_message_hiding(qtbot, view):
with qtbot.wait_signal(view._clear_timer.timeout):
view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))
assert not view._messages
| Messages should be hidden after the timer times out. | 9 | 10 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_message_hiding(qtbot, view):
with qtbot.wait_signal(view._clear_timer.timeout):
view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test'))
assert not view._messages
```
###Assistant : Messages should be hidden after the timer times out.
|
1,778 | def test_avatar_allowed_mime_type_per_room(self):
self._setup_local_files(
{
"good": {"mimetype": "image/png"},
"bad": {"mimetype": "application/octet-stream"},
}
)
room_id = self.helper.create_room_as(tok=self.owner_tok)
channel = self.make_request(
"PUT",
f"/rooms/{room_id}/state/m.room.member/{self.owner}",
content={"membership": "join", "avatar_url": "mxc://test/bad"},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 403, channel.result)
self.assertEqual(
channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body
)
channel = self.make_request(
"PUT",
f"/rooms/{room_id}/state/m.room.member/{self.owner}",
content={"membership": "join", "avatar_url": "mxc://test/good"},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 200, channel.result)
| Tests that the MIME type whitelist for avatars is enforced when updating a
per-room profile.
| 15 | 48 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_avatar_allowed_mime_type_per_room(self):
self._setup_local_files(
{
"good": {"mimetype": "image/png"},
"bad": {"mimetype": "application/octet-stream"},
}
)
room_id = self.helper.create_room_as(tok=self.owner_tok)
channel = self.make_request(
"PUT",
f"/rooms/{room_id}/state/m.room.member/{self.owner}",
content={"membership": "join", "avatar_url": "mxc://test/bad"},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 403, channel.result)
self.assertEqual(
channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body
)
channel = self.make_request(
"PUT",
f"/rooms/{room_id}/state/m.room.member/{self.owner}",
content={"membership": "join", "avatar_url": "mxc://test/good"},
access_token=self.owner_tok,
)
self.assertEqual(channel.code, 200, channel.result)
```
###Assistant : Tests that the MIME type whitelist for avatars is enforced when updating a
per-room profile.
|
1,779 | def container_name(self) -> t.Optional[str]:
return self.state.get('container_name')
| Return the stored container name, if any, otherwise None. | 9 | 6 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def container_name(self) -> t.Optional[str]:
return self.state.get('container_name')
```
###Assistant : Return the stored container name, if any, otherwise None.
|
1,780 | def get_scorer(scoring):
if isinstance(scoring, str):
try:
scorer = copy.deepcopy(_SCORERS[scoring])
except KeyError:
raise ValueError(
"%r is not a valid scoring value. "
"Use sklearn.metrics.get_scorer_names() "
"to get valid options." % scoring
)
else:
scorer = scoring
return scorer
| Get a scorer from string.
Read more in the :ref:`User Guide <scoring_parameter>`.
:func:`~sklearn.metrics.get_scorer_names` can be used to retrieve the names
of all available scorers.
Parameters
----------
scoring : str or callable
Scoring method as string. If callable it is returned as is.
Returns
-------
scorer : callable
The scorer.
Notes
-----
When passed a string, this function always returns a copy of the scorer
object. Calling `get_scorer` twice for the same scorer results in two
separate scorer objects.
| 78 | 37 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_scorer(scoring):
if isinstance(scoring, str):
try:
scorer = copy.deepcopy(_SCORERS[scoring])
except KeyError:
raise ValueError(
"%r is not a valid scoring value. "
"Use sklearn.metrics.get_scorer_names() "
"to get valid options." % scoring
)
else:
scorer = scoring
return scorer
```
###Assistant : Get a scorer from string.
Read more in the :ref:`User Guide <scoring_parameter>`.
:func:`~sklearn.metrics.get_scorer_names` can be used to retrieve the names
of all available scorers.
Parameters
----------
scoring : str or callable
Scoring method as string. If callable it is returned as is.
Returns
-------
scorer : callable
The scorer.
Notes
-----
When passed a string, this function always returns a copy of the scorer
object. Calling `get_scorer` twice for the same scorer results in two
separate scorer objects.
|
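Usage sketch for the `get_scorer` entry above (assuming a recent scikit-learn that also exposes `get_scorer_names`): a scorer is called as `scorer(estimator, X, y)`, and each `get_scorer` call returns an independent copy.
```Python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import get_scorer, get_scorer_names

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=1000).fit(X, y)

scorer = get_scorer("accuracy")
print(scorer(clf, X, y))                          # fraction of correct predictions
print("balanced_accuracy" in get_scorer_names())  # True
```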
1,781 | def fit(self, X, y):
X, y = self._validate_data(
X, y, ensure_min_samples=2, dtype=[np.float64, np.float32]
)
self.classes_ = unique_labels(y)
n_samples, _ = X.shape
n_classes = len(self.classes_)
if n_samples == n_classes:
raise ValueError(
"The number of samples must be more than the number of classes."
)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = np.bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if not np.isclose(self.priors_.sum(), 1.0):
warnings.warn("The priors do not sum to 1. Renormalizing", UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
# Maximum number of components no matter what n_components is
# specified:
max_components = min(len(self.classes_) - 1, X.shape[1])
if self.n_components is None:
self._max_components = max_components
else:
if self.n_components > max_components:
raise ValueError(
"n_components cannot be larger than min(n_features, n_classes - 1)."
)
self._max_components = self.n_components
if self.solver == "svd":
if self.shrinkage is not None:
raise NotImplementedError("shrinkage not supported")
if self.covariance_estimator is not None:
raise ValueError(
"covariance estimator "
"is not supported "
"with svd solver. Try another solver"
)
self._solve_svd(X, y)
elif self.solver == "lsqr":
self._solve_lsqr(
X,
y,
shrinkage=self.shrinkage,
covariance_estimator=self.covariance_estimator,
)
elif self.solver == "eigen":
self._solve_eigen(
X,
y,
shrinkage=self.shrinkage,
covariance_estimator=self.covariance_estimator,
)
else:
raise ValueError(
"unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver)
)
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(
self.coef_[1, :] - self.coef_[0, :], ndmin=2, dtype=X.dtype
)
self.intercept_ = np.array(
self.intercept_[1] - self.intercept_[0], ndmin=1, dtype=X.dtype
)
self._n_features_out = self._max_components
return self
| Fit the Linear Discriminant Analysis model.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : object
Fitted estimator.
| 52 | 249 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def fit(self, X, y):
X, y = self._validate_data(
X, y, ensure_min_samples=2, dtype=[np.float64, np.float32]
)
self.classes_ = unique_labels(y)
n_samples, _ = X.shape
n_classes = len(self.classes_)
if n_samples == n_classes:
raise ValueError(
"The number of samples must be more than the number of classes."
)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = np.bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if not np.isclose(self.priors_.sum(), 1.0):
warnings.warn("The priors do not sum to 1. Renormalizing", UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
# Maximum number of components no matter what n_components is
# specified:
max_components = min(len(self.classes_) - 1, X.shape[1])
if self.n_components is None:
self._max_components = max_components
else:
if self.n_components > max_components:
raise ValueError(
"n_components cannot be larger than min(n_features, n_classes - 1)."
)
self._max_components = self.n_components
if self.solver == "svd":
if self.shrinkage is not None:
raise NotImplementedError("shrinkage not supported")
if self.covariance_estimator is not None:
raise ValueError(
"covariance estimator "
"is not supported "
"with svd solver. Try another solver"
)
self._solve_svd(X, y)
elif self.solver == "lsqr":
self._solve_lsqr(
X,
y,
shrinkage=self.shrinkage,
covariance_estimator=self.covariance_estimator,
)
elif self.solver == "eigen":
self._solve_eigen(
X,
y,
shrinkage=self.shrinkage,
covariance_estimator=self.covariance_estimator,
)
else:
raise ValueError(
"unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver)
)
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(
self.coef_[1, :] - self.coef_[0, :], ndmin=2, dtype=X.dtype
)
self.intercept_ = np.array(
self.intercept_[1] - self.intercept_[0], ndmin=1, dtype=X.dtype
)
self._n_features_out = self._max_components
return self
```
###Assistant : Fit the Linear Discriminant Analysis model.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : object
Fitted estimator.
|
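End-to-end usage of the `fit` method documented above, through the public `LinearDiscriminantAnalysis` estimator; a tiny two-class toy dataset keeps the sketch self-contained.
```Python
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
y = np.array([1, 1, 1, 2, 2, 2])

lda = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
print(lda.classes_)                   # [1 2]
print(lda.predict([[-0.8, -1.0]]))    # [1]
```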
1,782 | def test_pr_opened_with_multiple_reviewers(self) -> None:
expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
expected_message =
self.check_webhook(
"pull_request_opened_with_multiple_reviewers", expected_topic, expected_message
)
| [hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6) from `master` to `master` (assigned to [sougo](http://139.59.64.214:7990/users/sougo), [zura](http://139.59.64.214:7990/users/zura) and [shimura](http://139.59.64.214:7990/users/shimura) for review):\n\n~~~ quote\nAdd a simple text file for further testing purposes.\n~~~ | 25 | 20 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_pr_opened_with_multiple_reviewers(self) -> None:
expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
expected_message =
self.check_webhook(
"pull_request_opened_with_multiple_reviewers", expected_topic, expected_message
)
```
###Assistant : [hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6) from `master` to `master` (assigned to [sougo](http://139.59.64.214:7990/users/sougo), [zura](http://139.59.64.214:7990/users/zura) and [shimura](http://139.59.64.214:7990/users/shimura) for review):\n\n~~~ quote\nAdd a simple text file for further testing purposes.\n~~~
|
1,783 | def make_grouping_by_key(schema, source, default=None):
return map_grouping(lambda s: source.get(s, default), schema)
|
Create a grouping from a schema by using the schema's scalar values to look up
items in the provided source object.
:param schema: A grouping of potential keys in source
:param source: Dict-like object to use to look up scalar grouping value using
scalar grouping values as keys
:param default: Default scalar value to use if grouping scalar key is not present
in source
:return: grouping
| 66 | 10 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def make_grouping_by_key(schema, source, default=None):
return map_grouping(lambda s: source.get(s, default), schema)
```
###Assistant :
Create a grouping from a schema by using the schema's scalar values to look up
items in the provided source object.
:param schema: A grouping of potential keys in source
:param source: Dict-like object to use to look up scalar grouping value using
scalar grouping values as keys
:param default: Default scalar value to use if grouping scalar key is not present
in source
:return: grouping
|
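To make the `make_grouping_by_key` entry above runnable on its own, here is a simplified re-implementation of the `map_grouping` helper it relies on (Dash's real helper handles more cases). The example walks a nested tuple/dict schema and looks each scalar up in a source dict.
```Python
def map_grouping(fn, schema):
    # Recursively apply fn to every scalar leaf of a tuple/list/dict grouping.
    if isinstance(schema, (tuple, list)):
        return type(schema)(map_grouping(fn, s) for s in schema)
    if isinstance(schema, dict):
        return {k: map_grouping(fn, v) for k, v in schema.items()}
    return fn(schema)

def make_grouping_by_key(schema, source, default=None):
    return map_grouping(lambda s: source.get(s, default), schema)

print(make_grouping_by_key(("a", {"x": "b", "y": "missing"}), {"a": 1, "b": 2}))
# -> (1, {'x': 2, 'y': None})
```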
1,784 | def taxicab_distance(self, p):
s, p = Point._normalize_dimension(self, Point(p))
return Add(*(abs(a - b) for a, b in zip(s, p)))
| The Taxicab Distance from self to point p.
Returns the sum of the horizontal and vertical distances to point p.
Parameters
==========
p : Point
Returns
=======
taxicab_distance : The sum of the horizontal
and vertical distances to point p.
See Also
========
sympy.geometry.point.Point.distance
Examples
========
>>> from sympy import Point
>>> p1, p2 = Point(1, 1), Point(4, 5)
>>> p1.taxicab_distance(p2)
7
| 62 | 18 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def taxicab_distance(self, p):
s, p = Point._normalize_dimension(self, Point(p))
return Add(*(abs(a - b) for a, b in zip(s, p)))
```
###Assistant : The Taxicab Distance from self to point p.
Returns the sum of the horizontal and vertical distances to point p.
Parameters
==========
p : Point
Returns
=======
taxicab_distance : The sum of the horizontal
and vertical distances to point p.
See Also
========
sympy.geometry.point.Point.distance
Examples
========
>>> from sympy import Point
>>> p1, p2 = Point(1, 1), Point(4, 5)
>>> p1.taxicab_distance(p2)
7
|
1,785 | def cosine_similarity(y_true, y_pred, axis=-1):
y_true = tf.linalg.l2_normalize(y_true, axis=axis)
y_pred = tf.linalg.l2_normalize(y_pred, axis=axis)
return tf.reduce_sum(y_true * y_pred, axis=axis)
| Computes the cosine similarity between labels and predictions.
Args:
y_true: The ground truth values.
y_pred: The prediction values.
axis: (Optional) Defaults to -1. The dimension along which the cosine
similarity is computed.
Returns:
Cosine similarity value.
| 36 | 17 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def cosine_similarity(y_true, y_pred, axis=-1):
y_true = tf.linalg.l2_normalize(y_true, axis=axis)
y_pred = tf.linalg.l2_normalize(y_pred, axis=axis)
return tf.reduce_sum(y_true * y_pred, axis=axis)
```
###Assistant : Computes the cosine similarity between labels and predictions.
Args:
y_true: The ground truth values.
y_pred: The prediction values.
axis: (Optional) Defaults to -1. The dimension along which the cosine
similarity is computed.
Returns:
Cosine similarity value.
|
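The cosine-similarity entry above is simply "L2-normalize, then sum the elementwise products". A NumPy sketch of the same computation, handy for checking the TensorFlow version on small inputs:
```Python
import numpy as np

def cosine_similarity_np(y_true, y_pred, axis=-1):
    y_true = y_true / np.linalg.norm(y_true, axis=axis, keepdims=True)
    y_pred = y_pred / np.linalg.norm(y_pred, axis=axis, keepdims=True)
    return np.sum(y_true * y_pred, axis=axis)

a = np.array([[0.0, 1.0], [1.0, 1.0]])
b = np.array([[1.0, 0.0], [1.0, 1.0]])
print(cosine_similarity_np(a, b))   # [0. 1.]
```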
1,786 | def rotate(self, theta):
a = math.cos(theta)
b = math.sin(theta)
mtx = self._mtx
# Operating and assigning one scalar at a time is much faster.
(xx, xy, x0), (yx, yy, y0), _ = mtx.tolist()
# mtx = [[a -b 0], [b a 0], [0 0 1]] * mtx
mtx[0, 0] = a * xx - b * yx
mtx[0, 1] = a * xy - b * yy
mtx[0, 2] = a * x0 - b * y0
mtx[1, 0] = b * xx + a * yx
mtx[1, 1] = b * xy + a * yy
mtx[1, 2] = b * x0 + a * y0
self.invalidate()
return self
|
Add a rotation (in radians) to this transform in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
| 28 | 110 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def rotate(self, theta):
a = math.cos(theta)
b = math.sin(theta)
mtx = self._mtx
# Operating and assigning one scalar at a time is much faster.
(xx, xy, x0), (yx, yy, y0), _ = mtx.tolist()
# mtx = [[a -b 0], [b a 0], [0 0 1]] * mtx
mtx[0, 0] = a * xx - b * yx
mtx[0, 1] = a * xy - b * yy
mtx[0, 2] = a * x0 - b * y0
mtx[1, 0] = b * xx + a * yx
mtx[1, 1] = b * xy + a * yy
mtx[1, 2] = b * x0 + a * y0
self.invalidate()
return self
```
###Assistant :
Add a rotation (in radians) to this transform in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
|
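The in-place `rotate` above is part of matplotlib's `Affine2D`; because it returns `self`, rotations can be chained. A short usage sketch:
```Python
import numpy as np
from matplotlib.transforms import Affine2D

t = Affine2D().rotate(np.pi / 2)      # 90 degrees, in radians
print(t.transform([[1.0, 0.0]]))      # ~[[0. 1.]]

t.rotate(np.pi / 2)                   # chained in place: 180 degrees total
print(t.transform([[1.0, 0.0]]))      # ~[[-1. 0.]]
```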
1,787 | def call_load(self, other_args):
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="load",
description=,
)
parser.add_argument(
"-c",
"--coin",
help="Coin to get. Must be coin symbol (e.g., btc, eth)",
dest="coin",
type=str,
required="-h" not in other_args,
)
parser.add_argument(
"-s",
"--start",
type=valid_date,
default=(datetime.now() - timedelta(days=1100)).strftime("%Y-%m-%d"),
dest="start",
help="The starting date (format YYYY-MM-DD) of the crypto",
)
parser.add_argument(
"--exchange",
help="Exchange to search",
dest="exchange",
type=str,
default="binance",
choices=self.exchanges,
)
parser.add_argument(
"-e",
"--end",
type=valid_date,
default=datetime.now().strftime("%Y-%m-%d"),
dest="end",
help="The ending date (format YYYY-MM-DD) of the crypto",
)
parser.add_argument(
"-i",
"--interval",
action="store",
dest="interval",
type=str,
default="1440",
choices=["1", "5", "15", "30", "60", "240", "1440", "10080", "43200"],
help="The interval of the crypto",
)
parser.add_argument(
"--vs",
help="Quote currency (what to view coin vs). e.g., usdc, usdt, ... if source is ccxt, usd, eur, ... otherwise", # noqa
dest="vs",
default="usdt",
type=str,
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-c")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
if ns_parser.source in ("YahooFinance", "CoinGecko"):
if ns_parser.vs == "usdt":
ns_parser.vs = "usd"
(self.current_df) = cryptocurrency_helpers.load(
symbol=ns_parser.coin.lower(),
vs_currency=ns_parser.vs,
end_date=ns_parser.end.strftime("%Y-%m-%d"),
start_date=ns_parser.start.strftime("%Y-%m-%d"),
interval=ns_parser.interval,
source=ns_parser.source,
exchange=ns_parser.exchange,
)
if not self.current_df.empty:
self.vs = ns_parser.vs
self.exchange = ns_parser.exchange
self.source = ns_parser.source
self.current_interval = ns_parser.interval
self.current_currency = ns_parser.vs
self.symbol = ns_parser.coin.lower()
cryptocurrency_helpers.show_quick_performance(
self.current_df,
self.symbol,
self.current_currency,
ns_parser.source,
ns_parser.exchange,
self.current_interval,
)
export_data(
ns_parser.export,
os.path.dirname(os.path.abspath(__file__)),
"load",
self.current_df.copy(),
)
 | Process load command. Load crypto currency to perform analysis on.
Yahoo Finance is used as default source.
Other sources can be used such as 'ccxt' or 'cg' with --source.
If you select 'ccxt', you can then select any exchange with --exchange.
You can also select a specific interval with --interval. | 49 | 198 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def call_load(self, other_args):
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="load",
description=,
)
parser.add_argument(
"-c",
"--coin",
help="Coin to get. Must be coin symbol (e.g., btc, eth)",
dest="coin",
type=str,
required="-h" not in other_args,
)
parser.add_argument(
"-s",
"--start",
type=valid_date,
default=(datetime.now() - timedelta(days=1100)).strftime("%Y-%m-%d"),
dest="start",
help="The starting date (format YYYY-MM-DD) of the crypto",
)
parser.add_argument(
"--exchange",
help="Exchange to search",
dest="exchange",
type=str,
default="binance",
choices=self.exchanges,
)
parser.add_argument(
"-e",
"--end",
type=valid_date,
default=datetime.now().strftime("%Y-%m-%d"),
dest="end",
help="The ending date (format YYYY-MM-DD) of the crypto",
)
parser.add_argument(
"-i",
"--interval",
action="store",
dest="interval",
type=str,
default="1440",
choices=["1", "5", "15", "30", "60", "240", "1440", "10080", "43200"],
help="The interval of the crypto",
)
parser.add_argument(
"--vs",
help="Quote currency (what to view coin vs). e.g., usdc, usdt, ... if source is ccxt, usd, eur, ... otherwise", # noqa
dest="vs",
default="usdt",
type=str,
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-c")
ns_parser = self.parse_known_args_and_warn(
parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
if ns_parser.source in ("YahooFinance", "CoinGecko"):
if ns_parser.vs == "usdt":
ns_parser.vs = "usd"
(self.current_df) = cryptocurrency_helpers.load(
symbol=ns_parser.coin.lower(),
vs_currency=ns_parser.vs,
end_date=ns_parser.end.strftime("%Y-%m-%d"),
start_date=ns_parser.start.strftime("%Y-%m-%d"),
interval=ns_parser.interval,
source=ns_parser.source,
exchange=ns_parser.exchange,
)
if not self.current_df.empty:
self.vs = ns_parser.vs
self.exchange = ns_parser.exchange
self.source = ns_parser.source
self.current_interval = ns_parser.interval
self.current_currency = ns_parser.vs
self.symbol = ns_parser.coin.lower()
cryptocurrency_helpers.show_quick_performance(
self.current_df,
self.symbol,
self.current_currency,
ns_parser.source,
ns_parser.exchange,
self.current_interval,
)
export_data(
ns_parser.export,
os.path.dirname(os.path.abspath(__file__)),
"load",
self.current_df.copy(),
)
```
###Assistant : Process load command. Load crypto currency to perform analysis on.
Yahoo Finance is used as default source.
Other sources can be used such as 'ccxt' or 'cg' with --source.
If you select 'ccxt', you can then select any exchange with --exchange.
You can also select a specific interval with --interval.
|
1,788 | def render_markdown(value):
schemes = '|'.join(get_config().ALLOWED_URL_SCHEMES)
# Strip HTML tags
value = strip_tags(value)
# Sanitize Markdown links
pattern = fr'\[([^\]]+)\]\((?!({schemes})).*:(.+)\)'
value = re.sub(pattern, '[\\1](\\3)', value, flags=re.IGNORECASE)
# Sanitize Markdown reference links
pattern = fr'\[(.+)\]:\s*(?!({schemes}))\w*:(.+)'
value = re.sub(pattern, '[\\1]: \\3', value, flags=re.IGNORECASE)
# Render Markdown
html = markdown(value, extensions=['fenced_code', 'tables', StrikethroughExtension()])
# If the string is not empty wrap it in rendered-markdown to style tables
if html:
html = f'<div class="rendered-markdown">{html}</div>'
return mark_safe(html)
@register.filter('json') |
Render a string as Markdown. This filter is invoked as "markdown":
{{ md_source_text|markdown }}
| 14 | 72 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def render_markdown(value):
schemes = '|'.join(get_config().ALLOWED_URL_SCHEMES)
# Strip HTML tags
value = strip_tags(value)
# Sanitize Markdown links
pattern = fr'\[([^\]]+)\]\((?!({schemes})).*:(.+)\)'
value = re.sub(pattern, '[\\1](\\3)', value, flags=re.IGNORECASE)
# Sanitize Markdown reference links
pattern = fr'\[(.+)\]:\s*(?!({schemes}))\w*:(.+)'
value = re.sub(pattern, '[\\1]: \\3', value, flags=re.IGNORECASE)
# Render Markdown
html = markdown(value, extensions=['fenced_code', 'tables', StrikethroughExtension()])
# If the string is not empty wrap it in rendered-markdown to style tables
if html:
html = f'<div class="rendered-markdown">{html}</div>'
return mark_safe(html)
@register.filter('json')
```
###Assistant :
Render a string as Markdown. This filter is invoked as "markdown":
{{ md_source_text|markdown }}
|
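The link-sanitizing regex in the `render_markdown` entry above rewrites any Markdown link whose scheme is not whitelisted so that only the part after the colon survives. A standalone sketch with a hypothetical whitelist (not NetBox's actual configuration):
```Python
import re

allowed_schemes = ("http", "https", "mailto")          # hypothetical whitelist
schemes = "|".join(allowed_schemes)
pattern = rf"\[([^\]]+)\]\((?!({schemes})).*:(.+)\)"

print(re.sub(pattern, r"[\1](\3)", "[click me](javascript:alert(1))", flags=re.IGNORECASE))
# -> [click me](alert(1))        scheme stripped
print(re.sub(pattern, r"[\1](\3)", "[ok](https://example.com)", flags=re.IGNORECASE))
# -> [ok](https://example.com)   left untouched
```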
1,789 | def _try_breadth_first(tasks, user):
tasks = tasks.annotate(annotations_count=Count('annotations'))
max_annotations_count = tasks.aggregate(Max('annotations_count'))['annotations_count__max']
if max_annotations_count == 0:
# there is no any labeled tasks found
return
# find any task with maximal amount of created annotations
not_solved_tasks_labeling_started = tasks.annotate(
reach_max_annotations_count=Case(
When(annotations_count=max_annotations_count, then=Value(True)),
default=Value(False),
output_field=BooleanField(),
)
)
not_solved_tasks_labeling_with_max_annotations = not_solved_tasks_labeling_started.filter(
reach_max_annotations_count=True
)
if not_solved_tasks_labeling_with_max_annotations.exists():
# try to complete tasks that are already in progress
return _get_random_unlocked(not_solved_tasks_labeling_with_max_annotations, user)
| Try to find tasks with maximum amount of annotations, since we are trying to label tasks as fast as possible
| 20 | 62 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _try_breadth_first(tasks, user):
tasks = tasks.annotate(annotations_count=Count('annotations'))
max_annotations_count = tasks.aggregate(Max('annotations_count'))['annotations_count__max']
if max_annotations_count == 0:
# there is no any labeled tasks found
return
# find any task with maximal amount of created annotations
not_solved_tasks_labeling_started = tasks.annotate(
reach_max_annotations_count=Case(
When(annotations_count=max_annotations_count, then=Value(True)),
default=Value(False),
output_field=BooleanField(),
)
)
not_solved_tasks_labeling_with_max_annotations = not_solved_tasks_labeling_started.filter(
reach_max_annotations_count=True
)
if not_solved_tasks_labeling_with_max_annotations.exists():
# try to complete tasks that are already in progress
return _get_random_unlocked(not_solved_tasks_labeling_with_max_annotations, user)
```
###Assistant : Try to find tasks with maximum amount of annotations, since we are trying to label tasks as fast as possible
|
1,790 | def test_pad_batch_dynamic_max(self):
view_requirements = {
"state_in_0": ViewRequirement(
"state_out_0",
shift=[-1],
used_for_training=False,
used_for_compute_actions=True,
batch_repeat_value=1,
)
}
max_seq_len = 20
num_seqs = np.random.randint(1, 20)
seq_lens = np.random.randint(1, max_seq_len, size=(num_seqs))
max_len = np.max(seq_lens)
sum_seq_lens = np.sum(seq_lens)
s1 = SampleBatch(
{
"a": np.arange(sum_seq_lens),
"b": np.arange(sum_seq_lens),
"seq_lens": seq_lens,
"state_in_0": [[0]] * num_seqs,
},
_max_seq_len=max_seq_len,
)
pad_batch_to_sequences_of_same_size(
s1,
max_seq_len=max_seq_len,
feature_keys=["a", "b"],
view_requirements=view_requirements,
)
check(s1.max_seq_len, max_len)
check(s1["a"].shape[0], max_len * num_seqs)
check(s1["b"].shape[0], max_len * num_seqs)
| Test pad_batch_to_sequences_of_same_size when dynamic_max = True | 6 | 66 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_pad_batch_dynamic_max(self):
view_requirements = {
"state_in_0": ViewRequirement(
"state_out_0",
shift=[-1],
used_for_training=False,
used_for_compute_actions=True,
batch_repeat_value=1,
)
}
max_seq_len = 20
num_seqs = np.random.randint(1, 20)
seq_lens = np.random.randint(1, max_seq_len, size=(num_seqs))
max_len = np.max(seq_lens)
sum_seq_lens = np.sum(seq_lens)
s1 = SampleBatch(
{
"a": np.arange(sum_seq_lens),
"b": np.arange(sum_seq_lens),
"seq_lens": seq_lens,
"state_in_0": [[0]] * num_seqs,
},
_max_seq_len=max_seq_len,
)
pad_batch_to_sequences_of_same_size(
s1,
max_seq_len=max_seq_len,
feature_keys=["a", "b"],
view_requirements=view_requirements,
)
check(s1.max_seq_len, max_len)
check(s1["a"].shape[0], max_len * num_seqs)
check(s1["b"].shape[0], max_len * num_seqs)
```
###Assistant : Test pad_batch_to_sequences_of_same_size when dynamic_max = True
|
1,791 | def sensors_fans():
ret = collections.defaultdict(list)
basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*')
if not basenames:
# CentOS has an intermediate /device directory:
# https://github.com/giampaolo/psutil/issues/971
basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*')
basenames = sorted(set([x.split('_')[0] for x in basenames]))
for base in basenames:
try:
current = int(bcat(base + '_input'))
except (IOError, OSError) as err:
debug(err)
continue
unit_name = cat(os.path.join(os.path.dirname(base), 'name'))
label = cat(base + '_label', fallback='')
ret[unit_name].append(_common.sfan(label, current))
return dict(ret)
| Return hardware fans info (for CPU and other peripherals) as a
dict including hardware label and current speed.
Implementation notes:
- /sys/class/hwmon looks like the most recent interface to
retrieve this info, and this implementation relies on it
only (old distros will probably use something else)
- lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
| 54 | 61 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def sensors_fans():
ret = collections.defaultdict(list)
basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*')
if not basenames:
# CentOS has an intermediate /device directory:
# https://github.com/giampaolo/psutil/issues/971
basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*')
basenames = sorted(set([x.split('_')[0] for x in basenames]))
for base in basenames:
try:
current = int(bcat(base + '_input'))
except (IOError, OSError) as err:
debug(err)
continue
unit_name = cat(os.path.join(os.path.dirname(base), 'name'))
label = cat(base + '_label', fallback='')
ret[unit_name].append(_common.sfan(label, current))
return dict(ret)
```
###Assistant : Return hardware fans info (for CPU and other peripherals) as a
dict including hardware label and current speed.
Implementation notes:
- /sys/class/hwmon looks like the most recent interface to
retrieve this info, and this implementation relies on it
only (old distros will probably use something else)
- lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
|
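The `sensors_fans` entry above is the Linux implementation behind the public `psutil.sensors_fans()` call. A usage sketch (Linux only; the result is an empty dict when no fan data is exposed under /sys/class/hwmon):
```Python
import psutil

for unit, entries in psutil.sensors_fans().items():
    for fan in entries:
        print(f"{unit} {fan.label or '(unlabeled)'}: {fan.current} RPM")
```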
1,792 | def factory(cls, loader):
cls.__check_eager_loader(loader)
return lambda *args, **kwargs: cls(loader(*args, **kwargs))
| Construct a callable which returns the eager loader made lazy. | 10 | 10 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def factory(cls, loader):
cls.__check_eager_loader(loader)
return lambda *args, **kwargs: cls(loader(*args, **kwargs))
```
###Assistant : Construct a callable which returns the eager loader made lazy.
|
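The `factory` classmethod above appears to be `importlib.util.LazyLoader.factory`, which wraps an eager loader class so finders can produce lazy loaders. The sketch below, adapted from the importlib documentation, uses `LazyLoader` directly to defer executing a module's body until its attributes are first accessed.
```Python
import importlib.util
import sys

def lazy_import(name):
    spec = importlib.util.find_spec(name)
    loader = importlib.util.LazyLoader(spec.loader)
    spec.loader = loader
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    loader.exec_module(module)          # registers the lazy module; body not run yet
    return module

json_mod = lazy_import("json")
print(json_mod.dumps({"a": 1}))          # first attribute access triggers the real import
```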
1,793 | def test_table_block_caption_render(self):
value = {
"table_caption": "caption",
"first_row_is_table_header": False,
"first_col_is_header": False,
"data": [
["Test 1", "Test 2", "Test 3"],
[None, None, None],
[None, None, None],
],
}
block = TableBlock()
result = block.render(value)
expected =
self.assertHTMLEqual(result, expected)
self.assertIn("Test 2", result)
|
Test a generic render with caption.
<table>
<caption>caption</caption>
<tbody>
<tr><td>Test 1</td><td>Test 2</td><td>Test 3</td></tr>
<tr><td></td><td></td><td></td></tr>
<tr><td></td><td></td><td></td></tr>
</tbody>
</table>
| 17 | 40 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_table_block_caption_render(self):
value = {
"table_caption": "caption",
"first_row_is_table_header": False,
"first_col_is_header": False,
"data": [
["Test 1", "Test 2", "Test 3"],
[None, None, None],
[None, None, None],
],
}
block = TableBlock()
result = block.render(value)
expected =
self.assertHTMLEqual(result, expected)
self.assertIn("Test 2", result)
```
###Assistant :
Test a generic render with caption.
<table>
<caption>caption</caption>
<tbody>
<tr><td>Test 1</td><td>Test 2</td><td>Test 3</td></tr>
<tr><td></td><td></td><td></td></tr>
<tr><td></td><td></td><td></td></tr>
</tbody>
</table>
|
1,794 | def stream_config_without_start_date():
return {
"client_id": "fake_client_id",
"client_secret": "fake_client_secret",
"refresh_token": "fake_refresh_token",
"is_sandbox": False,
"wait_timeout": 15,
}
| Generates streams settings for REST logic without start_date | 8 | 15 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def stream_config_without_start_date():
return {
"client_id": "fake_client_id",
"client_secret": "fake_client_secret",
"refresh_token": "fake_refresh_token",
"is_sandbox": False,
"wait_timeout": 15,
}
```
###Assistant : Generates streams settings for REST logic without start_date
|
1,795 | async def relay(self):
while True:
message = await self.queue.get()
try:
await self.send(message)
self.queue.task_done()
except RuntimeError:
# The connection was closed, just exit the task
return
|
Relay messages from the channel's queue and send them out. This is started
as a task.
| 16 | 25 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def relay(self):
while True:
message = await self.queue.get()
try:
await self.send(message)
self.queue.task_done()
except RuntimeError:
# The connection was closed, just exit the task
return
```
###Assistant :
Relay messages from the channel's queue and send them out. This is started
as a task.
|
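A self-contained sketch of the relay pattern in the entry above, using a plain `asyncio.Queue` and a stand-in `send` coroutine (the original lives on a Channels-style consumer, which is assumed rather than imported here):
```Python
import asyncio

async def relay(queue, send):
    while True:
        message = await queue.get()
        try:
            await send(message)
            queue.task_done()
        except RuntimeError:
            return                      # connection closed: just exit the task

async def main():
    queue = asyncio.Queue()
    sent = []

    async def fake_send(msg):
        sent.append(msg)

    task = asyncio.create_task(relay(queue, fake_send))
    for msg in ("a", "b", "c"):
        queue.put_nowait(msg)
    await queue.join()                  # wait until every message was relayed
    task.cancel()
    print(sent)                         # ['a', 'b', 'c']

asyncio.run(main())
```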
1,796 | def q_sample(self, x_start, t, noise=None):
if noise is None:
# noise = th.randn_like(x_start)
noise = paddle.randn(x_start.shape, x_start.dtype)
assert noise.shape == x_start.shape
return (_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
_extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
|
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
| 52 | 33 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def q_sample(self, x_start, t, noise=None):
if noise is None:
# noise = th.randn_like(x_start)
noise = paddle.randn(x_start.shape, x_start.dtype)
assert noise.shape == x_start.shape
return (_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
_extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
```
###Assistant :
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
|
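The `q_sample` entry above applies the closed-form forward diffusion step x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * eps. A NumPy sketch with an assumed linear beta schedule (the actual schedule and tensor framework in the row are Paddle-specific):
```Python
import numpy as np

rng = np.random.default_rng(0)
T = 1000
betas = np.linspace(1e-4, 0.02, T)          # assumed linear schedule
alphas_cumprod = np.cumprod(1.0 - betas)    # running product of (1 - beta_t)

def q_sample(x_start, t, noise=None):
    if noise is None:
        noise = rng.standard_normal(x_start.shape)
    a_bar = alphas_cumprod[t]
    return np.sqrt(a_bar) * x_start + np.sqrt(1.0 - a_bar) * noise

x0 = np.ones((2, 2))
print(q_sample(x0, t=10))     # still close to x0
print(q_sample(x0, t=999))    # dominated by noise
```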
1,797 | def get_views(self):
query = f"SELECT * FROM information_schema.views WHERE table_schema NOT IN ('information_schema', 'pg_catalog')"
result = self.run_native_query(query)
return result
|
List all views in PostgreSQL without the system views information_schema and pg_catalog
| 12 | 19 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_views(self):
query = f"SELECT * FROM information_schema.views WHERE table_schema NOT IN ('information_schema', 'pg_catalog')"
result = self.run_native_query(query)
return result
```
###Assistant :
List all views in PostgreSQL without the system views information_schema and pg_catalog
|
1,798 | def verify_ogr_field(self, ogr_field, model_field):
    if isinstance(ogr_field, OFTString) and isinstance(
        model_field, (models.CharField, models.TextField)
    ):
        if self.encoding and ogr_field.value is not None:
            # The encoding for OGR data sources may be specified here
            # (e.g., 'cp437' for Census Bureau boundary files).
            val = force_str(ogr_field.value, self.encoding)
        else:
            val = ogr_field.value
        if (
            model_field.max_length
            and val is not None
            and len(val) > model_field.max_length
        ):
            raise InvalidString(
                "%s model field maximum string length is %s, given %s characters."
                % (model_field.name, model_field.max_length, len(val))
            )
    elif isinstance(ogr_field, OFTReal) and isinstance(
        model_field, models.DecimalField
    ):
        try:
            # Creating an instance of the Decimal value to use.
            d = Decimal(str(ogr_field.value))
        except DecimalInvalidOperation:
            raise InvalidDecimal(
                "Could not construct decimal from: %s" % ogr_field.value
            )
        # Getting the decimal value as a tuple.
        dtup = d.as_tuple()
        digits = dtup[1]
        d_idx = dtup[2]  # index where the decimal is
        # Maximum amount of precision, or digits to the left of the decimal.
        max_prec = model_field.max_digits - model_field.decimal_places
        # Getting the digits to the left of the decimal place for the
        # given decimal.
        if d_idx < 0:
            n_prec = len(digits[:d_idx])
        else:
            n_prec = len(digits) + d_idx
        # If we have more than the maximum digits allowed, then throw an
        # InvalidDecimal exception.
        if n_prec > max_prec:
            raise InvalidDecimal(
                "A DecimalField with max_digits %d, decimal_places %d must "
                "round to an absolute value less than 10^%d."
                % (model_field.max_digits, model_field.decimal_places, max_prec)
            )
        val = d
    elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(
        model_field, models.IntegerField
    ):
        # Attempt to convert any OFTReal and OFTString value to an OFTInteger.
        try:
            val = int(ogr_field.value)
        except ValueError:
            raise InvalidInteger(
                "Could not construct integer from: %s" % ogr_field.value
            )
    else:
        val = ogr_field.value
    return val
|
Verify if the OGR Field contents are acceptable to the model field. If
they are, return the verified value, otherwise raise an exception.
| 23 | 274 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def verify_ogr_field(self, ogr_field, model_field):
    if isinstance(ogr_field, OFTString) and isinstance(
        model_field, (models.CharField, models.TextField)
    ):
        if self.encoding and ogr_field.value is not None:
            # The encoding for OGR data sources may be specified here
            # (e.g., 'cp437' for Census Bureau boundary files).
            val = force_str(ogr_field.value, self.encoding)
        else:
            val = ogr_field.value
        if (
            model_field.max_length
            and val is not None
            and len(val) > model_field.max_length
        ):
            raise InvalidString(
                "%s model field maximum string length is %s, given %s characters."
                % (model_field.name, model_field.max_length, len(val))
            )
    elif isinstance(ogr_field, OFTReal) and isinstance(
        model_field, models.DecimalField
    ):
        try:
            # Creating an instance of the Decimal value to use.
            d = Decimal(str(ogr_field.value))
        except DecimalInvalidOperation:
            raise InvalidDecimal(
                "Could not construct decimal from: %s" % ogr_field.value
            )
        # Getting the decimal value as a tuple.
        dtup = d.as_tuple()
        digits = dtup[1]
        d_idx = dtup[2]  # index where the decimal is
        # Maximum amount of precision, or digits to the left of the decimal.
        max_prec = model_field.max_digits - model_field.decimal_places
        # Getting the digits to the left of the decimal place for the
        # given decimal.
        if d_idx < 0:
            n_prec = len(digits[:d_idx])
        else:
            n_prec = len(digits) + d_idx
        # If we have more than the maximum digits allowed, then throw an
        # InvalidDecimal exception.
        if n_prec > max_prec:
            raise InvalidDecimal(
                "A DecimalField with max_digits %d, decimal_places %d must "
                "round to an absolute value less than 10^%d."
                % (model_field.max_digits, model_field.decimal_places, max_prec)
            )
        val = d
    elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(
        model_field, models.IntegerField
    ):
        # Attempt to convert any OFTReal and OFTString value to an OFTInteger.
        try:
            val = int(ogr_field.value)
        except ValueError:
            raise InvalidInteger(
                "Could not construct integer from: %s" % ogr_field.value
            )
    else:
        val = ogr_field.value
    return val
```
###Assistant :
Verify if the OGR Field contents are acceptable to the model field. If
they are, return the verified value, otherwise raise an exception.
|
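The least obvious step in the method above is the DecimalField branch, which counts how many digits fall to the left of the decimal point using Decimal.as_tuple(). A stand-alone sketch of just that digit-counting logic follows; the helper name and the sample field settings are illustrative.

```Python
from decimal import Decimal


def exceeds_precision(value, max_digits, decimal_places):
    # Re-implements only the digit-counting step of the DecimalField branch.
    digits, d_idx = Decimal(str(value)).as_tuple()[1:]
    max_prec = max_digits - decimal_places   # digits allowed left of the decimal point
    if d_idx < 0:
        n_prec = len(digits[:d_idx])         # negative exponent: drop the fractional digits
    else:
        n_prec = len(digits) + d_idx         # positive exponent: implied trailing zeros count too
    return n_prec > max_prec


# With max_digits=5 and decimal_places=2, at most 3 digits may sit left of the decimal point.
print(exceeds_precision("123.45", 5, 2))   # False: 3 integer digits
print(exceeds_precision("1234.5", 5, 2))   # True:  4 integer digits
print(exceeds_precision("12e3", 5, 2))     # True:  12000 has 5 integer digits
```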
1,799 | def get_streamer():
    if 'JINA_STREAMER_ARGS' in os.environ:
        args_dict = json.loads(os.environ['JINA_STREAMER_ARGS'])
        return GatewayStreamer(**args_dict)
    else:
        raise OSError('JINA_STREAMER_ARGS environment variable is not set')
|
Return a streamer object based on the current environment context.
The streamer object is constructed using runtime arguments stored in the `JINA_STREAMER_ARGS` environment variable.
If this method is used outside a Jina context (process not controlled/orchestrated by jina), this method will
raise an error.
The streamer object does not have tracing/instrumentation capabilities.
:return: Returns an instance of `GatewayStreamer`
| 58 | 19 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_streamer():
    if 'JINA_STREAMER_ARGS' in os.environ:
        args_dict = json.loads(os.environ['JINA_STREAMER_ARGS'])
        return GatewayStreamer(**args_dict)
    else:
        raise OSError('JINA_STREAMER_ARGS environment variable is not set')
```
###Assistant :
Return a streamer object based on the current environment context.
The streamer object is constructed using runtime arguments stored in the `JINA_STREAMER_ARGS` environment variable.
If this method is used outside a Jina context (process not controlled/orchestrated by jina), this method will
raise an error.
The streamer object does not have tracing/instrumentation capabilities.
:return: Returns an instance of `GatewayStreamer`
|
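The function above follows a general pattern: the orchestrator serializes runtime arguments as JSON into an environment variable, and the process deserializes them into constructor keyword arguments. A sketch of that pattern follows; FakeStreamer and the sample argument keys are placeholders, not the real GatewayStreamer API.

```Python
import json
import os


class FakeStreamer:
    """Placeholder for GatewayStreamer; accepts whatever kwargs the orchestrator serialized."""

    def __init__(self, **kwargs):
        self.kwargs = kwargs


def get_streamer():
    if 'JINA_STREAMER_ARGS' in os.environ:
        args_dict = json.loads(os.environ['JINA_STREAMER_ARGS'])
        return FakeStreamer(**args_dict)
    raise OSError('JINA_STREAMER_ARGS environment variable is not set')


# Inside a Jina-managed process the variable is already set; here it is faked to show the flow.
os.environ['JINA_STREAMER_ARGS'] = json.dumps({"example_arg": 1, "another_arg": "value"})
print(get_streamer().kwargs)   # {'example_arg': 1, 'another_arg': 'value'}
```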