index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
---|---|---|---|---|---|
64,588 |
humanize.number
|
fractional
|
Convert to fractional number.
There will be some cases where one might not want to show ugly decimal places for
floats and decimals.
This function returns a human-readable fractional number in form of fractions and
mixed fractions.
Pass in a string, or a number or a float, and this function returns:
* a string representation of a fraction
* or a whole number
* or a mixed fraction
* or the str output of the value, if it could not be converted
Examples:
```pycon
>>> fractional(0.3)
'3/10'
>>> fractional(1.3)
'1 3/10'
>>> fractional(float(1/3))
'1/3'
>>> fractional(1)
'1'
>>> fractional("ten")
'ten'
>>> fractional(None)
'None'
```
Args:
value (int, float, str): Value to convert.
Returns:
str: Fractional number as a string.
|
def fractional(value: NumberOrString) -> str:
    """Render *value* as a fraction, mixed fraction, or whole number.

    Anything `float()` can parse is converted; anything else falls back to
    `str(value)`. Non-finite numbers get the dedicated not-finite formatting.

    Examples:
        ```pycon
        >>> fractional(0.3)
        '3/10'
        >>> fractional(1.3)
        '1 3/10'
        >>> fractional(1)
        '1'
        >>> fractional("ten")
        'ten'
        ```

    Args:
        value (int, float, str): Value to convert.

    Returns:
        str: Fractional representation, or `str(value)` if it cannot be parsed.
    """
    try:
        as_float = float(value)
    except (TypeError, ValueError):
        return str(value)
    if not math.isfinite(as_float):
        return _format_not_finite(as_float)

    whole = int(as_float)
    # Approximate the leftover fractional part with a denominator of at most 1000.
    part = Fraction(as_float - whole).limit_denominator(1000)

    if whole and part.numerator == 0 and part.denominator == 1:
        # The input was effectively an integer (e.g. 1.0000).
        return f"{whole:.0f}"
    if not whole:
        return f"{part.numerator:.0f}/{part.denominator:.0f}"
    return f"{whole:.0f} {part.numerator:.0f}/{part.denominator:.0f}"
|
(value: float | str) -> str
|
64,591 |
humanize.number
|
intcomma
|
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes "3,000" and 45000 becomes "45,000". To maintain some
compatibility with Django's `intcomma`, this function also accepts floats.
Examples:
```pycon
>>> intcomma(100)
'100'
>>> intcomma("1000")
'1,000'
>>> intcomma(1_000_000)
'1,000,000'
>>> intcomma(1_234_567.25)
'1,234,567.25'
>>> intcomma(1234.5454545, 2)
'1,234.55'
>>> intcomma(14308.40, 1)
'14,308.4'
>>> intcomma("14308.40", 1)
'14,308.4'
>>> intcomma(None)
'None'
```
Args:
value (int, float, str): Integer or float to convert.
ndigits (int, None): Digits of precision for rounding after the decimal point.
Returns:
str: String containing commas every three digits.
|
def intcomma(value: NumberOrString, ndigits: int | None = None) -> str:
    """Group the digits of a number with the locale's thousands separator.

    3000 becomes "3,000" and 45000 becomes "45,000". Floats and numeric
    strings are also accepted, for compatibility with Django's `intcomma`.

    Examples:
        ```pycon
        >>> intcomma(1_000_000)
        '1,000,000'
        >>> intcomma(1234.5454545, 2)
        '1,234.55'
        >>> intcomma(None)
        'None'
        ```

    Args:
        value (int, float, str): Integer or float to convert.
        ndigits (int, None): Digits of precision for rounding after the decimal point.

    Returns:
        str: String containing the separator every three digits.
    """
    t_sep = thousands_separator()
    d_sep = decimal_separator()
    try:
        if isinstance(value, str):
            # Strip any existing grouping and normalise the decimal point.
            value = value.replace(t_sep, "").replace(d_sep, ".")
            parsed = float(value)
            if not math.isfinite(parsed):
                return _format_not_finite(parsed)
            value = float(value) if "." in value else int(value)
        else:
            parsed = float(value)
            if not math.isfinite(parsed):
                return _format_not_finite(parsed)
    except (TypeError, ValueError):
        return str(value)

    text = f"{value:.{ndigits}f}" if ndigits is not None else str(value)
    text = text.replace(".", d_sep)
    # Repeatedly insert a separator before the last 3 leading digits until stable.
    while True:
        grouped = re.sub(r"^(-?\d+)(\d{3})", rf"\g<1>{t_sep}\g<2>", text)
        if grouped == text:
            return grouped
        text = grouped
|
(value: float | str, ndigits: Optional[int] = None) -> str
|
64,592 |
humanize.number
|
intword
|
Converts a large integer to a friendly text representation.
Works best for numbers over 1 million. For example, 1_000_000 becomes "1.0 million",
1200000 becomes "1.2 million" and "1_200_000_000" becomes "1.2 billion". Supports up
to decillion (33 digits) and googol (100 digits).
Examples:
```pycon
>>> intword("100")
'100'
>>> intword("12400")
'12.4 thousand'
>>> intword("1000000")
'1.0 million'
>>> intword(1_200_000_000)
'1.2 billion'
>>> intword(8100000000000000000000000000000000)
'8.1 decillion'
>>> intword(None)
'None'
>>> intword("1234000", "%0.3f")
'1.234 million'
```
Args:
value (int, float, str): Integer to convert.
format (str): To change the number of decimal or general format of the number
portion.
Returns:
str: Friendly text representation as a string, unless the value passed could not
be coaxed into an `int`.
|
def intword(value: NumberOrString, format: str = "%.1f") -> str:
    """Converts a large integer to a friendly text representation.

    Works best for numbers over 1 million. For example, 1_000_000 becomes
    "1.0 million", 1200000 becomes "1.2 million" and "1_200_000_000" becomes
    "1.2 billion". Supports up to decillion (33 digits) and googol (100 digits).

    Examples:
        ```pycon
        >>> intword("100")
        '100'
        >>> intword("1000000")
        '1.0 million'
        >>> intword(1_200_000_000)
        '1.2 billion'
        >>> intword(None)
        'None'
        >>> intword("1234000", "%0.3f")
        '1.234 million'
        ```

    Args:
        value (int, float, str): Integer to convert.
        format (str): To change the number of decimal or general format of the
            number portion.

    Returns:
        str: Friendly text representation as a string, unless the value passed
            could not be coaxed into an `int`.
    """
    try:
        # Reject NaN/inf before coaxing; anything unparseable falls back to str().
        if not math.isfinite(float(value)):
            return _format_not_finite(float(value))
        value = int(value)
    except (TypeError, ValueError):
        return str(value)

    # Work on the magnitude; re-attach the sign at the end.
    if value < 0:
        value *= -1
        negative_prefix = "-"
    else:
        negative_prefix = ""

    # Below the smallest named power: print the plain integer.
    if value < powers[0]:
        return negative_prefix + str(value)
    for ordinal_, power in enumerate(powers[1:], 1):
        if value < power:
            # Scale down by the power whose name we are about to use.
            chopped = value / float(powers[ordinal_ - 1])
            powers_difference = powers[ordinal_] / powers[ordinal_ - 1]
            if float(format % chopped) == powers_difference:
                # The formatted value rounded up to exactly the next power
                # (e.g. "1000.0 thousand") — promote to the next unit instead.
                chopped = value / float(powers[ordinal_])
                singular, plural = human_powers[ordinal_]
                return (
                    negative_prefix
                    + " ".join(
                        [format, _ngettext(singular, plural, math.ceil(chopped))]
                    )
                ) % chopped
            singular, plural = human_powers[ordinal_ - 1]
            return (
                negative_prefix
                + " ".join([format, _ngettext(singular, plural, math.ceil(chopped))])
            ) % chopped
    # Larger than the largest named power: print the plain integer.
    return negative_prefix + str(value)
|
(value: float | str, format: str = '%.1f') -> str
|
64,593 |
humanize.number
|
metric
|
Return a value with a metric SI unit-prefix appended.
Examples:
```pycon
>>> metric(1500, "V")
'1.50 kV'
>>> metric(2e8, "W")
'200 MW'
>>> metric(220e-6, "F")
'220 μF'
>>> metric(1e-14, precision=4)
'10.00 f'
```
The unit prefix is always chosen so that no non-significant zero digits are required.
i.e. `123,000` will become `123k` instead of `0.123M` and `1,230,000` will become
`1.23M` instead of `1230K`. For numbers that are either too huge or too tiny to
represent without resorting to either leading or trailing zeroes, it falls back to
`scientific()`.
```pycon
>>> metric(1e40)
'1.00 x 10⁴⁰'
```
Args:
value (int, float): Input number.
unit (str): Optional base unit.
precision (int): The number of digits the output should contain.
Returns:
str:
|
def metric(value: float, unit: str = "", precision: int = 3) -> str:
"""Return a value with a metric SI unit-prefix appended.
Examples:
```pycon
>>> metric(1500, "V")
'1.50 kV'
>>> metric(2e8, "W")
'200 MW'
>>> metric(220e-6, "F")
'220 μF'
>>> metric(1e-14, precision=4)
'10.00 f'
```
The unit prefix is always chosen so that non-significant zero digits are required.
i.e. `123,000` will become `123k` instead of `0.123M` and `1,230,000` will become
`1.23M` instead of `1230K`. For numbers that are either too huge or too tiny to
represent without resorting to either leading or trailing zeroes, it falls back to
`scientific()`.
```pycon
>>> metric(1e40)
'1.00 x 10⁴⁰'
```
Args:
value (int, float): Input number.
unit (str): Optional base unit.
precision (int): The number of digits the output should contain.
Returns:
str:
"""
if not math.isfinite(value):
return _format_not_finite(value)
exponent = int(math.floor(math.log10(abs(value)))) if value != 0 else 0
if exponent >= 33 or exponent < -30:
return scientific(value, precision - 1) + unit
value /= 10 ** (exponent // 3 * 3)
if exponent >= 3:
ordinal_ = "kMGTPEZYRQ"[exponent // 3 - 1]
elif exponent < 0:
ordinal_ = "mμnpfazyrq"[(-exponent - 1) // 3]
else:
ordinal_ = ""
value_ = format(value, ".%if" % (precision - (exponent % 3) - 1))
if not (unit or ordinal_) or unit in ("°", "′", "″"):
space = ""
else:
space = " "
return f"{value_}{space}{ordinal_}{unit}"
|
(value: float, unit: str = '', precision: int = 3) -> str
|
64,594 |
humanize.time
|
naturaldate
|
Like `naturalday`, but append a year for dates more than ~five months away.
|
def naturaldate(value: dt.date | dt.datetime) -> str:
    """Like `naturalday`, but append a year for dates more than ~five months away."""
    try:
        value = dt.date(value.year, value.month, value.day)
    except (AttributeError, OverflowError, ValueError):
        # Not date-like, or its components are out of range for dt.date.
        return str(value)
    distance = _abs_timedelta(value - dt.date.today())
    # ~five months (5/12 of a year) is the cut-off for showing the year.
    fmt = "%b %d %Y" if distance.days >= 5 * 365 / 12 else "%b %d"
    return naturalday(value, fmt)
|
(value: datetime.date | datetime.datetime) -> str
|
64,595 |
humanize.time
|
naturalday
|
Return a natural day.
For date values that are tomorrow, today or yesterday compared to
present day return representing string. Otherwise, return a string
formatted according to `format`.
|
def naturalday(value: dt.date | dt.datetime, format: str = "%b %d") -> str:
"""Return a natural day.
For date values that are tomorrow, today or yesterday compared to
present day return representing string. Otherwise, return a string
formatted according to `format`.
"""
try:
value = dt.date(value.year, value.month, value.day)
except AttributeError:
# Passed value wasn't date-ish
return str(value)
except (OverflowError, ValueError):
# Date arguments out of range
return str(value)
delta = value - dt.date.today()
if delta.days == 0:
return _("today")
if delta.days == 1:
return _("tomorrow")
if delta.days == -1:
return _("yesterday")
return value.strftime(format)
|
(value: datetime.date | datetime.datetime, format: str = '%b %d') -> str
|
64,596 |
humanize.time
|
naturaldelta
|
Return a natural representation of a timedelta or number of seconds.
This is similar to `naturaltime`, but does not add tense to the result.
Args:
value (datetime.timedelta, int or float): A timedelta or a number of seconds.
months (bool): If `True`, then a number of months (based on 30.5 days) will be
used for fuzziness between years.
minimum_unit (str): The lowest unit that can be used.
Returns:
str (str or `value`): A natural representation of the amount of time
elapsed unless `value` is not datetime.timedelta or cannot be
converted to int. In that case, a `value` is returned unchanged.
Raises:
OverflowError: If `value` is too large to convert to datetime.timedelta.
Examples
Compare two timestamps in a custom local timezone::
import datetime as dt
from dateutil.tz import gettz
berlin = gettz("Europe/Berlin")
now = dt.datetime.now(tz=berlin)
later = now + dt.timedelta(minutes=30)
assert naturaldelta(later - now) == "30 minutes"
|
def naturaldelta(
    value: dt.timedelta | float,
    months: bool = True,
    minimum_unit: str = "seconds",
) -> str:
    """Return a natural representation of a timedelta or number of seconds.

    This is similar to `naturaltime`, but does not add tense to the result.

    Args:
        value (datetime.timedelta, int or float): A timedelta or a number of
            seconds.
        months (bool): If `True`, then a number of months (based on 30.5 days)
            will be used for fuzziness between years.
        minimum_unit (str): The lowest unit that can be used.

    Returns:
        str (str or `value`): A natural representation of the amount of time
            elapsed unless `value` is not datetime.timedelta or cannot be
            converted to int. In that case, a `value` is returned unchanged.

    Raises:
        OverflowError: If `value` is too large to convert to datetime.timedelta.
        ValueError: If `minimum_unit` is not seconds, milliseconds or
            microseconds.
    """
    # Only sub-second resolutions (or seconds itself) are valid minimum units.
    tmp = Unit[minimum_unit.upper()]
    if tmp not in (Unit.SECONDS, Unit.MILLISECONDS, Unit.MICROSECONDS):
        msg = f"Minimum unit '{minimum_unit}' not supported"
        raise ValueError(msg)
    min_unit = tmp

    if isinstance(value, dt.timedelta):
        delta = value
    else:
        try:
            # Numbers (and numeric strings via int()) are treated as seconds.
            value = int(value)
            delta = dt.timedelta(seconds=value)
        except (ValueError, TypeError):
            return str(value)

    use_months = months
    # NOTE(review): .seconds/.days are the normalized timedelta components.
    # For negative timedeltas Python stores days<0 with seconds>=0, so taking
    # abs() of each component separately is not a mirror of the positive case
    # — confirm this asymmetry is intended.
    seconds = abs(delta.seconds)
    days = abs(delta.days)
    # Fuzzy calendar math: 365-day years, 30.5-day months.
    years = days // 365
    days = days % 365
    num_months = int(days // 30.5)

    if not years and days < 1:
        # Sub-day range: pick the largest unit that fits.
        if seconds == 0:
            if min_unit == Unit.MICROSECONDS and delta.microseconds < 1000:
                return (
                    _ngettext("%d microsecond", "%d microseconds", delta.microseconds)
                    % delta.microseconds
                )
            if min_unit == Unit.MILLISECONDS or (
                min_unit == Unit.MICROSECONDS and 1000 <= delta.microseconds < 1_000_000
            ):
                milliseconds = delta.microseconds / 1000
                return (
                    _ngettext("%d millisecond", "%d milliseconds", int(milliseconds))
                    % milliseconds
                )
            return _("a moment")
        if seconds == 1:
            return _("a second")
        if seconds < 60:
            return _ngettext("%d second", "%d seconds", seconds) % seconds
        if 60 <= seconds < 120:
            return _("a minute")
        if 120 <= seconds < 3600:
            minutes = seconds // 60
            return _ngettext("%d minute", "%d minutes", minutes) % minutes
        if 3600 <= seconds < 3600 * 2:
            return _("an hour")
        if 3600 < seconds:
            # Anything >= 2 hours (the 1-2h band returned above).
            hours = seconds // 3600
            return _ngettext("%d hour", "%d hours", hours) % hours
    elif years == 0:
        if days == 1:
            return _("a day")
        if not use_months:
            return _ngettext("%d day", "%d days", days) % days
        if not num_months:
            return _ngettext("%d day", "%d days", days) % days
        if num_months == 1:
            return _("a month")
        return _ngettext("%d month", "%d months", num_months) % num_months
    elif years == 1:
        if not num_months and not days:
            return _("a year")
        if not num_months:
            return _ngettext("1 year, %d day", "1 year, %d days", days) % days
        if use_months:
            if num_months == 1:
                return _("1 year, 1 month")
            return (
                _ngettext("1 year, %d month", "1 year, %d months", num_months)
                % num_months
            )
        return _ngettext("1 year, %d day", "1 year, %d days", days) % days
    # Two or more years: swap %d for %s so the count can be comma-grouped.
    return _ngettext("%d year", "%d years", years).replace("%d", "%s") % intcomma(years)
|
(value: datetime.timedelta | float, months: bool = True, minimum_unit: str = 'seconds') -> str
|
64,597 |
humanize.filesize
|
naturalsize
|
Format a number of bytes like a human readable filesize (e.g. 10 kB).
By default, decimal suffixes (kB, MB) are used.
Non-GNU modes are compatible with jinja2's `filesizeformat` filter.
Examples:
```pycon
>>> naturalsize(3000000)
'3.0 MB'
>>> naturalsize(300, False, True)
'300B'
>>> naturalsize(3000, False, True)
'2.9K'
>>> naturalsize(3000, False, True, "%.3f")
'2.930K'
>>> naturalsize(3000, True)
'2.9 KiB'
>>> naturalsize(10**28)
'10000.0 YB'
>>> naturalsize(-4096, True)
'-4.0 KiB'
```
Args:
value (int, float, str): Integer to convert.
binary (bool): If `True`, uses binary suffixes (KiB, MiB) with base
2<sup>10</sup> instead of 10<sup>3</sup>.
gnu (bool): If `True`, the binary argument is ignored and GNU-style
(`ls -sh` style) prefixes are used (K, M) with the 2**10 definition.
format (str): Custom formatter.
Returns:
str: Human readable representation of a filesize.
|
def naturalsize(
    value: float | str,
    binary: bool = False,
    gnu: bool = False,
    format: str = "%.1f",
) -> str:
    """Format a number of bytes like a human readable filesize (e.g. 10 kB).

    By default, decimal suffixes (kB, MB) are used. Non-GNU modes are
    compatible with jinja2's `filesizeformat` filter.

    Examples:
        ```pycon
        >>> naturalsize(3000000)
        '3.0 MB'
        >>> naturalsize(3000, False, True)
        '2.9K'
        >>> naturalsize(3000, True)
        '2.9 KiB'
        ```

    Args:
        value (int, float, str): Integer to convert.
        binary (bool): If `True`, uses binary suffixes (KiB, MiB) with base
            2<sup>10</sup> instead of 10<sup>3</sup>.
        gnu (bool): If `True`, the binary argument is ignored and GNU-style
            (`ls -sh` style) prefixes are used (K, M) with the 2**10 definition.
        format (str): Custom formatter.

    Returns:
        str: Human readable representation of a filesize.
    """
    # GNU mode wins over binary mode when both are requested.
    style = "gnu" if gnu else ("binary" if binary else "decimal")
    chosen_suffixes = suffixes[style]
    base = 1024 if (gnu or binary) else 1000

    amount = float(value)
    magnitude = abs(amount)

    # Values under one multiple of the base are spelled out as bytes.
    if not gnu:
        if magnitude == 1:
            return "%d Byte" % amount
        if magnitude < base:
            return "%d Bytes" % amount
    elif magnitude < base:
        return "%dB" % amount

    # Find the first suffix whose upper bound exceeds the magnitude; the
    # loop variables keep their final values if none does (largest suffix).
    for i, s in enumerate(chosen_suffixes):
        unit = base ** (i + 2)
        if magnitude < unit:
            break
    return format % (base * amount / unit) + s
|
(value: float | str, binary: bool = False, gnu: bool = False, format: str = '%.1f') -> str
|
64,598 |
humanize.time
|
naturaltime
|
Return a natural representation of a time in a resolution that makes sense.
This is more or less compatible with Django's `naturaltime` filter.
Args:
value (datetime.datetime, datetime.timedelta, int or float): A `datetime`, a
`timedelta`, or a number of seconds.
future (bool): Ignored for `datetime`s and `timedelta`s, where the tense is
always figured out based on the current time. For integers and floats, the
return value will be past tense by default, unless future is `True`.
months (bool): If `True`, then a number of months (based on 30.5 days) will be
used for fuzziness between years.
minimum_unit (str): The lowest unit that can be used.
when (datetime.datetime): Point in time relative to which _value_ is
interpreted. Defaults to the current time in the local timezone.
Returns:
str: A natural representation of the input in a resolution that makes sense.
|
def naturaltime(
    value: dt.datetime | dt.timedelta | float,
    future: bool = False,
    months: bool = True,
    minimum_unit: str = "seconds",
    when: dt.datetime | None = None,
) -> str:
    """Return a natural representation of a time in a resolution that makes sense.

    This is more or less compatible with Django's `naturaltime` filter.

    Args:
        value (datetime.datetime, datetime.timedelta, int or float): A
            `datetime`, a `timedelta`, or a number of seconds.
        future (bool): Ignored for `datetime`s and `timedelta`s, where the
            tense is always figured out based on the current time. For
            integers and floats, the return value will be past tense by
            default, unless future is `True`.
        months (bool): If `True`, then a number of months (based on 30.5 days)
            will be used for fuzziness between years.
        minimum_unit (str): The lowest unit that can be used.
        when (datetime.datetime): Point in time relative to which _value_ is
            interpreted. Defaults to the current time in the local timezone.

    Returns:
        str: A natural representation of the input in a resolution that makes
            sense.
    """
    value = _convert_aware_datetime(value)
    when = _convert_aware_datetime(when)
    now = when or _now()

    date, delta = _date_and_delta(value, now=now)
    if date is None:
        return str(value)

    # For datetimes/timedeltas the tense is derived from the value itself;
    # the `future` flag only matters for plain numbers of seconds.
    if isinstance(value, (dt.datetime, dt.timedelta)):
        future = date > now
    tense = _("%s from now") if future else _("%s ago")

    phrase = naturaldelta(delta, months, minimum_unit)
    if phrase == _("a moment"):
        return _("now")
    return str(tense % phrase)
|
(value: datetime.datetime | datetime.timedelta | float, future: bool = False, months: bool = True, minimum_unit: str = 'seconds', when: Optional[datetime.datetime] = None) -> str
|
64,600 |
humanize.number
|
ordinal
|
Converts an integer to its ordinal as a string.
For example, 1 is "1st", 2 is "2nd", 3 is "3rd", etc. Works for any integer or
anything `int()` will turn into an integer. Anything else will return the output
of str(value).
Examples:
```pycon
>>> ordinal(1)
'1st'
>>> ordinal(1002)
'1002nd'
>>> ordinal(103)
'103rd'
>>> ordinal(4)
'4th'
>>> ordinal(12)
'12th'
>>> ordinal(101)
'101st'
>>> ordinal(111)
'111th'
>>> ordinal("something else")
'something else'
>>> ordinal([1, 2, 3]) == "[1, 2, 3]"
True
```
Args:
value (int, str, float): Integer to convert.
gender (str): Gender for translations. Accepts either "male" or "female".
Returns:
str: Ordinal string.
|
def ordinal(value: NumberOrString, gender: str = "male") -> str:
"""Converts an integer to its ordinal as a string.
For example, 1 is "1st", 2 is "2nd", 3 is "3rd", etc. Works for any integer or
anything `int()` will turn into an integer. Anything else will return the output
of str(value).
Examples:
```pycon
>>> ordinal(1)
'1st'
>>> ordinal(1002)
'1002nd'
>>> ordinal(103)
'103rd'
>>> ordinal(4)
'4th'
>>> ordinal(12)
'12th'
>>> ordinal(101)
'101st'
>>> ordinal(111)
'111th'
>>> ordinal("something else")
'something else'
>>> ordinal([1, 2, 3]) == "[1, 2, 3]"
True
```
Args:
value (int, str, float): Integer to convert.
gender (str): Gender for translations. Accepts either "male" or "female".
Returns:
str: Ordinal string.
"""
try:
if not math.isfinite(float(value)):
return _format_not_finite(float(value))
value = int(value)
except (TypeError, ValueError):
return str(value)
if gender == "male":
t = (
P_("0 (male)", "th"),
P_("1 (male)", "st"),
P_("2 (male)", "nd"),
P_("3 (male)", "rd"),
P_("4 (male)", "th"),
P_("5 (male)", "th"),
P_("6 (male)", "th"),
P_("7 (male)", "th"),
P_("8 (male)", "th"),
P_("9 (male)", "th"),
)
else:
t = (
P_("0 (female)", "th"),
P_("1 (female)", "st"),
P_("2 (female)", "nd"),
P_("3 (female)", "rd"),
P_("4 (female)", "th"),
P_("5 (female)", "th"),
P_("6 (female)", "th"),
P_("7 (female)", "th"),
P_("8 (female)", "th"),
P_("9 (female)", "th"),
)
if value % 100 in (11, 12, 13): # special case
return f"{value}{t[0]}"
return f"{value}{t[value % 10]}"
|
(value: float | str, gender: str = 'male') -> str
|
64,601 |
humanize.time
|
precisedelta
|
Return a precise representation of a timedelta.
```pycon
>>> import datetime as dt
>>> from humanize.time import precisedelta
>>> delta = dt.timedelta(seconds=3633, days=2, microseconds=123000)
>>> precisedelta(delta)
'2 days, 1 hour and 33.12 seconds'
```
A custom `format` can be specified to control how the fractional part
is represented:
```pycon
>>> precisedelta(delta, format="%0.4f")
'2 days, 1 hour and 33.1230 seconds'
```
Instead, the `minimum_unit` can be changed to have a better resolution;
the function will still readjust the unit to use the greatest of the
units that does not lose precision.
For example setting microseconds but still representing the date with milliseconds:
```pycon
>>> precisedelta(delta, minimum_unit="microseconds")
'2 days, 1 hour, 33 seconds and 123 milliseconds'
```
If desired, some units can be suppressed: you will not see them represented and the
time of the other units will be adjusted to keep representing the same timedelta:
```pycon
>>> precisedelta(delta, suppress=['days'])
'49 hours and 33.12 seconds'
```
Note that microseconds precision is lost if the seconds and all
the units below are suppressed:
```pycon
>>> delta = dt.timedelta(seconds=90, microseconds=100)
>>> precisedelta(delta, suppress=['seconds', 'milliseconds', 'microseconds'])
'1.50 minutes'
```
If the delta is too small to be represented with the minimum unit,
a value of zero will be returned:
```pycon
>>> delta = dt.timedelta(seconds=1)
>>> precisedelta(delta, minimum_unit="minutes")
'0.02 minutes'
>>> delta = dt.timedelta(seconds=0.1)
>>> precisedelta(delta, minimum_unit="minutes")
'0 minutes'
```
|
def precisedelta(
    value: dt.timedelta | int | None,
    minimum_unit: str = "seconds",
    suppress: typing.Iterable[str] = (),
    format: str = "%0.2f",
) -> str:
    """Return a precise representation of a timedelta.

    ```pycon
    >>> import datetime as dt
    >>> from humanize.time import precisedelta
    >>> delta = dt.timedelta(seconds=3633, days=2, microseconds=123000)
    >>> precisedelta(delta)
    '2 days, 1 hour and 33.12 seconds'
    ```

    A custom `format` can be specified to control how the fractional part
    is represented:

    ```pycon
    >>> precisedelta(delta, format="%0.4f")
    '2 days, 1 hour and 33.1230 seconds'
    ```

    Instead, the `minimum_unit` can be changed to have a better resolution;
    the function will still readjust the unit to use the greatest of the
    units that does not lose precision:

    ```pycon
    >>> precisedelta(delta, minimum_unit="microseconds")
    '2 days, 1 hour, 33 seconds and 123 milliseconds'
    ```

    If desired, some units can be suppressed: you will not see them
    represented and the time of the other units will be adjusted to keep
    representing the same timedelta:

    ```pycon
    >>> precisedelta(delta, suppress=['days'])
    '49 hours and 33.12 seconds'
    ```

    Note that microseconds precision is lost if the seconds and all
    the units below are suppressed:

    ```pycon
    >>> delta = dt.timedelta(seconds=90, microseconds=100)
    >>> precisedelta(delta, suppress=['seconds', 'milliseconds', 'microseconds'])
    '1.50 minutes'
    ```

    If the delta is too small to be represented with the minimum unit,
    a value of zero will be returned:

    ```pycon
    >>> delta = dt.timedelta(seconds=0.1)
    >>> precisedelta(delta, minimum_unit="minutes")
    '0 minutes'
    ```
    """
    date, delta = _date_and_delta(value)
    if date is None:
        return str(value)

    suppress_set = {Unit[s.upper()] for s in suppress}

    # Find a suitable minimum unit (it can be greater than the one that the
    # user gave us if that one is suppressed).
    min_unit = Unit[minimum_unit.upper()]
    min_unit = _suitable_minimum_unit(min_unit, suppress_set)
    del minimum_unit

    # Expand the suppressed units list/set to include all the units
    # that are below the minimum unit.
    suppress_set = _suppress_lower_units(min_unit, suppress_set)

    # Handy aliases for the normalized timedelta components.
    days = delta.days
    secs = delta.seconds
    usecs = delta.microseconds

    MICROSECONDS, MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS, MONTHS, YEARS = list(
        Unit
    )

    # Given DAYS compute YEARS and the remainder of DAYS as follows:
    #   if YEARS is the minimum unit, we cannot use DAYS so
    #   we will use a float for YEARS and 0 for DAYS:
    #       years, days = years/days, 0
    #
    #   if YEARS is suppressed, use DAYS:
    #       years, days = 0, days
    #
    #   otherwise:
    #       years, days = divmod(years, days)
    #
    # The same applies for months, hours, minutes and milliseconds below.
    years, days = _quotient_and_remainder(days, 365, YEARS, min_unit, suppress_set)
    months, days = _quotient_and_remainder(days, 30.5, MONTHS, min_unit, suppress_set)

    # If DAYS is not in suppress, we can represent the days but
    # if it is a suppressed unit, we need to carry it to a lower unit,
    # seconds in this case.
    #
    # The same applies for secs and usecs below.
    days, secs = _carry(days, secs, 24 * 3600, DAYS, min_unit, suppress_set)

    hours, secs = _quotient_and_remainder(secs, 3600, HOURS, min_unit, suppress_set)
    minutes, secs = _quotient_and_remainder(secs, 60, MINUTES, min_unit, suppress_set)

    secs, usecs = _carry(secs, usecs, 1e6, SECONDS, min_unit, suppress_set)

    msecs, usecs = _quotient_and_remainder(
        usecs, 1000, MILLISECONDS, min_unit, suppress_set
    )

    # If _unused != 0 we lost some precision.
    usecs, _unused = _carry(usecs, 0, 1, MICROSECONDS, min_unit, suppress_set)

    # Message templates, ordered largest unit first to mirror reversed(Unit).
    fmts = [
        ("%d year", "%d years", years),
        ("%d month", "%d months", months),
        ("%d day", "%d days", days),
        ("%d hour", "%d hours", hours),
        ("%d minute", "%d minutes", minutes),
        ("%d second", "%d seconds", secs),
        ("%d millisecond", "%d milliseconds", msecs),
        ("%d microsecond", "%d microseconds", usecs),
    ]

    texts: list[str] = []
    for unit, fmt in zip(reversed(Unit), fmts):
        singular_txt, plural_txt, fmt_value = fmt
        # Emit a unit when it has a value, or when nothing has been emitted
        # yet and we reached the minimum unit (so "0 minutes" is possible).
        if fmt_value > 0 or (not texts and unit == min_unit):
            # Values in (1, 2) pluralize as 2 (e.g. "1.50 minutes").
            _fmt_value = 2 if 1 < fmt_value < 2 else int(fmt_value)
            fmt_txt = _ngettext(singular_txt, plural_txt, _fmt_value)
            if unit == min_unit and math.modf(fmt_value)[0] > 0:
                # Fractional minimum unit: use the caller's float format.
                fmt_txt = fmt_txt.replace("%d", format)
            elif unit == YEARS:
                # Years get thousands grouping via intcomma.
                fmt_txt = fmt_txt.replace("%d", "%s")
                texts.append(fmt_txt % intcomma(fmt_value))
                continue

            texts.append(fmt_txt % fmt_value)

        if unit == min_unit:
            break

    if len(texts) == 1:
        return texts[0]

    head = ", ".join(texts[:-1])
    tail = texts[-1]

    return _("%s and %s") % (head, tail)
|
(value: datetime.timedelta | int | None, minimum_unit: str = 'seconds', suppress: Iterable[str] = (), format: str = '%0.2f') -> str
|
64,602 |
humanize.number
|
scientific
|
Return number in string scientific notation z.wq x 10ⁿ.
Examples:
```pycon
>>> scientific(float(0.3))
'3.00 x 10⁻¹'
>>> scientific(int(500))
'5.00 x 10²'
>>> scientific(-1000)
'-1.00 x 10³'
>>> scientific(1000, 1)
'1.0 x 10³'
>>> scientific(1000, 3)
'1.000 x 10³'
>>> scientific("99")
'9.90 x 10¹'
>>> scientific("foo")
'foo'
>>> scientific(None)
'None'
```
Args:
value (int, float, str): Input number.
precision (int): Number of decimal for first part of the number.
Returns:
str: Number in scientific notation z.wq x 10ⁿ.
|
def scientific(value: NumberOrString, precision: int = 2) -> str:
    """Return number in string scientific notation z.wq x 10ⁿ.

    Examples:
        ```pycon
        >>> scientific(float(0.3))
        '3.00 x 10⁻¹'
        >>> scientific(int(500))
        '5.00 x 10²'
        >>> scientific("foo")
        'foo'
        ```

    Args:
        value (int, float, str): Input number.
        precision (int): Number of decimal for first part of the number.

    Returns:
        str: Number in scientific notation z.wq x 10ⁿ.
    """
    # Map exponent characters onto their Unicode superscript forms.
    superscripts = {
        "0": "⁰",
        "1": "¹",
        "2": "²",
        "3": "³",
        "4": "⁴",
        "5": "⁵",
        "6": "⁶",
        "7": "⁷",
        "8": "⁸",
        "9": "⁹",
        "-": "⁻",
    }
    try:
        value = float(value)
    except (ValueError, TypeError):
        return str(value)
    if not math.isfinite(value):
        return _format_not_finite(value)

    mantissa, exp_text = f"{value:.{int(precision)}e}".split("e")
    # Remove redundant leading '+' or '0's (preserving the last '0' for 10⁰).
    exp_text = re.sub(r"^\+?(\-?)0*(.+)$", r"\1\2", exp_text)
    raised = "".join(superscripts[ch] for ch in exp_text)
    return mantissa + " x 10" + raised
|
(value: float | str, precision: int = 2) -> str
|
64,603 |
humanize.i18n
|
thousands_separator
|
Return the thousands separator for a locale, default to comma.
Returns:
str: Thousands separator.
|
def thousands_separator() -> str:
    """Return the thousands separator for the current locale.

    Falls back to a comma when no locale is set or the locale has no entry.

    Returns:
        str: Thousands separator.
    """
    try:
        return _THOUSANDS_SEPARATOR[_CURRENT.locale]
    except (AttributeError, KeyError):
        # No active locale, or locale not in the table: default to ",".
        return ","
|
() -> str
|
64,605 |
pdpyras
|
APISession
|
PagerDuty REST API v2 session object class.
Implements the most generic and oft-implemented aspects of PagerDuty's REST
API v2 as an opinionated wrapper of `requests.Session`_.
Inherits from :class:`PDSession`.
:param api_key:
REST API access token to use for HTTP requests
:param default_from:
The default email address to use in the ``From`` header when making
API calls using an account-level API access key.
:param auth_type:
The type of credential in use. If authenticating with an OAuth access
token, this must be set to ``oauth2`` or ``bearer``.
:param debug:
Sets :attr:`print_debug`. Set to True to enable verbose command line
output.
:type token: str
:type name: str or None
:type default_from: str or None
:type debug: bool
:members:
|
class APISession(PDSession):
"""
PagerDuty REST API v2 session object class.
Implements the most generic and oft-implemented aspects of PagerDuty's REST
API v2 as an opinionated wrapper of `requests.Session`_.
Inherits from :class:`PDSession`.
:param api_key:
REST API access token to use for HTTP requests
:param default_from:
The default email address to use in the ``From`` header when making
API calls using an account-level API access key.
:param auth_type:
The type of credential in use. If authenticating with an OAuth access
token, this must be set to ``oauth2`` or ``bearer``.
:param debug:
Sets :attr:`print_debug`. Set to True to enable verbose command line
output.
:type token: str
:type name: str or None
:type default_from: str or None
:type debug: bool
:members:
"""
api_call_counts = None
"""A dict object recording the number of API calls per endpoint"""
api_time = None
"""A dict object recording the total time of API calls to each endpoint"""
default_from = None
"""The default value to use as the ``From`` request header"""
default_page_size = 100
"""
This will be the default number of results requested in each page when
iterating/querying an index (the ``limit`` parameter).
"""
permitted_methods = ('GET', 'POST', 'PUT', 'DELETE')
url = 'https://api.pagerduty.com'
"""Base URL of the REST API"""
def __init__(self, api_key: str, default_from=None,
auth_type='token', debug=False):
self.api_call_counts = {}
self.api_time = {}
self.auth_type = auth_type
super(APISession, self).__init__(api_key, debug=debug)
self.default_from = default_from
self.headers.update({
'Accept': 'application/vnd.pagerduty+json;version=2',
})
def after_set_api_key(self):
self._subdomain = None
@property
def api_key_access(self) -> str:
"""
Memoized API key access type getter.
Will be "user" if the API key is a user-level token (all users should
have permission to create an API key with the same permissions as they
have in the PagerDuty web UI).
If the API key in use is an account-level API token (as only a global
administrator user can create), this property will be "account".
"""
if not hasattr(self, '_api_key_access') or self._api_key_access is None:
response = self.get('/users/me')
if response.status_code == 400:
message = try_decoding(response).get('error', '')
if 'account-level access token' in message:
self._api_key_access = 'account'
else:
self._api_key_access = None
self.log.error("Failed to obtain API key access level; "
"the API did not respond as expected.")
self.log.debug("Body = %s", truncate_text(response.text))
else:
self._api_key_access = 'user'
return self._api_key_access
@property
def auth_type(self) -> str:
"""
Defines the method of API authentication.
By default this is "token"; if "oauth2", the API key will be used.
"""
return self._auth_type
@auth_type.setter
def auth_type(self, value: str):
if value not in ('token', 'bearer', 'oauth2'):
raise AttributeError("auth_type value must be \"token\" (default) "
"or \"bearer\" or \"oauth\" to use OAuth2 authentication.")
self._auth_type = value
@property
def auth_header(self) -> dict:
if self.auth_type in ('bearer', 'oauth2'):
return {"Authorization": "Bearer "+self.api_key}
else:
return {"Authorization": "Token token="+self.api_key}
def dict_all(self, path: str, **kw) -> dict:
"""
Dictionary representation of resource collection results
With the exception of ``by``, all keyword arguments passed to this
method are also passed to :attr:`iter_all`; see the documentation on
that method for further details.
:param path:
The index endpoint URL to use.
:param by:
The attribute of each object to use for the key values of the
dictionary. This is ``id`` by default. Please note, there is no
uniqueness validation, so if you use an attribute that is not
distinct for the data set, this function will omit some data in the
results.
"""
by = kw.pop('by', 'id')
iterator = self.iter_all(path, **kw)
return {obj[by]:obj for obj in iterator}
def find(self, resource, query, attribute='name', params=None) \
-> Union[dict, None]:
"""
Finds an object of a given resource type exactly matching a query.
Works by querying a given resource index endpoint using the ``query``
parameter. To use this function on any given resource, the resource's
index must support the ``query`` parameter; otherwise, the function may
not work as expected. If the index ignores the parameter, for instance,
this function will take much longer to return; results will not be
constrained to those matching the query, and so every result in the
index will be downloaded and compared against the query up until a
matching result is found or all results have been checked.
:param resource:
The name of the resource endpoint to query, i.e.
``escalation_policies``
:param query:
The string to query for in the the index.
:param attribute:
The property of each result to compare against the query value when
searching for an exact match. By default it is ``name``, but when
searching for user by email (for example) it can be set to ``email``
:param params:
Optional additional parameters to use when querying.
:type resource: str
:type query: str
:type attribute: str
:type params: dict or None
:returns:
The dictionary representation of the result, if found; ``None`` will
be returned if there is no exact match result.
"""
query_params = {}
if params is not None:
query_params.update(params)
query_params.update({'query':query})
# When determining uniqueness, web/the API is largely case-insensitive:
simplify = lambda s: s.lower()
search_term = simplify(query)
equiv = lambda s: simplify(s[attribute]) == search_term
obj_iter = self.iter_all(resource, params=query_params)
return next(iter(filter(equiv, obj_iter)), None)
def iter_all(self, url, params=None, page_size=None, item_hook=None,
total=False) -> Iterator[dict]:
"""
Iterator for the contents of an index endpoint or query.
Automatically paginates and yields the results in each page, until all
matching results have been yielded or a HTTP error response is received.
If the URL to use supports cursor-based pagintation, then this will
return :attr:`iter_cursor` with the same keyword arguments. Otherwise,
it implements classic pagination, a.k.a. numeric pagination.
Each yielded value is a dict object representing a result returned from
the index. For example, if requesting the ``/users`` endpoint, each
yielded value will be an entry of the ``users`` array property in the
response.
:param url:
The index endpoint URL to use.
:param params:
Additional URL parameters to include.
:param page_size:
If set, the ``page_size`` argument will override the
``default_page_size`` parameter on the session and set the ``limit``
parameter to a custom value (default is 100), altering the number of
pagination results. The actual number of results in the response
will still take precedence, if it differs; this parameter and
``default_page_size`` only dictate what is requested of the API.
:param item_hook:
Callable object that will be invoked for each iteration, i.e. for
printing progress. It will be called with three parameters: a dict
representing a given result in the iteration, an int representing
the number of the item in the series, and an int (or str, as of
v5.0.0) representing the total number of items in the series. If the
total isn't knowable, the value passed is "?".
:param total:
If True, the ``total`` parameter will be included in API calls, and
the value for the third parameter to the item hook will be the total
count of records that match the query. Leaving this as False confers
a small performance advantage, as the API in this case does not have
to compute the total count of results in the query.
:type url: str
:type params: dict or None
:type page_size: int or None
:type total: bool
"""
# Get entity wrapping and validate that the URL being requested is
# likely to support pagination:
path = canonical_path(self.url, url)
endpoint = f"GET {path}"
# Short-circuit to cursor-based pagination if appropriate:
if path in CURSOR_BASED_PAGINATION_PATHS:
return self.iter_cursor(url, params=params)
nodes = path.split('/')
if is_path_param(nodes[-1]):
# NOTE: If this happens for a newer API, the path might need to be
# added to the EXPAND_PATHS dictionary in
# scripts/get_path_list/get_path_list.py, after which
# CANONICAL_PATHS will then need to be updated accordingly based on
# the new output of the script.
raise URLError(f"Path {path} (URL={url}) is formatted like an " \
"individual resource versus a resource collection. It is " \
"therefore assumed to not support pagination.")
_, wrapper = entity_wrappers('GET', path)
if wrapper is None:
raise URLError(f"Pagination is not supported for {endpoint}.")
# Parameters to send:
data = {}
if page_size is None:
data['limit'] = self.default_page_size
else:
data['limit'] = page_size
if total:
data['total'] = 1
if isinstance(params, (dict, list)):
# Override defaults with values given:
data.update(dict(params))
more = True
offset = 0
if params is not None:
offset = int(params.get('offset', 0))
n = 0
while more:
# Check the offset and limit:
data['offset'] = offset
highest_record_index = int(data['offset']) + int(data['limit'])
if highest_record_index > ITERATION_LIMIT:
iter_limit = '%d'%ITERATION_LIMIT
warn(
f"Stopping iter_all on {endpoint} at " \
f"limit+offset={highest_record_index} " \
'as this exceeds the maximum permitted by the API ' \
f"({iter_limit}). The set of results may be incomplete."
)
return
# Make the request and validate/unpack the response:
r = successful_response(
self.get(url, params=data.copy()),
context='classic pagination'
)
body = try_decoding(r)
results = unwrap(r, wrapper)
# Validate and update pagination parameters
#
# Note, the number of the results in the actual response is always
# the most appropriate amount to increment the offset by after
# receiving each page. If this is the last page, agination should
# stop anyways because the ``more`` parameter should evaluate to
# false.
#
# In short, the reasons why we don't trust the echoed ``limit``
# value or stick to the limit requested and hope the server honors
# it is that it could potentially result in skipping results or
# yielding duplicates if there's a mismatch, or potentially issues
# like #61
data['limit'] = len(results)
offset += data['limit']
more = False
total_count = '?'
if 'more' in body:
more = body['more']
else:
warn(
f"Endpoint GET {path} responded with no \"more\" property" \
' in the response, so pagination is not supported ' \
'(or this is an API bug). Only results from the first ' \
'request will be yielded. You can use rget with this ' \
'endpoint instead to avoid this warning.'
)
if 'total' in body:
total_count = body['total']
# Perform per-page actions on the response data
for result in results:
n += 1
# Call a callable object for each item, i.e. to print progress:
if hasattr(item_hook, '__call__'):
item_hook(result, n, total_count)
yield result
def iter_cursor(self, url, params=None, item_hook=None) -> Iterator[dict]:
"""
Iterator for results from an endpoint using cursor-based pagination.
:param url:
The index endpoint URL to use.
:param params:
Query parameters to include in the request.
:param item_hook:
A callable object that accepts 3 positional arguments; see
"""
path = canonical_path(self.url, url)
if path not in CURSOR_BASED_PAGINATION_PATHS:
raise URLError(f"{path} does not support cursor-based pagination.")
_, wrapper = entity_wrappers('GET', path)
user_params = {}
if isinstance(params, (dict, list)):
# Override defaults with values given:
user_params.update(dict(params))
more = True
next_cursor = None
total = 0
while more:
# Update parameters and request a new page:
if next_cursor:
user_params.update({'cursor': next_cursor})
r = successful_response(
self.get(url, params=user_params),
context='cursor-based pagination',
)
# Unpack and yield results
body = try_decoding(r)
results = unwrap(r, wrapper)
for result in results:
total += 1
if hasattr(item_hook, '__call__'):
item_hook(result, total, '?')
yield result
# Advance to the next page
next_cursor = body.get('next_cursor', None)
more = bool(next_cursor)
    @resource_url
    @auto_json
    def jget(self, url, **kw) -> Union[dict, list]:
        """
        Performs a GET request, returning the JSON-decoded body as a dictionary
        (or list, depending on the endpoint's response schema).

        :param url:
            The path/URL to request, or a dict API resource bearing a ``self``
            key (handled by the ``resource_url`` decorator).
        :param **kw:
            Custom keyword arguments to pass to ``requests.Session.get``
        """
        return self.get(url, **kw)
    @resource_url
    @auto_json
    def jpost(self, url, **kw) -> Union[dict, list]:
        """
        Performs a POST request, returning the JSON-decoded body as a dictionary
        (or list, depending on the endpoint's response schema).

        :param url:
            The path/URL to request, or a dict API resource bearing a ``self``
            key (handled by the ``resource_url`` decorator).
        :param **kw:
            Custom keyword arguments to pass to ``requests.Session.post``
        """
        return self.post(url, **kw)
    @resource_url
    @auto_json
    def jput(self, url, **kw) -> Union[dict, list]:
        """
        Performs a PUT request, returning the JSON-decoded body as a dictionary
        (or list, depending on the endpoint's response schema).

        :param url:
            The path/URL to request, or a dict API resource bearing a ``self``
            key (handled by the ``resource_url`` decorator).
        :param **kw:
            Custom keyword arguments to pass to ``requests.Session.put``
        """
        return self.put(url, **kw)
def list_all(self, url, **kw) -> list:
"""
Returns a list of all objects from a given index endpoint.
All keyword arguments passed to this function are also passed directly
to :attr:`iter_all`; see the documentation on that method for details.
:param url:
The index endpoint URL to use.
"""
return list(self.iter_all(url, **kw))
def persist(self, resource, attr, values, update=False):
"""
Finds or creates and returns a resource with a matching attribute
Given a resource name, an attribute to use as an idempotency key and a
set of attribute:value pairs as a dict, create a resource with the
specified attributes if it doesn't exist already and return the resource
persisted via the API (whether or not it already existed).
:param resource:
The URL to use when creating the new resource or searching for an
existing one. The underlying AP must support entity wrapping to use
this method with it.
:param attr:
Name of the attribute to use as the idempotency key. For instance,
"email" when the resource is "users" will not create the user if a
user with the email address given in ``values`` already exists.
:param values:
The content of the resource to be created, if it does not already
exist. This must contain an item with a key that is the same as the
``attr`` argument.
:param update:
(New in 4.4.0) If set to True, any existing resource will be updated
with the values supplied.
:type resource: str
:type attr: str
:type values: dict
:type update: bool
:rtype: dict
"""
if attr not in values:
raise ValueError("Argument `values` must contain a key equal "
"to the `attr` argument (expected idempotency key: '%s')."%attr)
existing = self.find(resource, values[attr], attribute=attr)
if existing:
if update:
original = {}
original.update(existing)
existing.update(values)
if original != existing:
existing = self.rput(existing, json=existing)
return existing
else:
return self.rpost(resource, json=values)
    def postprocess(self, response: Response, suffix=None):
        """
        Records performance information / request metadata about the API call.

        Updates :attr:`api_call_counts` and :attr:`api_time` keyed by
        "METHOD /canonical/path", and emits debug/error logs.

        :param response:
            The `requests.Response`_ object returned by the request method
        :param suffix:
            Optional suffix to append to the key
            (NOTE(review): currently unused in this body — confirm intent)
        :type response: `requests.Response`_
        :type suffix: str or None
        """
        method = response.request.method.upper()
        url = response.request.url
        status = response.status_code
        request_date = response.headers.get('date', '(missing header)')
        request_id = response.headers.get('x-request-id', '(missing header)')
        request_time = response.elapsed.total_seconds()
        try:
            endpoint = "%s %s"%(method, canonical_path(self.url, url))
        except URLError:
            # This is necessary so that profiling can also support using the
            # basic get / post / put / delete methods with APIs that are not yet
            # explicitly supported by inclusion in CANONICAL_PATHS.
            endpoint = "%s %s"%(method, url)
        # Accumulate per-endpoint call count and total wall time:
        self.api_call_counts.setdefault(endpoint, 0)
        self.api_time.setdefault(endpoint, 0.0)
        self.api_call_counts[endpoint] += 1
        self.api_time[endpoint] += request_time
        # Request ID / timestamp logging
        self.log.debug("Request completed: #method=%s|#url=%s|#status=%d|"
            "#x_request_id=%s|#date=%s|#wall_time_s=%g", method, url, status,
            request_id, request_date, request_time)
        # Server-side (5xx) failures get an error log with the diagnostics
        # PagerDuty support asks for (request ID and date):
        if int(status/100) == 5:
            self.log.error("PagerDuty API server error (%d)! "
                "For additional diagnostics, contact PagerDuty support "
                "and reference x_request_id=%s / date=%s",
                status, request_id, request_date)
def prepare_headers(self, method, user_headers={}) -> dict:
headers = deepcopy(self.headers)
headers['User-Agent'] = self.user_agent
if self.default_from is not None:
headers['From'] = self.default_from
if method in ('POST', 'PUT'):
headers['Content-Type'] = 'application/json'
if user_headers:
headers.update(user_headers)
return headers
    @resource_url
    @requires_success
    def rdelete(self, resource, **kw) -> Response:
        """
        Delete a resource.

        :param resource:
            The path/URL to which to send the request, or a dict object
            representing an API resource that contains an item with key ``self``
            whose value is the URL of the resource.
        :param **kw:
            Custom keyword arguments to pass to ``requests.Session.delete``
        :returns:
            The `requests.Response`_ object, passed through the
            ``requires_success`` decorator.
        :type resource: str or dict
        """
        return self.delete(resource, **kw)
    @resource_url
    @wrapped_entities
    def rget(self, resource, **kw) -> Union[dict, list]:
        """
        Wrapped-entity-aware GET function.

        Retrieves a resource via GET and returns the wrapped entity in the
        response (unwrapping is handled by the ``wrapped_entities`` decorator).

        :param resource:
            The path/URL to which to send the request, or a dict object
            representing an API resource that contains an item with key ``self``
            whose value is the URL of the resource.
        :param **kw:
            Custom keyword arguments to pass to ``requests.Session.get``
        :returns:
            Dictionary representation of the requested object
        :type resource: str or dict
        """
        return self.get(resource, **kw)
    @wrapped_entities
    def rpost(self, path, **kw) -> Union[dict, list]:
        """
        Wrapped-entity-aware POST function.

        Creates a resource and returns the created entity if successful.

        Note: unlike :attr:`rget`/:attr:`rput`, this method is not decorated
        with ``resource_url``, since creation targets an index endpoint
        rather than an individual resource URL.

        :param path:
            The path/URL to which to send the POST request, which should be an
            index endpoint.
        :param **kw:
            Custom keyword arguments to pass to ``requests.Session.post``
        :returns:
            Dictionary representation of the created object
        :type path: str
        """
        return self.post(path, **kw)
    @resource_url
    @wrapped_entities
    def rput(self, resource, **kw) -> Union[dict, list]:
        """
        Wrapped-entity-aware PUT function.

        Update an individual resource, returning the wrapped entity.

        :param resource:
            The path/URL to which to send the request, or a dict object
            representing an API resource that contains an item with key ``self``
            whose value is the URL of the resource.
        :param **kw:
            Custom keyword arguments to pass to ``requests.Session.put``
        :returns:
            Dictionary representation of the updated object
        :type resource: str or dict
        """
        return self.put(resource, **kw)
@property
def subdomain(self) -> str:
"""
Subdomain of the PagerDuty account of the API access token.
:type: str or None
"""
if not hasattr(self, '_subdomain') or self._subdomain is None:
try:
url = self.rget('users', params={'limit':1})[0]['html_url']
self._subdomain = url.split('/')[2].split('.')[0]
except PDClientError as e:
self.log.error("Failed to obtain subdomain; encountered error.")
self._subdomain = None
raise e
return self._subdomain
@property
def total_call_count(self) -> int:
"""The total number of API calls made by this instance."""
return sum(self.api_call_counts.values())
@property
def total_call_time(self) -> float:
"""The total time spent making API calls."""
return sum(self.api_time.values())
    @property
    def trunc_token(self) -> str:
        """Truncated token for secure display/identification purposes.

        Delegates to ``last_4`` — presumably only the last four characters of
        the API key are exposed; confirm against that helper's definition.
        """
        return last_4(self.api_key)
|
(api_key: str, default_from=None, auth_type='token', debug=False)
|
64,609 |
pdpyras
|
__init__
| null |
def __init__(self, api_key: str, default_from=None,
        auth_type='token', debug=False):
    """Initialize a REST API v2 session.

    :param api_key: REST API access token to use for HTTP requests.
    :param default_from: Default email address for the ``From`` header.
    :param auth_type: Credential type: ``token`` (default), ``bearer``
        or ``oauth2``.
    :param debug: Enables verbose command line output.
    """
    # Profiling accumulators; created before any request can be made.
    self.api_call_counts = {}
    self.api_time = {}
    # Assigned before the parent constructor runs, since the parent
    # receives the API key and may build auth headers from it.
    self.auth_type = auth_type
    super(APISession, self).__init__(api_key, debug=debug)
    self.default_from = default_from
    self.headers['Accept'] = 'application/vnd.pagerduty+json;version=2'
|
(self, api_key: str, default_from=None, auth_type='token', debug=False)
|
64,611 |
pdpyras
|
after_set_api_key
| null |
def after_set_api_key(self):
    """Drop the memoized subdomain; called after the API key is (re)set."""
    self._subdomain = None
|
(self)
|
64,613 |
pdpyras
|
cooldown_factor
| null |
def cooldown_factor(self) -> float:
    """Return the retry sleep multiplier.

    The base sleep timer is scaled by ``1 + stagger_cooldown * random()``,
    adding random jitter to back-off delays.
    """
    stagger = 1 + self.stagger_cooldown * random()
    return self.sleep_timer_base * stagger
|
(self) -> float
|
64,615 |
pdpyras
|
dict_all
|
Dictionary representation of resource collection results
With the exception of ``by``, all keyword arguments passed to this
method are also passed to :attr:`iter_all`; see the documentation on
that method for further details.
:param path:
The index endpoint URL to use.
:param by:
The attribute of each object to use for the key values of the
dictionary. This is ``id`` by default. Please note, there is no
uniqueness validation, so if you use an attribute that is not
distinct for the data set, this function will omit some data in the
results.
|
def dict_all(self, path: str, **kw) -> dict:
    """
    Build a dictionary of all results from a resource collection endpoint.

    With the exception of ``by``, all keyword arguments passed to this
    method are forwarded to :attr:`iter_all`; see the documentation on that
    method for further details.

    :param path:
        The index endpoint URL to use.
    :param by:
        The attribute of each object to use for the dictionary keys; ``id``
        by default. No uniqueness validation is performed, so if the chosen
        attribute is not distinct for the data set, some data will be
        omitted from the results.
    """
    key_attr = kw.pop('by', 'id')
    return {entry[key_attr]: entry for entry in self.iter_all(path, **kw)}
|
(self, path: str, **kw) -> dict
|
64,616 |
pdpyras
|
find
|
Finds an object of a given resource type exactly matching a query.
Works by querying a given resource index endpoint using the ``query``
parameter. To use this function on any given resource, the resource's
index must support the ``query`` parameter; otherwise, the function may
not work as expected. If the index ignores the parameter, for instance,
this function will take much longer to return; results will not be
constrained to those matching the query, and so every result in the
index will be downloaded and compared against the query up until a
matching result is found or all results have been checked.
:param resource:
The name of the resource endpoint to query, i.e.
``escalation_policies``
:param query:
    The string to query for in the index.
:param attribute:
The property of each result to compare against the query value when
searching for an exact match. By default it is ``name``, but when
searching for user by email (for example) it can be set to ``email``
:param params:
Optional additional parameters to use when querying.
:type resource: str
:type query: str
:type attribute: str
:type params: dict or None
:returns:
The dictionary representation of the result, if found; ``None`` will
be returned if there is no exact match result.
|
def find(self, resource, query, attribute='name', params=None) \
        -> Union[dict, None]:
    """
    Find an object of a given resource type exactly matching a query.

    Works by querying a given resource index endpoint using the ``query``
    parameter. To use this function on any given resource, the resource's
    index must support the ``query`` parameter; otherwise, the function may
    not work as expected. If the index ignores the parameter, this function
    will take much longer to return: every result in the index will be
    downloaded and compared against the query until a match is found or all
    results have been checked.

    :param resource:
        The name of the resource endpoint to query, i.e.
        ``escalation_policies``
    :param query:
        The string to query for in the index.
    :param attribute:
        The property of each result to compare against the query value when
        searching for an exact match. By default it is ``name``, but when
        searching for a user by email (for example) it can be set to
        ``email``.
    :param params:
        Optional additional parameters to use when querying.
    :type resource: str
    :type query: str
    :type attribute: str
    :type params: dict or None
    :returns:
        The dictionary representation of the result, if found; ``None``
        otherwise.
    """
    search_params = {} if params is None else dict(params)
    search_params['query'] = query
    # When determining uniqueness, web/the API is largely case-insensitive:
    target = query.lower()
    for candidate in self.iter_all(resource, params=search_params):
        if candidate[attribute].lower() == target:
            return candidate
    return None
|
(self, resource, query, attribute='name', params=None) -> Optional[dict]
|
64,621 |
pdpyras
|
iter_all
|
Iterator for the contents of an index endpoint or query.
Automatically paginates and yields the results in each page, until all
matching results have been yielded or a HTTP error response is received.
If the URL to use supports cursor-based pagination, then this will
return :attr:`iter_cursor` with the same keyword arguments. Otherwise,
it implements classic pagination, a.k.a. numeric pagination.
Each yielded value is a dict object representing a result returned from
the index. For example, if requesting the ``/users`` endpoint, each
yielded value will be an entry of the ``users`` array property in the
response.
:param url:
The index endpoint URL to use.
:param params:
Additional URL parameters to include.
:param page_size:
If set, the ``page_size`` argument will override the
``default_page_size`` parameter on the session and set the ``limit``
parameter to a custom value (default is 100), altering the number of
pagination results. The actual number of results in the response
will still take precedence, if it differs; this parameter and
``default_page_size`` only dictate what is requested of the API.
:param item_hook:
Callable object that will be invoked for each iteration, i.e. for
printing progress. It will be called with three parameters: a dict
representing a given result in the iteration, an int representing
the number of the item in the series, and an int (or str, as of
v5.0.0) representing the total number of items in the series. If the
total isn't knowable, the value passed is "?".
:param total:
If True, the ``total`` parameter will be included in API calls, and
the value for the third parameter to the item hook will be the total
count of records that match the query. Leaving this as False confers
a small performance advantage, as the API in this case does not have
to compute the total count of results in the query.
:type url: str
:type params: dict or None
:type page_size: int or None
:type total: bool
|
def iter_all(self, url, params=None, page_size=None, item_hook=None,
        total=False) -> Iterator[dict]:
    """
    Iterator for the contents of an index endpoint or query.

    Automatically paginates and yields the results in each page, until all
    matching results have been yielded or a HTTP error response is received.

    If the URL to use supports cursor-based pagination, this delegates to
    :attr:`iter_cursor` with the same keyword arguments. Otherwise, it
    implements classic pagination, a.k.a. numeric pagination.

    Each yielded value is a dict object representing a result returned from
    the index. For example, if requesting the ``/users`` endpoint, each
    yielded value will be an entry of the ``users`` array property in the
    response.

    :param url:
        The index endpoint URL to use.
    :param params:
        Additional URL parameters to include.
    :param page_size:
        If set, the ``page_size`` argument will override the
        ``default_page_size`` parameter on the session and set the ``limit``
        parameter to a custom value (default is 100), altering the number of
        pagination results. The actual number of results in the response
        will still take precedence, if it differs; this parameter and
        ``default_page_size`` only dictate what is requested of the API.
    :param item_hook:
        Callable object that will be invoked for each iteration, i.e. for
        printing progress. It will be called with three parameters: a dict
        representing a given result in the iteration, an int representing
        the number of the item in the series, and an int (or str, as of
        v5.0.0) representing the total number of items in the series. If the
        total isn't knowable, the value passed is "?".
    :param total:
        If True, the ``total`` parameter will be included in API calls, and
        the value for the third parameter to the item hook will be the total
        count of records that match the query. Leaving this as False confers
        a small performance advantage, as the API in this case does not have
        to compute the total count of results in the query.
    :type url: str
    :type params: dict or None
    :type page_size: int or None
    :type total: bool
    """
    # Get entity wrapping and validate that the URL being requested is
    # likely to support pagination:
    path = canonical_path(self.url, url)
    endpoint = f"GET {path}"
    # Short-circuit to cursor-based pagination if appropriate.
    #
    # BUGFIX: this function is a generator (it contains ``yield``), so a
    # plain ``return self.iter_cursor(...)`` would merely set the delegate
    # iterator as the StopIteration value and yield nothing at all for
    # cursor-paginated endpoints. ``yield from`` (PEP 380) is required to
    # actually produce its items.
    if path in CURSOR_BASED_PAGINATION_PATHS:
        yield from self.iter_cursor(url, params=params)
        return
    nodes = path.split('/')
    if is_path_param(nodes[-1]):
        # NOTE: If this happens for a newer API, the path might need to be
        # added to the EXPAND_PATHS dictionary in
        # scripts/get_path_list/get_path_list.py, after which
        # CANONICAL_PATHS will then need to be updated accordingly based on
        # the new output of the script.
        raise URLError(f"Path {path} (URL={url}) is formatted like an " \
            "individual resource versus a resource collection. It is " \
            "therefore assumed to not support pagination.")
    _, wrapper = entity_wrappers('GET', path)
    if wrapper is None:
        raise URLError(f"Pagination is not supported for {endpoint}.")
    # Parameters to send:
    data = {}
    if page_size is None:
        data['limit'] = self.default_page_size
    else:
        data['limit'] = page_size
    if total:
        data['total'] = 1
    if isinstance(params, (dict, list)):
        # Override defaults with values given:
        data.update(dict(params))
    more = True
    # Read the starting offset from the merged parameters rather than from
    # ``params`` directly: a list of key/value pairs (explicitly permitted
    # above) has no ``get`` method and previously raised AttributeError.
    offset = int(data.get('offset', 0))
    n = 0
    while more:
        # Check the offset and limit:
        data['offset'] = offset
        highest_record_index = int(data['offset']) + int(data['limit'])
        if highest_record_index > ITERATION_LIMIT:
            iter_limit = '%d'%ITERATION_LIMIT
            warn(
                f"Stopping iter_all on {endpoint} at " \
                f"limit+offset={highest_record_index} " \
                'as this exceeds the maximum permitted by the API ' \
                f"({iter_limit}). The set of results may be incomplete."
            )
            return
        # Make the request and validate/unpack the response:
        r = successful_response(
            self.get(url, params=data.copy()),
            context='classic pagination'
        )
        body = try_decoding(r)
        results = unwrap(r, wrapper)
        # Validate and update pagination parameters
        #
        # Note, the number of the results in the actual response is always
        # the most appropriate amount to increment the offset by after
        # receiving each page. If this is the last page, pagination should
        # stop anyways because the ``more`` parameter should evaluate to
        # false.
        #
        # In short, the reasons why we don't trust the echoed ``limit``
        # value or stick to the limit requested and hope the server honors
        # it is that it could potentially result in skipping results or
        # yielding duplicates if there's a mismatch, or potentially issues
        # like #61
        data['limit'] = len(results)
        offset += data['limit']
        more = False
        total_count = '?'
        if 'more' in body:
            more = body['more']
        else:
            warn(
                f"Endpoint GET {path} responded with no \"more\" property" \
                ' in the response, so pagination is not supported ' \
                '(or this is an API bug). Only results from the first ' \
                'request will be yielded. You can use rget with this ' \
                'endpoint instead to avoid this warning.'
            )
        if 'total' in body:
            total_count = body['total']
        # Perform per-page actions on the response data
        for result in results:
            n += 1
            # Call a callable object for each item, i.e. to print progress:
            if callable(item_hook):
                item_hook(result, n, total_count)
            yield result
|
(self, url, params=None, page_size=None, item_hook=None, total=False) -> Iterator[dict]
|
64,622 |
pdpyras
|
iter_cursor
|
Iterator for results from an endpoint using cursor-based pagination.
:param url:
The index endpoint URL to use.
:param params:
Query parameters to include in the request.
:param item_hook:
    A callable object that accepts 3 positional arguments; see
    :attr:`iter_all` for details.
|
def iter_cursor(self, url, params=None, item_hook=None) -> Iterator[dict]:
    """
    Iterator for results from an endpoint using cursor-based pagination.

    :param url:
        The index endpoint URL to use.
    :param params:
        Query parameters to include in the request.
    :param item_hook:
        A callable object that accepts 3 positional arguments; see
    """
    path = canonical_path(self.url, url)
    if path not in CURSOR_BASED_PAGINATION_PATHS:
        raise URLError(f"{path} does not support cursor-based pagination.")
    _, wrapper = entity_wrappers('GET', path)
    # Overlay any user-supplied query parameters:
    query = {}
    if isinstance(params, (dict, list)):
        query.update(dict(params))
    cursor = None
    count = 0
    while True:
        # Request the next page, including the cursor once we have one:
        if cursor:
            query['cursor'] = cursor
        response = successful_response(
            self.get(url, params=query),
            context='cursor-based pagination',
        )
        # Decode the body and unwrap the page of results:
        decoded = try_decoding(response)
        for entry in unwrap(response, wrapper):
            count += 1
            # Invoke the per-item callback, e.g. for progress display:
            if callable(item_hook):
                item_hook(entry, count, '?')
            yield entry
        # Stop once the API no longer hands back a next_cursor value:
        cursor = decoded.get('next_cursor', None)
        if not cursor:
            break
|
(self, url, params=None, item_hook=None) -> Iterator[dict]
|
64,623 |
pdpyras
|
call
|
Performs a GET request, returning the JSON-decoded body as a dictionary
|
def resource_url(method):
    """
    API call decorator that allows passing a resource dict as the path/URL

    Most resources returned by the API will contain a ``self`` attribute that
    is the URL of the resource itself. Using this decorator allows the
    implementer to pass either a URL/path or such a resource dictionary as the
    ``path`` argument, thus eliminating the need to re-construct the resource
    URL or hold it in a temporary variable.
    """
    original_doc = method.__doc__
    def call(self, resource, **kw):
        # Accept a plain URL string, or an API resource dict carrying its
        # own URL under the "self" key:
        if type(resource) is dict and 'self' in resource: # passing an object
            target = resource['self']
        elif type(resource) is str:
            target = resource
        else:
            name = method.__name__
            raise URLError(f"Value passed to {name} is not a str or dict with "
                "key 'self'")
        return method(self, target, **kw)
    call.__doc__ = original_doc
    return call
|
(self, resource, **kw)
|
64,624 |
pdpyras
|
call
|
Performs a POST request, returning the JSON-decoded body as a dictionary
|
def resource_url(method):
    """
    API call decorator that allows passing a resource dict as the path/URL

    Most resources returned by the API will contain a ``self`` attribute that
    is the URL of the resource itself. Using this decorator allows the
    implementer to pass either a URL/path or such a resource dictionary as the
    ``path`` argument, thus eliminating the need to re-construct the resource
    URL or hold it in a temporary variable.
    """
    original_doc = method.__doc__
    def call(self, resource, **kw):
        # Accept a plain URL string, or an API resource dict carrying its
        # own URL under the "self" key:
        if type(resource) is dict and 'self' in resource: # passing an object
            target = resource['self']
        elif type(resource) is str:
            target = resource
        else:
            name = method.__name__
            raise URLError(f"Value passed to {name} is not a str or dict with "
                "key 'self'")
        return method(self, target, **kw)
    call.__doc__ = original_doc
    return call
|
(self, resource, **kw)
|
64,625 |
pdpyras
|
call
|
Performs a PUT request, returning the JSON-decoded body as a dictionary
|
def resource_url(method):
    """
    API call decorator that allows passing a resource dict as the path/URL

    Most resources returned by the API will contain a ``self`` attribute that
    is the URL of the resource itself. Using this decorator allows the
    implementer to pass either a URL/path or such a resource dictionary as the
    ``path`` argument, thus eliminating the need to re-construct the resource
    URL or hold it in a temporary variable.
    """
    original_doc = method.__doc__
    def call(self, resource, **kw):
        # Accept a plain URL string, or an API resource dict carrying its
        # own URL under the "self" key:
        if type(resource) is dict and 'self' in resource: # passing an object
            target = resource['self']
        elif type(resource) is str:
            target = resource
        else:
            name = method.__name__
            raise URLError(f"Value passed to {name} is not a str or dict with "
                "key 'self'")
        return method(self, target, **kw)
    call.__doc__ = original_doc
    return call
|
(self, resource, **kw)
|
64,626 |
pdpyras
|
list_all
|
Returns a list of all objects from a given index endpoint.
All keyword arguments passed to this function are also passed directly
to :attr:`iter_all`; see the documentation on that method for details.
:param url:
The index endpoint URL to use.
|
def list_all(self, url, **kw) -> list:
    """
    Returns a list of all objects from a given index endpoint.

    All keyword arguments passed to this function are also passed directly
    to :attr:`iter_all`; see the documentation on that method for details.

    :param url:
        The index endpoint URL to use.
    """
    # Exhaust the pagination iterator into a concrete list:
    return [entry for entry in self.iter_all(url, **kw)]
|
(self, url, **kw) -> list
|
64,629 |
pdpyras
|
normalize_params
|
Modify the user-supplied parameters to ease implementation
Current behavior:
* If a parameter's value is of type list, and the parameter name does
not already end in "[]", then the square brackets are appended to keep
in line with the requirement that all set filters' parameter names end
in "[]".
:returns:
The query parameters after modification
|
def normalize_params(self, params) -> dict:
    """
    Modify the user-supplied parameters to ease implementation

    Current behavior:

    * If a parameter's value is of type list, and the parameter name does
      not already end in "[]", then the square brackets are appended to keep
      in line with the requirement that all set filters' parameter names end
      in "[]".

    :returns:
        The query parameters after modification
    """
    def normalized_key(name, value):
        # Set filters must carry the "[]" suffix on their parameter names:
        if type(value) is list and not name.endswith('[]'):
            return name + '[]'
        return name
    return {normalized_key(k, v): v for (k, v) in params.items()}
|
(self, params) -> dict
|
64,630 |
pdpyras
|
normalize_url
|
Compose the URL whether it is a path or an already-complete URL
|
def normalize_url(self, url) -> str:
    """Compose the URL whether it is a path or an already-complete URL"""
    # Delegates to the module-level normalize_url() helper, passing this
    # session's base URL (self.url) — presumably so relative paths get the
    # base URL prepended; confirm against the module-level helper.
    return normalize_url(self.url, url)
|
(self, url) -> str
|
64,633 |
pdpyras
|
persist
|
Finds or creates and returns a resource with a matching attribute
Given a resource name, an attribute to use as an idempotency key and a
set of attribute:value pairs as a dict, create a resource with the
specified attributes if it doesn't exist already and return the resource
persisted via the API (whether or not it already existed).
:param resource:
The URL to use when creating the new resource or searching for an
existing one. The underlying AP must support entity wrapping to use
this method with it.
:param attr:
Name of the attribute to use as the idempotency key. For instance,
"email" when the resource is "users" will not create the user if a
user with the email address given in ``values`` already exists.
:param values:
The content of the resource to be created, if it does not already
exist. This must contain an item with a key that is the same as the
``attr`` argument.
:param update:
(New in 4.4.0) If set to True, any existing resource will be updated
with the values supplied.
:type resource: str
:type attr: str
:type values: dict
:type update: bool
:rtype: dict
|
def persist(self, resource, attr, values, update=False):
    """
    Finds or creates and returns a resource with a matching attribute

    Given a resource name, an attribute to use as an idempotency key and a
    set of attribute:value pairs as a dict, create a resource with the
    specified attributes if it doesn't exist already and return the resource
    persisted via the API (whether or not it already existed).

    :param resource:
        The URL to use when creating the new resource or searching for an
        existing one. The underlying API must support entity wrapping to use
        this method with it.
    :param attr:
        Name of the attribute to use as the idempotency key. For instance,
        "email" when the resource is "users" will not create the user if a
        user with the email address given in ``values`` already exists.
    :param values:
        The content of the resource to be created, if it does not already
        exist. This must contain an item with a key that is the same as the
        ``attr`` argument.
    :param update:
        (New in 4.4.0) If set to True, any existing resource will be updated
        with the values supplied.
    :type resource: str
    :type attr: str
    :type values: dict
    :type update: bool
    :rtype: dict
    :raises ValueError: if ``values`` does not contain the key named ``attr``
    """
    if attr not in values:
        raise ValueError("Argument `values` must contain a key equal "
            f"to the `attr` argument (expected idempotency key: '{attr}').")
    existing = self.find(resource, values[attr], attribute=attr)
    if existing:
        if update:
            # Snapshot before merging so a PUT is only issued when the
            # supplied values actually change the resource:
            original = dict(existing)
            existing.update(values)
            if original != existing:
                existing = self.rput(existing, json=existing)
        return existing
    else:
        return self.rpost(resource, json=values)
|
(self, resource, attr, values, update=False)
|
64,635 |
pdpyras
|
postprocess
|
Records performance information / request metadata about the API call.
:param response:
The `requests.Response`_ object returned by the request method
:param suffix:
Optional suffix to append to the key
:type method: str
:type response: `requests.Response`_
:type suffix: str or None
|
def postprocess(self, response: Response, suffix=None):
    """
    Records performance information / request metadata about the API call.

    :param response:
        The `requests.Response`_ object returned by the request method
    :param suffix:
        Optional suffix to append to the key
    :type method: str
    :type response: `requests.Response`_
    :type suffix: str or None
    """
    # NOTE(review): ``suffix`` is accepted but not used anywhere in this
    # implementation.
    method = response.request.method.upper()
    url = response.request.url
    status = response.status_code
    request_date = response.headers.get('date', '(missing header)')
    request_id = response.headers.get('x-request-id', '(missing header)')
    request_time = response.elapsed.total_seconds()
    try:
        endpoint = "%s %s"%(method, canonical_path(self.url, url))
    except URLError:
        # This is necessary so that profiling can also support using the
        # basic get / post / put / delete methods with APIs that are not yet
        # explicitly supported by inclusion in CANONICAL_PATHS.
        endpoint = "%s %s"%(method, url)
    # Accumulate per-endpoint call counts and total wall-clock seconds:
    self.api_call_counts.setdefault(endpoint, 0)
    self.api_time.setdefault(endpoint, 0.0)
    self.api_call_counts[endpoint] += 1
    self.api_time[endpoint] += request_time
    # Request ID / timestamp logging
    self.log.debug("Request completed: #method=%s|#url=%s|#status=%d|"
        "#x_request_id=%s|#date=%s|#wall_time_s=%g", method, url, status,
        request_id, request_date, request_time)
    # Surface any 5xx response loudly, with the diagnostic identifiers
    # PagerDuty support would ask for:
    if int(status/100) == 5:
        self.log.error("PagerDuty API server error (%d)! "
            "For additional diagnostics, contact PagerDuty support "
            "and reference x_request_id=%s / date=%s",
            status, request_id, request_date)
|
(self, response: requests.models.Response, suffix=None)
|
64,636 |
pdpyras
|
prepare_headers
| null |
def prepare_headers(self, method, user_headers=None) -> dict:
    """
    Build the final set of headers for a request.

    :param method:
        The HTTP method of the request; a JSON ``Content-Type`` header is
        added for POST and PUT requests.
    :param user_headers:
        Optional user-supplied headers that override the defaults.
    :returns:
        The final dict of headers to use in the request
    """
    # Deep-copy the session-level defaults so per-request additions never
    # leak back into shared state:
    headers = deepcopy(self.headers)
    headers['User-Agent'] = self.user_agent
    if self.default_from is not None:
        headers['From'] = self.default_from
    if method in ('POST', 'PUT'):
        headers['Content-Type'] = 'application/json'
    # The default was previously a mutable ``{}``; ``None`` avoids the
    # shared-mutable-default pitfall while preserving behavior.
    if user_headers:
        headers.update(user_headers)
    return headers
|
(self, method, user_headers={}) -> dict
|
64,639 |
pdpyras
|
call
|
Delete a resource.
:param resource:
The path/URL to which to send the request, or a dict object
representing an API resource that contains an item with key ``self``
whose value is the URL of the resource.
:param **kw:
Custom keyword arguments to pass to ``requests.Session.delete``
:type resource: str or dict
|
def resource_url(method):
    """
    API call decorator that allows passing a resource dict as the path/URL

    Most resources returned by the API will contain a ``self`` attribute that
    is the URL of the resource itself. Using this decorator allows the
    implementer to pass either a URL/path or such a resource dictionary as the
    ``path`` argument, thus eliminating the need to re-construct the resource
    URL or hold it in a temporary variable.
    """
    original_doc = method.__doc__
    def call(self, resource, **kw):
        # Accept a plain URL string, or an API resource dict carrying its
        # own URL under the "self" key:
        if type(resource) is dict and 'self' in resource: # passing an object
            target = resource['self']
        elif type(resource) is str:
            target = resource
        else:
            name = method.__name__
            raise URLError(f"Value passed to {name} is not a str or dict with "
                "key 'self'")
        return method(self, target, **kw)
    call.__doc__ = original_doc
    return call
|
(self, resource, **kw)
|
64,643 |
pdpyras
|
request
|
Make a generic PagerDuty API request.
:param method:
The request method to use. Case-insensitive. May be one of get, put,
post or delete.
:param url:
The path/URL to request. If it does not start with the base URL, the
base URL will be prepended.
:param **kwargs:
Custom keyword arguments to pass to ``requests.Session.request``.
:type method: str
:type url: str
:returns:
The `requests.Response`_ object corresponding to the HTTP response
|
def request(self, method, url, **kwargs) -> Response:
    """
    Make a generic PagerDuty API request.

    :param method:
        The request method to use. Case-insensitive. May be one of get, put,
        post or delete.
    :param url:
        The path/URL to request. If it does not start with the base URL, the
        base URL will be prepended.
    :param **kwargs:
        Custom keyword arguments to pass to ``requests.Session.request``.
    :type method: str
    :type url: str
    :returns:
        The `requests.Response`_ object corresponding to the HTTP response
    :raises PDClientError:
        if the method is unsupported, or on a non-transient network error
    :raises PDHTTPError:
        on a 401 Unauthorized response (retrying would be futile)
    """
    sleep_timer = self.sleep_timer
    network_attempts = 0
    # Per-status-code count of retried HTTP errors:
    http_attempts = {}
    method = method.strip().upper()
    if method not in self.permitted_methods:
        m_str = ', '.join(self.permitted_methods)
        raise PDClientError(f"Method {method} not supported by this API. " \
            f"Permitted methods: {m_str}")
    req_kw = deepcopy(kwargs)
    full_url = self.normalize_url(url)
    endpoint = "%s %s"%(method.upper(), full_url)
    # Add in any headers specified in keyword arguments:
    headers = kwargs.get('headers', {})
    req_kw.update({
        'headers': self.prepare_headers(method, user_headers=headers),
        'stream': False,
        'timeout': self.timeout
    })
    # Special changes to user-supplied parameters, for convenience
    if 'params' in kwargs and kwargs['params']:
        req_kw['params'] = self.normalize_params(kwargs['params'])
    # Make the request (and repeat w/cooldown if the rate limit is reached):
    while True:
        try:
            response = self.parent.request(method, full_url, **req_kw)
            self.postprocess(response)
        except (HTTPError, PoolError, RequestException) as e:
            # Transient network failure: back off (multiplying the sleep
            # timer by the cooldown factor each time) and retry, up to the
            # configured maximum number of attempts.
            network_attempts += 1
            if network_attempts > self.max_network_attempts:
                error_msg = f"{endpoint}: Non-transient network " \
                    'error; exceeded maximum number of attempts ' \
                    f"({self.max_network_attempts}) to connect to the API."
                raise PDClientError(error_msg) from e
            sleep_timer *= self.cooldown_factor()
            self.log.warning(
                "%s: HTTP or network error: %s. retrying in %g seconds.",
                endpoint, e.__class__.__name__, sleep_timer)
            time.sleep(sleep_timer)
            continue
        status = response.status_code
        retry_logic = self.retry.get(status, 0)
        if not response.ok and retry_logic != 0:
            # Take special action as defined by the retry logic
            if retry_logic != -1:
                # Retry a specific number of times (-1 implies infinite)
                if http_attempts.get(status, 0)>=retry_logic or \
                        sum(http_attempts.values())>self.max_http_attempts:
                    # Report the effective per-status limit, capped at the
                    # session-wide maximum:
                    lower_limit = retry_logic
                    if lower_limit > self.max_http_attempts:
                        lower_limit = self.max_http_attempts
                    # NOTE(review): the f-prefix on the next string is
                    # redundant (no braces); the %-style args are supplied
                    # to the logger, which formats lazily.
                    self.log.error(
                        f"%s: Non-transient HTTP error: exceeded " \
                        'maximum number of attempts (%d) to make a ' \
                        'successful request. Currently encountering ' \
                        'status %d.', endpoint, lower_limit, status)
                    return response
                http_attempts[status] = 1 + http_attempts.get(status, 0)
            sleep_timer *= self.cooldown_factor()
            self.log.warning("%s: HTTP error (%d); retrying in %g seconds.",
                endpoint, status, sleep_timer)
            time.sleep(sleep_timer)
            continue
        elif status == 429:
            # Rate limited: wait with exponential cooldown and retry
            # indefinitely (the limit resets over time).
            sleep_timer *= self.cooldown_factor()
            self.log.debug("%s: Hit API rate limit (status 429); " \
                "retrying in %g seconds", endpoint, sleep_timer)
            time.sleep(sleep_timer)
            continue
        elif status == 401:
            # Stop. Authentication failed. We shouldn't try doing any more,
            # because we'll run into the same problem later anyway.
            raise PDHTTPError(
                "Received 401 Unauthorized response from the API. The key "
                "(...%s) may be invalid or deactivated."%self.trunc_key,
                response)
        else:
            # All went according to plan.
            return response
|
(self, method, url, **kwargs) -> requests.models.Response
|
64,645 |
pdpyras
|
call
|
Wrapped-entity-aware GET function.
Retrieves a resource via GET and returns the wrapped entity in the
response.
:param resource:
The path/URL to which to send the request, or a dict object
representing an API resource that contains an item with key ``self``
whose value is the URL of the resource.
:param **kw:
Custom keyword arguments to pass to ``requests.Session.get``
:returns:
Dictionary representation of the requested object
:type resource: str or dict
|
def resource_url(method):
    """
    API call decorator that allows passing a resource dict as the path/URL

    Most resources returned by the API will contain a ``self`` attribute that
    is the URL of the resource itself. Using this decorator allows the
    implementer to pass either a URL/path or such a resource dictionary as the
    ``path`` argument, thus eliminating the need to re-construct the resource
    URL or hold it in a temporary variable.
    """
    original_doc = method.__doc__
    def call(self, resource, **kw):
        # Accept a plain URL string, or an API resource dict carrying its
        # own URL under the "self" key:
        if type(resource) is dict and 'self' in resource: # passing an object
            target = resource['self']
        elif type(resource) is str:
            target = resource
        else:
            name = method.__name__
            raise URLError(f"Value passed to {name} is not a str or dict with "
                "key 'self'")
        return method(self, target, **kw)
    call.__doc__ = original_doc
    return call
|
(self, resource, **kw)
|
64,646 |
pdpyras
|
call
|
Wrapped-entity-aware POST function.
Creates a resource and returns the created entity if successful.
:param path:
The path/URL to which to send the POST request, which should be an
index endpoint.
:param **kw:
Custom keyword arguments to pass to ``requests.Session.post``
:returns:
Dictionary representation of the created object
:type path: str
|
def wrapped_entities(method):
    """
    Automatically wrap request entities and unwrap response entities.

    Used for methods :attr:`APISession.rget`, :attr:`APISession.rpost` and
    :attr:`APISession.rput`. It makes them always return an object representing
    the resource entity in the response (whether wrapped in a root-level
    property or not) rather than the full response body. When making a post /
    put request, and passing the ``json`` keyword argument to specify the
    content to be JSON-encoded as the body, that keyword argument can be either
    the to-be-wrapped content or the full body including the entity wrapper, and
    the ``json`` keyword argument will be normalized to include the wrapper.

    Methods using this decorator will raise a :class:`PDHTTPError` with its
    ``response`` property being being the `requests.Response`_ object in the
    case of any error (as of version 4.2 this is subclassed as
    :class:`PDHTTPError`), so that the implementer can access it by catching the
    exception, and thus design their own custom logic around different types of
    error responses.

    :param method: Method being decorated. Must take one positional argument
        after ``self`` that is the URL/path to the resource, followed by keyword
        any number of keyword arguments, and must return an object of class
        `requests.Response`_, and be named after the HTTP method but with "r"
        prepended.
    :returns: A callable object; the reformed method
    """
    # Derive the HTTP verb from the method name, e.g. "rget" -> "get":
    http_method = method.__name__.lstrip('r')
    doc = method.__doc__
    def call(self, url, **kw):
        pass_kw = deepcopy(kw) # Make a copy for modification
        path = canonical_path(self.url, url)
        endpoint = "%s %s"%(http_method.upper(), path)
        req_w, res_w = entity_wrappers(http_method, path)
        # Validate the abbreviated (or full) request payload, and automatically
        # wrap the request entity for the implementer if necessary:
        if req_w is not None and http_method in ('post', 'put') \
                and 'json' in pass_kw and req_w not in pass_kw['json']:
            pass_kw['json'] = {req_w: pass_kw['json']}
        # Make the request:
        r = successful_response(method(self, url, **pass_kw))
        # Unpack the response:
        return unwrap(r, res_w)
    call.__doc__ = doc
    return call
|
(self, url, **kw)
|
64,647 |
pdpyras
|
call
|
Wrapped-entity-aware PUT function.
Update an individual resource, returning the wrapped entity.
:param resource:
The path/URL to which to send the request, or a dict object
representing an API resource that contains an item with key ``self``
whose value is the URL of the resource.
:param **kw:
Custom keyword arguments to pass to ``requests.Session.put``
:returns:
Dictionary representation of the updated object
|
def resource_url(method):
    """
    API call decorator that allows passing a resource dict as the path/URL

    Most resources returned by the API will contain a ``self`` attribute that
    is the URL of the resource itself. Using this decorator allows the
    implementer to pass either a URL/path or such a resource dictionary as the
    ``path`` argument, thus eliminating the need to re-construct the resource
    URL or hold it in a temporary variable.
    """
    original_doc = method.__doc__
    def call(self, resource, **kw):
        # Accept a plain URL string, or an API resource dict carrying its
        # own URL under the "self" key:
        if type(resource) is dict and 'self' in resource: # passing an object
            target = resource['self']
        elif type(resource) is str:
            target = resource
        else:
            name = method.__name__
            raise URLError(f"Value passed to {name} is not a str or dict with "
                "key 'self'")
        return method(self, target, **kw)
    call.__doc__ = original_doc
    return call
|
(self, resource, **kw)
|
64,650 |
pdpyras
|
ChangeEventsAPISession
|
Session class for submitting events to the PagerDuty v2 Change Events API.
Implements methods for submitting change events to PagerDuty's change events
API. See the `Change Events API documentation
<https://developer.pagerduty.com/docs/events-api-v2/send-change-events/>`_
for more details.
Inherits from :class:`PDSession`.
|
class ChangeEventsAPISession(PDSession):
    """
    Session class for submitting events to the PagerDuty v2 Change Events API.

    Implements methods for submitting change events to PagerDuty's change events
    API. See the `Change Events API documentation
    <https://developer.pagerduty.com/docs/events-api-v2/send-change-events/>`_
    for more details.

    Inherits from :class:`PDSession`.
    """
    # This API only accepts event submission, i.e. POST requests:
    permitted_methods = ('POST',)
    url = "https://events.pagerduty.com"
    def __init__(self, api_key: str, debug=False):
        super(ChangeEventsAPISession, self).__init__(api_key, debug)
        # See: https://developer.pagerduty.com/docs/ZG9jOjExMDI5NTgw-events-api-v2-overview#response-codes--retry-logic
        self.retry[500] = 2 # internal server error, 3 requests total
        self.retry[502] = 4 # bad gateway, 5 requests total
        self.retry[503] = 6 # service unavailable, 7 requests total
    @property
    def auth_header(self) -> dict:
        # Empty: this API takes the routing key in the request body (set by
        # ``submit`` as ``routing_key``) rather than in an auth header.
        return {}
    @property
    def event_timestamp(self) -> str:
        # Current UTC time in ISO8601 form with a trailing "Z" designator.
        # NOTE(review): datetime.utcnow() is deprecated as of Python 3.12;
        # datetime.now(timezone.utc) is the modern equivalent — confirm
        # before changing, as it alters the isoformat output (adds +00:00).
        return datetime.utcnow().isoformat()+'Z'
    def prepare_headers(self, method, user_headers={}) -> dict:
        """
        Add user agent and content type headers for Change Events API requests.

        :param user_headers: User-supplied headers that will override defaults
        :returns:
            The final list of headers to use in the request
        """
        headers = deepcopy(self.headers)
        headers.update({
            'Content-Type': 'application/json',
            'User-Agent': self.user_agent,
        })
        if user_headers:
            headers.update(user_headers)
        return headers
    def send_change_event(self, **properties):
        """
        Send a change event to the v2 Change Events API.

        See: https://developer.pagerduty.com/docs/events-api-v2/send-change-events/

        :param **properties:
            Properties to set, i.e. ``payload`` and ``links``
        :returns:
            The response ID
        """
        # Deep-copy so nothing downstream can mutate the caller's objects:
        event = deepcopy(properties)
        response = self.post('/v2/change/enqueue', json=event)
        response_body = try_decoding(successful_response(
            response,
            context="submitting change event",
        ))
        # Returns None if the response carries no "id" property:
        return response_body.get("id", None)
    def submit(self, summary, source=None, custom_details=None, links=None,
            timestamp=None) -> str:
        """
        Submit an incident change

        :param summary:
            Summary / brief description of the change.
        :param source:
            A human-readable name identifying the source of the change.
        :param custom_details:
            The ``payload.custom_details`` property of the payload.
        :param links:
            Set the ``links`` property of the event.
        :param timestamp:
            Specifies an event timestamp. Must be an ISO8601-format date/time.
        :type summary: str
        :type source: str
        :type custom_details: dict
        :type links: list
        :type timestamp: str
        :returns:
            The response ID
        """
        # Reject non-dict custom_details early (None is permitted and
        # simply omitted from the payload below):
        local_var = locals()['custom_details']
        if not (local_var is None or isinstance(local_var, dict)):
            raise ValueError("custom_details must be a dict")
        if timestamp is None:
            timestamp = self.event_timestamp
        event = {
            'routing_key': self.api_key,
            'payload': {
                'summary': summary,
                'timestamp': timestamp,
            }
        }
        # Optional fields are included only when present and typed correctly:
        if isinstance(source, str):
            event['payload']['source'] = source
        if isinstance(custom_details, dict):
            event['payload']['custom_details'] = custom_details
        if links:
            event['links'] = links
        return self.send_change_event(**event)
|
(api_key: str, debug=False)
|
64,654 |
pdpyras
|
__init__
| null |
def __init__(self, api_key: str, debug=False):
    """
    Initialize the session and configure retries for transient server errors.

    :param api_key: The Events API routing key to use for event submission.
    :param debug: Enable verbose command-line output (passed to the parent).
    """
    super(ChangeEventsAPISession, self).__init__(api_key, debug)
    # See: https://developer.pagerduty.com/docs/ZG9jOjExMDI5NTgw-events-api-v2-overview#response-codes--retry-logic
    self.retry[500] = 2 # internal server error, 3 requests total
    self.retry[502] = 4 # bad gateway, 5 requests total
    self.retry[503] = 6 # service unavailable, 7 requests total
|
(self, api_key: str, debug=False)
|
64,656 |
pdpyras
|
after_set_api_key
|
Setter hook for setting or updating the API key.
Child classes should implement this to perform additional steps.
|
def after_set_api_key(self):
    """
    Setter hook for setting or updating the API key.

    Child classes should implement this to perform additional steps.
    """
    # Intentionally a no-op in the base class.
    pass
|
(self)
|
64,671 |
pdpyras
|
postprocess
|
Perform supplemental actions immediately after receiving a response.
This method is called once per request not including retries, and can be
extended in child classes.
|
def postprocess(self, response):
    """
    Perform supplemental actions immediately after receiving a response.

    This method is called once per request not including retries, and can be
    extended in child classes.
    """
    # Intentionally a no-op in the base class; subclasses may override.
    pass
|
(self, response)
|
64,672 |
pdpyras
|
prepare_headers
|
Add user agent and content type headers for Change Events API requests.
:param user_headers: User-supplied headers that will override defaults
:returns:
The final list of headers to use in the request
|
def prepare_headers(self, method, user_headers={}) -> dict:
    """
    Add user agent and content type headers for Change Events API requests.

    :param user_headers: User-supplied headers that will override defaults
    :returns:
        The final list of headers to use in the request
    """
    # Deep-copy the session defaults so callers can't mutate shared state
    # through the returned dict:
    merged = deepcopy(self.headers)
    merged['Content-Type'] = 'application/json'
    merged['User-Agent'] = self.user_agent
    # Any user-supplied headers take precedence over the defaults:
    if user_headers:
        merged.update(user_headers)
    return merged
|
(self, method, user_headers={}) -> dict
|
64,681 |
pdpyras
|
send_change_event
|
Send a change event to the v2 Change Events API.
See: https://developer.pagerduty.com/docs/events-api-v2/send-change-events/
:param **properties:
Properties to set, i.e. ``payload`` and ``links``
:returns:
The response ID
|
def send_change_event(self, **properties):
    """
    Send a change event to the v2 Change Events API.

    See: https://developer.pagerduty.com/docs/events-api-v2/send-change-events/

    :param **properties:
        Properties to set, i.e. ``payload`` and ``links``
    :returns:
        The response ID
    """
    # Deep-copy so nothing downstream can mutate the caller's nested objects:
    event = deepcopy(properties)
    response = self.post('/v2/change/enqueue', json=event)
    response_body = try_decoding(successful_response(
        response,
        context="submitting change event",
    ))
    # Returns None when the decoded response carries no "id" property:
    return response_body.get("id", None)
|
(self, **properties)
|
64,683 |
pdpyras
|
submit
|
Submit an incident change
:param summary:
Summary / brief description of the change.
:param source:
A human-readable name identifying the source of the change.
:param custom_details:
The ``payload.custom_details`` property of the payload.
:param links:
Set the ``links`` property of the event.
:param timestamp:
Specifies an event timestamp. Must be an ISO8601-format date/time.
:type summary: str
:type source: str
:type custom_details: dict
:type links: list
:type timestamp: str
:returns:
The response ID
|
def submit(self, summary, source=None, custom_details=None, links=None,
        timestamp=None) -> str:
    """
    Submit an incident change

    :param summary:
        Summary / brief description of the change.
    :param source:
        A human-readable name identifying the source of the change.
    :param custom_details:
        The ``payload.custom_details`` property of the payload.
    :param links:
        Set the ``links`` property of the event.
    :param timestamp:
        Specifies an event timestamp. Must be an ISO8601-format date/time.
    :type summary: str
    :type source: str
    :type custom_details: dict
    :type links: list
    :type timestamp: str
    :returns:
        The response ID
    :raises ValueError: if ``custom_details`` is neither None nor a dict
    """
    # Validate the parameter directly rather than through the previous
    # obfuscated locals()['custom_details'] indirection (same behavior):
    if not (custom_details is None or isinstance(custom_details, dict)):
        raise ValueError("custom_details must be a dict")
    if timestamp is None:
        timestamp = self.event_timestamp
    event = {
        'routing_key': self.api_key,
        'payload': {
            'summary': summary,
            'timestamp': timestamp,
        }
    }
    # Optional fields are included only when present and typed correctly:
    if isinstance(source, str):
        event['payload']['source'] = source
    if isinstance(custom_details, dict):
        event['payload']['custom_details'] = custom_details
    if links:
        event['links'] = links
    return self.send_change_event(**event)
|
(self, summary, source=None, custom_details=None, links=None, timestamp=None) -> str
|
64,684 |
pdpyras
|
EventsAPISession
|
Session class for submitting events to the PagerDuty v2 Events API.
Implements methods for submitting events to PagerDuty through the Events API
and inherits from :class:`pdpyras.PDSession`. For more details on usage of
this API, refer to the `Events API v2 documentation
<https://developer.pagerduty.com/docs/events-api-v2/overview/>`_
Inherits from :class:`PDSession`.
|
class EventsAPISession(PDSession):
    """
    Session class for submitting events to the PagerDuty v2 Events API.

    Implements methods for submitting events to PagerDuty through the Events
    API and inherits from :class:`pdpyras.PDSession`. For more details on
    usage of this API, refer to the `Events API v2 documentation
    <https://developer.pagerduty.com/docs/events-api-v2/overview/>`_

    Inherits from :class:`PDSession`.
    """

    # The Events API only accepts event submissions, i.e. POST requests.
    permitted_methods = ('POST',)

    url = "https://events.pagerduty.com"

    def __init__(self, api_key: str, debug=False):
        """
        :param api_key:
            The key used to route events; it is injected into the request
            body as ``routing_key`` (see :attr:`post`).
        :param debug:
            Sets :attr:`print_debug` on the base session.
        """
        super(EventsAPISession, self).__init__(api_key, debug)
        # Retry counts per the Events API guidance. See:
        # https://developer.pagerduty.com/docs/ZG9jOjExMDI5NTgw-events-api-v2-overview#response-codes--retry-logic
        self.retry[500] = 2 # internal server error, 3 requests total
        self.retry[502] = 4 # bad gateway, 5 requests total
        self.retry[503] = 6 # service unavailable, 7 requests total

    @property
    def auth_header(self) -> dict:
        """
        Empty: the Events API authenticates via ``routing_key`` in the
        request body (added in :attr:`post`), not via a request header.
        """
        return {}

    def acknowledge(self, dedup_key) -> str:
        """
        Acknowledge an alert via Events API.

        :param dedup_key:
            The deduplication key of the alert to set to the acknowledged
            state.
        :returns:
            The deduplication key
        """
        return self.send_event('acknowledge', dedup_key=dedup_key)

    def prepare_headers(self, method, user_headers=None) -> dict:
        """
        Add user agent and content type headers for Events API requests.

        :param method:
            The HTTP method (unused here; accepted for interface
            compatibility with the base class).
        :param user_headers:
            User-supplied headers that will override defaults. ``None``
            (the default) means no extra headers.
        :returns:
            The final dict of headers to use in the request
        """
        headers = {}
        headers.update(self.headers)
        headers.update({
            'Content-Type': 'application/json',
            'User-Agent': self.user_agent,
        })
        # None-sentinel instead of a shared mutable default argument:
        headers.update(user_headers or {})
        return headers

    def resolve(self, dedup_key) -> str:
        """
        Resolve an alert via Events API.

        :param dedup_key:
            The deduplication key of the alert to resolve.
        :returns:
            The deduplication key
        """
        return self.send_event('resolve', dedup_key=dedup_key)

    def send_event(self, action, dedup_key=None, **properties) -> str:
        """
        Send an event to the v2 Events API.

        See: https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2

        :param action:
            The action to perform through the Events API: trigger, acknowledge
            or resolve.
        :param dedup_key:
            The deduplication key; used for determining event uniqueness and
            associating actions with existing incidents.
        :param **properties:
            Additional properties to set, i.e. if ``action`` is ``trigger``
            this would include ``payload``.
        :type action: str
        :type dedup_key: str
        :returns:
            The deduplication key of the incident
        :raises ValueError:
            If ``action`` is invalid, or ``dedup_key`` is missing or not a
            string for a non-trigger action.
        :raises PDServerError:
            If the API response body lacks a ``dedup_key`` property.
        """
        actions = ('trigger', 'acknowledge', 'resolve')
        if action not in actions:
            raise ValueError("Event action must be one of: "+', '.join(actions))
        event = {'event_action': action}
        event.update(properties)
        if isinstance(dedup_key, str):
            event['dedup_key'] = dedup_key
        elif action != 'trigger':
            # BUG FIX: the two string fragments previously concatenated
            # without a space ("...required forevent_action=...").
            raise ValueError("The dedup_key property is required for "
                "event_action=%s events, and it must be a string."%action)
        response = successful_response(
            self.post('/v2/enqueue', json=event),
            context='submitting an event to the events API',
        )
        response_body = try_decoding(response)
        if type(response_body) is not dict or 'dedup_key' not in response_body:
            err_msg = 'Malformed response body from the events API; it is ' \
                'not a dict that has a key named "dedup_key" after ' \
                'decoding. Body = '+truncate_text(response.text)
            raise PDServerError(err_msg, response)
        return response_body['dedup_key']

    def post(self, *args, **kw) -> Response:
        """
        Override of ``requests.Session.post``

        Adds the ``routing_key`` parameter to the body before sending; the
        session's API key doubles as the routing key.
        """
        if 'json' in kw and hasattr(kw['json'], 'update'):
            kw['json'].update({'routing_key': self.api_key})
        return super(EventsAPISession, self).post(*args, **kw)

    def trigger(self, summary, source, dedup_key=None, severity='critical',
            payload=None, custom_details=None, images=None, links=None) -> str:
        """
        Trigger an incident

        :param summary:
            Summary / brief description of what is wrong.
        :param source:
            A human-readable name identifying the system that is affected.
        :param dedup_key:
            The deduplication key; used for determining event uniqueness and
            associating actions with existing incidents.
        :param severity:
            Alert severity. Sets the ``payload.severity`` property.
        :param payload:
            Set the payload directly. Can be used in conjunction with other
            parameters that also set payload properties; these properties will
            be merged into the default payload, and any properties in this
            parameter will take precedence except with regard to
            ``custom_details``.
        :param custom_details:
            The ``payload.custom_details`` property of the payload. Will
            override the property set in the ``payload`` parameter if given.
        :param images:
            Set the ``images`` property of the event.
        :param links:
            Set the ``links`` property of the event.
        :type custom_details: dict
        :type dedup_key: str
        :type images: list
        :type links: list
        :type payload: dict
        :type severity: str
        :type source: str
        :type summary: str
        :returns:
            The deduplication key of the incident, if any.
        """
        # Validate dict-typed arguments directly rather than via the fragile
        # locals() lookup the original used.
        for name, value in (('payload', payload),
                ('custom_details', custom_details)):
            if not (value is None or type(value) is dict):
                raise ValueError(name+" must be a dict")
        event = {'payload': {'summary': summary, 'source': source,
            'severity': severity}}
        if type(payload) is dict:
            event['payload'].update(payload)
        if type(custom_details) is dict:
            # Merge explicit custom_details over any included inside payload.
            details = event['payload'].get('custom_details', {})
            details.update(custom_details)
            event['payload']['custom_details'] = details
        if images:
            event['images'] = images
        if links:
            event['links'] = links
        return self.send_event('trigger', dedup_key=dedup_key, **event)
|
(api_key: str, debug=False)
|
64,688 |
pdpyras
|
__init__
| null |
def __init__(self, api_key: str, debug=False):
    """
    Initialize an Events API session.

    :param api_key:
        The key used to route events; injected into request bodies as
        ``routing_key`` by the session's ``post`` override.
    :param debug:
        Enables verbose command-line output when True.
    """
    super(EventsAPISession, self).__init__(api_key, debug)
    # Retry counts per the Events API retry guidance.
    # See: https://developer.pagerduty.com/docs/ZG9jOjExMDI5NTgw-events-api-v2-overview#response-codes--retry-logic
    self.retry[500] = 2 # internal server error, 3 requests total
    self.retry[502] = 4 # bad gateway, 5 requests total
    self.retry[503] = 6 # service unavailable, 7 requests total
|
(self, api_key: str, debug=False)
|
64,690 |
pdpyras
|
acknowledge
|
Acknowledge an alert via Events API.
:param dedup_key:
The deduplication key of the alert to set to the acknowledged state.
:returns:
The deduplication key
|
def acknowledge(self, dedup_key) -> str:
    """
    Acknowledge an alert via Events API.

    :param dedup_key:
        The deduplication key of the alert to set to the acknowledged state.
    :returns:
        The deduplication key
    """
    # Thin wrapper around send_event with the "acknowledge" action.
    return self.send_event('acknowledge', dedup_key=dedup_key)
|
(self, dedup_key) -> str
|
64,705 |
pdpyras
|
post
|
Override of ``requests.Session.post``
Adds the ``routing_key`` parameter to the body before sending.
|
def post(self, *args, **kw) -> Response:
    """
    Override of ``requests.Session.post``

    Adds the ``routing_key`` parameter to the body before sending.
    """
    # Inject the routing key into any JSON body that supports update();
    # the session's API key doubles as the Events API routing key.
    if 'json' in kw and hasattr(kw['json'], 'update'):
        kw['json'].update({'routing_key': self.api_key})
    return super(EventsAPISession, self).post(*args, **kw)
|
(self, *args, **kw) -> requests.models.Response
|
64,707 |
pdpyras
|
prepare_headers
|
Add user agent and content type headers for Events API requests.
:param user_headers: User-supplied headers that will override defaults
:returns:
The final list of headers to use in the request
|
def prepare_headers(self, method, user_headers={}) -> dict:
    """
    Build the final header set for an Events API request.

    :param method:
        The HTTP method (unused here; present for interface compatibility).
    :param user_headers:
        Caller-supplied headers; these override the defaults.
    :returns:
        The complete dict of headers to send with the request
    """
    merged = dict(self.headers)
    merged['Content-Type'] = 'application/json'
    merged['User-Agent'] = self.user_agent
    merged.update(user_headers)
    return merged
|
(self, method, user_headers={}) -> dict
|
64,714 |
pdpyras
|
resolve
|
Resolve an alert via Events API.
:param dedup_key:
The deduplication key of the alert to resolve.
|
def resolve(self, dedup_key) -> str:
    """
    Resolve an alert via Events API.

    :param dedup_key:
        The deduplication key of the alert to resolve.
    :returns:
        The deduplication key
    """
    # Thin wrapper around send_event with the "resolve" action.
    return self.send_event('resolve', dedup_key=dedup_key)
|
(self, dedup_key) -> str
|
64,717 |
pdpyras
|
send_event
|
Send an event to the v2 Events API.
See: https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2
:param action:
The action to perform through the Events API: trigger, acknowledge
or resolve.
:param dedup_key:
The deduplication key; used for determining event uniqueness and
associating actions with existing incidents.
:param **properties:
Additional properties to set, i.e. if ``action`` is ``trigger``
this would include ``payload``.
:type action: str
:type dedup_key: str
:returns:
The deduplication key of the incident
|
def send_event(self, action, dedup_key=None, **properties) -> str:
    """
    Send an event to the v2 Events API.

    See: https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2

    :param action:
        The action to perform through the Events API: trigger, acknowledge
        or resolve.
    :param dedup_key:
        The deduplication key; used for determining event uniqueness and
        associating actions with existing incidents.
    :param **properties:
        Additional properties to set, i.e. if ``action`` is ``trigger``
        this would include ``payload``.
    :type action: str
    :type dedup_key: str
    :returns:
        The deduplication key of the incident
    :raises ValueError:
        If ``action`` is invalid, or ``dedup_key`` is missing or not a
        string for a non-trigger action.
    :raises PDServerError:
        If the API response body lacks a ``dedup_key`` property.
    """
    actions = ('trigger', 'acknowledge', 'resolve')
    if action not in actions:
        raise ValueError("Event action must be one of: "+', '.join(actions))
    event = {'event_action': action}
    event.update(properties)
    if isinstance(dedup_key, str):
        event['dedup_key'] = dedup_key
    elif action != 'trigger':
        # BUG FIX: the two string fragments previously concatenated without
        # a space, producing "...required forevent_action=...".
        raise ValueError("The dedup_key property is required for "
            "event_action=%s events, and it must be a string."%action)
    response = successful_response(
        self.post('/v2/enqueue', json=event),
        context='submitting an event to the events API',
    )
    response_body = try_decoding(response)
    if type(response_body) is not dict or 'dedup_key' not in response_body:
        err_msg = 'Malformed response body from the events API; it is ' \
            'not a dict that has a key named "dedup_key" after ' \
            'decoding. Body = '+truncate_text(response.text)
        raise PDServerError(err_msg, response)
    return response_body['dedup_key']
|
(self, action, dedup_key=None, **properties) -> str
|
64,719 |
pdpyras
|
trigger
|
Trigger an incident
:param summary:
Summary / brief description of what is wrong.
:param source:
A human-readable name identifying the system that is affected.
:param dedup_key:
The deduplication key; used for determining event uniqueness and
associating actions with existing incidents.
:param severity:
Alert severity. Sets the ``payload.severity`` property.
:param payload:
Set the payload directly. Can be used in conjunction with other
parameters that also set payload properties; these properties will
be merged into the default payload, and any properties in this
parameter will take precedence except with regard to
``custom_details``.
:param custom_details:
The ``payload.custom_details`` property of the payload. Will
override the property set in the ``payload`` parameter if given.
:param images:
Set the ``images`` property of the event.
:param links:
Set the ``links`` property of the event.
:type action: str
:type custom_details: dict
:type dedup_key: str
:type images: list
:type links: list
:type payload: dict
:type severity: str
:type source: str
:type summary: str
:returns:
The deduplication key of the incident, if any.
|
def trigger(self, summary, source, dedup_key=None, severity='critical',
        payload=None, custom_details=None, images=None, links=None) -> str:
    """
    Trigger an incident

    :param summary:
        Summary / brief description of what is wrong.
    :param source:
        A human-readable name identifying the system that is affected.
    :param dedup_key:
        The deduplication key; used for determining event uniqueness and
        associating actions with existing incidents.
    :param severity:
        Alert severity. Sets the ``payload.severity`` property.
    :param payload:
        Set the payload directly. Can be used in conjunction with other
        parameters that also set payload properties; these properties will
        be merged into the default payload, and any properties in this
        parameter will take precedence except with regard to
        ``custom_details``.
    :param custom_details:
        The ``payload.custom_details`` property of the payload. Will
        override the property set in the ``payload`` parameter if given.
    :param images:
        Set the ``images`` property of the event.
    :param links:
        Set the ``links`` property of the event.
    :type custom_details: dict
    :type dedup_key: str
    :type images: list
    :type links: list
    :type payload: dict
    :type severity: str
    :type source: str
    :type summary: str
    :returns:
        The deduplication key of the incident, if any.
    """
    # Validate dict-typed arguments directly rather than via the fragile
    # locals() lookup the original used.
    for name, value in (('payload', payload),
            ('custom_details', custom_details)):
        if not (value is None or type(value) is dict):
            raise ValueError(name+" must be a dict")
    event = {'payload': {'summary': summary, 'source': source,
        'severity': severity}}
    if type(payload) is dict:
        event['payload'].update(payload)
    if type(custom_details) is dict:
        # Merge explicit custom_details over any included inside payload.
        details = event['payload'].get('custom_details', {})
        details.update(custom_details)
        event['payload']['custom_details'] = details
    if images:
        event['images'] = images
    if links:
        event['links'] = links
    return self.send_event('trigger', dedup_key=dedup_key, **event)
|
(self, summary, source, dedup_key=None, severity='critical', payload=None, custom_details=None, images=None, links=None) -> str
|
64,720 |
urllib3.exceptions
|
HTTPError
|
Base exception used by this module.
|
class HTTPError(Exception):
    """Base exception used by this module."""
    # Root of the urllib3 exception hierarchy; other urllib3 errors
    # (e.g. PoolError below) derive from this class.
| null |
64,721 |
pdpyras
|
PDClientError
|
General API errors base class.
Note, the name of this class does not imply it solely includes errors
experienced by the client or HTTP status 4xx responses, but descendants can
include issues with the API backend.
|
class PDClientError(Exception):
    """
    General API errors base class.

    Note, the name of this class does not imply it solely includes errors
    experienced by the client or HTTP status 4xx responses, but descendants can
    include issues with the API backend.
    """

    response = None
    """
    The HTTP response object, if a response was successfully received.

    In the case of network errors, this property will be None.
    """

    def __init__(self, message, response=None):
        super(PDClientError, self).__init__(message)
        self.msg = message
        self.response = response
|
(message, response=None)
|
64,722 |
pdpyras
|
__init__
| null |
def __init__(self, message, response=None):
    """
    :param message:
        The error message; also stored as :attr:`msg`.
    :param response:
        The HTTP response object if one was received; None otherwise
        (e.g. for network errors).
    """
    self.msg = message
    self.response = response
    super(PDClientError, self).__init__(message)
|
(self, message, response=None)
|
64,723 |
pdpyras
|
PDHTTPError
|
Error class representing errors strictly associated with HTTP responses.
This class was created to make it easier to more cleanly handle errors by
way of a class that is guaranteed to have its ``response`` be a valid
`requests.Response`_ object.
Whereas, the more generic :class:`PDClientError` could also be used
to denote such things as non-transient network errors wherein no response
was received from the API.
For instance, instead of this:
::
try:
user = session.rget('/users/PABC123')
except pdpyras.PDClientError as e:
if e.response is not None:
print("HTTP error: "+str(e.response.status_code))
else:
raise e
one could write this:
::
try:
user = session.rget('/users/PABC123')
except pdpyras.PDHTTPError as e:
print("HTTP error: "+str(e.response.status_code))
|
class PDHTTPError(PDClientError):
    """
    Error class representing errors strictly associated with HTTP responses.

    This class was created to make it easier to more cleanly handle errors by
    way of a class that is guaranteed to have its ``response`` be a valid
    `requests.Response`_ object.

    Whereas, the more generic :class:`PDClientError` could also be used
    to denote such things as non-transient network errors wherein no response
    was received from the API.

    For instance, instead of this:

    ::

        try:
            user = session.rget('/users/PABC123')
        except pdpyras.PDClientError as e:
            if e.response is not None:
                print("HTTP error: "+str(e.response.status_code))
            else:
                raise e

    one could write this:

    ::

        try:
            user = session.rget('/users/PABC123')
        except pdpyras.PDHTTPError as e:
            print("HTTP error: "+str(e.response.status_code))
    """

    def __init__(self, message, response: Response):
        # Unlike the parent class, ``response`` is required here, which is
        # what guarantees it is always set on instances of this class.
        super(PDHTTPError, self).__init__(message, response=response)
|
(message, response: requests.models.Response)
|
64,724 |
pdpyras
|
__init__
| null |
def __init__(self, message, response: Response):
    # ``response`` is required (no default), unlike in PDClientError,
    # guaranteeing that it is always set on the exception instance.
    super(PDHTTPError, self).__init__(message, response=response)
|
(self, message, response: requests.models.Response)
|
64,725 |
pdpyras
|
PDServerError
|
Error class representing failed expectations made of the server
This is raised in cases where the response schema differs from the expected
schema because of an API bug, or because it's an early access endpoint and
changes before GA, or in cases of HTTP status 5xx where a successful
response is required.
|
class PDServerError(PDHTTPError):
    """
    Error class representing failed expectations made of the server

    This is raised in cases where the response schema differs from the expected
    schema because of an API bug, or because it's an early access endpoint and
    changes before GA, or in cases of HTTP status 5xx where a successful
    response is required.
    """
    # No additional behavior; the subclass exists so callers can distinguish
    # server-side contract failures from other HTTP errors.
    pass
|
(message, response: requests.models.Response)
|
64,727 |
pdpyras
|
PDSession
|
Base class for making HTTP requests to PagerDuty APIs
This is an opinionated wrapper of `requests.Session`_, with a few additional
features:
- The client will reattempt the request with auto-increasing cooldown/retry
intervals, with attempt limits configurable through the :attr:`retry`
attribute.
- When making requests, headers specified ad-hoc in calls to HTTP verb
functions will not replace, but will be merged into, default headers.
- The request URL, if it doesn't already start with the REST API base URL,
will be prepended with the default REST API base URL.
- It will only perform requests with methods as given in the
:attr:`permitted_methods` list, and will raise :class:`PDClientError` for
any other HTTP methods.
:param api_key:
REST API access token to use for HTTP requests
:param debug:
Sets :attr:`print_debug`. Set to True to enable verbose command line
output.
:type token: str
:type debug: bool
|
class PDSession(Session):
    """
    Base class for making HTTP requests to PagerDuty APIs

    This is an opinionated wrapper of `requests.Session`_, with a few additional
    features:

    - The client will reattempt the request with auto-increasing cooldown/retry
      intervals, with attempt limits configurable through the :attr:`retry`
      attribute.
    - When making requests, headers specified ad-hoc in calls to HTTP verb
      functions will not replace, but will be merged into, default headers.
    - The request URL, if it doesn't already start with the REST API base URL,
      will be prepended with the default REST API base URL.
    - It will only perform requests with methods as given in the
      :attr:`permitted_methods` list, and will raise :class:`PDClientError` for
      any other HTTP methods.

    :param api_key:
        REST API access token to use for HTTP requests
    :param debug:
        Sets :attr:`print_debug`. Set to True to enable verbose command line
        output.
    :type token: str
    :type debug: bool
    """

    log = None
    """
    A ``logging.Logger`` object for logging messages. By default it is
    configured without any handlers and so no messages will be emitted. See
    `logger objects
    <https://docs.python.org/3/library/logging.html#logger-objects>`_
    """

    max_http_attempts = 10
    """
    The number of times that the client will retry after error statuses, for any
    that are defined greater than zero in :attr:`retry`.
    """

    max_network_attempts = 3
    """
    The number of times that connecting to the API will be attempted before
    treating the failure as non-transient; a :class:`PDClientError` exception
    will be raised if this happens.
    """

    parent = None
    """The ``super`` object (`requests.Session`_)"""

    permitted_methods = ()
    """
    A tuple of the methods permitted by the API which the client implements.

    For instance:

    * The REST API accepts GET, POST, PUT and DELETE.
    * The Events API and Change Events APIs only accept POST.
    """

    retry = {}
    """
    A dict defining the retry behavior for each HTTP response status code.

    Note, any value set for this class variable will not be reflected in
    instances and so it must be set separately for each instance.

    Each key in this dictionary is an int representing a HTTP response code. The
    behavior is specified by the int value at each key as follows:

    * ``-1`` to retry infinitely
    * ``0`` to return the `requests.Response`_ object and exit (which is the
      default behavior)
    * ``n``, where ``n > 0``, to retry ``n`` times (or up
      to :attr:`max_http_attempts` total for all statuses, whichever is
      encountered first), and raise a :class:`PDClientError` after that many
      attempts. For each successive attempt, the wait time will increase by a
      factor of :attr:`sleep_timer_base`.

    The default behavior is to retry infinitely on a 429, and return the
    response in any other case (assuming a HTTP response was received from the
    server).
    """

    sleep_timer = 1.5
    """
    Default initial cooldown time factor for rate limiting and network errors.

    Each time that the request makes a followup request, there will be a delay
    in seconds equal to this number times :attr:`sleep_timer_base` to the power
    of how many attempts have already been made so far, unless
    :attr:`stagger_cooldown` is nonzero.
    """

    sleep_timer_base = 2
    """
    After each retry, the time to sleep before reattempting the API connection
    and request will increase by a factor of this amount.
    """

    timeout = TIMEOUT
    """
    This is the value sent to `Requests`_ as the ``timeout`` parameter that
    determines the TCP read timeout.
    """

    url = ""

    def __init__(self, api_key: str, debug=False):
        """
        :param api_key:
            The API credential; must be a non-empty string (enforced by the
            :attr:`api_key` setter).
        :param debug:
            Sets :attr:`print_debug`; True enables verbose stderr logging.
        """
        # Keep a handle to the superclass (requests.Session) for later calls.
        self.parent = super(PDSession, self)
        self.parent.__init__()
        self.api_key = api_key
        self.log = logging.getLogger(__name__)
        self.print_debug = debug
        # Per-instance retry rules; the class-level ``retry`` dict is not
        # shared with instances (see the class attribute's docs).
        self.retry = {}

    def after_set_api_key(self):
        """
        Setter hook for setting or updating the API key.

        Child classes should implement this to perform additional steps.
        """
        pass

    @property
    def api_key(self) -> str:
        """
        API Key property getter.

        Returns the _api_key attribute's value.
        """
        return self._api_key

    @api_key.setter
    def api_key(self, api_key):
        # Validate, store, refresh the session's auth header, then run the
        # subclass hook.
        if not (isinstance(api_key, str) and api_key):
            raise ValueError("API credential must be a non-empty string.")
        self._api_key = api_key
        self.headers.update(self.auth_header)
        self.after_set_api_key()

    @property
    def auth_header(self) -> dict:
        """
        Generates the header with the API credential used for authentication.
        """
        raise NotImplementedError

    def cooldown_factor(self) -> float:
        """
        Return the multiplier applied to the sleep timer before each retry.

        Equal to :attr:`sleep_timer_base` scaled by a random factor when
        :attr:`stagger_cooldown` is nonzero.
        """
        return self.sleep_timer_base*(1+self.stagger_cooldown*random())

    def normalize_params(self, params) -> dict:
        """
        Modify the user-supplied parameters to ease implementation

        Current behavior:

        * If a parameter's value is of type list, and the parameter name does
          not already end in "[]", then the square brackets are appended to keep
          in line with the requirement that all set filters' parameter names end
          in "[]".

        :returns:
            The query parameters after modification
        """
        updated_params = {}
        for param, value in params.items():
            if type(value) is list and not param.endswith('[]'):
                updated_params[param+'[]'] = value
            else:
                updated_params[param] = value
        return updated_params

    def normalize_url(self, url) -> str:
        """Compose the URL whether it is a path or an already-complete URL"""
        return normalize_url(self.url, url)

    def postprocess(self, response):
        """
        Perform supplemental actions immediately after receiving a response.

        This method is called once per request not including retries, and can be
        extended in child classes.
        """
        pass

    def prepare_headers(self, method, user_headers={}) -> dict:
        """
        Append special additional per-request headers.

        :param method:
            The HTTP method, in upper case.
        :param user_headers:
            Headers that can be specified to override default values.
        :returns:
            The final list of headers to use in the request
        """
        # deepcopy so per-request overrides never mutate the session headers.
        headers = deepcopy(self.headers)
        if user_headers:
            headers.update(user_headers)
        return headers

    @property
    def print_debug(self) -> bool:
        """
        Printing debug flag

        If set to True, the logging level of :attr:`log` is set to
        ``logging.DEBUG`` and all log messages are emitted to ``sys.stderr``.
        If set to False, the logging level of :attr:`log` is set to
        ``logging.NOTSET`` and the debugging log handler that prints messages to
        ``sys.stderr`` is removed. This value thus can be toggled to enable and
        disable verbose command line output.

        It is ``False`` by default and it is recommended to keep it that way in
        production settings.
        """
        return self._debug

    @print_debug.setter
    def print_debug(self, debug: bool):
        self._debug = debug
        # Attach or detach the stderr StreamHandler exactly once; the
        # _debugHandler attribute doubles as the "currently attached" flag.
        if debug and not hasattr(self, '_debugHandler'):
            self.log.setLevel(logging.DEBUG)
            self._debugHandler = logging.StreamHandler()
            self.log.addHandler(self._debugHandler)
        elif not debug and hasattr(self, '_debugHandler'):
            self.log.setLevel(logging.NOTSET)
            self.log.removeHandler(self._debugHandler)
            delattr(self, '_debugHandler')
        # else: no-op; only happens if debug is set to the same value twice

    def request(self, method, url, **kwargs) -> Response:
        """
        Make a generic PagerDuty API request.

        :param method:
            The request method to use. Case-insensitive. May be one of get, put,
            post or delete.
        :param url:
            The path/URL to request. If it does not start with the base URL, the
            base URL will be prepended.
        :param **kwargs:
            Custom keyword arguments to pass to ``requests.Session.request``.
        :type method: str
        :type url: str
        :returns:
            The `requests.Response`_ object corresponding to the HTTP response
        """
        sleep_timer = self.sleep_timer
        network_attempts = 0
        # Per-status-code counts of HTTP retries made so far.
        http_attempts = {}
        method = method.strip().upper()
        if method not in self.permitted_methods:
            m_str = ', '.join(self.permitted_methods)
            raise PDClientError(f"Method {method} not supported by this API. " \
                f"Permitted methods: {m_str}")
        req_kw = deepcopy(kwargs)
        full_url = self.normalize_url(url)
        endpoint = "%s %s"%(method.upper(), full_url)
        # Add in any headers specified in keyword arguments:
        headers = kwargs.get('headers', {})
        req_kw.update({
            'headers': self.prepare_headers(method, user_headers=headers),
            'stream': False,
            'timeout': self.timeout
        })
        # Special changes to user-supplied parameters, for convenience
        if 'params' in kwargs and kwargs['params']:
            req_kw['params'] = self.normalize_params(kwargs['params'])
        # Make the request (and repeat w/cooldown if the rate limit is reached):
        while True:
            try:
                response = self.parent.request(method, full_url, **req_kw)
                self.postprocess(response)
            except (HTTPError, PoolError, RequestException) as e:
                # Transient network / connection failure: back off and retry
                # up to max_network_attempts times.
                network_attempts += 1
                if network_attempts > self.max_network_attempts:
                    error_msg = f"{endpoint}: Non-transient network " \
                        'error; exceeded maximum number of attempts ' \
                        f"({self.max_network_attempts}) to connect to the API."
                    raise PDClientError(error_msg) from e
                sleep_timer *= self.cooldown_factor()
                self.log.warning(
                    "%s: HTTP or network error: %s. retrying in %g seconds.",
                    endpoint, e.__class__.__name__, sleep_timer)
                time.sleep(sleep_timer)
                continue
            status = response.status_code
            retry_logic = self.retry.get(status, 0)
            if not response.ok and retry_logic != 0:
                # Take special action as defined by the retry logic
                if retry_logic != -1:
                    # Retry a specific number of times (-1 implies infinite)
                    if http_attempts.get(status, 0)>=retry_logic or \
                            sum(http_attempts.values())>self.max_http_attempts:
                        lower_limit = retry_logic
                        if lower_limit > self.max_http_attempts:
                            lower_limit = self.max_http_attempts
                        # NOTE(review): the f-prefix on the first fragment is
                        # unnecessary (args are %-style) but harmless.
                        self.log.error(
                            f"%s: Non-transient HTTP error: exceeded " \
                            'maximum number of attempts (%d) to make a ' \
                            'successful request. Currently encountering ' \
                            'status %d.', endpoint, lower_limit, status)
                        return response
                    http_attempts[status] = 1 + http_attempts.get(status, 0)
                sleep_timer *= self.cooldown_factor()
                self.log.warning("%s: HTTP error (%d); retrying in %g seconds.",
                    endpoint, status, sleep_timer)
                time.sleep(sleep_timer)
                continue
            elif status == 429:
                # Rate limited: retry indefinitely with increasing cooldown.
                sleep_timer *= self.cooldown_factor()
                self.log.debug("%s: Hit API rate limit (status 429); " \
                    "retrying in %g seconds", endpoint, sleep_timer)
                time.sleep(sleep_timer)
                continue
            elif status == 401:
                # Stop. Authentication failed. We shouldn't try doing any more,
                # because we'll run into the same problem later anyway.
                raise PDHTTPError(
                    "Received 401 Unauthorized response from the API. The key "
                    "(...%s) may be invalid or deactivated."%self.trunc_key,
                    response)
            else:
                # All went according to plan.
                return response

    @property
    def stagger_cooldown(self) -> float:
        """
        Randomizing factor for wait times between retries during rate limiting.

        If set to number greater than 0, the sleep time for rate limiting will
        (for each successive sleep) be adjusted by a factor of one plus a
        uniformly-distributed random number between 0 and 1 times this number,
        on top of the base sleep timer :attr:`sleep_timer_base`.

        For example:

        * If this is 1, and :attr:`sleep_timer_base` is 2 (default), then after
          each status 429 response, the sleep time will change overall by a
          random factor between 2 and 4, whereas if it is zero, it will change
          by a factor of 2.
        * If :attr:`sleep_timer_base` is 1, then the cooldown time will be
          adjusted by a random factor between one and one plus this number.

        If the number is set to zero, then this behavior is effectively
        disabled, and the cooldown factor (by which the sleep time is adjusted)
        will just be :attr:`sleep_timer_base`

        Setting this to a nonzero number helps avoid the "thundering herd"
        effect that can potentially be caused by many API clients making
        simultaneous concurrent API requests and consequently waiting for the
        same amount of time before retrying. It is currently zero by default
        for consistent behavior with previous versions.
        """
        if hasattr(self, '_stagger_cooldown'):
            return self._stagger_cooldown
        else:
            return 0

    @stagger_cooldown.setter
    def stagger_cooldown(self, val):
        # NOTE(review): zero is accepted (val<0 is the rejection condition)
        # even though the message says "positive".
        if type(val) not in [float, int] or val<0:
            raise ValueError("Cooldown randomization factor stagger_cooldown "
                "must be a positive real number")
        self._stagger_cooldown = val

    @property
    def trunc_key(self) -> str:
        """Truncated key for secure display/identification purposes."""
        return last_4(self.api_key)

    @property
    def user_agent(self) -> str:
        """The User-Agent string sent with every request."""
        return 'pdpyras/%s python-requests/%s Python/%d.%d'%(
            __version__,
            REQUESTS_VERSION,
            sys.version_info.major,
            sys.version_info.minor
        )
|
(api_key: str, debug=False)
|
64,731 |
pdpyras
|
__init__
| null |
def __init__(self, api_key: str, debug=False):
    """
    Basic session constructor.

    :param api_key:
        The API credential; must be a non-empty string (validated by the
        ``api_key`` property setter).
    :param debug:
        Sets ``print_debug``; True enables verbose stderr logging.
    """
    # Keep a handle to the superclass (requests.Session) for later calls.
    self.parent = super(PDSession, self)
    self.parent.__init__()
    self.api_key = api_key
    self.log = logging.getLogger(__name__)
    self.print_debug = debug
    # Per-instance retry rules; the class-level dict is not shared.
    self.retry = {}
|
(self, api_key: str, debug=False)
|
64,749 |
pdpyras
|
prepare_headers
|
Append special additional per-request headers.
:param method:
The HTTP method, in upper case.
:param user_headers:
Headers that can be specified to override default values.
:returns:
The final list of headers to use in the request
|
def prepare_headers(self, method, user_headers={}) -> dict:
    """
    Compose the headers for a single request.

    :param method:
        The HTTP method, in upper case (unused here; available for
        subclasses that override this hook).
    :param user_headers:
        Headers that can be specified to override default values.
    :returns:
        The final dict of headers to use in the request
    """
    # Copy the session defaults so per-request overrides never leak back
    # into the session, then layer the caller's headers on top.
    merged = deepcopy(self.headers)
    merged.update(user_headers or {})
    return merged
|
(self, method, user_headers={}) -> dict
|
64,759 |
urllib3.exceptions
|
PoolError
|
Base exception for errors caused within a pool.
|
class PoolError(HTTPError):
    """Base exception for errors caused within a pool."""

    def __init__(self, pool: ConnectionPool, message: str) -> None:
        # Keep a reference to the originating pool so callers can inspect it.
        self.pool = pool
        super().__init__(f"{pool}: {message}")

    def __reduce__(self) -> _TYPE_REDUCE_RESULT:
        # For pickling purposes.
        # Reconstructs with (None, None) placeholder args, dropping the pool
        # reference on unpickle — presumably because the pool itself is not
        # picklable; TODO confirm.
        return self.__class__, (None, None)
|
(pool: 'ConnectionPool', message: 'str') -> 'None'
|
64,760 |
urllib3.exceptions
|
__init__
| null |
def __init__(self, pool: ConnectionPool, message: str) -> None:
    """Store the originating pool and prefix the error message with it."""
    self.pool = pool
    super().__init__(f"{pool}: {message}")
|
(self, pool: 'ConnectionPool', message: 'str') -> 'None'
|
64,761 |
urllib3.exceptions
|
__reduce__
| null |
def __reduce__(self) -> _TYPE_REDUCE_RESULT:
    # For pickling purposes.
    # Reconstructs with (None, None) placeholder args, so the pool reference
    # is deliberately dropped on unpickle.
    return self.__class__, (None, None)
|
(self) -> Tuple[Callable[..., object], Tuple[object, ...]]
|
64,764 |
requests.models
|
Response
|
The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
|
class Response:
    """The :class:`Response <Response>` object, which contains a
    server's response to an HTTP request.
    """
    # Attribute names captured by __getstate__/__setstate__ (pickle support);
    # anything not listed here (notably ``raw``) is dropped on serialization.
    __attrs__ = [
        "_content",
        "status_code",
        "headers",
        "url",
        "history",
        "encoding",
        "reason",
        "cookies",
        "elapsed",
        "request",
    ]
    def __init__(self):
        # False is a sentinel meaning "body not read yet"; it becomes bytes
        # (or None) the first time the ``content`` property is accessed.
        self._content = False
        self._content_consumed = False
        # Next PreparedRequest in a redirect chain, if any (see ``next``).
        self._next = None
        #: Integer Code of responded HTTP Status, e.g. 404 or 200.
        self.status_code = None
        #: Case-insensitive Dictionary of Response Headers.
        #: For example, ``headers['content-encoding']`` will return the
        #: value of a ``'Content-Encoding'`` response header.
        self.headers = CaseInsensitiveDict()
        #: File-like object representation of response (for advanced usage).
        #: Use of ``raw`` requires that ``stream=True`` be set on the request.
        #: This requirement does not apply for use internally to Requests.
        self.raw = None
        #: Final URL location of Response.
        self.url = None
        #: Encoding to decode with when accessing r.text.
        self.encoding = None
        #: A list of :class:`Response <Response>` objects from
        #: the history of the Request. Any redirect responses will end
        #: up here. The list is sorted from the oldest to the most recent request.
        self.history = []
        #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
        self.reason = None
        #: A CookieJar of Cookies the server sent back.
        self.cookies = cookiejar_from_dict({})
        #: The amount of time elapsed between sending the request
        #: and the arrival of the response (as a timedelta).
        #: This property specifically measures the time taken between sending
        #: the first byte of the request and finishing parsing the headers. It
        #: is therefore unaffected by consuming the response content or the
        #: value of the ``stream`` keyword argument.
        self.elapsed = datetime.timedelta(0)
        #: The :class:`PreparedRequest <PreparedRequest>` object to which this
        #: is a response.
        self.request = None
    # Context-manager protocol: releases the connection on exit via close().
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.close()
    def __getstate__(self):
        # Consume everything; accessing the content attribute makes
        # sure the content has been fully read.
        if not self._content_consumed:
            self.content
        return {attr: getattr(self, attr, None) for attr in self.__attrs__}
    def __setstate__(self, state):
        for name, value in state.items():
            setattr(self, name, value)
        # pickled objects do not have .raw
        setattr(self, "_content_consumed", True)
        setattr(self, "raw", None)
    def __repr__(self):
        return f"<Response [{self.status_code}]>"
    def __bool__(self):
        """Returns True if :attr:`status_code` is less than 400.
        This attribute checks if the status code of the response is between
        400 and 600 to see if there was a client error or a server error. If
        the status code, is between 200 and 400, this will return True. This
        is **not** a check to see if the response code is ``200 OK``.
        """
        return self.ok
    # Python 2 spelling of __bool__, kept for backward compatibility.
    def __nonzero__(self):
        """Returns True if :attr:`status_code` is less than 400.
        This attribute checks if the status code of the response is between
        400 and 600 to see if there was a client error or a server error. If
        the status code, is between 200 and 400, this will return True. This
        is **not** a check to see if the response code is ``200 OK``.
        """
        return self.ok
    def __iter__(self):
        """Allows you to use a response as an iterator."""
        return self.iter_content(128)
    @property
    def ok(self):
        """Returns True if :attr:`status_code` is less than 400, False if not.
        This attribute checks if the status code of the response is between
        400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
        is **not** a check to see if the response code is ``200 OK``.
        """
        try:
            self.raise_for_status()
        except HTTPError:
            return False
        return True
    @property
    def is_redirect(self):
        """True if this Response is a well-formed HTTP redirect that could have
        been processed automatically (by :meth:`Session.resolve_redirects`).
        """
        return "location" in self.headers and self.status_code in REDIRECT_STATI
    @property
    def is_permanent_redirect(self):
        """True if this Response one of the permanent versions of redirect."""
        return "location" in self.headers and self.status_code in (
            codes.moved_permanently,
            codes.permanent_redirect,
        )
    @property
    def next(self):
        """Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
        return self._next
    @property
    def apparent_encoding(self):
        """The apparent encoding, provided by the charset_normalizer or chardet libraries."""
        return chardet.detect(self.content)["encoding"]
    def iter_content(self, chunk_size=1, decode_unicode=False):
        """Iterates over the response data. When stream=True is set on the
        request, this avoids reading the content at once into memory for
        large responses. The chunk size is the number of bytes it should
        read into memory. This is not necessarily the length of each item
        returned as decoding can take place.
        chunk_size must be of type int or None. A value of None will
        function differently depending on the value of `stream`.
        stream=True will read data as it arrives in whatever size the
        chunks are received. If stream=False, data is returned as
        a single chunk.
        If decode_unicode is True, content will be decoded using the best
        available encoding based on the response.
        """
        def generate():
            # Special case for urllib3.
            if hasattr(self.raw, "stream"):
                try:
                    yield from self.raw.stream(chunk_size, decode_content=True)
                except ProtocolError as e:
                    raise ChunkedEncodingError(e)
                except DecodeError as e:
                    raise ContentDecodingError(e)
                except ReadTimeoutError as e:
                    raise ConnectionError(e)
                except SSLError as e:
                    raise RequestsSSLError(e)
            else:
                # Standard file-like object.
                while True:
                    chunk = self.raw.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk
            self._content_consumed = True
        # _content is still the boolean sentinel here, meaning the stream was
        # consumed without caching the body, so it cannot be iterated again.
        if self._content_consumed and isinstance(self._content, bool):
            raise StreamConsumedError()
        elif chunk_size is not None and not isinstance(chunk_size, int):
            raise TypeError(
                f"chunk_size must be an int, it is instead a {type(chunk_size)}."
            )
        # simulate reading small chunks of the content
        reused_chunks = iter_slices(self._content, chunk_size)
        stream_chunks = generate()
        chunks = reused_chunks if self._content_consumed else stream_chunks
        if decode_unicode:
            chunks = stream_decode_response_unicode(chunks, self)
        return chunks
    def iter_lines(
        self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None
    ):
        """Iterates over the response data, one line at a time. When
        stream=True is set on the request, this avoids reading the
        content at once into memory for large responses.
        .. note:: This method is not reentrant safe.
        """
        pending = None
        for chunk in self.iter_content(
            chunk_size=chunk_size, decode_unicode=decode_unicode
        ):
            if pending is not None:
                chunk = pending + chunk
            if delimiter:
                lines = chunk.split(delimiter)
            else:
                lines = chunk.splitlines()
            # The last line may be incomplete (the chunk did not end with a
            # delimiter); hold it back and prepend it to the next chunk.
            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                pending = lines.pop()
            else:
                pending = None
            yield from lines
        if pending is not None:
            yield pending
    @property
    def content(self):
        """Content of the response, in bytes."""
        if self._content is False:
            # Read the contents.
            if self._content_consumed:
                raise RuntimeError("The content for this response was already consumed")
            # status_code 0 or a missing raw object means there is no usable
            # response body to read.
            if self.status_code == 0 or self.raw is None:
                self._content = None
            else:
                self._content = b"".join(self.iter_content(CONTENT_CHUNK_SIZE)) or b""
        self._content_consumed = True
        # don't need to release the connection; that's been handled by urllib3
        # since we exhausted the data.
        return self._content
    @property
    def text(self):
        """Content of the response, in unicode.
        If Response.encoding is None, encoding will be guessed using
        ``charset_normalizer`` or ``chardet``.
        The encoding of the response content is determined based solely on HTTP
        headers, following RFC 2616 to the letter. If you can take advantage of
        non-HTTP knowledge to make a better guess at the encoding, you should
        set ``r.encoding`` appropriately before accessing this property.
        """
        # Try charset from content-type
        content = None
        encoding = self.encoding
        if not self.content:
            return ""
        # Fallback to auto-detected encoding.
        if self.encoding is None:
            encoding = self.apparent_encoding
        # Decode unicode from given encoding.
        try:
            content = str(self.content, encoding, errors="replace")
        except (LookupError, TypeError):
            # A LookupError is raised if the encoding was not found which could
            # indicate a misspelling or similar mistake.
            #
            # A TypeError can be raised if encoding is None
            #
            # So we try blindly encoding.
            content = str(self.content, errors="replace")
        return content
    def json(self, **kwargs):
        r"""Returns the json-encoded content of a response, if any.
        :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
        :raises requests.exceptions.JSONDecodeError: If the response body does not
        contain valid json.
        """
        # len > 3 guards the byte-level UTF-8/16/32 detection heuristic, which
        # needs at least 4 bytes to inspect.
        if not self.encoding and self.content and len(self.content) > 3:
            # No encoding set. JSON RFC 4627 section 3 states we should expect
            # UTF-8, -16 or -32. Detect which one to use; If the detection or
            # decoding fails, fall back to `self.text` (using charset_normalizer to make
            # a best guess).
            encoding = guess_json_utf(self.content)
            if encoding is not None:
                try:
                    return complexjson.loads(self.content.decode(encoding), **kwargs)
                except UnicodeDecodeError:
                    # Wrong UTF codec detected; usually because it's not UTF-8
                    # but some other 8-bit codec. This is an RFC violation,
                    # and the server didn't bother to tell us what codec *was*
                    # used.
                    pass
                except JSONDecodeError as e:
                    raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
        try:
            return complexjson.loads(self.text, **kwargs)
        except JSONDecodeError as e:
            # Catch JSON-related errors and raise as requests.JSONDecodeError
            # This aliases json.JSONDecodeError and simplejson.JSONDecodeError
            raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
    @property
    def links(self):
        """Returns the parsed header links of the response, if any."""
        header = self.headers.get("link")
        resolved_links = {}
        if header:
            links = parse_header_links(header)
            for link in links:
                # Prefer the "rel" attribute as the key, falling back to "url".
                key = link.get("rel") or link.get("url")
                resolved_links[key] = link
        return resolved_links
    def raise_for_status(self):
        """Raises :class:`HTTPError`, if one occurred."""
        http_error_msg = ""
        if isinstance(self.reason, bytes):
            # We attempt to decode utf-8 first because some servers
            # choose to localize their reason strings. If the string
            # isn't utf-8, we fall back to iso-8859-1 for all other
            # encodings. (See PR #3538)
            try:
                reason = self.reason.decode("utf-8")
            except UnicodeDecodeError:
                reason = self.reason.decode("iso-8859-1")
        else:
            reason = self.reason
        if 400 <= self.status_code < 500:
            http_error_msg = (
                f"{self.status_code} Client Error: {reason} for url: {self.url}"
            )
        elif 500 <= self.status_code < 600:
            http_error_msg = (
                f"{self.status_code} Server Error: {reason} for url: {self.url}"
            )
        if http_error_msg:
            raise HTTPError(http_error_msg, response=self)
    def close(self):
        """Releases the connection back to the pool. Once this method has been
        called the underlying ``raw`` object must not be accessed again.
        *Note: Should not normally need to be called explicitly.*
        """
        if not self._content_consumed:
            self.raw.close()
        # Also hand the connection back to urllib3's pool when supported.
        release_conn = getattr(self.raw, "release_conn", None)
        if release_conn is not None:
            release_conn()
|
()
|
64,769 |
requests.models
|
__init__
| null |
def __init__(self):
    """Initialize an empty Response with all fields in their default state."""
    # Body-read bookkeeping: ``False`` is a sentinel meaning the body has not
    # been fetched yet; it is replaced by bytes (or None) on first access.
    self._content = False
    self._content_consumed = False
    # Next PreparedRequest in a redirect chain, if any.
    self._next = None
    #: Integer Code of responded HTTP Status, e.g. 404 or 200.
    self.status_code = None
    #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
    self.reason = None
    #: Case-insensitive Dictionary of Response Headers; e.g.
    #: ``headers['content-encoding']`` reads the ``'Content-Encoding'`` header.
    self.headers = CaseInsensitiveDict()
    #: File-like object representation of response (for advanced usage).
    #: Requires ``stream=True`` on the request; this requirement does not
    #: apply for use internally to Requests.
    self.raw = None
    #: Final URL location of Response.
    self.url = None
    #: Encoding to decode with when accessing r.text.
    self.encoding = None
    #: Redirect history: :class:`Response <Response>` objects sorted oldest
    #: to most recent.
    self.history = []
    #: A CookieJar of Cookies the server sent back.
    self.cookies = cookiejar_from_dict({})
    #: Time between sending the first byte of the request and finishing
    #: parsing the headers (a timedelta); unaffected by consuming the body
    #: or by the ``stream`` keyword argument.
    self.elapsed = datetime.timedelta(0)
    #: The :class:`PreparedRequest <PreparedRequest>` object to which this
    #: is a response.
    self.request = None
|
(self)
|
64,777 |
requests.models
|
json
|
Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises requests.exceptions.JSONDecodeError: If the response body does not
contain valid json.
|
def json(self, **kwargs):
    r"""Returns the json-encoded content of a response, if any.
    :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
    :raises requests.exceptions.JSONDecodeError: If the response body does not
    contain valid json.
    """
    # len > 3 guards the byte-level UTF detection heuristic, which needs at
    # least 4 bytes to inspect.
    if not self.encoding and self.content and len(self.content) > 3:
        # No encoding set. JSON RFC 4627 section 3 states we should expect
        # UTF-8, -16 or -32. Detect which one to use; If the detection or
        # decoding fails, fall back to `self.text` (using charset_normalizer to make
        # a best guess).
        encoding = guess_json_utf(self.content)
        if encoding is not None:
            try:
                return complexjson.loads(self.content.decode(encoding), **kwargs)
            except UnicodeDecodeError:
                # Wrong UTF codec detected; usually because it's not UTF-8
                # but some other 8-bit codec. This is an RFC violation,
                # and the server didn't bother to tell us what codec *was*
                # used.
                pass
            except JSONDecodeError as e:
                raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
    # Fall back to decoding via self.text (charset detection).
    try:
        return complexjson.loads(self.text, **kwargs)
    except JSONDecodeError as e:
        # Catch JSON-related errors and raise as requests.JSONDecodeError
        # This aliases json.JSONDecodeError and simplejson.JSONDecodeError
        raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
|
(self, **kwargs)
|
64,805 |
pdpyras
|
URLError
|
Exception class for unsupported URLs or malformed input.
|
class URLError(Exception):
    """Raised for unsupported URLs or malformed URL input."""
| null |
64,806 |
pdpyras
|
auto_json
|
Makes methods return the full response body object after decoding from JSON.
Intended for use on functions that take a URL positional argument followed
by keyword arguments and return a `requests.Response`_ object.
|
def auto_json(method):
    """
    Decorator that makes a request method return the JSON-decoded body.

    Wraps functions that take a URL positional argument plus keyword arguments
    and return a `requests.Response`_ object; the wrapper validates the
    response and returns its decoded JSON body instead.
    """
    def wrapper(self, url, **kw):
        response = method(self, url, **kw)
        return try_decoding(successful_response(response))
    # Preserve the wrapped method's docstring for generated API docs.
    wrapper.__doc__ = method.__doc__
    return wrapper
|
(method)
|
64,807 |
pdpyras
|
canonical_path
|
The canonical path from the API documentation corresponding to a URL
This is used to identify and classify URLs according to which particular API
within REST API v2 it belongs to.
Explicitly supported canonical paths are defined in the list
:attr:`CANONICAL_PATHS` and are the path part of any given API's URL. The
path for a given API is what is shown at the top of its reference page, i.e.
``/users/{id}/contact_methods`` for retrieving a user's contact methods
(GET) or creating a new one (POST).
:param base_url: The base URL of the API
:param url: A non-normalized URL (a path or full URL)
:returns:
The canonical REST API v2 path corresponding to a URL.
|
def canonical_path(base_url: str, url: str) -> str:
    """
    The canonical path from the API documentation corresponding to a URL
    This is used to identify and classify URLs according to which particular API
    within REST API v2 it belongs to.
    Explicitly supported canonical paths are defined in the list
    :attr:`CANONICAL_PATHS` and are the path part of any given API's URL. The
    path for a given API is what is shown at the top of its reference page, i.e.
    ``/users/{id}/contact_methods`` for retrieving a user's contact methods
    (GET) or creating a new one (POST).
    :param base_url: The base URL of the API
    :param url: A non-normalized URL (a path or full URL)
    :returns:
        The canonical REST API v2 path corresponding to a URL.
    :raises URLError: If the URL matches no supported canonical path.
    """
    full_url = normalize_url(base_url, url)
    # Starting with / after hostname before the query string:
    url_path = full_url.replace(base_url.rstrip('/'), '').split('?')[0]
    # Root node (blank) counts so we include it:
    n_nodes = url_path.count('/')
    # First winnow the list down to paths with the same number of nodes:
    patterns = list(filter(
        lambda p: p.count('/') == n_nodes,
        CANONICAL_PATHS
    ))
    # Match against each node, skipping index zero because the root node always
    # matches, and using the adjusted index "j":
    for i, node in enumerate(url_path.split('/')[1:]):
        j = i+1
        # NOTE: the lambda captures ``j`` and ``node`` late, but ``list()``
        # forces evaluation within this iteration, so the capture is safe.
        patterns = list(filter(
            lambda p: p.split('/')[j] == node or is_path_param(p.split('/')[j]),
            patterns
        ))
    # Don't break early if len(patterns) == 1, but require an exact match...
    if len(patterns) == 0:
        raise URLError(f"URL {url} does not match any canonical API path " \
            'supported by this client.')
    elif len(patterns) > 1:
        # If there's multiple matches but one matches exactly, return that.
        if url_path in patterns:
            return url_path
        # ...otherwise this is ambiguous.
        raise Exception(f"Ambiguous URL {url} matches more than one " \
            "canonical path pattern: "+', '.join(patterns)+'; this is likely ' \
            'a bug.')
    else:
        return patterns[0]
|
(base_url: str, url: str) -> str
|
64,810 |
pdpyras
|
deprecated_kwarg
|
Raises a warning if a deprecated keyword argument is used.
:param deprecated_name: The name of the deprecated function
:param details: An optional message to append to the deprecation message
|
def deprecated_kwarg(deprecated_name: str, details=None):
    """
    Emit a warning about the use of a deprecated keyword argument.

    :param deprecated_name: The name of the deprecated keyword argument
    :param details: An optional message to append to the deprecation message
    """
    suffix = f" {details}" if details is not None else ''
    warn(f'Keyword argument "{deprecated_name}" is deprecated.{suffix}')
|
(deprecated_name: str, details=None)
|
64,811 |
pdpyras
|
endpoint_matches
|
Whether an endpoint (method and canonical path) matches a given pattern
This is the filtering logic used for finding the appropriate entry in
:attr:`ENTITY_WRAPPER_CONFIG` to use for a given method and API path.
:param endpoint_pattern:
The endpoint pattern in the form ``METHOD PATH`` where ``METHOD`` is the
HTTP method in uppercase or ``*`` to match all methods, and ``PATH`` is
a canonical API path.
:param method:
The HTTP method.
:param path:
The canonical API path (i.e. as returned by :func:`canonical_path`)
:returns:
True or False based on whether the pattern matches the endpoint
|
def endpoint_matches(endpoint_pattern: str, method: str, path: str) -> bool:
    """
    Whether an endpoint (method and canonical path) matches a given pattern

    This is the filtering logic used for finding the appropriate entry in
    :attr:`ENTITY_WRAPPER_CONFIG` to use for a given method and API path.

    :param endpoint_pattern:
        The endpoint pattern in the form ``METHOD PATH`` where ``METHOD`` is
        the HTTP method in uppercase or ``*`` to match all methods, and
        ``PATH`` is a canonical API path.
    :param method:
        The HTTP method.
    :param path:
        The canonical API path (i.e. as returned by :func:`canonical_path`)
    :returns:
        True or False based on whether the pattern matches the endpoint
    """
    # The pattern must name the method (or the "*" wildcard) and end with the
    # space-prefixed canonical path.
    method_matches = endpoint_pattern.startswith((method.upper(), '*'))
    return method_matches and endpoint_pattern.endswith(f" {path}")
|
(endpoint_pattern: str, method: str, path: str) -> bool
|
64,812 |
pdpyras
|
entity_wrappers
|
Obtains entity wrapping information for a given endpoint (path and method)
:param method: The HTTP method
:param path: A canonical API path i.e. as returned by ``canonical_path``
:returns:
A 2-tuple. The first element is the wrapper name that should be used for
the request body, and the second is the wrapper name to be used for the
response body. For either elements, if ``None`` is returned, that
signals to disable wrapping and pass the user-supplied request body or
API response body object unmodified.
|
def entity_wrappers(method: str, path: str) -> tuple:
    """
    Obtains entity wrapping information for a given endpoint (path and method)
    :param method: The HTTP method
    :param path: A canonical API path i.e. as returned by ``canonical_path``
    :returns:
        A 2-tuple. The first element is the wrapper name that should be used for
        the request body, and the second is the wrapper name to be used for the
        response body. For either elements, if ``None`` is returned, that
        signals to disable wrapping and pass the user-supplied request body or
        API response body object unmodified.
    """
    m = method.upper()
    endpoint = "%s %s"%(m, path)
    # Collect explicit overrides whose endpoint pattern matches:
    match = list(filter(
        lambda k: endpoint_matches(k, m, path),
        ENTITY_WRAPPER_CONFIG.keys()
    ))
    if len(match) == 1:
        # Look up entity wrapping info from the global dictionary and validate:
        wrapper = ENTITY_WRAPPER_CONFIG[match[0]]
        invalid_config_error = 'Invalid entity wrapping configuration for ' \
            f"{endpoint}: {wrapper}; this is most likely a bug."
        # Valid config values are None, a single str, or a 2-tuple:
        if wrapper is not None and type(wrapper) not in (tuple, str):
            raise Exception(invalid_config_error)
        elif wrapper is None or type(wrapper) is str:
            # Both request and response have the same wrapping at this endpoint.
            return (wrapper, wrapper)
        elif type(wrapper) is tuple and len(wrapper) == 2:
            # Endpoint uses different wrapping for request and response bodies.
            #
            # Both elements must be either str or None. The first element is the
            # request body wrapper and the second is the response body wrapper.
            # If a value is None, that indicates that the request or response
            # value should be encoded and decoded as-is without modifications.
            if False in [w is None or type(w) is str for w in wrapper]:
                raise Exception(invalid_config_error)
            return wrapper
    elif len(match) == 0:
        # Nothing in entity wrapper config matches. In this case it is assumed
        # that the endpoint follows classic API patterns and the wrapper name
        # can be inferred from the URL and request method:
        wrapper = infer_entity_wrapper(method, path)
        return (wrapper, wrapper)
    else:
        # More than one configured pattern matched; the config is ambiguous.
        matches_str = ', '.join(match)
        raise Exception(f"{endpoint} matches more than one pattern:" + \
            f"{matches_str}; this is most likely a bug in pdpyras.")
|
(method: str, path: str) -> tuple
|
64,813 |
pdpyras
|
http_error_message
|
Formats a message describing a HTTP error.
:param r:
The response object.
:param context:
A description of when the error was received, or None to not include it
:returns:
The message to include in the HTTP error
|
def http_error_message(r: Response, context=None) -> str:
    """
    Formats a message describing a HTTP error.

    :param r:
        The response object.
    :param context:
        A description of when the error was received, or None to not include it
    :returns:
        The message to include in the HTTP error
    """
    received_http_response = bool(r.status_code)
    endpoint = "%s %s"%(r.request.method.upper(), r.request.url)
    context_msg = ""
    if type(context) is str:
        context_msg=f" in {context}"
    if received_http_response and not r.ok:
        err_type = 'unknown'
        # BUGFIX: use floor division. True division (/) only equals 4 or 5 for
        # exact multiples of 100 (e.g. 404/100 == 4.04 != 4), which previously
        # left err_type as 'unknown' for nearly every 4xx/5xx status.
        if r.status_code // 100 == 4:
            err_type = 'client'
        elif r.status_code // 100 == 5:
            err_type = 'server'
        tr_bod = truncate_text(r.text)
        return f"{endpoint}: API responded with {err_type} error (status " \
            f"{r.status_code}){context_msg}: {tr_bod}"
    elif not received_http_response:
        # No status code at all: the request never got an HTTP response.
        return f"{endpoint}: Network or other unknown error{context_msg}"
    else:
        # HTTP success, but some higher-level expectation was not met.
        return f"{endpoint}: Success (status {r.status_code}) but an " \
            f"expectation still failed{context_msg}"
|
(r: requests.models.Response, context=None) -> str
|
64,814 |
pdpyras
|
infer_entity_wrapper
|
Infer the entity wrapper name from the endpoint using orthodox patterns.
This is based on patterns that are broadly applicable but not universal in
the v2 REST API, where the wrapper name is predictable from the path and
method. This is the default logic applied to determine the wrapper name
based on the path if there is no explicit entity wrapping defined for the
given path in :attr:`ENTITY_WRAPPER_CONFIG`.
:param method: The HTTP method
:param path: A canonical API path i.e. as returned by ``canonical_path``
|
def infer_entity_wrapper(method: str, path: str) -> str:
    """
    Infer the entity wrapper name from the endpoint using orthodox patterns.

    This is based on patterns that are broadly applicable but not universal in
    the v2 REST API, where the wrapper name is predictable from the path and
    method. This is the default logic applied to determine the wrapper name
    based on the path if there is no explicit entity wrapping defined for the
    given path in :attr:`ENTITY_WRAPPER_CONFIG`.

    :param method: The HTTP method
    :param path: A canonical API path i.e. as returned by ``canonical_path``
    """
    nodes = path.split('/')
    if is_path_param(nodes[-1]):
        # Individual-resource URL (read/update/delete): the last node is the
        # ID, and the one before it names the collection, so singularize that.
        return singular_name(nodes[-2])
    if method.upper() == 'POST':
        # Creating a resource by POSTing to the collection index: wrapper is
        # the singular of the collection name.
        return singular_name(nodes[-1])
    # Index listing (GET) or multi-update: the wrapper stays plural.
    return nodes[-1]
|
(method: str, path: str) -> str
|
64,815 |
pdpyras
|
is_path_param
|
Whether a part of a canonical path represents a variable parameter
:param path_node: The node (value between slashes) in the path
:returns:
True if the node is an arbitrary variable, False if it is a fixed value
|
def is_path_param(path_node: str) -> bool:
    """
    Whether a part of a canonical path represents a variable parameter

    :param path_node: The node (value between slashes) in the path
    :returns:
        True if the node is an arbitrary variable, False if it is a fixed value
    """
    # A parameter node is delimited by curly braces, e.g. "{id}". Slicing
    # (rather than indexing) keeps the empty string safe.
    return path_node[:1] == '{' and path_node[-1:] == '}'
|
(path_node: str) -> bool
|
64,816 |
pdpyras
|
last_4
|
Truncate a sensitive value to its last 4 characters
:param secret: text to truncate
:returns:
The truncated text
|
def last_4(secret: str) -> str:
    """
    Truncate a sensitive value to its last 4 characters

    :param secret: text to truncate
    :returns:
        The truncated text
    """
    # str() tolerates non-string inputs; only the final 4 characters survive.
    return f"*{str(secret)[-4:]}"
|
(secret: str) -> str
|
64,818 |
pdpyras
|
normalize_url
|
Normalize a URL to a complete API URL.
The ``url`` argument may be a path relative to the base URL or a full URL.
:param url: The URL to normalize
:param baseurl:
The base API URL, excluding any trailing slash, i.e.
"https://api.pagerduty.com"
:returns: The full API endpoint URL
|
def normalize_url(base_url: str, url: str) -> str:
    """
    Normalize a URL to a complete API URL.

    The ``url`` argument may be a path relative to the base URL or a full URL.

    :param base_url:
        The base API URL, excluding any trailing slash, i.e.
        "https://api.pagerduty.com"
    :param url: The URL to normalize
    :returns: The full API endpoint URL
    :raises URLError: If the URL is absolute but outside the API base URL
    """
    if url.startswith(base_url):
        # Already a full URL under the API base.
        return url
    if url.startswith('http://') or url.startswith('https://'):
        # Absolute URL pointing somewhere other than the API base.
        raise URLError(
            f"URL {url} does not start with the API base URL {base_url}"
        )
    # Relative path: join onto the base, normalizing slashes at the seam.
    return base_url.rstrip('/') + "/" + url.lstrip('/')
|
(base_url: str, url: str) -> str
|
64,819 |
pdpyras
|
plural_name
|
Pluralizes a name, i.e. the API name from the ``type`` property
:param obj_type:
The object type, i.e. ``user`` or ``user_reference``
:returns:
The name of the resource, i.e. the last part of the URL for the
resource's index URL
|
def plural_name(obj_type: str) -> str:
    """
    Pluralizes a name, i.e. the API name from the ``type`` property

    :param obj_type:
        The object type, i.e. ``user`` or ``user_reference``
    :returns:
        The name of the resource, i.e. the last part of the URL for the
        resource's index URL
    """
    base = obj_type
    if base.endswith('_reference'):
        # References pluralize the same way as the underlying type.
        base = base[:base.index('_reference')]
    # Because English: "policy" -> "policies", otherwise just append "s".
    return base[:-1] + 'ies' if base.endswith('y') else base + 's'
|
(obj_type: str) -> str
|
64,820 |
pdpyras
|
requires_success
|
Decorator that validates HTTP responses.
|
def requires_success(method):
    """
    Decorator that validates HTTP responses.

    The wrapped method must return a `requests.Response`_ object; the wrapper
    raises if the response indicates failure and returns it otherwise.
    """
    def wrapper(self, url, **kw):
        return successful_response(method(self, url, **kw))
    # Preserve the wrapped method's docstring for generated API docs.
    wrapper.__doc__ = method.__doc__
    return wrapper
|
(method)
|
64,821 |
pdpyras
|
resource_url
|
API call decorator that allows passing a resource dict as the path/URL
Most resources returned by the API will contain a ``self`` attribute that is
the URL of the resource itself.
Using this decorator allows the implementer to pass either a URL/path or
such a resource dictionary as the ``path`` argument, thus eliminating the
need to re-construct the resource URL or hold it in a temporary variable.
|
def resource_url(method):
    """
    API call decorator that allows passing a resource dict as the path/URL

    Most resources returned by the API will contain a ``self`` attribute that
    is the URL of the resource itself. Using this decorator allows the
    implementer to pass either a URL/path or such a resource dictionary as the
    ``path`` argument, thus eliminating the need to re-construct the resource
    URL or hold it in a temporary variable.
    """
    def wrapper(self, resource, **kw):
        if type(resource) is dict and 'self' in resource: # passing an object
            target = resource['self']
        elif type(resource) is str:
            target = resource
        else:
            name = method.__name__
            raise URLError(f"Value passed to {name} is not a str or dict with "
                "key 'self'")
        return method(self, target, **kw)
    wrapper.__doc__ = method.__doc__
    return wrapper
|
(method)
|
64,822 |
pdpyras
|
singular_name
|
Singularizes a name, i.e. for the entity wrapper in a POST request
:para r_name:
The "resource" name, i.e. "escalation_policies", a plural noun that
forms the part of the canonical path identifying what kind of resource
lives in the collection there, for an API that follows classic wrapped
entity naming patterns.
:returns:
The singularized name
|
def singular_name(r_name: str) -> str:
    """
    Singularizes a name, i.e. for the entity wrapper in a POST request

    :param r_name:
        The "resource" name, i.e. "escalation_policies", a plural noun that
        forms the part of the canonical path identifying what kind of resource
        lives in the collection there, for an API that follows classic wrapped
        entity naming patterns.
    :returns:
        The singularized name
    """
    suffix = 'ies'
    if r_name.endswith(suffix):
        # Because English: "policies" -> "policy"
        return r_name[:-len(suffix)] + 'y'
    # Drop any trailing "s" characters otherwise.
    return r_name.rstrip('s')
|
(r_name: str) -> str
|
64,823 |
pdpyras
|
successful_response
|
Validates the response as successful.
Returns the response if it was successful; otherwise, raises an exception.
:param r:
Response object corresponding to the response received.
:param context:
A description of when the HTTP request is happening, for error reporting
:returns:
The response object, if it was successful
|
def successful_response(r: Response, context=None) -> Response:
    """Validates the response as successful.

    Returns the response if it was successful; otherwise, raises an exception.

    :param r:
        Response object corresponding to the response received.
    :param context:
        A description of when the HTTP request is happening, for error reporting
    :returns:
        The response object, if it was successful
    :raises PDServerError: If the API responded with a 5xx status
    :raises PDHTTPError: If the API responded with any other error status
    :raises PDClientError: If no HTTP response was received at all
    """
    if r.ok and bool(r.status_code):
        return r
    elif not bool(r.status_code):
        # No status code: the request never received an HTTP response.
        raise PDClientError(http_error_message(r, context=context))
    # BUGFIX: use floor division. True division (/) only equals 5 for exact
    # multiples of 100 (503/100 == 5.03 != 5), so non-500 server errors were
    # previously raised as generic PDHTTPError instead of PDServerError.
    elif r.status_code // 100 == 5:
        raise PDServerError(http_error_message(r, context=context), r)
    else:
        raise PDHTTPError(http_error_message(r, context=context), r)
|
(r: requests.models.Response, context=None) -> requests.models.Response
|
64,826 |
pdpyras
|
truncate_text
|
Truncates a string longer than :attr:`TEXT_LEN_LIMIT`
:param text: The string to truncate if longer than the limit.
|
def truncate_text(text: str) -> str:
    """Truncates a string longer than :attr:`TEXT_LEN_LIMIT`

    Strings over the limit are cut to ``TEXT_LEN_LIMIT - 1`` characters and
    given a trailing ``'...'`` marker; shorter strings pass through unchanged.

    :param text: The string to truncate if longer than the limit.
    """
    if len(text) <= TEXT_LEN_LIMIT:
        return text
    return text[:TEXT_LEN_LIMIT-1] + '...'
|
(text: str) -> str
|
64,827 |
pdpyras
|
try_decoding
|
JSON-decode a response body
Returns the decoded body if successful; raises :class:`PDServerError`
otherwise.
:param r:
The response object
|
def try_decoding(r: Response) -> Union[dict, list, str]:
    """
    JSON-decode a response body

    Returns the decoded body if successful; raises :class:`PDServerError`
    otherwise.

    :param r:
        The response object
    :returns:
        The JSON-decoded body (a dict, list or str)
    :raises PDServerError: If the body is not valid JSON
    """
    try:
        return r.json()
    except ValueError as e:
        # Chain the original decoding error (previously `e` was captured but
        # unused) so tracebacks show the root cause of the failure.
        raise PDServerError(
            "API responded with invalid JSON: " + truncate_text(r.text),
            r,
        ) from e
|
(r: requests.models.Response) -> Union[dict, list, str]
|
64,828 |
pdpyras
|
unwrap
|
Unwraps a wrapped entity.
:param response: The response object
:param wrapper: The entity wrapper
:type wrapper: str or None
:returns:
The value associated with the wrapper key in the JSON-decoded body of
the response, which is expected to be a dictionary (map).
|
def unwrap(response: Response, wrapper) -> Union[dict, list]:
    """
    Unwraps a wrapped entity.

    :param response: The response object
    :param wrapper: The entity wrapper
    :type wrapper: str or None
    :returns:
        The value associated with the wrapper key in the JSON-decoded body of
        the response, which is expected to be a dictionary (map).
    """
    body = try_decoding(response)
    if wrapper is None:
        # Wrapping is disabled for responses:
        return body
    # There is a wrapped entity to unpack:
    endpoint = "%s %s"%(response.request.method.upper(), response.request.url)
    bod_type = type(body)
    error_msg = f"Expected response body from {endpoint} after JSON-" \
        f"decoding to be a dictionary with a key \"{wrapper}\", but "
    if bod_type is not dict:
        # The body decoded to something other than a map; report its type.
        raise PDServerError(
            error_msg + f"its type is {bod_type}.",
            response
        )
    if wrapper in body:
        return body[wrapper]
    # The expected wrapper key is missing; report which keys are present.
    keys = truncate_text(', '.join(body.keys()))
    raise PDServerError(
        error_msg + f"its keys are: {keys}",
        response
    )
|
(response: requests.models.Response, wrapper) -> Union[dict, list]
|
64,829 |
pdpyras
|
wrapped_entities
|
Automatically wrap request entities and unwrap response entities.
Used for methods :attr:`APISession.rget`, :attr:`APISession.rpost` and
:attr:`APISession.rput`. It makes them always return an object representing
the resource entity in the response (whether wrapped in a root-level
property or not) rather than the full response body. When making a post /
put request, and passing the ``json`` keyword argument to specify the
content to be JSON-encoded as the body, that keyword argument can be either
the to-be-wrapped content or the full body including the entity wrapper, and
the ``json`` keyword argument will be normalized to include the wrapper.
Methods using this decorator will raise a :class:`PDHTTPError` with its
``response`` property being the `requests.Response`_ object in the
case of any error (as of version 4.2 this is subclassed as
:class:`PDHTTPError`), so that the implementer can access it by catching the
exception, and thus design their own custom logic around different types of
error responses.
:param method: Method being decorated. Must take one positional argument
after ``self`` that is the URL/path to the resource, followed by
any number of keyword arguments, and must return an object of class
`requests.Response`_, and be named after the HTTP method but with "r"
prepended.
:returns: A callable object; the reformed method
|
def wrapped_entities(method):
    """
    Automatically wrap request entities and unwrap response entities.

    Used for methods :attr:`APISession.rget`, :attr:`APISession.rpost` and
    :attr:`APISession.rput`. It makes them always return an object representing
    the resource entity in the response (whether wrapped in a root-level
    property or not) rather than the full response body. When making a post /
    put request, and passing the ``json`` keyword argument to specify the
    content to be JSON-encoded as the body, that keyword argument can be either
    the to-be-wrapped content or the full body including the entity wrapper, and
    the ``json`` keyword argument will be normalized to include the wrapper.

    Methods using this decorator will raise a :class:`PDHTTPError` with its
    ``response`` property being the `requests.Response`_ object in the
    case of any error (as of version 4.2 this is subclassed as
    :class:`PDHTTPError`), so that the implementer can access it by catching the
    exception, and thus design their own custom logic around different types of
    error responses.

    :param method: Method being decorated. Must take one positional argument
        after ``self`` that is the URL/path to the resource, followed by
        any number of keyword arguments, and must return an object of class
        `requests.Response`_, and be named after the HTTP method but with "r"
        prepended.
    :returns: A callable object; the reformed method
    """
    # Derive the HTTP verb from the method name: rget -> get, rpost -> post.
    # NOTE(review): lstrip('r') strips *all* leading "r"s; fine for the
    # current method names, but would break for a verb starting with "r".
    http_method = method.__name__.lstrip('r')
    doc = method.__doc__
    def call(self, url, **kw):
        pass_kw = deepcopy(kw) # Make a copy for modification
        path = canonical_path(self.url, url)
        endpoint = "%s %s"%(http_method.upper(), path)
        # Determine the request/response wrapper names for this endpoint:
        req_w, res_w = entity_wrappers(http_method, path)
        # Validate the abbreviated (or full) request payload, and automatically
        # wrap the request entity for the implementer if necessary:
        if req_w is not None and http_method in ('post', 'put') \
                and 'json' in pass_kw and req_w not in pass_kw['json']:
            pass_kw['json'] = {req_w: pass_kw['json']}
        # Make the request:
        r = successful_response(method(self, url, **pass_kw))
        # Unpack the response:
        return unwrap(r, res_w)
    call.__doc__ = doc
    return call
|
(method)
|
64,830 |
canopen.profiles.p402
|
BaseNode402
|
A CANopen CiA 402 profile slave node.
:param int node_id:
Node ID (set to None or 0 if specified by object dictionary)
:param object_dictionary:
Object dictionary as either a path to a file, an ``ObjectDictionary``
or a file like object.
:type object_dictionary: :class:`str`, :class:`canopen.ObjectDictionary`
|
class BaseNode402(RemoteNode):
    """A CANopen CiA 402 profile slave node.

    :param int node_id:
        Node ID (set to None or 0 if specified by object dictionary)
    :param object_dictionary:
        Object dictionary as either a path to a file, an ``ObjectDictionary``
        or a file like object.
    :type object_dictionary: :class:`str`, :class:`canopen.ObjectDictionary`
    """
    # Timeouts (seconds) governing the busy-wait loops below.
    TIMEOUT_RESET_FAULT = 0.4 # seconds
    TIMEOUT_SWITCH_OP_MODE = 0.5 # seconds
    TIMEOUT_SWITCH_STATE_FINAL = 0.8 # seconds
    TIMEOUT_SWITCH_STATE_SINGLE = 0.4 # seconds
    TIMEOUT_CHECK_TPDO = 0.2 # seconds
    TIMEOUT_HOMING_DEFAULT = 30 # seconds
    def __init__(self, node_id, object_dictionary):
        super(BaseNode402, self).__init__(node_id, object_dictionary)
        self.tpdo_values = {} # { index: value from last received TPDO }
        self.tpdo_pointers = {} # { index: pdo.Map instance }
        self.rpdo_pointers = {} # { index: pdo.Map instance }
    def setup_402_state_machine(self, read_pdos=True):
        """Configure the state machine by searching for a TPDO that has the StatusWord mapped.

        :param bool read_pdos: Upload current PDO configuration from node.
        :raises ValueError:
            If the node can't find a Statusword configured in any of the TPDOs.
        """
        self.setup_pdos(read_pdos)
        self._check_controlword_configured()
        self._check_statusword_configured()
        self._check_op_mode_configured()
    def setup_pdos(self, upload=True):
        """Find the relevant PDO configuration to handle the state machine.

        :param bool upload:
            Retrieve up-to-date configuration via SDO. If False, the node's mappings must
            already be configured in the object, matching the drive's settings.
        :raises AssertionError:
            When the node's NMT state disallows SDOs for reading the PDO configuration.
        """
        if upload:
            # NOTE(review): this asserts substring membership of the state in
            # the string 'PRE-OPERATIONAL', with 'OPERATIONAL' as the assert
            # *message* -- likely intended:
            #   assert self.nmt.state in ('PRE-OPERATIONAL', 'OPERATIONAL')
            # Left unchanged here; confirm before fixing.
            assert self.nmt.state in 'PRE-OPERATIONAL', 'OPERATIONAL'
            self.pdo.read() # TPDO and RPDO configurations
        else:
            self.pdo.subscribe() # Get notified on reception, usually a side-effect of read()
        self._init_tpdo_values()
        self._init_rpdo_pointers()
    def _init_tpdo_values(self):
        # Register the update callback on every enabled TPDO and seed the
        # value cache with 0 for each mapped object index.
        for tpdo in self.tpdo.values():
            if tpdo.enabled:
                tpdo.add_callback(self.on_TPDOs_update_callback)
                for obj in tpdo:
                    logger.debug('Configured TPDO: {0}'.format(obj.index))
                    if obj.index not in self.tpdo_values:
                        self.tpdo_values[obj.index] = 0
                        self.tpdo_pointers[obj.index] = obj
    def _init_rpdo_pointers(self):
        # If RPDOs have overlapping indices, rpdo_pointers will point to
        # the first RPDO that has that index configured.
        for rpdo in self.rpdo.values():
            if rpdo.enabled:
                for obj in rpdo:
                    logger.debug('Configured RPDO: {0}'.format(obj.index))
                    if obj.index not in self.rpdo_pointers:
                        self.rpdo_pointers[obj.index] = obj
    def _check_controlword_configured(self):
        # Warn when the Controlword is not PDO-mapped (SDO fallback is slow).
        if 0x6040 not in self.rpdo_pointers: # Controlword
            logger.warning(
                "Controlword not configured in node {0}'s PDOs. Using SDOs can cause slow performance.".format(
                    self.id))
    def _check_statusword_configured(self):
        # Warn when the Statusword is not PDO-mapped (SDO fallback is slow).
        if 0x6041 not in self.tpdo_values: # Statusword
            logger.warning(
                "Statusword not configured in node {0}'s PDOs. Using SDOs can cause slow performance.".format(
                    self.id))
    def _check_op_mode_configured(self):
        # Warn when the mode set/display objects are not PDO-mapped.
        if 0x6060 not in self.rpdo_pointers: # Operation Mode
            logger.warning(
                "Operation Mode not configured in node {0}'s PDOs. Using SDOs can cause slow performance.".format(
                    self.id))
        if 0x6061 not in self.tpdo_values: # Operation Mode Display
            logger.warning(
                "Operation Mode Display not configured in node {0}'s PDOs. Using SDOs can cause slow performance.".format(
                    self.id))
    def reset_from_fault(self):
        """Reset node from fault and set it to Operation Enable state."""
        if self.state == 'FAULT':
            # Resets the Fault Reset bit (rising edge 0 -> 1)
            self.controlword = State402.CW_DISABLE_VOLTAGE
            # FIXME! The rising edge happens with the transitions toward OPERATION
            # ENABLED below, but until then the loop will always reach the timeout!
            timeout = time.monotonic() + self.TIMEOUT_RESET_FAULT
            while self.is_faulted():
                if time.monotonic() > timeout:
                    break
                self.check_statusword()
            self.state = 'OPERATION ENABLED'
    def is_faulted(self):
        # Compare the Statusword against the FAULT bit pattern.
        bitmask, bits = State402.SW_MASK['FAULT']
        return self.statusword & bitmask == bits
    def _homing_status(self):
        """Interpret the current Statusword bits as homing state string."""
        # Wait to make sure a TPDO was received
        self.check_statusword()
        status = None
        # The last matching entry in Homing.STATES wins.
        for key, value in Homing.STATES.items():
            bitmask, bits = value
            if self.statusword & bitmask == bits:
                status = key
        return status
    def is_homed(self, restore_op_mode=False):
        """Switch to homing mode and determine its status.

        :param bool restore_op_mode: Switch back to the previous operation mode when done.
        :return: If the status indicates successful homing.
        :rtype: bool
        """
        previous_op_mode = self.op_mode
        if previous_op_mode != 'HOMING':
            logger.info('Switch to HOMING from %s', previous_op_mode)
            self.op_mode = 'HOMING' # blocks until confirmed
        homingstatus = self._homing_status()
        if restore_op_mode:
            self.op_mode = previous_op_mode
        return homingstatus in ('TARGET REACHED', 'ATTAINED')
    def homing(self, timeout=None, restore_op_mode=False):
        """Execute the configured Homing method on the node.

        :param int timeout: Timeout value (default: 30, zero to disable).
        :param bool restore_op_mode:
            Switch back to the previous operation mode after homing (default: no).
        :return: If the homing was complete with success.
        :rtype: bool
        """
        if timeout is None:
            timeout = self.TIMEOUT_HOMING_DEFAULT
        if restore_op_mode:
            previous_op_mode = self.op_mode
        self.op_mode = 'HOMING'
        # The homing process will initialize at operation enabled
        self.state = 'OPERATION ENABLED'
        homingstatus = 'UNKNOWN'
        self.controlword = State402.CW_OPERATION_ENABLED | Homing.CW_START # does not block
        # Wait for one extra cycle, to make sure the controlword was received
        self.check_statusword()
        t = time.monotonic() + timeout
        try:
            while homingstatus not in ('TARGET REACHED', 'ATTAINED'):
                homingstatus = self._homing_status()
                if homingstatus in ('INTERRUPTED', 'ERROR VELOCITY IS NOT ZERO',
                                    'ERROR VELOCITY IS ZERO'):
                    raise RuntimeError('Unable to home. Reason: {0}'.format(homingstatus))
                if timeout and time.monotonic() > t:
                    raise RuntimeError('Unable to home, timeout reached')
            logger.info('Homing mode carried out successfully.')
            return True
        except RuntimeError as e:
            # Homing failure is logged, not propagated; the method returns False.
            logger.info(str(e))
        finally:
            if restore_op_mode:
                self.op_mode = previous_op_mode
        return False
    @property
    def op_mode(self):
        """The node's Operation Mode stored in the object 0x6061.

        Uses SDO or PDO to access the current value. The modes are passed as one of the
        following strings:

        - 'NO MODE'
        - 'PROFILED POSITION'
        - 'VELOCITY'
        - 'PROFILED VELOCITY'
        - 'PROFILED TORQUE'
        - 'HOMING'
        - 'INTERPOLATED POSITION'
        - 'CYCLIC SYNCHRONOUS POSITION'
        - 'CYCLIC SYNCHRONOUS VELOCITY'
        - 'CYCLIC SYNCHRONOUS TORQUE'
        - 'OPEN LOOP SCALAR MODE'
        - 'OPEN LOOP VECTOR MODE'

        :raises TypeError: When setting a mode not advertised as supported by the node.
        :raises RuntimeError: If the switch is not confirmed within the configured timeout.
        """
        try:
            pdo = self.tpdo_pointers[0x6061].pdo_parent
            if pdo.is_periodic:
                # Block for a fresh TPDO so the cached value is current.
                timestamp = pdo.wait_for_reception(timeout=self.TIMEOUT_CHECK_TPDO)
                if timestamp is None:
                    raise RuntimeError("Timeout getting node {0}'s mode of operation.".format(
                        self.id))
            code = self.tpdo_values[0x6061]
        except KeyError:
            logger.warning('The object 0x6061 is not a configured TPDO, fallback to SDO')
            code = self.sdo[0x6061].raw
        return OperationMode.CODE2NAME[code]
    @op_mode.setter
    def op_mode(self, mode):
        try:
            if not self.is_op_mode_supported(mode):
                # NOTE(review): "suppported" typo is in the runtime message;
                # fixing it changes user-visible output, so it is only flagged here.
                raise TypeError(
                    'Operation mode {m} not suppported on node {n}.'.format(n=self.id, m=mode))
            # Update operation mode in RPDO if possible, fall back to SDO
            if 0x6060 in self.rpdo_pointers:
                self.rpdo_pointers[0x6060].raw = OperationMode.NAME2CODE[mode]
                pdo = self.rpdo_pointers[0x6060].pdo_parent
                if not pdo.is_periodic:
                    pdo.transmit()
            else:
                self.sdo[0x6060].raw = OperationMode.NAME2CODE[mode]
            timeout = time.monotonic() + self.TIMEOUT_SWITCH_OP_MODE
            # NOTE(review): this loop spins on the op_mode property without an
            # explicit wait between reads -- confirm this is intentional.
            while self.op_mode != mode:
                if time.monotonic() > timeout:
                    raise RuntimeError(
                        "Timeout setting node {0}'s new mode of operation to {1}.".format(
                            self.id, mode))
            logger.info('Set node {n} operation mode to {m}.'.format(n=self.id, m=mode))
        except SdoCommunicationError as e:
            logger.warning('[SDO communication error] Cause: {0}'.format(str(e)))
        except (RuntimeError, ValueError) as e:
            logger.warning('{0}'.format(str(e)))
    def _clear_target_values(self):
        # [target velocity, target position, target torque]
        for target_index in [0x60FF, 0x607A, 0x6071]:
            if target_index in self.sdo.keys():
                self.sdo[target_index].raw = 0
    def is_op_mode_supported(self, mode):
        """Check if the operation mode is supported by the node.

        The object listing the supported modes is retrieved once using SDO, then cached
        for later checks.

        :param str mode: Same format as the :attr:`op_mode` property.
        :return: If the operation mode is supported.
        :rtype: bool
        """
        if not hasattr(self, '_op_mode_support'):
            # Cache value only on first lookup, this object should never change.
            self._op_mode_support = self.sdo[0x6502].raw
            logger.info('Caching node {n} supported operation modes 0x{m:04X}'.format(
                n=self.id, m=self._op_mode_support))
        bits = OperationMode.SUPPORTED[mode]
        return self._op_mode_support & bits == bits
    def on_TPDOs_update_callback(self, mapobject):
        """Cache updated values from a TPDO received from this node.

        :param mapobject: The received PDO message.
        :type mapobject: canopen.pdo.Map
        """
        for obj in mapobject:
            self.tpdo_values[obj.index] = obj.raw
    @property
    def statusword(self):
        """Return the last read value of the Statusword (0x6041) from the device.

        If the object 0x6041 is not configured in any TPDO it will fall back to the SDO
        mechanism and try to get the value.
        """
        try:
            return self.tpdo_values[0x6041]
        except KeyError:
            logger.warning('The object 0x6041 is not a configured TPDO, fallback to SDO')
            return self.sdo[0x6041].raw
    def check_statusword(self, timeout=None):
        """Report an up-to-date reading of the Statusword (0x6041) from the device.

        If the TPDO with the Statusword is configured as periodic, this method blocks
        until one was received. Otherwise, it uses the SDO fallback of the ``statusword``
        property.

        :param timeout: Maximum time in seconds to wait for TPDO reception.
        :raises RuntimeError: Occurs when the given timeout expires without a TPDO.
        :return: Updated value of the ``statusword`` property.
        :rtype: int
        """
        if 0x6041 in self.tpdo_pointers:
            pdo = self.tpdo_pointers[0x6041].pdo_parent
            if pdo.is_periodic:
                # NOTE(review): "timeout or ..." means an explicit timeout of 0
                # silently falls back to TIMEOUT_CHECK_TPDO -- confirm intended.
                timestamp = pdo.wait_for_reception(timeout or self.TIMEOUT_CHECK_TPDO)
                if timestamp is None:
                    raise RuntimeError('Timeout waiting for updated statusword')
        else:
            return self.sdo[0x6041].raw
        return self.statusword
    @property
    def controlword(self):
        """Send a state change command using PDO or SDO.

        :param int value: Controlword value to set.
        :raises RuntimeError: Read access to the controlword is not intended.
        """
        raise RuntimeError('The Controlword is write-only.')
    @controlword.setter
    def controlword(self, value):
        if 0x6040 in self.rpdo_pointers:
            self.rpdo_pointers[0x6040].raw = value
            pdo = self.rpdo_pointers[0x6040].pdo_parent
            if not pdo.is_periodic:
                # Event-driven RPDO: push the new value out immediately.
                pdo.transmit()
        else:
            self.sdo[0x6040].raw = value
    @property
    def state(self):
        """Manipulate current state of the DS402 State Machine on the node.

        Uses the last received Statusword value for read access, and manipulates the
        :attr:`controlword` for changing states. The states are passed as one of the
        following strings:

        - 'NOT READY TO SWITCH ON' (cannot be switched to deliberately)
        - 'SWITCH ON DISABLED'
        - 'READY TO SWITCH ON'
        - 'SWITCHED ON'
        - 'OPERATION ENABLED'
        - 'FAULT' (cannot be switched to deliberately)
        - 'FAULT REACTION ACTIVE' (cannot be switched to deliberately)
        - 'QUICK STOP ACTIVE'
        - 'DISABLE VOLTAGE' (only as a command when writing)

        :raises RuntimeError: If the switch is not confirmed within the configured timeout.
        :raises ValueError: Trying to execute a illegal transition in the state machine.
        """
        for state, mask_val_pair in State402.SW_MASK.items():
            bitmask, bits = mask_val_pair
            if self.statusword & bitmask == bits:
                return state
        return 'UNKNOWN'
    @state.setter
    def state(self, target_state):
        timeout = time.monotonic() + self.TIMEOUT_SWITCH_STATE_FINAL
        while self.state != target_state:
            next_state = self._next_state(target_state)
            if self._change_state(next_state):
                continue
            if time.monotonic() > timeout:
                raise RuntimeError('Timeout when trying to change state')
            self.check_statusword()
    def _next_state(self, target_state):
        # Return the next state to command: directly the target when the
        # transition table allows it, otherwise an intermediate state.
        if target_state in ('NOT READY TO SWITCH ON',
                            'FAULT REACTION ACTIVE',
                            'FAULT'):
            raise ValueError(
                'Target state {} cannot be entered programmatically'.format(target_state))
        from_state = self.state
        if (from_state, target_state) in State402.TRANSITIONTABLE:
            return target_state
        else:
            return State402.next_state_indirect(from_state)
    def _change_state(self, target_state):
        # Issue a single transition command and wait for the Statusword to
        # confirm it; returns False on timeout.
        try:
            self.controlword = State402.TRANSITIONTABLE[(self.state, target_state)]
        except KeyError:
            raise ValueError(
                'Illegal state transition from {f} to {t}'.format(f=self.state, t=target_state))
        timeout = time.monotonic() + self.TIMEOUT_SWITCH_STATE_SINGLE
        while self.state != target_state:
            if time.monotonic() > timeout:
                return False
            self.check_statusword()
        return True
|
(node_id, object_dictionary)
|
64,831 |
canopen.node.remote
|
__load_configuration_helper
|
Helper function to send SDOs to the remote node
:param index: Object index
:param subindex: Object sub-index (if it does not exist it should be None)
:param name: Object name
:param value: Value to set in the object
|
def __load_configuration_helper(self, index, subindex, name, value):
    """Helper function to send SDOs to the remote node

    :param index: Object index
    :param subindex: Object sub-index (if it does not exist it should be None)
    :param name: Object name
    :param value: Value to set in the object
    """
    try:
        if subindex is not None:
            # Redundant str() around .format() removed: format() already
            # returns a str.
            logger.info('SDO [{index:#06x}][{subindex:#06x}]: {name}: {value:#06x}'.format(
                index=index,
                subindex=subindex,
                name=name,
                value=value))
            self.sdo[index][subindex].raw = value
        else:
            self.sdo[index].raw = value
            logger.info('SDO [{index:#06x}]: {name}: {value:#06x}'.format(
                index=index,
                name=name,
                value=value))
    except SdoCommunicationError as e:
        logger.warning(str(e))
    except SdoAbortedError as e:
        # WORKAROUND for broken implementations: the SDO is set but the error
        # "Attempt to write a read-only object" is raised any way.
        if e.code != 0x06010002:
            # Abort codes other than "Attempt to write a read-only object"
            # should still be reported.
            # BUGFIX: subindex may be None here; formatting it with "{1:#06x}"
            # raised a TypeError that masked the original SDO error.
            logger.warning('[ERROR SETTING object {0:#06x}:{1}] {2}'.format(
                index, subindex, str(e)))
            raise
|
(self, index, subindex, name, value)
|
64,832 |
canopen.profiles.p402
|
__init__
| null |
def __init__(self, node_id, object_dictionary):
    """Initialize the 402 node and its empty PDO value/pointer caches.

    :param node_id: Node ID (set to None or 0 if specified by object dictionary)
    :param object_dictionary: Path, file-like object or ``ObjectDictionary``.
    """
    super(BaseNode402, self).__init__(node_id, object_dictionary)
    self.tpdo_values = {} # { index: value from last received TPDO }
    self.tpdo_pointers = {} # { index: pdo.Map instance }
    self.rpdo_pointers = {} # { index: pdo.Map instance }
|
(self, node_id, object_dictionary)
|
64,833 |
canopen.profiles.p402
|
_change_state
| null |
def _change_state(self, target_state):
    """Command one state-machine transition and wait for confirmation.

    Returns True once the Statusword reflects *target_state*, False if
    the single-step timeout expires first.
    """
    try:
        # The transition table maps (current, target) to a controlword value.
        self.controlword = State402.TRANSITIONTABLE[(self.state, target_state)]
    except KeyError:
        raise ValueError(
            'Illegal state transition from {f} to {t}'.format(f=self.state, t=target_state))
    deadline = time.monotonic() + self.TIMEOUT_SWITCH_STATE_SINGLE
    while True:
        if self.state == target_state:
            return True
        if time.monotonic() > deadline:
            return False
        self.check_statusword()
|
(self, target_state)
|
64,834 |
canopen.profiles.p402
|
_check_controlword_configured
| null |
def _check_controlword_configured(self):
    """Warn if the Controlword (0x6040) is not mapped into any RPDO."""
    if 0x6040 in self.rpdo_pointers:
        return
    # Without a PDO mapping every state change falls back to slow SDOs.
    logger.warning(
        "Controlword not configured in node {0}'s PDOs. Using SDOs can cause slow performance.".format(
            self.id))
|
(self)
|
64,835 |
canopen.profiles.p402
|
_check_op_mode_configured
| null |
def _check_op_mode_configured(self):
    """Warn if the mode objects 0x6060 (set) / 0x6061 (display) lack PDO mappings."""
    # 0x6060 Operation Mode is written, so it lives in the RPDO direction.
    if 0x6060 not in self.rpdo_pointers:
        logger.warning(
            "Operation Mode not configured in node {0}'s PDOs. Using SDOs can cause slow performance.".format(self.id))
    # 0x6061 Operation Mode Display is read, so it lives in the TPDO direction.
    if 0x6061 not in self.tpdo_values:
        logger.warning(
            "Operation Mode Display not configured in node {0}'s PDOs. Using SDOs can cause slow performance.".format(self.id))
|
(self)
|
64,836 |
canopen.profiles.p402
|
_check_statusword_configured
| null |
def _check_statusword_configured(self):
    """Warn if the Statusword (0x6041) is not mapped into any TPDO."""
    if 0x6041 in self.tpdo_values:
        return
    # Without a PDO mapping every status read falls back to slow SDOs.
    logger.warning(
        "Statusword not configured in node {0}'s PDOs. Using SDOs can cause slow performance.".format(
            self.id))
|
(self)
|
64,837 |
canopen.profiles.p402
|
_clear_target_values
| null |
def _clear_target_values(self):
    """Zero the drive's target objects via SDO.

    Covers target velocity (0x60FF), target position (0x607A) and target
    torque (0x6071); indices absent from the object dictionary are skipped.
    """
    # [target velocity, target position, target torque]
    for target_index in (0x60FF, 0x607A, 0x6071):
        # Idiom: membership test on the mapping directly; ".keys()" is redundant.
        if target_index in self.sdo:
            self.sdo[target_index].raw = 0
|
(self)
|
64,838 |
canopen.profiles.p402
|
_homing_status
|
Interpret the current Statusword bits as homing state string.
|
def _homing_status(self):
    """Interpret the current Statusword bits as homing state string."""
    # Refresh first so a recent TPDO value is used for the comparison.
    self.check_statusword()
    matched = None
    for name, (bitmask, bits) in Homing.STATES.items():
        if self.statusword & bitmask == bits:
            # Keep scanning: the last matching entry wins, as before.
            matched = name
    return matched
|
(self)
|
64,839 |
canopen.profiles.p402
|
_init_rpdo_pointers
| null |
def _init_rpdo_pointers(self):
    """Populate ``rpdo_pointers`` from the enabled RPDO mappings.

    If RPDOs have overlapping indices, rpdo_pointers will point to the
    first RPDO that has that index configured.
    """
    enabled_rpdos = (rpdo for rpdo in self.rpdo.values() if rpdo.enabled)
    for rpdo in enabled_rpdos:
        for obj in rpdo:
            logger.debug('Configured RPDO: {0}'.format(obj.index))
            # setdefault keeps the first mapping seen for each index.
            self.rpdo_pointers.setdefault(obj.index, obj)
|
(self)
|
64,840 |
canopen.profiles.p402
|
_init_tpdo_values
| null |
def _init_tpdo_values(self):
    """Register TPDO callbacks and seed the value cache for mapped objects."""
    for tpdo in self.tpdo.values():
        if not tpdo.enabled:
            continue
        tpdo.add_callback(self.on_TPDOs_update_callback)
        for obj in tpdo:
            logger.debug('Configured TPDO: {0}'.format(obj.index))
            if obj.index in self.tpdo_values:
                # First mapping seen for an index wins.
                continue
            self.tpdo_values[obj.index] = 0
            self.tpdo_pointers[obj.index] = obj
|
(self)
|
64,841 |
canopen.profiles.p402
|
_next_state
| null |
def _next_state(self, target_state):
    """Return the next state to command on the way to *target_state*.

    Returns the target itself when a direct transition exists, otherwise
    an intermediate state from the indirect-transition helper.
    """
    forbidden = ('NOT READY TO SWITCH ON',
                 'FAULT REACTION ACTIVE',
                 'FAULT')
    if target_state in forbidden:
        raise ValueError(
            'Target state {} cannot be entered programmatically'.format(target_state))
    from_state = self.state
    if (from_state, target_state) in State402.TRANSITIONTABLE:
        return target_state
    return State402.next_state_indirect(from_state)
|
(self, target_state)
|
64,842 |
canopen.node.remote
|
add_sdo
|
Add an additional SDO channel.
The SDO client will be added to :attr:`sdo_channels`.
:param int rx_cobid:
COB-ID that the server receives on
:param int tx_cobid:
COB-ID that the server responds with
:return: The SDO client created
:rtype: canopen.sdo.SdoClient
|
def add_sdo(self, rx_cobid, tx_cobid):
    """Add an additional SDO channel.

    The SDO client will be added to :attr:`sdo_channels`.

    :param int rx_cobid:
        COB-ID that the server receives on
    :param int tx_cobid:
        COB-ID that the server responds with
    :return: The SDO client created
    :rtype: canopen.sdo.SdoClient
    """
    client = SdoClient(rx_cobid, tx_cobid, self.object_dictionary)
    self.sdo_channels.append(client)
    # Subscription is only possible once the node is attached to a network;
    # otherwise it happens later (e.g. in associate_network).
    if self.network is not None:
        self.network.subscribe(client.tx_cobid, client.on_response)
    return client
|
(self, rx_cobid, tx_cobid)
|
64,843 |
canopen.node.remote
|
associate_network
| null |
def associate_network(self, network):
    """Attach this node and all of its protocol services to *network*."""
    self.network = network
    # Propagate the network handle to each service object, in the same
    # order as before: sdo, pdo, tpdo, rpdo, nmt.
    for service in (self.sdo, self.pdo, self.tpdo, self.rpdo, self.nmt):
        service.network = network
    for sdo in self.sdo_channels:
        network.subscribe(sdo.tx_cobid, sdo.on_response)
    # Standard COB-IDs: heartbeat (0x700+id), EMCY (0x80+id), NMT command (0).
    network.subscribe(0x700 + self.id, self.nmt.on_heartbeat)
    network.subscribe(0x80 + self.id, self.emcy.on_emcy)
    network.subscribe(0, self.nmt.on_command)
|
(self, network)
|
64,844 |
canopen.profiles.p402
|
check_statusword
|
Report an up-to-date reading of the Statusword (0x6041) from the device.
If the TPDO with the Statusword is configured as periodic, this method blocks
until one was received. Otherwise, it uses the SDO fallback of the ``statusword``
property.
:param timeout: Maximum time in seconds to wait for TPDO reception.
:raises RuntimeError: Occurs when the given timeout expires without a TPDO.
:return: Updated value of the ``statusword`` property.
:rtype: int
|
def check_statusword(self, timeout=None):
    """Report an up-to-date reading of the Statusword (0x6041) from the device.

    If the TPDO with the Statusword is configured as periodic, this method blocks
    until one was received. Otherwise, it uses the SDO fallback of the ``statusword``
    property.

    :param timeout: Maximum time in seconds to wait for TPDO reception.
    :raises RuntimeError: Occurs when the given timeout expires without a TPDO.
    :return: Updated value of the ``statusword`` property.
    :rtype: int
    """
    if 0x6041 in self.tpdo_pointers:
        pdo = self.tpdo_pointers[0x6041].pdo_parent
        if pdo.is_periodic:
            # NOTE(review): "timeout or ..." means an explicit timeout of 0
            # silently falls back to TIMEOUT_CHECK_TPDO -- confirm intended.
            timestamp = pdo.wait_for_reception(timeout or self.TIMEOUT_CHECK_TPDO)
            if timestamp is None:
                raise RuntimeError('Timeout waiting for updated statusword')
    else:
        return self.sdo[0x6041].raw
    return self.statusword
|
(self, timeout=None)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.