index | package | name | docstring | code | signature |
---|---|---|---|---|---|
35,797 | pefile | __pack__ | null | def __pack__(self):
self._pack_bitfield_attributes()
try:
data = super(StructureWithBitfields, self).__pack__()
finally:
self._unpack_bitfield_attributes()
return data
| (self) |
35,800 | pefile | __unpack__ | null | def __unpack__(self, data):
# calling the original routine to deal with special cases/spurious data
# structures
super(StructureWithBitfields, self).__unpack__(data)
self._unpack_bitfield_attributes()
| (self, data) |
35,801 | pefile | _pack_bitfield_attributes | Pack attributes into a compound bitfield | def _pack_bitfield_attributes(self):
"""Pack attributes into a compound bitfield"""
for i in self.__compound_fields__.keys():
cf_name = self.__keys__[i][0]
offst, acc_val = 0, 0
for sf in self.__compound_fields__[i][StructureWithBitfields.CF_SUBFLD_IDX]:
mask = (1 << sf[StructureWithBitfields.BTF_BITCNT_IDX]) - 1
field_val = (
getattr(self, sf[StructureWithBitfields.BTF_NAME_IDX]) & mask
)
acc_val |= field_val << offst
offst += sf[StructureWithBitfields.BTF_BITCNT_IDX]
setattr(self, cf_name, acc_val)
| (self) |
35,802 | pefile | _unpack_bitfield_attributes | Replace compound attributes corresponding to bitfields with separate
sub-fields.
| def _unpack_bitfield_attributes(self):
"""Replace compound attributes corresponding to bitfields with separate
sub-fields.
"""
for i in self.__compound_fields__.keys():
cf_name = self.__keys__[i][0]
cval = getattr(self, cf_name)
delattr(self, cf_name)
offst = 0
for sf in self.__compound_fields__[i][StructureWithBitfields.CF_SUBFLD_IDX]:
mask = (1 << sf[StructureWithBitfields.BTF_BITCNT_IDX]) - 1
mask <<= offst
setattr(
self,
sf[StructureWithBitfields.BTF_NAME_IDX],
(cval & mask) >> offst,
)
offst += sf[StructureWithBitfields.BTF_BITCNT_IDX]
| (self) |
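To make the bit arithmetic of the two helpers above concrete, here is a minimal standalone sketch. It uses two hypothetical sub-fields (3 bits and 5 bits, mirroring a "B:3,Version"/"B:5,Flags" spec) rather than the real pefile constants:

```python
# Pack two sub-fields into one byte, then unpack them again. The loop bodies
# mirror _pack_bitfield_attributes / _unpack_bitfield_attributes above.
subfields = [("Version", 3), ("Flags", 5)]  # (name, bit count)
values = {"Version": 1, "Flags": 0b10100}

# pack: mask each value to its width and OR it in at the running bit offset
offst, acc_val = 0, 0
for name, bitcnt in subfields:
    acc_val |= (values[name] & ((1 << bitcnt) - 1)) << offst
    offst += bitcnt

# unpack: shift the compound value back down and mask off each sub-field
offst, out = 0, {}
for name, bitcnt in subfields:
    out[name] = (acc_val >> offst) & ((1 << bitcnt) - 1)
    offst += bitcnt

assert out == values
```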
35,804 | pefile | dump | null | def dump(self, indentation=0):
tk = self.__keys__
self.__keys__ = self.__keys_ext__
try:
ret = super(StructureWithBitfields, self).dump(indentation)
finally:
self.__keys__ = tk
return ret
| (self, indentation=0) |
35,805 | pefile | dump_dict | null | def dump_dict(self):
tk = self.__keys__
self.__keys__ = self.__keys_ext__
try:
ret = super(StructureWithBitfields, self).dump_dict()
finally:
self.__keys__ = tk
return ret
| (self) |
35,811 | pefile | TlsData | Holds TLS information.
struct: IMAGE_TLS_DIRECTORY structure
| class TlsData(DataContainer):
"""Holds TLS information.
struct: IMAGE_TLS_DIRECTORY structure
"""
| (**args) |
35,813 | pefile | UnicodeStringWrapperPostProcessor | This class attempts to help the process of identifying strings
that might be plain Unicode or Pascal. A list of strings will be
wrapped in it in the hope that the overlaps will help make the
decision about their type. | class UnicodeStringWrapperPostProcessor:
"""This class attempts to help the process of identifying strings
that might be plain Unicode or Pascal. A list of strings will be
wrapped in it in the hope that the overlaps will help make the
decision about their type."""
def __init__(self, pe, rva_ptr):
self.pe = pe
self.rva_ptr = rva_ptr
self.string = None
def get_rva(self):
"""Get the RVA of the string."""
return self.rva_ptr
def __str__(self):
"""Return the escaped UTF-8 representation of the string."""
return self.decode("utf-8", "backslashreplace_")
def decode(self, *args):
if not self.string:
return ""
return self.string.decode(*args)
def invalidate(self):
"""Make this instance None, to express it's no known string type."""
# rebinding the local name has no effect on the instance itself; callers
# must drop their own reference for this to take effect
self = None
def render_pascal_16(self):
try:
self.string = self.pe.get_string_u_at_rva(
self.rva_ptr + 2, max_length=self.get_pascal_16_length()
)
except PEFormatError:
self.pe.get_warnings().append(
"Failed rendering pascal string, "
"attempting to read from RVA 0x{0:x}".format(self.rva_ptr + 2)
)
def get_pascal_16_length(self):
return self.__get_word_value_at_rva(self.rva_ptr)
def __get_word_value_at_rva(self, rva):
try:
data = self.pe.get_data(rva, 2)
except PEFormatError:
return False
if len(data) < 2:
return False
return struct.unpack("<H", data)[0]
def ask_unicode_16(self, next_rva_ptr):
"""The next RVA is taken to be the one immediately following this one.
Such RVA could indicate the natural end of the string and will be checked
to see if there's a Unicode NULL character there.
"""
if self.__get_word_value_at_rva(next_rva_ptr - 2) == 0:
self.length = next_rva_ptr - self.rva_ptr
return True
return False
def render_unicode_16(self):
try:
self.string = self.pe.get_string_u_at_rva(self.rva_ptr)
except PEFormatError:
self.pe.get_warnings().append(
"Failed rendering unicode string, "
"attempting to read from RVA 0x{0:x}".format(self.rva_ptr)
)
| (pe, rva_ptr) |
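For reference, the PASCAL-16 layout probed by get_pascal_16_length()/render_pascal_16() above is a little-endian WORD character count followed by UTF-16LE data. A small sketch over a hypothetical buffer:

```python
import struct

buf = struct.pack("<H", 5) + "hello".encode("utf-16le")  # hypothetical string data

length = struct.unpack("<H", buf[:2])[0]             # what get_pascal_16_length() returns
text = buf[2 : 2 + length * 2].decode("utf-16le")    # what render_pascal_16() decodes
assert (length, text) == (5, "hello")
```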
35,814 | pefile | __get_word_value_at_rva | null | def __get_word_value_at_rva(self, rva):
try:
data = self.pe.get_data(rva, 2)
except PEFormatError:
return False
if len(data) < 2:
return False
return struct.unpack("<H", data)[0]
| (self, rva) |
35,815 | pefile | __init__ | null | def __init__(self, pe, rva_ptr):
self.pe = pe
self.rva_ptr = rva_ptr
self.string = None
| (self, pe, rva_ptr) |
35,816 | pefile | __str__ | Return the escaped UTF-8 representation of the string. | def __str__(self):
"""Return the escaped UTF-8 representation of the string."""
return self.decode("utf-8", "backslashreplace_")
| (self) |
35,817 | pefile | ask_unicode_16 | The next RVA is taken to be the one immediately following this one.
Such RVA could indicate the natural end of the string and will be checked
to see if there's a Unicode NULL character there.
| def ask_unicode_16(self, next_rva_ptr):
"""The next RVA is taken to be the one immediately following this one.
Such RVA could indicate the natural end of the string and will be checked
to see if there's a Unicode NULL character there.
"""
if self.__get_word_value_at_rva(next_rva_ptr - 2) == 0:
self.length = next_rva_ptr - self.rva_ptr
return True
return False
| (self, next_rva_ptr) |
35,818 | pefile | decode | null | def decode(self, *args):
if not self.string:
return ""
return self.string.decode(*args)
| (self, *args) |
35,819 | pefile | get_pascal_16_length | null | def get_pascal_16_length(self):
return self.__get_word_value_at_rva(self.rva_ptr)
| (self) |
35,820 | pefile | get_rva | Get the RVA of the string. | def get_rva(self):
"""Get the RVA of the string."""
return self.rva_ptr
| (self) |
35,821 | pefile | invalidate | Make this instance None, to express it's no known string type. | def invalidate(self):
"""Make this instance None, to express it's no known string type."""
# rebinding the local name has no effect on the instance itself; callers
# must drop their own reference for this to take effect
self = None
| (self) |
35,822 | pefile | render_pascal_16 | null | def render_pascal_16(self):
try:
self.string = self.pe.get_string_u_at_rva(
self.rva_ptr + 2, max_length=self.get_pascal_16_length()
)
except PEFormatError:
self.pe.get_warnings().append(
"Failed rendering pascal string, "
"attempting to read from RVA 0x{0:x}".format(self.rva_ptr + 2)
)
| (self) |
35,823 | pefile | render_unicode_16 | null | def render_unicode_16(self):
try:
self.string = self.pe.get_string_u_at_rva(self.rva_ptr)
except PEFormatError:
self.pe.get_warnings().append(
"Failed rendering unicode string, "
"attempting to read from RVA 0x{0:x}".format(self.rva_ptr)
)
| (self) |
35,824 | pefile | UnwindInfo | Handles the complexities of UNWIND_INFO structure:
* variable number of UNWIND_CODEs
* optional ExceptionHandler and FunctionEntry fields
| class UnwindInfo(StructureWithBitfields):
"""Handles the complexities of UNWIND_INFO structure:
* variable number of UNWIND_CODEs
* optional ExceptionHandler and FunctionEntry fields
"""
def __init__(self, file_offset=0):
super(UnwindInfo, self).__init__(
(
"UNWIND_INFO",
(
"B:3,Version",
"B:5,Flags",
"B,SizeOfProlog",
"B,CountOfCodes",
"B:4,FrameRegister",
"B:4,FrameOffset",
),
),
file_offset=file_offset,
)
self._full_size = super(UnwindInfo, self).sizeof()
self._opt_field_name = None
self._code_info = StructureWithBitfields(
("UNWIND_CODE", ("B,CodeOffset", "B:4,UnwindOp", "B:4,OpInfo")),
file_offset=0,
)
self._chained_entry = None
self._finished_unpacking = False
def unpack_in_stages(self, data):
"""Unpacks the UNWIND_INFO "in two calls", with the first call establishing
a full size of the structure and the second, performing the actual unpacking.
"""
if self._finished_unpacking:
return None
super(UnwindInfo, self).__unpack__(data)
codes_cnt_max = (self.CountOfCodes + 1) & ~1
hdlr_offset = (
super(UnwindInfo, self).sizeof() + codes_cnt_max * self._code_info.sizeof()
)
self._full_size = hdlr_offset + (
0 if self.Flags == 0 else STRUCT_SIZEOF_TYPES["I"]
)
if len(data) < self._full_size:
return None
if self.Version != 1 and self.Version != 2:
return "Unsupported version of UNWIND_INFO at " + hex(self.__file_offset__)
self.UnwindCodes = []
ro = super(UnwindInfo, self).sizeof()
codes_left = self.CountOfCodes
while codes_left > 0:
self._code_info.__unpack__(data[ro : ro + self._code_info.sizeof()])
ucode = PrologEpilogOpsFactory.create(self._code_info)
if ucode is None:
return "Unknown UNWIND_CODE at " + hex(self.__file_offset__ + ro)
len_in_codes = ucode.length_in_code_structures(self._code_info, self)
opc_size = self._code_info.sizeof() * len_in_codes
ucode.initialize(
self._code_info,
data[ro : ro + opc_size],
self,
self.__file_offset__ + ro,
)
ro += opc_size
codes_left -= len_in_codes
self.UnwindCodes.append(ucode)
if self.UNW_FLAG_EHANDLER or self.UNW_FLAG_UHANDLER:
self._opt_field_name = "ExceptionHandler"
if self.UNW_FLAG_CHAININFO:
self._opt_field_name = "FunctionEntry"
if self._opt_field_name != None:
setattr(
self,
self._opt_field_name,
struct.unpack(
"<I", data[hdlr_offset : hdlr_offset + STRUCT_SIZEOF_TYPES["I"]]
)[0],
)
self._finished_unpacking = True
return None
def dump(self, indentation=0):
# Because __keys_ext__ is shared among all the instances with the same
# format string, we have to add and subsequently remove the optional field
# each time.
# It saves space (as compared to keeping a copy of self.__keys_ext__ per
# UnwindInfo instance), but makes our dump() implementation thread-unsafe.
if self._opt_field_name != None:
self.__field_offsets__[self._opt_field_name] = (
self._full_size - STRUCT_SIZEOF_TYPES["I"]
)
self.__keys_ext__.append([self._opt_field_name])
try:
dump = super(UnwindInfo, self).dump(indentation)
finally:
if self._opt_field_name != None:
self.__keys_ext__.pop()
dump.append(
"Flags: "
+ ", ".join([s[0] for s in unwind_info_flags if getattr(self, s[0])])
)
dump.append(
"Unwind codes: "
+ "; ".join([str(c) for c in self.UnwindCodes if c.is_valid()])
)
return dump
def dump_dict(self):
if self._opt_field_name != None:
self.__field_offsets__[self._opt_field_name] = (
self._full_size - STRUCT_SIZEOF_TYPES["I"]
)
self.__keys_ext__.append([self._opt_field_name])
try:
ret = super(UnwindInfo, self).dump_dict()
finally:
if self._opt_field_name != None:
self.__keys_ext__.pop()
return ret
def __setattr__(self, name, val):
if name == "Flags":
set_flags(self, val, unwind_info_flags)
elif "UNW_FLAG_" in name and hasattr(self, name):
if val:
self.__dict__["Flags"] |= UNWIND_INFO_FLAGS[name]
else:
# clear the bit with a mask; XOR would re-set an already-clear flag
self.__dict__["Flags"] &= ~UNWIND_INFO_FLAGS[name]
self.__dict__[name] = val
def sizeof(self):
return self._full_size
def __pack__(self):
data = bytearray(self._full_size)
data[0 : super(UnwindInfo, self).sizeof()] = super(UnwindInfo, self).__pack__()
cur_offset = super(UnwindInfo, self).sizeof()
for uc in self.UnwindCodes:
if cur_offset + uc.struct.sizeof() > self._full_size:
break
data[cur_offset : cur_offset + uc.struct.sizeof()] = uc.struct.__pack__()
cur_offset += uc.struct.sizeof()
if self._opt_field_name != None:
data[
self._full_size - STRUCT_SIZEOF_TYPES["I"] : self._full_size
] = struct.pack("<I", getattr(self, self._opt_field_name))
return data
def get_chained_function_entry(self):
return self._chained_entry
def set_chained_function_entry(self, entry):
if self._chained_entry != None:
raise PEFormatError("Chained function entry cannot be changed")
self._chained_entry = entry
| (file_offset=0) |
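The size computation in unpack_in_stages() can be followed with plain integers. The sketch below uses hypothetical header values and the fixed sizes implied by the format strings above (a 4-byte UNWIND_INFO header, 2-byte UNWIND_CODE entries, and a 4-byte optional DWORD):

```python
UNWIND_INFO_HEADER_SIZE = 4  # B:3 + B:5, B, B, B:4 + B:4
UNWIND_CODE_SIZE = 2         # B, B:4 + B:4
DWORD_SIZE = 4               # STRUCT_SIZEOF_TYPES["I"]

count_of_codes, flags = 3, 1  # hypothetical: three codes, UNW_FLAG_EHANDLER set

codes_cnt_max = (count_of_codes + 1) & ~1  # code array padded to an even count: 3 -> 4
hdlr_offset = UNWIND_INFO_HEADER_SIZE + codes_cnt_max * UNWIND_CODE_SIZE
full_size = hdlr_offset + (0 if flags == 0 else DWORD_SIZE)
assert (hdlr_offset, full_size) == (12, 16)
```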
35,826 | pefile | __init__ | null | def __init__(self, file_offset=0):
super(UnwindInfo, self).__init__(
(
"UNWIND_INFO",
(
"B:3,Version",
"B:5,Flags",
"B,SizeOfProlog",
"B,CountOfCodes",
"B:4,FrameRegister",
"B:4,FrameOffset",
),
),
file_offset=file_offset,
)
self._full_size = super(UnwindInfo, self).sizeof()
self._opt_field_name = None
self._code_info = StructureWithBitfields(
("UNWIND_CODE", ("B,CodeOffset", "B:4,UnwindOp", "B:4,OpInfo")),
file_offset=0,
)
self._chained_entry = None
self._finished_unpacking = False
| (self, file_offset=0) |
35,827 | pefile | __pack__ | null | def __pack__(self):
data = bytearray(self._full_size)
data[0 : super(UnwindInfo, self).sizeof()] = super(UnwindInfo, self).__pack__()
cur_offset = super(UnwindInfo, self).sizeof()
for uc in self.UnwindCodes:
if cur_offset + uc.struct.sizeof() > self._full_size:
break
data[cur_offset : cur_offset + uc.struct.sizeof()] = uc.struct.__pack__()
cur_offset += uc.struct.sizeof()
if self._opt_field_name != None:
data[
self._full_size - STRUCT_SIZEOF_TYPES["I"] : self._full_size
] = struct.pack("<I", getattr(self, self._opt_field_name))
return data
| (self) |
35,829 | pefile | __setattr__ | null | def __setattr__(self, name, val):
if name == "Flags":
set_flags(self, val, unwind_info_flags)
elif "UNW_FLAG_" in name and hasattr(self, name):
if val:
self.__dict__["Flags"] |= UNWIND_INFO_FLAGS[name]
else:
# clear the bit with a mask; XOR would re-set an already-clear flag
self.__dict__["Flags"] &= ~UNWIND_INFO_FLAGS[name]
self.__dict__[name] = val
| (self, name, val) |
35,835 | pefile | dump | null | def dump(self, indentation=0):
# Because __keys_ext__ is shared among all the instances with the same
# format string, we have to add and subsequently remove the optional field
# each time.
# It saves space (as compared to keeping a copy of self.__keys_ext__ per
# UnwindInfo instance), but makes our dump() implementation thread-unsafe.
if self._opt_field_name != None:
self.__field_offsets__[self._opt_field_name] = (
self._full_size - STRUCT_SIZEOF_TYPES["I"]
)
self.__keys_ext__.append([self._opt_field_name])
try:
dump = super(UnwindInfo, self).dump(indentation)
finally:
if self._opt_field_name != None:
self.__keys_ext__.pop()
dump.append(
"Flags: "
+ ", ".join([s[0] for s in unwind_info_flags if getattr(self, s[0])])
)
dump.append(
"Unwind codes: "
+ "; ".join([str(c) for c in self.UnwindCodes if c.is_valid()])
)
return dump
| (self, indentation=0) |
35,836 | pefile | dump_dict | null | def dump_dict(self):
if self._opt_field_name != None:
self.__field_offsets__[self._opt_field_name] = (
self._full_size - STRUCT_SIZEOF_TYPES["I"]
)
self.__keys_ext__.append([self._opt_field_name])
try:
ret = super(UnwindInfo, self).dump_dict()
finally:
if self._opt_field_name != None:
self.__keys_ext__.pop()
return ret
| (self) |
35,837 | pefile | get_chained_function_entry | null | def get_chained_function_entry(self):
return self._chained_entry
| (self) |
35,841 | pefile | set_chained_function_entry | null | def set_chained_function_entry(self, entry):
if self._chained_entry != None:
raise PEFormatError("Chained function entry cannot be changed")
self._chained_entry = entry
| (self, entry) |
35,843 | pefile | sizeof | null | def sizeof(self):
return self._full_size
| (self) |
35,844 | pefile | unpack_in_stages | Unpacks the UNWIND_INFO "in two calls", with the first call establishing
the full size of the structure and the second performing the actual unpacking.
| def unpack_in_stages(self, data):
"""Unpacks the UNWIND_INFO "in two calls", with the first call establishing
the full size of the structure and the second performing the actual unpacking.
"""
if self._finished_unpacking:
return None
super(UnwindInfo, self).__unpack__(data)
codes_cnt_max = (self.CountOfCodes + 1) & ~1
hdlr_offset = (
super(UnwindInfo, self).sizeof() + codes_cnt_max * self._code_info.sizeof()
)
self._full_size = hdlr_offset + (
0 if self.Flags == 0 else STRUCT_SIZEOF_TYPES["I"]
)
if len(data) < self._full_size:
return None
if self.Version != 1 and self.Version != 2:
return "Unsupported version of UNWIND_INFO at " + hex(self.__file_offset__)
self.UnwindCodes = []
ro = super(UnwindInfo, self).sizeof()
codes_left = self.CountOfCodes
while codes_left > 0:
self._code_info.__unpack__(data[ro : ro + self._code_info.sizeof()])
ucode = PrologEpilogOpsFactory.create(self._code_info)
if ucode is None:
return "Unknown UNWIND_CODE at " + hex(self.__file_offset__ + ro)
len_in_codes = ucode.length_in_code_structures(self._code_info, self)
opc_size = self._code_info.sizeof() * len_in_codes
ucode.initialize(
self._code_info,
data[ro : ro + opc_size],
self,
self.__file_offset__ + ro,
)
ro += opc_size
codes_left -= len_in_codes
self.UnwindCodes.append(ucode)
if self.UNW_FLAG_EHANDLER or self.UNW_FLAG_UHANDLER:
self._opt_field_name = "ExceptionHandler"
if self.UNW_FLAG_CHAININFO:
self._opt_field_name = "FunctionEntry"
if self._opt_field_name != None:
setattr(
self,
self._opt_field_name,
struct.unpack(
"<I", data[hdlr_offset : hdlr_offset + STRUCT_SIZEOF_TYPES["I"]]
)[0],
)
self._finished_unpacking = True
return None
| (self, data) |
35,845 | pefile | b | null | def b(x):
if isinstance(x, bytes):
return x
elif isinstance(x, bytearray):
return bytes(x)
else:
return codecs.encode(x, "cp1252")
| (x) |
35,849 | pefile | count_zeroes | null | def count_zeroes(data):
return data.count(0)
| (data) |
35,851 | pefile | get_sublang_name_for_lang | null | def get_sublang_name_for_lang(lang_value, sublang_value):
lang_name = LANG.get(lang_value, "*unknown*")
for sublang_name in SUBLANG.get(sublang_value, []):
# if the main language is a substring of sublang's name, then
# return that
if lang_name in sublang_name:
return sublang_name
# otherwise return the first sublang name
return SUBLANG.get(sublang_value, ["*unknown*"])[0]
| (lang_value, sublang_value) |
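The selection rule above is a substring heuristic: a sublang name that embeds the main language's name wins over the generic first entry. A standalone sketch with hypothetical table entries:

```python
def pick_sublang(lang_name, sublang_names):
    # same rule as get_sublang_name_for_lang: prefer the sublang whose name
    # contains the main language's name, else fall back to the first entry
    for name in sublang_names:
        if lang_name in name:
            return name
    return sublang_names[0] if sublang_names else "*unknown*"

# "LANG_ENGLISH" is a substring of "SUBLANG_ENGLISH_US", so that entry wins
assert pick_sublang("LANG_ENGLISH", ["SUBLANG_DEFAULT", "SUBLANG_ENGLISH_US"]) == "SUBLANG_ENGLISH_US"
```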
35,852 | pefile | is_valid_dos_filename | null | def is_valid_dos_filename(s):
if s is None or not isinstance(s, (str, bytes, bytearray)):
return False
# Allow path separators as import names can contain directories.
allowed = allowed_filename + b"\\/"
return all(c in allowed for c in set(s))
| (s) |
35,854 | pefile | lru_cache | null | def lru_cache(maxsize=128, typed=False, copy=False):
if not copy:
return functools.lru_cache(maxsize, typed)
def decorator(f):
cached_func = functools.lru_cache(maxsize, typed)(f)
@functools.wraps(f)
def wrapper(*args, **kwargs):
# return copymod.deepcopy(cached_func(*args, **kwargs))
return copymod.copy(cached_func(*args, **kwargs))
return wrapper
return decorator
| (maxsize=128, typed=False, copy=False) |
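A quick illustration of why the copy=True flag exists, assuming lru_cache is importable from the pefile module as defined above: without the shallow copy, every caller would share (and could mutate) the single cached object.

```python
from pefile import lru_cache  # module-level decorator defined above

@lru_cache(maxsize=None, copy=True)
def build():
    return [1, 2, 3]

a = build()
a.append(4)                  # mutates only the caller's shallow copy
assert build() == [1, 2, 3]  # the cached original is untouched
```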
35,855 | pefile | main | null | def main():
import sys
usage = """\
pefile.py <filename>
pefile.py exports <filename>"""
if not sys.argv[1:]:
print(usage)
elif sys.argv[1] == "exports":
if not sys.argv[2:]:
sys.exit("error: <filename> required")
pe = PE(sys.argv[2])
for exp in pe.DIRECTORY_ENTRY_EXPORT.symbols:
print(
hex(pe.OPTIONAL_HEADER.ImageBase + exp.address), exp.name, exp.ordinal
)
else:
print(PE(sys.argv[1]).dump_info())
| () |
35,860 | pefile | parse_strings | null | def parse_strings(data, counter, l):
i = 0
error_count = 0
while i < len(data):
data_slice = data[i : i + 2]
if len(data_slice) < 2:
break
len_ = struct.unpack("<h", data_slice)[0]
i += 2
if len_ != 0 and 0 <= len_ * 2 <= len(data):
try:
l[counter] = b(data[i : i + len_ * 2]).decode("utf-16le")
except UnicodeDecodeError:
error_count += 1
if error_count >= 3:
break
i += len_ * 2
counter += 1
| (data, counter, l) |
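parse_strings() walks a buffer of length-prefixed UTF-16LE entries: a signed little-endian WORD character count, then that many code units. A sketch over a hypothetical two-string buffer, assuming the module-level function is importable from pefile:

```python
import struct
from pefile import parse_strings

def entry(s):
    # signed LE WORD character count followed by the UTF-16LE payload
    return struct.pack("<h", len(s)) + s.encode("utf-16le")

data = entry("hello") + entry("world")
out = {}
parse_strings(data, 0, out)
assert out == {0: "hello", 1: "world"}
```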
35,861 | pefile | power_of_two | null | def power_of_two(val):
return val != 0 and (val & (val - 1)) == 0
| (val) |
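The check relies on the classic bit trick: `val & (val - 1)` clears the lowest set bit, so the result is zero exactly when at most one bit was set, and the `val != 0` guard excludes zero.

```python
from pefile import power_of_two

assert power_of_two(0x1000) is True    # a single bit set
assert power_of_two(0x1001) is False   # 0x1001 & 0x1000 != 0
assert power_of_two(0) is False        # zero is excluded explicitly
```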
35,862 | pefile | retrieve_flags | Read the flags from a dictionary and return them in a usable form.
Will return a list of (flag, value) for all flags in "flag_dict"
matching the filter "flag_filter".
| def retrieve_flags(flag_dict, flag_filter):
"""Read the flags from a dictionary and return them in a usable form.
Will return a list of (flag, value) for all flags in "flag_dict"
matching the filter "flag_filter".
"""
return [
(flag, flag_dict[flag])
for flag in flag_dict.keys()
if isinstance(flag, (str, bytes)) and flag.startswith(flag_filter)
]
| (flag_dict, flag_filter) |
35,863 | pefile | set_flags | Will process the flags and set attributes in the object accordingly.
The object "obj" will gain attributes named after the flags provided in
"flags" and valued True/False, matching the results of applying each
flag value from "flags" to flag_field.
| def set_flags(obj, flag_field, flags):
"""Will process the flags and set attributes in the object accordingly.
The object "obj" will gain attributes named after the flags provided in
"flags" and valued True/False, matching the results of applying each
flag value from "flags" to flag_field.
"""
for flag, value in flags:
if value & flag_field:
obj.__dict__[flag] = True
else:
obj.__dict__[flag] = False
| (obj, flag_field, flags) |
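The two helpers are meant to be used together: retrieve_flags() pulls (name, value) pairs out of a constants dictionary (string keys only, so the reverse int-to-name entries of a two-way dict are skipped), and set_flags() turns those pairs into boolean attributes. A toy example with a hypothetical flag dictionary:

```python
from pefile import retrieve_flags, set_flags

FLAGS = {
    "IMAGE_FILE_DLL": 0x2000,
    "IMAGE_FILE_SYSTEM": 0x1000,
    0x2000: "IMAGE_FILE_DLL",  # reverse entry, ignored by retrieve_flags
}

pairs = retrieve_flags(FLAGS, "IMAGE_FILE_")

class Characteristics:
    pass

obj = Characteristics()
set_flags(obj, 0x2000, pairs)  # only the DLL bit is set in the field
assert obj.IMAGE_FILE_DLL is True
assert obj.IMAGE_FILE_SYSTEM is False
```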
35,868 | pefile | two_way_dict | null | def two_way_dict(pairs):
return dict([(e[1], e[0]) for e in pairs] + pairs)
| (pairs) |
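two_way_dict() builds the constant tables used throughout pefile: the same mapping answers both name-to-value and value-to-name lookups. For example, with two machine-type pairs:

```python
from pefile import two_way_dict

pairs = [("IMAGE_FILE_MACHINE_I386", 0x14C), ("IMAGE_FILE_MACHINE_AMD64", 0x8664)]
MACHINE_TYPE = two_way_dict(pairs)

assert MACHINE_TYPE["IMAGE_FILE_MACHINE_I386"] == 0x14C  # name -> value
assert MACHINE_TYPE[0x14C] == "IMAGE_FILE_MACHINE_I386"  # value -> name
```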
35,870 | text_generation.client | AsyncClient | Asynchronous Client to make calls to a text-generation-inference instance
Example:
```python
>>> from text_generation import AsyncClient
>>> client = AsyncClient("https://api-inference.huggingface.co/models/bigscience/bloomz")
>>> response = await client.generate("Why is the sky blue?")
>>> response.generated_text
' Rayleigh scattering'
>>> result = ""
>>> async for response in client.generate_stream("Why is the sky blue?"):
>>> if not response.token.special:
>>> result += response.token.text
>>> result
' Rayleigh scattering'
```
| class AsyncClient:
"""Asynchronous Client to make calls to a text-generation-inference instance
Example:
```python
>>> from text_generation import AsyncClient
>>> client = AsyncClient("https://api-inference.huggingface.co/models/bigscience/bloomz")
>>> response = await client.generate("Why is the sky blue?")
>>> response.generated_text
' Rayleigh scattering'
>>> result = ""
>>> async for response in client.generate_stream("Why is the sky blue?"):
>>> if not response.token.special:
>>> result += response.token.text
>>> result
' Rayleigh scattering'
```
"""
def __init__(
self,
base_url: str,
headers: Optional[Dict[str, str]] = None,
cookies: Optional[Dict[str, str]] = None,
timeout: int = 10,
):
"""
Args:
base_url (`str`):
text-generation-inference instance base url
headers (`Optional[Dict[str, str]]`):
Additional headers
cookies (`Optional[Dict[str, str]]`):
Cookies to include in the requests
timeout (`int`):
Timeout in seconds
"""
self.base_url = base_url
self.headers = headers
self.cookies = cookies
self.timeout = ClientTimeout(timeout)
async def chat(
self,
messages: List[Message],
repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
logit_bias: Optional[List[float]] = None,
logprobs: Optional[bool] = None,
top_logprobs: Optional[int] = None,
max_tokens: Optional[int] = None,
n: Optional[int] = None,
presence_penalty: Optional[float] = None,
stream: bool = False,
seed: Optional[int] = None,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
tools: Optional[List[Tool]] = None,
tool_choice: Optional[str] = None,
) -> Union[ChatComplete, AsyncIterator[ChatCompletionChunk]]:
"""
Given a list of messages, generate a response asynchronously
Args:
messages (`List[Message]`):
List of messages
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
logit_bias (`List[float]`):
Adjust the likelihood of specified tokens
logprobs (`bool`):
Include log probabilities in the response
top_logprobs (`int`):
Include the `n` most likely tokens at each step
max_tokens (`int`):
Maximum number of generated tokens
n (`int`):
Generate `n` completions
presence_penalty (`float`):
The parameter for presence penalty. 0.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
stream (`bool`):
Stream the response
seed (`int`):
Random sampling seed
temperature (`float`):
The value used to modulate the logits distribution.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation
tools (`List[Tool]`):
List of tools to use
tool_choice (`str`):
The tool to use
"""
request = ChatRequest(
model="tgi",
messages=messages,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
logprobs=logprobs,
top_logprobs=top_logprobs,
max_tokens=max_tokens,
n=n,
presence_penalty=presence_penalty,
stream=stream,
seed=seed,
temperature=temperature,
top_p=top_p,
tools=tools,
tool_choice=tool_choice,
)
if not stream:
return await self._chat_single_response(request)
else:
return self._chat_stream_response(request)
async def _chat_single_response(self, request):
async with ClientSession(
headers=self.headers, cookies=self.cookies, timeout=self.timeout
) as session:
async with session.post(
f"{self.base_url}/v1/chat/completions", json=request.dict()
) as resp:
payload = await resp.json()
if resp.status != 200:
raise parse_error(resp.status, payload)
return ChatComplete(**payload)
async def _chat_stream_response(self, request):
async with ClientSession(
headers=self.headers, cookies=self.cookies, timeout=self.timeout
) as session:
async with session.post(
f"{self.base_url}/v1/chat/completions", json=request.dict()
) as resp:
async for byte_payload in resp.content:
if byte_payload == b"\n":
continue
payload = byte_payload.decode("utf-8")
if payload.startswith("data:"):
json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
try:
response = ChatCompletionChunk(**json_payload)
yield response
except ValidationError:
raise parse_error(resp.status, json_payload)
async def generate(
self,
prompt: str,
do_sample: bool = False,
max_new_tokens: int = 20,
best_of: Optional[int] = None,
repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: bool = False,
decoder_input_details: bool = False,
top_n_tokens: Optional[int] = None,
grammar: Optional[Grammar] = None,
) -> Response:
"""
Given a prompt, generate the following text asynchronously
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
best_of (`int`):
Generate best_of sequences and return the one with the highest token logprobs
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
The value used to modulate the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
decoder_input_details (`bool`):
Return the decoder input token logprobs and ids
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
grammar (`Grammar`):
Whether to use a grammar for the generation and the grammar to use. Grammars will constrain the generation
of the text to match a regular expression or JSON schema.
Returns:
Response: generated response
"""
# Validate parameters
parameters = Parameters(
best_of=best_of,
details=True,
decoder_input_details=decoder_input_details,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
temperature=temperature,
top_k=top_k,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
top_n_tokens=top_n_tokens,
grammar=grammar,
)
request = Request(inputs=prompt, stream=False, parameters=parameters)
async with ClientSession(
headers=self.headers, cookies=self.cookies, timeout=self.timeout
) as session:
async with session.post(self.base_url, json=request.dict()) as resp:
payload = await resp.json()
if resp.status != 200:
raise parse_error(resp.status, payload)
return Response(**payload[0])
async def generate_stream(
self,
prompt: str,
do_sample: bool = False,
max_new_tokens: int = 20,
repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: bool = False,
top_n_tokens: Optional[int] = None,
grammar: Optional[Grammar] = None,
) -> AsyncIterator[StreamResponse]:
"""
Given a prompt, generate the following stream of tokens asynchronously
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
The value used to modulate the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
grammar (`Grammar`):
Whether to use a grammar for the generation and the grammar to use. Grammars will constrain the generation
of the text to match a regular expression or JSON schema.
Returns:
AsyncIterator[StreamResponse]: stream of generated tokens
"""
# Validate parameters
parameters = Parameters(
best_of=None,
details=True,
decoder_input_details=False,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
temperature=temperature,
top_k=top_k,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
top_n_tokens=top_n_tokens,
grammar=grammar,
)
request = Request(inputs=prompt, stream=True, parameters=parameters)
async with ClientSession(
headers=self.headers, cookies=self.cookies, timeout=self.timeout
) as session:
async with session.post(self.base_url, json=request.dict()) as resp:
if resp.status != 200:
raise parse_error(resp.status, await resp.json())
# Parse ServerSentEvents
async for byte_payload in resp.content:
# Skip line
if byte_payload == b"\n":
continue
payload = byte_payload.decode("utf-8")
# Event data
if payload.startswith("data:"):
# Decode payload
json_payload = json.loads(payload.lstrip("data:").rstrip("/n"))
# Parse payload
try:
response = StreamResponse(**json_payload)
except ValidationError:
# If we failed to parse the payload, then it is an error payload
raise parse_error(resp.status, json_payload)
yield response
| (base_url: str, headers: Optional[Dict[str, str]] = None, cookies: Optional[Dict[str, str]] = None, timeout: int = 10) |
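The class docstring shows generate(); for completeness, here is a hedged sketch of the chat() entry point. The endpoint URL and message content are placeholders, and the response shape assumed here mirrors the OpenAI-style ChatComplete type:

```python
import asyncio
from text_generation import AsyncClient
from text_generation.types import Message

async def demo():
    client = AsyncClient("http://localhost:8080")  # placeholder endpoint
    complete = await client.chat(
        messages=[Message(role="user", content="What is the capital of France?")],
        max_tokens=32,
    )
    print(complete.choices[0].message.content)

asyncio.run(demo())
```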
35,871 | text_generation.client | __init__ |
Args:
base_url (`str`):
text-generation-inference instance base url
headers (`Optional[Dict[str, str]]`):
Additional headers
cookies (`Optional[Dict[str, str]]`):
Cookies to include in the requests
timeout (`int`):
Timeout in seconds
| def __init__(
self,
base_url: str,
headers: Optional[Dict[str, str]] = None,
cookies: Optional[Dict[str, str]] = None,
timeout: int = 10,
):
"""
Args:
base_url (`str`):
text-generation-inference instance base url
headers (`Optional[Dict[str, str]]`):
Additional headers
cookies (`Optional[Dict[str, str]]`):
Cookies to include in the requests
timeout (`int`):
Timeout in seconds
"""
self.base_url = base_url
self.headers = headers
self.cookies = cookies
self.timeout = ClientTimeout(timeout)
| (self, base_url: str, headers: Optional[Dict[str, str]] = None, cookies: Optional[Dict[str, str]] = None, timeout: int = 10) |
35,872 | text_generation.client | _chat_single_response | null | async def _chat_single_response(self, request):
async with ClientSession(
headers=self.headers, cookies=self.cookies, timeout=self.timeout
) as session:
async with session.post(
f"{self.base_url}/v1/chat/completions", json=request.dict()
) as resp:
payload = await resp.json()
if resp.status != 200:
raise parse_error(resp.status, payload)
return ChatComplete(**payload)
| (self, request) |
35,874 | text_generation.client | chat |
Given a list of messages, generate a response asynchronously
Args:
messages (`List[Message]`):
List of messages
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
logit_bias (`List[float]`):
Adjust the likelihood of specified tokens
logprobs (`bool`):
Include log probabilities in the response
top_logprobs (`int`):
Include the `n` most likely tokens at each step
max_tokens (`int`):
Maximum number of generated tokens
n (`int`):
Generate `n` completions
presence_penalty (`float`):
The parameter for presence penalty. 0.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
stream (`bool`):
Stream the response
seed (`int`):
Random sampling seed
temperature (`float`):
The value used to modulate the logits distribution.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation
tools (`List[Tool]`):
List of tools to use
tool_choice (`str`):
The tool to use
| def __init__(
self,
base_url: str,
headers: Optional[Dict[str, str]] = None,
cookies: Optional[Dict[str, str]] = None,
timeout: int = 10,
):
"""
Args:
base_url (`str`):
text-generation-inference instance base url
headers (`Optional[Dict[str, str]]`):
Additional headers
cookies (`Optional[Dict[str, str]]`):
Cookies to include in the requests
timeout (`int`):
Timeout in seconds
"""
self.base_url = base_url
self.headers = headers
self.cookies = cookies
self.timeout = ClientTimeout(timeout)
| (self, messages: List[text_generation.types.Message], repetition_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, logit_bias: Optional[List[float]] = None, logprobs: Optional[bool] = None, top_logprobs: Optional[int] = None, max_tokens: Optional[int] = None, n: Optional[int] = None, presence_penalty: Optional[float] = None, stream: bool = False, seed: Optional[int] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, tools: Optional[List[text_generation.types.Tool]] = None, tool_choice: Optional[str] = None) -> Union[text_generation.types.ChatComplete, AsyncIterator[text_generation.types.ChatCompletionChunk]] |
35,875 | text_generation.client | generate |
Given a prompt, generate the following text asynchronously
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
best_of (`int`):
Generate best_of sequences and return the one with the highest token logprobs
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
The value used to modulate the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
decoder_input_details (`bool`):
Return the decoder input token logprobs and ids
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
grammar (`Grammar`):
Whether to use a grammar for the generation and the grammar to use. Grammars will constrain the generation
of the text to match a regular expression or JSON schema.
Returns:
Response: generated response
| def __init__(
self,
base_url: str,
headers: Optional[Dict[str, str]] = None,
cookies: Optional[Dict[str, str]] = None,
timeout: int = 10,
):
"""
Args:
base_url (`str`):
text-generation-inference instance base url
headers (`Optional[Dict[str, str]]`):
Additional headers
cookies (`Optional[Dict[str, str]]`):
Cookies to include in the requests
timeout (`int`):
Timeout in seconds
"""
self.base_url = base_url
self.headers = headers
self.cookies = cookies
self.timeout = ClientTimeout(timeout)
| (self, prompt: str, do_sample: bool = False, max_new_tokens: int = 20, best_of: Optional[int] = None, repetition_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, return_full_text: bool = False, seed: Optional[int] = None, stop_sequences: Optional[List[str]] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, truncate: Optional[int] = None, typical_p: Optional[float] = None, watermark: bool = False, decoder_input_details: bool = False, top_n_tokens: Optional[int] = None, grammar: Optional[text_generation.types.Grammar] = None) -> text_generation.types.Response |
35,876 | text_generation.client | generate_stream |
Given a prompt, generate the following stream of tokens asynchronously
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
The value used to modulate the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
grammar (`Grammar`):
Whether to use a grammar for the generation and the grammar to use. Grammars will constrain the generation
of the text to match a regular expression or JSON schema.
Returns:
AsyncIterator[StreamResponse]: stream of generated tokens
| def __init__(
self,
base_url: str,
headers: Optional[Dict[str, str]] = None,
cookies: Optional[Dict[str, str]] = None,
timeout: int = 10,
):
"""
Args:
base_url (`str`):
text-generation-inference instance base url
headers (`Optional[Dict[str, str]]`):
Additional headers
cookies (`Optional[Dict[str, str]]`):
Cookies to include in the requests
timeout (`int`):
Timeout in seconds
"""
self.base_url = base_url
self.headers = headers
self.cookies = cookies
self.timeout = ClientTimeout(timeout)
| (self, prompt: str, do_sample: bool = False, max_new_tokens: int = 20, repetition_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, return_full_text: bool = False, seed: Optional[int] = None, stop_sequences: Optional[List[str]] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, truncate: Optional[int] = None, typical_p: Optional[float] = None, watermark: bool = False, top_n_tokens: Optional[int] = None, grammar: Optional[text_generation.types.Grammar] = None) -> AsyncIterator[text_generation.types.StreamResponse] |
35,877 | text_generation.client | Client | Client to make calls to a text-generation-inference instance
Example:
```python
>>> from text_generation import Client
>>> client = Client("https://api-inference.huggingface.co/models/bigscience/bloomz")
>>> client.generate("Why is the sky blue?").generated_text
' Rayleigh scattering'
>>> result = ""
>>> for response in client.generate_stream("Why is the sky blue?"):
>>> if not response.token.special:
>>> result += response.token.text
>>> result
' Rayleigh scattering'
```
| class Client:
"""Client to make calls to a text-generation-inference instance
Example:
```python
>>> from text_generation import Client
>>> client = Client("https://api-inference.huggingface.co/models/bigscience/bloomz")
>>> client.generate("Why is the sky blue?").generated_text
' Rayleigh scattering'
>>> result = ""
>>> for response in client.generate_stream("Why is the sky blue?"):
>>> if not response.token.special:
>>> result += response.token.text
>>> result
' Rayleigh scattering'
```
"""
def __init__(
self,
base_url: str,
headers: Optional[Dict[str, str]] = None,
cookies: Optional[Dict[str, str]] = None,
timeout: int = 10,
):
"""
Args:
base_url (`str`):
text-generation-inference instance base url
headers (`Optional[Dict[str, str]]`):
Additional headers
cookies (`Optional[Dict[str, str]]`):
Cookies to include in the requests
timeout (`int`):
Timeout in seconds
"""
self.base_url = base_url
self.headers = headers
self.cookies = cookies
self.timeout = timeout
def chat(
self,
messages: List[Message],
repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
logit_bias: Optional[List[float]] = None,
logprobs: Optional[bool] = None,
top_logprobs: Optional[int] = None,
max_tokens: Optional[int] = None,
n: Optional[int] = None,
presence_penalty: Optional[float] = None,
stream: bool = False,
seed: Optional[int] = None,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
tools: Optional[List[Tool]] = None,
tool_choice: Optional[str] = None,
):
"""
Given a list of messages, generate a response
Args:
messages (`List[Message]`):
List of messages
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
logit_bias (`List[float]`):
Adjust the likelihood of specified tokens
logprobs (`bool`):
Include log probabilities in the response
top_logprobs (`int`):
Include the `n` most likely tokens at each step
max_tokens (`int`):
Maximum number of generated tokens
n (`int`):
Generate `n` completions
presence_penalty (`float`):
The parameter for presence penalty. 0.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
stream (`bool`):
Stream the response
seed (`int`):
Random sampling seed
temperature (`float`):
The value used to modulate the logits distribution.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation
tools (`List[Tool]`):
List of tools to use
tool_choice (`str`):
The tool to use
"""
request = ChatRequest(
model="tgi",
messages=messages,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
logprobs=logprobs,
top_logprobs=top_logprobs,
max_tokens=max_tokens,
n=n,
presence_penalty=presence_penalty,
stream=stream,
seed=seed,
temperature=temperature,
top_p=top_p,
tools=tools,
tool_choice=tool_choice,
)
if not stream:
resp = requests.post(
f"{self.base_url}/v1/chat/completions",
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
)
payload = resp.json()
if resp.status_code != 200:
raise parse_error(resp.status_code, payload)
return ChatComplete(**payload)
else:
return self._chat_stream_response(request)
def _chat_stream_response(self, request):
resp = requests.post(
f"{self.base_url}/v1/chat/completions",
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
stream=True,
)
# iterate over the SSE stream
for byte_payload in resp.iter_lines():
if byte_payload == b"\n":
continue
payload = byte_payload.decode("utf-8")
if payload.startswith("data:"):
json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
try:
response = ChatCompletionChunk(**json_payload)
yield response
except ValidationError:
raise parse_error(resp.status_code, json_payload)
def generate(
self,
prompt: str,
do_sample: bool = False,
max_new_tokens: int = 20,
best_of: Optional[int] = None,
repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: bool = False,
decoder_input_details: bool = False,
top_n_tokens: Optional[int] = None,
grammar: Optional[Grammar] = None,
) -> Response:
"""
Given a prompt, generate the following text
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
best_of (`int`):
Generate best_of sequences and return the one with the highest token logprobs
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
The value used to modulate the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
decoder_input_details (`bool`):
Return the decoder input token logprobs and ids
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
grammar (`Grammar`):
Whether to use a grammar for the generation and the grammar to use. Grammars will constrain the generation
of the text to match a regular expression or JSON schema.
Returns:
Response: generated response
"""
# Validate parameters
parameters = Parameters(
best_of=best_of,
details=True,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
temperature=temperature,
top_k=top_k,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
decoder_input_details=decoder_input_details,
top_n_tokens=top_n_tokens,
grammar=grammar,
)
request = Request(inputs=prompt, stream=False, parameters=parameters)
resp = requests.post(
self.base_url,
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
)
payload = resp.json()
if resp.status_code != 200:
raise parse_error(resp.status_code, payload)
return Response(**payload[0])
def generate_stream(
self,
prompt: str,
do_sample: bool = False,
max_new_tokens: int = 20,
repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: bool = False,
top_n_tokens: Optional[int] = None,
grammar: Optional[Grammar] = None,
) -> Iterator[StreamResponse]:
"""
Given a prompt, generate the following stream of tokens
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
The value used to modulate the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
grammar (`Grammar`):
Whether to use a grammar for the generation and the grammar to use. Grammars will constrain the generation
of the text to match a regular expression or JSON schema.
Returns:
Iterator[StreamResponse]: stream of generated tokens
"""
# Validate parameters
parameters = Parameters(
best_of=None,
details=True,
decoder_input_details=False,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
temperature=temperature,
top_k=top_k,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
top_n_tokens=top_n_tokens,
grammar=grammar,
)
request = Request(inputs=prompt, stream=True, parameters=parameters)
resp = requests.post(
self.base_url,
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
stream=True,
)
if resp.status_code != 200:
raise parse_error(resp.status_code, resp.json())
# Parse ServerSentEvents
for byte_payload in resp.iter_lines():
# Skip line
if byte_payload == b"\n":
continue
payload = byte_payload.decode("utf-8")
# Event data
if payload.startswith("data:"):
# Decode payload
json_payload = json.loads(payload.lstrip("data:").rstrip("/n"))
# Parse payload
try:
response = StreamResponse(**json_payload)
except ValidationError:
# If we failed to parse the payload, then it is an error payload
raise parse_error(resp.status_code, json_payload)
yield response
| (base_url: str, headers: Optional[Dict[str, str]] = None, cookies: Optional[Dict[str, str]] = None, timeout: int = 10) |
35,878 | text_generation.client | __init__ |
Args:
base_url (`str`):
text-generation-inference instance base url
headers (`Optional[Dict[str, str]]`):
Additional headers
cookies (`Optional[Dict[str, str]]`):
Cookies to include in the requests
timeout (`int`):
Timeout in seconds
| def __init__(
self,
base_url: str,
headers: Optional[Dict[str, str]] = None,
cookies: Optional[Dict[str, str]] = None,
timeout: int = 10,
):
"""
Args:
base_url (`str`):
text-generation-inference instance base url
headers (`Optional[Dict[str, str]]`):
Additional headers
cookies (`Optional[Dict[str, str]]`):
Cookies to include in the requests
timeout (`int`):
Timeout in seconds
"""
self.base_url = base_url
self.headers = headers
self.cookies = cookies
self.timeout = timeout
| (self, base_url: str, headers: Optional[Dict[str, str]] = None, cookies: Optional[Dict[str, str]] = None, timeout: int = 10) |
35,879 | text_generation.client | _chat_stream_response | null | def _chat_stream_response(self, request):
resp = requests.post(
f"{self.base_url}/v1/chat/completions",
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
stream=True,
)
# iterate over the SSE stream
for byte_payload in resp.iter_lines():
if byte_payload == b"\n":
continue
payload = byte_payload.decode("utf-8")
if payload.startswith("data:"):
json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
try:
response = ChatCompletionChunk(**json_payload)
yield response
except ValidationError:
raise parse_error(resp.status_code, json_payload)
| (self, request) |
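How one server-sent-event line becomes a chunk, traced with a hypothetical raw line of the kind resp.iter_lines() yields. Note that str.lstrip("data:") strips a character set {d, a, t, :}, not the literal prefix; it works here only because the JSON body begins with a space or "{", neither of which is in that set:

```python
import json

byte_payload = b'data: {"id":"chat-1","object":"chat.completion.chunk"}'  # hypothetical line
payload = byte_payload.decode("utf-8")
if payload.startswith("data:"):
    json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
    assert json_payload["id"] == "chat-1"
```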
35,880 | text_generation.client | chat |
Given a list of messages, generate a response
Args:
messages (`List[Message]`):
List of messages
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
logit_bias (`List[float]`):
Adjust the likelihood of specified tokens
logprobs (`bool`):
Include log probabilities in the response
top_logprobs (`int`):
Include the `n` most likely tokens at each step
max_tokens (`int`):
Maximum number of generated tokens
n (`int`):
Generate `n` completions
presence_penalty (`float`):
The parameter for presence penalty. 0.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
stream (`bool`):
Stream the response
seed (`int`):
Random sampling seed
temperature (`float`):
The value used to modulate the logits distribution.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation
tools (`List[Tool]`):
List of tools to use
tool_choice (`str`):
The tool to use
| def chat(
self,
messages: List[Message],
repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
logit_bias: Optional[List[float]] = None,
logprobs: Optional[bool] = None,
top_logprobs: Optional[int] = None,
max_tokens: Optional[int] = None,
n: Optional[int] = None,
presence_penalty: Optional[float] = None,
stream: bool = False,
seed: Optional[int] = None,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
tools: Optional[List[Tool]] = None,
tool_choice: Optional[str] = None,
):
"""
Given a list of messages, generate a response
Args:
messages (`List[Message]`):
List of messages
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
logit_bias (`List[float]`):
Adjust the likelihood of specified tokens
logprobs (`bool`):
Include log probabilities in the response
top_logprobs (`int`):
Include the `n` most likely tokens at each step
max_tokens (`int`):
Maximum number of generated tokens
n (`int`):
Generate `n` completions
presence_penalty (`float`):
The parameter for presence penalty. 0.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
stream (`bool`):
Stream the response
seed (`int`):
Random sampling seed
temperature (`float`):
The value used to modulate the logits distribution.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation
tools (`List[Tool]`):
List of tools to use
tool_choice (`str`):
The tool to use
"""
request = ChatRequest(
model="tgi",
messages=messages,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
logprobs=logprobs,
top_logprobs=top_logprobs,
max_tokens=max_tokens,
n=n,
presence_penalty=presence_penalty,
stream=stream,
seed=seed,
temperature=temperature,
top_p=top_p,
tools=tools,
tool_choice=tool_choice,
)
if not stream:
resp = requests.post(
f"{self.base_url}/v1/chat/completions",
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
)
payload = resp.json()
if resp.status_code != 200:
raise parse_error(resp.status_code, payload)
return ChatComplete(**payload)
else:
return self._chat_stream_response(request)
| (self, messages: List[text_generation.types.Message], repetition_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, logit_bias: Optional[List[float]] = None, logprobs: Optional[bool] = None, top_logprobs: Optional[int] = None, max_tokens: Optional[int] = None, n: Optional[int] = None, presence_penalty: Optional[float] = None, stream: bool = False, seed: Optional[int] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, tools: Optional[List[text_generation.types.Tool]] = None, tool_choice: Optional[str] = None) |
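A minimal usage sketch for the synchronous `chat` method above. The endpoint URL is a placeholder for any running text-generation-inference server, and the OpenAI-style `choices[0].message.content` access path on the returned `ChatComplete` is an assumption inferred from the schema, not confirmed by this row:
```python
from text_generation import Client
from text_generation.types import Message

client = Client("http://127.0.0.1:8080")  # hypothetical local TGI endpoint

complete = client.chat(
    messages=[Message(role="user", content="Why is the sky blue?")],
    max_tokens=64,
)
# Assumes ChatComplete follows the OpenAI-style response schema
print(complete.choices[0].message.content)
```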
35,881 | text_generation.client | generate |
Given a prompt, generate the following text
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
best_of (`int`):
Generate best_of sequences and return the one with the highest token logprobs
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
The value used to module the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
decoder_input_details (`bool`):
Return the decoder input token logprobs and ids
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
grammar (`Grammar`):
The grammar to use for generation, if any. Grammars constrain the generated text to match a regular
expression or JSON schema.
Returns:
Response: generated response
| def generate(
self,
prompt: str,
do_sample: bool = False,
max_new_tokens: int = 20,
best_of: Optional[int] = None,
repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: bool = False,
decoder_input_details: bool = False,
top_n_tokens: Optional[int] = None,
grammar: Optional[Grammar] = None,
) -> Response:
"""
Given a prompt, generate the following text
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
best_of (`int`):
Generate best_of sequences and return the one with the highest token logprobs
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
The value used to module the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
decoder_input_details (`bool`):
Return the decoder input token logprobs and ids
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
grammar (`Grammar`):
The grammar to use for generation, if any. Grammars constrain the generated text to match a regular
expression or JSON schema.
Returns:
Response: generated response
"""
# Validate parameters
parameters = Parameters(
best_of=best_of,
details=True,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
temperature=temperature,
top_k=top_k,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
decoder_input_details=decoder_input_details,
top_n_tokens=top_n_tokens,
grammar=grammar,
)
request = Request(inputs=prompt, stream=False, parameters=parameters)
resp = requests.post(
self.base_url,
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
)
payload = resp.json()
if resp.status_code != 200:
raise parse_error(resp.status_code, payload)
return Response(**payload[0])
| (self, prompt: str, do_sample: bool = False, max_new_tokens: int = 20, best_of: Optional[int] = None, repetition_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, return_full_text: bool = False, seed: Optional[int] = None, stop_sequences: Optional[List[str]] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, truncate: Optional[int] = None, typical_p: Optional[float] = None, watermark: bool = False, decoder_input_details: bool = False, top_n_tokens: Optional[int] = None, grammar: Optional[text_generation.types.Grammar] = None) -> text_generation.types.Response |
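A minimal sketch of a non-streaming `generate` call; the endpoint URL is a placeholder, while the `generated_text` attribute matches the usage shown in the `InferenceAPIClient` docstring further down:
```python
from text_generation import Client

client = Client("http://127.0.0.1:8080")  # hypothetical local TGI endpoint
response = client.generate("Why is the sky blue?", max_new_tokens=32)
print(response.generated_text)
```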
35,882 | text_generation.client | generate_stream |
Given a prompt, generate the following stream of tokens
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
The value used to module the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
grammar (`Grammar`):
The grammar to use for generation, if any. Grammars constrain the generated text to match a regular
expression or JSON schema.
Returns:
Iterator[StreamResponse]: stream of generated tokens
| def generate_stream(
self,
prompt: str,
do_sample: bool = False,
max_new_tokens: int = 20,
repetition_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
return_full_text: bool = False,
seed: Optional[int] = None,
stop_sequences: Optional[List[str]] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: bool = False,
top_n_tokens: Optional[int] = None,
grammar: Optional[Grammar] = None,
) -> Iterator[StreamResponse]:
"""
Given a prompt, generate the following stream of tokens
Args:
prompt (`str`):
Input text
do_sample (`bool`):
Activate logits sampling
max_new_tokens (`int`):
Maximum number of generated tokens
repetition_penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
frequency_penalty (`float`):
The parameter for frequency penalty. 0.0 means no penalty
Penalize new tokens based on their existing frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
return_full_text (`bool`):
Whether to prepend the prompt to the generated text
seed (`int`):
Random sampling seed
stop_sequences (`List[str]`):
Stop generating tokens if a member of `stop_sequences` is generated
temperature (`float`):
The value used to module the logits distribution.
top_k (`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`):
Truncate inputs tokens to the given size
typical_p (`float`):
Typical Decoding mass
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
watermark (`bool`):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
top_n_tokens (`int`):
Return the `n` most likely tokens at each step
grammar (`Grammar`):
The grammar to use for generation, if any. Grammars constrain the generated text to match a regular
expression or JSON schema.
Returns:
Iterator[StreamResponse]: stream of generated tokens
"""
# Validate parameters
parameters = Parameters(
best_of=None,
details=True,
decoder_input_details=False,
do_sample=do_sample,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
frequency_penalty=frequency_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop_sequences if stop_sequences is not None else [],
temperature=temperature,
top_k=top_k,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
top_n_tokens=top_n_tokens,
grammar=grammar,
)
request = Request(inputs=prompt, stream=True, parameters=parameters)
resp = requests.post(
self.base_url,
json=request.dict(),
headers=self.headers,
cookies=self.cookies,
timeout=self.timeout,
stream=True,
)
if resp.status_code != 200:
raise parse_error(resp.status_code, resp.json())
# Parse ServerSentEvents
for byte_payload in resp.iter_lines():
# Skip line
if byte_payload == b"\n":
continue
payload = byte_payload.decode("utf-8")
# Event data
if payload.startswith("data:"):
# Decode payload
json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
# Parse payload
try:
response = StreamResponse(**json_payload)
except ValidationError:
# If we failed to parse the payload, then it is an error payload
raise parse_error(resp.status_code, json_payload)
yield response
| (self, prompt: str, do_sample: bool = False, max_new_tokens: int = 20, repetition_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, return_full_text: bool = False, seed: Optional[int] = None, stop_sequences: Optional[List[str]] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, truncate: Optional[int] = None, typical_p: Optional[float] = None, watermark: bool = False, top_n_tokens: Optional[int] = None, grammar: Optional[text_generation.types.Grammar] = None) -> Iterator[text_generation.types.StreamResponse] |
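A sketch of consuming the token stream, mirroring the pattern used in the `InferenceAPIClient` docstrings below; only the endpoint URL is assumed:
```python
from text_generation import Client

client = Client("http://127.0.0.1:8080")  # hypothetical local TGI endpoint
text = ""
for response in client.generate_stream("Why is the sky blue?", max_new_tokens=32):
    if not response.token.special:  # skip special tokens such as EOS
        text += response.token.text
print(text)
```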
35,883 | text_generation.inference_api | InferenceAPIAsyncClient | Asynchronous Client to make calls to the HuggingFace Inference API.
Only supports a subset of the available text-generation or text2text-generation models that are served using
text-generation-inference
Example:
```python
>>> from text_generation import InferenceAPIAsyncClient
>>> client = InferenceAPIAsyncClient("bigscience/bloomz")
>>> response = await client.generate("Why is the sky blue?")
>>> response.generated_text
' Rayleigh scattering'
>>> result = ""
>>> async for response in client.generate_stream("Why is the sky blue?"):
>>> if not response.token.special:
>>> result += response.token.text
>>> result
' Rayleigh scattering'
```
| class InferenceAPIAsyncClient(AsyncClient):
"""Aynschronous Client to make calls to the HuggingFace Inference API.
Only supports a subset of the available text-generation or text2text-generation models that are served using
text-generation-inference
Example:
```python
>>> from text_generation import InferenceAPIAsyncClient
>>> client = InferenceAPIAsyncClient("bigscience/bloomz")
>>> response = await client.generate("Why is the sky blue?")
>>> response.generated_text
' Rayleigh scattering'
>>> result = ""
>>> async for response in client.generate_stream("Why is the sky blue?"):
>>> if not response.token.special:
>>> result += response.token.text
>>> result
' Rayleigh scattering'
```
"""
def __init__(self, repo_id: str, token: Optional[str] = None, timeout: int = 10):
"""
Init headers and API information
Args:
repo_id (`str`):
Id of repository (e.g. `bigscience/bloom`).
token (`str`, `optional`):
The API token to use as HTTP bearer authorization. This is not
the authentication token. You can find the token in
https://huggingface.co/settings/token. Alternatively, you can
find both your organizations and personal API tokens using
`HfApi().whoami(token)`.
timeout (`int`):
Timeout in seconds
"""
headers = build_hf_headers(
token=token, library_name="text-generation", library_version=__version__
)
# Text Generation Inference client only supports a subset of the available hub models
if not check_model_support(repo_id, headers):
raise NotSupportedError(repo_id)
base_url = f"{INFERENCE_ENDPOINT}/models/{repo_id}"
super(InferenceAPIAsyncClient, self).__init__(
base_url, headers=headers, timeout=timeout
)
| (repo_id: str, token: Optional[str] = None, timeout: int = 10) |
35,884 | text_generation.inference_api | __init__ |
Init headers and API information
Args:
repo_id (`str`):
Id of repository (e.g. `bigscience/bloom`).
token (`str`, `optional`):
The API token to use as HTTP bearer authorization. This is not
the authentication token. You can find the token in
https://huggingface.co/settings/token. Alternatively, you can
find both your organizations and personal API tokens using
`HfApi().whoami(token)`.
timeout (`int`):
Timeout in seconds
| def __init__(self, repo_id: str, token: Optional[str] = None, timeout: int = 10):
"""
Init headers and API information
Args:
repo_id (`str`):
Id of repository (e.g. `bigscience/bloom`).
token (`str`, `optional`):
The API token to use as HTTP bearer authorization. This is not
the authentication token. You can find the token in
https://huggingface.co/settings/token. Alternatively, you can
find both your organizations and personal API tokens using
`HfApi().whoami(token)`.
timeout (`int`):
Timeout in seconds
"""
headers = build_hf_headers(
token=token, library_name="text-generation", library_version=__version__
)
# Text Generation Inference client only supports a subset of the available hub models
if not check_model_support(repo_id, headers):
raise NotSupportedError(repo_id)
base_url = f"{INFERENCE_ENDPOINT}/models/{repo_id}"
super(InferenceAPIAsyncClient, self).__init__(
base_url, headers=headers, timeout=timeout
)
| (self, repo_id: str, token: Optional[str] = None, timeout: int = 10) |
35,890 | text_generation.inference_api | InferenceAPIClient | Client to make calls to the HuggingFace Inference API.
Only supports a subset of the available text-generation or text2text-generation models that are served using
text-generation-inference
Example:
```python
>>> from text_generation import InferenceAPIClient
>>> client = InferenceAPIClient("bigscience/bloomz")
>>> client.generate("Why is the sky blue?").generated_text
' Rayleigh scattering'
>>> result = ""
>>> for response in client.generate_stream("Why is the sky blue?"):
>>> if not response.token.special:
>>> result += response.token.text
>>> result
' Rayleigh scattering'
```
| class InferenceAPIClient(Client):
"""Client to make calls to the HuggingFace Inference API.
Only supports a subset of the available text-generation or text2text-generation models that are served using
text-generation-inference
Example:
```python
>>> from text_generation import InferenceAPIClient
>>> client = InferenceAPIClient("bigscience/bloomz")
>>> client.generate("Why is the sky blue?").generated_text
' Rayleigh scattering'
>>> result = ""
>>> for response in client.generate_stream("Why is the sky blue?"):
>>> if not response.token.special:
>>> result += response.token.text
>>> result
' Rayleigh scattering'
```
"""
def __init__(self, repo_id: str, token: Optional[str] = None, timeout: int = 10):
"""
Init headers and API information
Args:
repo_id (`str`):
Id of repository (e.g. `bigscience/bloom`).
token (`str`, `optional`):
The API token to use as HTTP bearer authorization. This is not
the authentication token. You can find the token in
https://huggingface.co/settings/token. Alternatively, you can
find both your organizations and personal API tokens using
`HfApi().whoami(token)`.
timeout (`int`):
Timeout in seconds
"""
headers = build_hf_headers(
token=token, library_name="text-generation", library_version=__version__
)
# Text Generation Inference client only supports a subset of the available hub models
if not check_model_support(repo_id, headers):
raise NotSupportedError(repo_id)
base_url = f"{INFERENCE_ENDPOINT}/models/{repo_id}"
super(InferenceAPIClient, self).__init__(
base_url, headers=headers, timeout=timeout
)
| (repo_id: str, token: Optional[str] = None, timeout: int = 10) |
35,891 | text_generation.inference_api | __init__ |
Init headers and API information
Args:
repo_id (`str`):
Id of repository (e.g. `bigscience/bloom`).
token (`str`, `optional`):
The API token to use as HTTP bearer authorization. This is not
the authentication token. You can find the token in
https://huggingface.co/settings/token. Alternatively, you can
find both your organizations and personal API tokens using
`HfApi().whoami(token)`.
timeout (`int`):
Timeout in seconds
| def __init__(self, repo_id: str, token: Optional[str] = None, timeout: int = 10):
"""
Init headers and API information
Args:
repo_id (`str`):
Id of repository (e.g. `bigscience/bloom`).
token (`str`, `optional`):
The API token to use as HTTP bearer authorization. This is not
the authentication token. You can find the token in
https://huggingface.co/settings/token. Alternatively, you can
find both your organizations and personal API tokens using
`HfApi().whoami(token)`.
timeout (`int`):
Timeout in seconds
"""
headers = build_hf_headers(
token=token, library_name="text-generation", library_version=__version__
)
# Text Generation Inference client only supports a subset of the available hub models
if not check_model_support(repo_id, headers):
raise NotSupportedError(repo_id)
base_url = f"{INFERENCE_ENDPOINT}/models/{repo_id}"
super(InferenceAPIClient, self).__init__(
base_url, headers=headers, timeout=timeout
)
| (self, repo_id: str, token: Optional[str] = None, timeout: int = 10) |
35,901 | col_spanish.encrypt | create_dictionary |
Receives two lists and creates a dictionary; both lists should have
the same length.
Params:
------
key_list : list : list that will work as the keys inside the dictionary
value_list : list : list that contains the values that we want to call with the keys
Usage:
------
>>> from col_spanish import create_dictionary
>>> key_list = ['k','h','l','p']
>>> value_list = ['8','d','g','a']
>>> create_dictionary(key_list, value_list)
| def create_dictionary(key_list:list, value_list:list):
"""
Receives two lists and creates a dictionary; both lists should have
the same length.
Params:
------
key_list : list : list that will work as the keys inside the dictionary
value_list : list : list that contains the values that we want to call with the keys
Usage:
------
>>> from col_spanish import create_dictionary
>>> key_list = ['k','h','l','p']
>>> value_list = ['8','d','g','a']
>>> create_dictionary(key_list, value_list)
"""
# check that the two lists have the same length and are not empty
if key_list is None or len(key_list) < 1:
    return 'key_list param should be a non-empty list'
if value_list is None or len(value_list) < 1:
    return 'value_list param should be a non-empty list'
if len(key_list) != len(value_list):
    return 'key_list and value_list should have the same length'
key_dictionary = {}
for idx, character in enumerate(value_list):
dict_append = {f'{key_list[idx]}':f'{character}'}
key_dictionary.update(dict_append)
return key_dictionary
| (key_list: list, value_list: list) |
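For concreteness, the docstring example above yields the following mapping (read directly off the loop in the code):
```python
>>> from col_spanish import create_dictionary
>>> create_dictionary(['k', 'h', 'l', 'p'], ['8', 'd', 'g', 'a'])
{'k': '8', 'h': 'd', 'l': 'g', 'p': 'a'}
```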
35,902 | col_spanish.col_spanish | del_accent |
Function to delete accents from the text
Params:
------
text: str : text to be clean
Usage:
------
>>> from col_spanish import del_accent
>>> text = 'hola, éste es un texto que tenía acentos: más, canción'
>>> del_accent(text)
| def del_accent(text):
"""
Function to delete accents from the text
Params:
------
text: str : text to be clean
Usage:
------
>>> from col_spanish import del_accent
>>> text = 'hola, éste es un texto que tenía acentos: más, canción'
>>> del_accent(text)
"""
new_text = ""
words = text.split()
for w in words:
wl = w.lower()
# delete accent mark
for accent, new_value in delete_accent_dict.items():
wl = wl.replace(accent, new_value)
# add the words without punctuation marks to the final text
new_text += f"{wl} "
return new_text[:-1]
| (text) |
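A worked example of `del_accent`. The function also lower-cases each word; the exact accent mappings live in the package's internal `delete_accent_dict`, so coverage of the characters below is an assumption about that dict:
```python
>>> from col_spanish import del_accent
>>> del_accent('Más rápido aún')
'mas rapido aun'
```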
35,903 | col_spanish.col_spanish | del_punctuation |
Function to delete all the punctuation marks that are inside a text
Params:
------
text: str : text to be clean
Usage:
------
>>> from col_spanish import del_punctuation
>>> text = 'hola, este es un texto! que necesita? remover signos de puntuacion!!'
>>> del_punctuation(text)
| def del_punctuation(text):
"""
Function to delete all the punctuation marks that are inside a text
Params:
------
text: str : text to be clean
Usage:
------
>>> from col_spanish import del_punctuation
>>> text = 'hola, este es un texto! que necesita? remover signos de puntuacion!!'
>>> del_punctuation(text)
"""
new_text = ""
words = text.split()
for wl in words:
# delete punctuation marks to keep only words
for punctuation, new_value in delete_punctuation_dict.items():
wl = wl.replace(punctuation, new_value)
# add the words without punctuation marks to the final text
new_text += f"{wl} "
return new_text[:-1]
| (text) |
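A worked example of `del_punctuation`, assuming the internal `delete_punctuation_dict` maps `,`, `!` and `?` to empty strings; unlike `del_accent`, this function does not lower-case the text:
```python
>>> from col_spanish import del_punctuation
>>> del_punctuation('hola, este es un texto! que necesita? remover signos!!')
'hola este es un texto que necesita remover signos'
```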
35,905 | col_spanish.encrypt | generate_pass |
This function creates an encrypted version of any password that you have.
Params:
------
* option : int : 0. only letters, 1. only numbers and 2. any character
* sentence : str : a sentence that will be the key to lock and unlock the password
* password : str : password to encrypt
* encrypt : int : 0. encrypt, 1. decrypt
Usage:
------
>>> from col_spanish import generate_pass
>>> generate_pass(2, 'sentence', 'password', 0)
| def generate_pass(option:int, sentence:str, password:str, encrypt:int):
"""
This function creates an encrypted version of any password that you have.
Params:
------
* option : int : 0. only letters, 1. only numbers and 2. any character
* sentence : str : a sentence that will be the key to lock and unlock the password
* password : str : password to encrypt
* encrypt : int : 0. encrypt, 1. decrypt
Usage:
------
>>> from col_spanish import generate_pass
>>> generate_pass(2, 'sentence', 'password', 0)
"""
# checking that the option is one of the available options
if int(option) in [0,1,2]:
pass
else:
return "the option is not inside the valide options. 0,1,2"
# checking that there is a sentence to create the password encryption
if len(sentence) < 1:
return "you need to write a sentence to be able to encrypt the password"
# checking that there is a password to encrypt
if len(password) < 1:
return "you need to write a password to encrypt"
# checking that the encrypt param is one of the valid options
if int(encrypt) in [0,1]:
pass
else:
return "the encrypt value is not inside the valid options. 0,1"
# delete blank spaces
sentence = sentence.replace(' ','')
password = password.replace(' ','')
# checking that the sentence and password only use characters from the chosen alphabet
for character in sentence:
    if character not in dict_lists[f'{option}']:
        return "the option is not adequate for the sentence you wrote"
for character in password:
    if character not in dict_lists[f'{option}']:
        return "the option is not adequate for the password"
# delete duplicates inside the sentence to create the alphabet for the encryption
set_sentence = set_characters(sentence)
# create the new alphabet
for character in dict_lists[f'{option}']:
if character not in set_sentence:
set_sentence.append(character)
# create the encrypted or decrypted password
if int(encrypt) == 0:
# create a dictionary with the keys to encrypt the sentence/password
key_dictionary = create_dictionary(dict_lists[f'{option}'], set_sentence)
encrypted_password = ''
for character in password:
encrypted_password = f'{encrypted_password}{key_dictionary[character]}'
return encrypted_password
if int(encrypt) == 1:
# create a dictionary with the keys to encrypt the sentence/password
key_dictionary = create_dictionary(set_sentence,dict_lists[f'{option}'])
decrypted_password = ''
for character in password:
decrypted_password = f'{decrypted_password}{key_dictionary[character]}'
return decrypted_password
| (option: int, sentence: str, password: str, encrypt: int) |
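A sketch of the intended round trip: encrypting with `encrypt=0` and decrypting with `encrypt=1` under the same sentence should recover the original password, since the two calls build inverse substitution dictionaries over the same derived alphabet (assuming every character appears in the chosen `dict_lists` alphabet):
```python
from col_spanish import generate_pass

secret = generate_pass(2, 'a key sentence', 'password', 0)  # encrypt
plain = generate_pass(2, 'a key sentence', secret, 1)       # decrypt
assert plain == 'password'
```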
35,906 | col_spanish.encrypt | set_characters |
Delete duplicate characters inside the sentence, keeping the order in which
they first appear (unlike the arbitrary ordering of set()).
Params:
------
*sentence : string : sentence where the duplicates will be eliminated
Usage:
------
>>> from col_spanish import set_characters
>>> set_characters('sentence')
| def set_characters(sentence):
"""
Delete duplicate characters inside the sentence, keeping the order in which
they first appear (unlike the arbitrary ordering of set()).
Params:
------
*sentence : string : sentence where the duplicates will be eliminated
Usage:
------
>>> from col_spanish import set_characters
>>> set_characters('sentence')
"""
sentence = sentence.replace(" ","")
sentence_list = []
for character in sentence:
if character not in sentence_list:
sentence_list.append(character)
return sentence_list
| (sentence) |
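A worked example showing the order-preserving de-duplication:
```python
>>> from col_spanish import set_characters
>>> set_characters('sentence')
['s', 'e', 'n', 't', 'c']
```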
35,907 | cmarkgfm.cmark | Options | null | class Options(object):
CMARK_OPT_DEFAULT = _cmark.lib.CMARK_OPT_DEFAULT
CMARK_OPT_SOURCEPOS = _cmark.lib.CMARK_OPT_SOURCEPOS
CMARK_OPT_HARDBREAKS = _cmark.lib.CMARK_OPT_HARDBREAKS
CMARK_OPT_UNSAFE = _cmark.lib.CMARK_OPT_UNSAFE
CMARK_OPT_NOBREAKS = _cmark.lib.CMARK_OPT_NOBREAKS
CMARK_OPT_NORMALIZE = _cmark.lib.CMARK_OPT_NORMALIZE
CMARK_OPT_VALIDATE_UTF8 = _cmark.lib.CMARK_OPT_VALIDATE_UTF8
CMARK_OPT_SMART = _cmark.lib.CMARK_OPT_SMART
CMARK_OPT_GITHUB_PRE_LANG = _cmark.lib.CMARK_OPT_GITHUB_PRE_LANG
CMARK_OPT_LIBERAL_HTML_TAG = _cmark.lib.CMARK_OPT_LIBERAL_HTML_TAG
CMARK_OPT_FOOTNOTES = _cmark.lib.CMARK_OPT_FOOTNOTES
CMARK_OPT_STRIKETHROUGH_DOUBLE_TILDE = (
_cmark.lib.CMARK_OPT_STRIKETHROUGH_DOUBLE_TILDE)
CMARK_OPT_TABLE_PREFER_STYLE_ATTRIBUTES = (
_cmark.lib.CMARK_OPT_TABLE_PREFER_STYLE_ATTRIBUTES)
| () |
35,910 | cmarkgfm.cmark | github_flavored_markdown_to_html | Render the given GitHub-flavored Markdown to HTML.
This is a small wrapper over :func:`markdown_to_html_with_extensions`.
The GitHub extensions and the option CMARK_OPT_GITHUB_PRE_LANG are applied.
Args:
text (str): The Markdown text to render to HTML.
options (int): The cmark options.
Returns:
str: The HTML rendered from Markdown.
| def github_flavored_markdown_to_html(text, options=0):
"""Render the given GitHub-flavored Makrdown to HTML.
This is a small wrapper over :func:`markdown_to_html_with_extensions`.
The GitHub extensions and the option CMARK_OPT_GITHUB_PRE_LANG are applied.
Args:
text (str): The Markdown text to render to HTML.
options (int): The cmark options.
Returns:
str: The HTML rendered from Markdown.
"""
# Force some more options; see
# <https://github.com/theacodes/cmarkgfm/issues/37#issuecomment-852925142>
options = (
options |
Options.CMARK_OPT_GITHUB_PRE_LANG
)
return markdown_to_html_with_extensions(
text, options=options,
extensions=[
'table', 'autolink', 'tagfilter', 'strikethrough', 'tasklist'
])
| (text, options=0) |
35,911 | cmarkgfm.cmark | markdown_to_html | Render the given Markdown text to HTML.
This is a direct interface to ``cmark_markdown_to_html``.
Args:
text (str): The Markdown text to render to HTML.
options (int): The cmark options.
Returns:
str: The HTML rendered from Markdown.
| def markdown_to_html(text, options=0):
"""Render the given Markdown text to HTML.
This is a direct interface to ``cmark_markdown_to_html``.
Args:
text (str): The Markdown text to render to HTML.
options (int): The cmark options.
Returns:
str: The HTML rendered from Markdown.
"""
encoded_text = text.encode('utf-8')
raw_result = _cmark.lib.cmark_markdown_to_html(
encoded_text, len(encoded_text), options)
return _cmark.ffi.string(raw_result).decode('utf-8')
| (text, options=0) |
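A minimal usage sketch of `markdown_to_html`; the rendered output shown in the comment is the usual cmark rendering:
```python
from cmarkgfm.cmark import markdown_to_html

html = markdown_to_html("Hello, **world**!")
print(html)  # e.g. '<p>Hello, <strong>world</strong>!</p>'
```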
35,912 | cmarkgfm.cmark | markdown_to_html_with_extensions | Render the given Markdown text to HTML, using extensions.
This is a high-level wrapper over the various functions needed to enable
extensions, attach them to a parser, and render HTML.
Args:
text (str): The Markdown text to render to HTML.
options (int): The cmark options.
extensions (Sequence[str]): The list of extension names to use.
Returns:
str: The HTML rendered from Markdown.
| def markdown_to_html_with_extensions(text, options=0, extensions=None):
"""Render the given Markdown text to HTML, using extensions.
This is a high-level wrapper over the various functions needed to enable
extensions, attach them to a parser, and render HTML.
Args:
text (str): The Markdown text to render to HTML.
options (int): The cmark options.
extensions (Sequence[str]): The list of extension names to use.
Returns:
str: The HTML rendered from Markdown.
"""
if extensions is None:
extensions = []
core_extensions_ensure_registered()
cmark_extensions = []
for extension_name in extensions:
extension = find_syntax_extension(extension_name)
if extension is None:
raise ValueError('Unknown extension {}'.format(extension_name))
cmark_extensions.append(extension)
parser = parser_new(options=options)
try:
for extension in cmark_extensions:
parser_attach_syntax_extension(parser, extension)
parser_feed(parser, text)
root = parser_finish(parser)
if _cmark.lib.cmark_node_get_type(root) == _cmark.lib.CMARK_NODE_NONE:
raise ValueError('Error parsing markdown!')
extensions_ll = parser_get_syntax_extensions(parser)
output = render_html(root, options=options, extensions=extensions_ll)
finally:
parser_free(parser)
return output
| (text, options=0, extensions=None) |
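A sketch enabling one of the registered GFM extensions by name; per the code above, an unknown extension name raises `ValueError`:
```python
from cmarkgfm.cmark import markdown_to_html_with_extensions

html = markdown_to_html_with_extensions(
    "~~mistake~~ fixed",
    extensions=['strikethrough'],
)
print(html)  # the tildes are rendered as a <del> element
```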
35,913 | wheel_filename | InvalidFilenameError | Raised when an invalid wheel filename is encountered | class InvalidFilenameError(ValueError):
"""Raised when an invalid wheel filename is encountered"""
filename: str
def __init__(self, filename: str) -> None:
#: The invalid filename
self.filename = filename
def __str__(self) -> str:
return "Invalid wheel filename: " + repr(self.filename)
| (filename: str) -> None |
35,914 | wheel_filename | __init__ | null | def __init__(self, filename: str) -> None:
#: The invalid filename
self.filename = filename
| (self, filename: str) -> NoneType |
35,915 | wheel_filename | __str__ | null | def __str__(self) -> str:
return "Invalid wheel filename: " + repr(self.filename)
| (self) -> str |
35,916 | typing | NamedTuple | Typed version of namedtuple.
Usage in Python versions >= 3.6::
class Employee(NamedTuple):
name: str
id: int
This is equivalent to::
Employee = collections.namedtuple('Employee', ['name', 'id'])
The resulting class has an extra __annotations__ attribute, giving a
dict that maps field names to types. (The field names are also in
the _fields attribute, which is part of the namedtuple API.)
Alternative equivalent keyword syntax is also accepted::
Employee = NamedTuple('Employee', name=str, id=int)
In Python versions <= 3.5 use::
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
| def NamedTuple(typename, fields=None, /, **kwargs):
"""Typed version of namedtuple.
Usage in Python versions >= 3.6::
class Employee(NamedTuple):
name: str
id: int
This is equivalent to::
Employee = collections.namedtuple('Employee', ['name', 'id'])
The resulting class has an extra __annotations__ attribute, giving a
dict that maps field names to types. (The field names are also in
the _fields attribute, which is part of the namedtuple API.)
Alternative equivalent keyword syntax is also accepted::
Employee = NamedTuple('Employee', name=str, id=int)
In Python versions <= 3.5 use::
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
"""
if fields is None:
fields = kwargs.items()
elif kwargs:
raise TypeError("Either list of fields or keywords"
" can be provided to NamedTuple, not both")
try:
module = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
module = None
return _make_nmtuple(typename, fields, module=module)
| (typename, fields=None, /, **kwargs) |
35,917 | wheel_filename | ParsedWheelFilename | ParsedWheelFilename(project, version, build, python_tags, abi_tags, platform_tags) | class ParsedWheelFilename(NamedTuple):
project: str
version: str
build: Optional[str]
python_tags: List[str]
abi_tags: List[str]
platform_tags: List[str]
def __str__(self) -> str:
if self.build:
fmt = "{0.project}-{0.version}-{0.build}-{1}-{2}-{3}.whl"
else:
fmt = "{0.project}-{0.version}-{1}-{2}-{3}.whl"
return fmt.format(
self,
".".join(self.python_tags),
".".join(self.abi_tags),
".".join(self.platform_tags),
)
def tag_triples(self) -> Iterator[str]:
"""
Returns a generator of all simple tag triples formed from the tags in
the filename
"""
for py in self.python_tags:
for abi in self.abi_tags:
for plat in self.platform_tags:
yield "-".join([py, abi, plat])
| (project: str, version: str, build: Optional[str], python_tags: List[str], abi_tags: List[str], platform_tags: List[str]) |
35,919 | namedtuple_ParsedWheelFilename | __new__ | Create new instance of ParsedWheelFilename(project, version, build, python_tags, abi_tags, platform_tags) | from builtins import function
| (_cls, project: str, version: str, build: Optional[str], python_tags: List[str], abi_tags: List[str], platform_tags: List[str]) |
35,921 | wheel_filename | __str__ | null | def __str__(self) -> str:
if self.build:
fmt = "{0.project}-{0.version}-{0.build}-{1}-{2}-{3}.whl"
else:
fmt = "{0.project}-{0.version}-{1}-{2}-{3}.whl"
return fmt.format(
self,
".".join(self.python_tags),
".".join(self.abi_tags),
".".join(self.platform_tags),
)
| (self) -> str |
35,923 | collections | _replace | Return a new ParsedWheelFilename object replacing specified fields with new values | def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = _sys.intern(str(typename))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f'_{index}'
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
f'identifiers: {name!r}')
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
f'keyword: {name!r}')
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
f'{name!r}')
if name in seen:
raise ValueError(f'Encountered duplicate field name: {name!r}')
seen.add(name)
field_defaults = {}
if defaults is not None:
defaults = tuple(defaults)
if len(defaults) > len(field_names):
raise TypeError('Got more default values than field names')
field_defaults = dict(reversed(list(zip(reversed(field_names),
reversed(defaults)))))
# Variables used in the methods and docstrings
field_names = tuple(map(_sys.intern, field_names))
num_fields = len(field_names)
arg_list = ', '.join(field_names)
if num_fields == 1:
arg_list += ','
repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
tuple_new = tuple.__new__
_dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip
# Create all the named tuple methods to be added to the class namespace
namespace = {
'_tuple_new': tuple_new,
'__builtins__': {},
'__name__': f'namedtuple_{typename}',
}
code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
__new__ = eval(code, namespace)
__new__.__name__ = '__new__'
__new__.__doc__ = f'Create new instance of {typename}({arg_list})'
if defaults is not None:
__new__.__defaults__ = defaults
@classmethod
def _make(cls, iterable):
result = tuple_new(cls, iterable)
if _len(result) != num_fields:
raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
return result
_make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
'or iterable')
def _replace(self, /, **kwds):
result = self._make(_map(kwds.pop, field_names, self))
if kwds:
raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
return result
_replace.__doc__ = (f'Return a new {typename} object replacing specified '
'fields with new values')
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + repr_fmt % self
def _asdict(self):
'Return a new dict which maps field names to their values.'
return _dict(_zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return _tuple(self)
# Modify function metadata to help with introspection and debugging
for method in (
__new__,
_make.__func__,
_replace,
__repr__,
_asdict,
__getnewargs__,
):
method.__qualname__ = f'{typename}.{method.__name__}'
# Build-up the class namespace dictionary
# and use type() to build the result class
class_namespace = {
'__doc__': f'{typename}({arg_list})',
'__slots__': (),
'_fields': field_names,
'_field_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
'_replace': _replace,
'__repr__': __repr__,
'_asdict': _asdict,
'__getnewargs__': __getnewargs__,
'__match_args__': field_names,
}
for index, name in enumerate(field_names):
doc = _sys.intern(f'Alias for field number {index}')
class_namespace[name] = _tuplegetter(index, doc)
result = type(typename, (tuple,), class_namespace)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
| (self, /, **kwds) |
35,924 | wheel_filename | tag_triples |
Returns a generator of all simple tag triples formed from the tags in
the filename
| def tag_triples(self) -> Iterator[str]:
"""
Returns a generator of all simple tag triples formed from the tags in
the filename
"""
for py in self.python_tags:
for abi in self.abi_tags:
for plat in self.platform_tags:
yield "-".join([py, abi, plat])
| (self) -> Iterator[str] |
35,926 | wheel_filename | parse_wheel_filename |
Parse a wheel filename into its components
:param path filename: a wheel path or filename
:rtype: ParsedWheelFilename
:raises InvalidFilenameError: if the filename is invalid
| def parse_wheel_filename(
filename: Union[str, bytes, "os.PathLike[str]", "os.PathLike[bytes]"]
) -> ParsedWheelFilename:
"""
Parse a wheel filename into its components
:param path filename: a wheel path or filename
:rtype: ParsedWheelFilename
:raises InvalidFilenameError: if the filename is invalid
"""
basename = os.path.basename(os.fsdecode(filename))
m = WHEEL_FILENAME_CRGX.fullmatch(basename)
if not m:
raise InvalidFilenameError(basename)
return ParsedWheelFilename(
project=m.group("project"),
version=m.group("version"),
build=m.group("build"),
python_tags=m.group("python_tags").split("."),
abi_tags=m.group("abi_tags").split("."),
platform_tags=m.group("platform_tags").split("."),
)
| (filename: Union[str, bytes, os.PathLike[str], os.PathLike[bytes]]) -> wheel_filename.ParsedWheelFilename |
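A worked example covering `parse_wheel_filename`, the `__str__` round trip, and `tag_triples`:
```python
>>> from wheel_filename import parse_wheel_filename
>>> pwf = parse_wheel_filename('pip-21.1.2-py3-none-any.whl')
>>> pwf.project
'pip'
>>> pwf.version
'21.1.2'
>>> list(pwf.tag_triples())
['py3-none-any']
>>> str(pwf)
'pip-21.1.2-py3-none-any.whl'
```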
35,930 | apeye.url | Domain |
:class:`typing.NamedTuple` of a URL's subdomain, domain, and suffix.
| from apeye.url import Domain
| (subdomain: str, domain: str, suffix: str) |
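A short sketch of the `Domain` named tuple; the output format follows the custom `__repr__` shown in the next row:
```python
>>> from apeye.url import Domain
>>> d = Domain('www', 'example', 'com')
>>> d.domain
'example'
>>> d
Domain(subdomain='www', domain='example', suffix='com')
```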
35,932 | namedtuple_Domain | __new__ | Create new instance of Domain(subdomain, domain, suffix) | from builtins import function
| (_cls, subdomain: str, domain: str, suffix: str) |
35,933 | apeye_core | __repr__ |
Return a string representation of the :class:`~.Domain`.
| def __repr__(self) -> str:
    """
    Return a string representation of the :class:`~.Domain`.
    """
    # This is necessary to get the custom docstring
    repr_fmt = f"({', '.join(f'{name}=%r' for name in self._fields)})"
    return f"{self.__class__.__name__}{repr_fmt % self}"
| (self) -> str |
35,935 | collections | _replace | Return a new Domain object replacing specified fields with new values | def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = _sys.intern(str(typename))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f'_{index}'
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
f'identifiers: {name!r}')
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
f'keyword: {name!r}')
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
f'{name!r}')
if name in seen:
raise ValueError(f'Encountered duplicate field name: {name!r}')
seen.add(name)
field_defaults = {}
if defaults is not None:
defaults = tuple(defaults)
if len(defaults) > len(field_names):
raise TypeError('Got more default values than field names')
field_defaults = dict(reversed(list(zip(reversed(field_names),
reversed(defaults)))))
# Variables used in the methods and docstrings
field_names = tuple(map(_sys.intern, field_names))
num_fields = len(field_names)
arg_list = ', '.join(field_names)
if num_fields == 1:
arg_list += ','
repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
tuple_new = tuple.__new__
_dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip
# Create all the named tuple methods to be added to the class namespace
namespace = {
'_tuple_new': tuple_new,
'__builtins__': {},
'__name__': f'namedtuple_{typename}',
}
code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
__new__ = eval(code, namespace)
__new__.__name__ = '__new__'
__new__.__doc__ = f'Create new instance of {typename}({arg_list})'
if defaults is not None:
__new__.__defaults__ = defaults
@classmethod
def _make(cls, iterable):
result = tuple_new(cls, iterable)
if _len(result) != num_fields:
raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
return result
_make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
'or iterable')
def _replace(self, /, **kwds):
result = self._make(_map(kwds.pop, field_names, self))
if kwds:
raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
return result
_replace.__doc__ = (f'Return a new {typename} object replacing specified '
'fields with new values')
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + repr_fmt % self
def _asdict(self):
'Return a new dict which maps field names to their values.'
return _dict(_zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return _tuple(self)
# Modify function metadata to help with introspection and debugging
for method in (
__new__,
_make.__func__,
_replace,
__repr__,
_asdict,
__getnewargs__,
):
method.__qualname__ = f'{typename}.{method.__name__}'
# Build-up the class namespace dictionary
# and use type() to build the result class
class_namespace = {
'__doc__': f'{typename}({arg_list})',
'__slots__': (),
'_fields': field_names,
'_field_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
'_replace': _replace,
'__repr__': __repr__,
'_asdict': _asdict,
'__getnewargs__': __getnewargs__,
'__match_args__': field_names,
}
for index, name in enumerate(field_names):
doc = _sys.intern(f'Alias for field number {index}')
class_namespace[name] = _tuplegetter(index, doc)
result = type(typename, (tuple,), class_namespace)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
| (self, /, **kwds) |
35,936 | apeye.url | URL |
:mod:`pathlib`-like class for URLs.
:param url: The URL to construct the :class:`~apeye.url.URL` object from.
.. versionchanged:: 0.3.0 The ``url`` parameter can now be a string or a :class:`~.apeye.url.URL`.
.. versionchanged:: 1.1.0
Added support for sorting and rich comparisons (``<``, ``<=``, ``>`` and ``>=``).
.. autoclasssumm:: URL
:autosummary-sections: Methods
:autosummary-exclude-members: __lt__,__le__,__gt__,__ge__,__init__,__hash__
.. autosummary-widths:: 1/5
.. autoclasssumm:: URL
:autosummary-sections: Attributes
| class URL(apeye_core.URL): # noqa: D101
    #: The scheme specifier
    scheme: str
    #: The network location part of the URL
    netloc: str
    #: The hierarchical path of the URL
    path: URLPath
    query: Dict[str, List[str]]
    """
    The query parameters of the URL, if present.
    .. versionadded:: 0.7.0
    """
    fragment: Optional[str]
    """
    The URL fragment, used to identify a part of the document. :py:obj:`None` if absent from the URL.
    .. versionadded:: 0.7.0
    """
| (url: Union[str, ForwardRef('URL')] = '') |
35,937 | apeye_core | __eq__ |
Return ``self == other``.
.. latex:vspace:: -10px
.. attention::
URL fragments and query parameters are not compared.
.. seealso:: :meth:`.URL.strict_compare`, which *does* consider those attributes.
.. latex:vspace:: -20px
| def __eq__(self, other) -> bool:
    """
    Return ``self == other``.
    .. latex:vspace:: -10px
    .. attention::
        URL fragments and query parameters are not compared.
    .. seealso:: :meth:`.URL.strict_compare`, which *does* consider those attributes.
    .. latex:vspace:: -20px
    """
    if isinstance(other, URL):
        return self.netloc == other.netloc and self.scheme == other.scheme and self.path == other.path
    else:
        return NotImplemented
35,938 | apeye_core | __fspath__ |
Returns the file system path representation of the :class:`~.apeye.url.URL`.
This is comprised of the ``netloc`` and ``path`` attributes.
| def __fspath__(self) -> str:
    """
    Returns the file system path representation of the :class:`~.apeye.url.URL`.
    This is comprised of the ``netloc`` and ``path`` attributes.
    """
    return f"{self.netloc}{self.path}"
| (self) -> str |
35,939 | apeye_core | __ge__ | null | def __ge__(self, other):
    if isinstance(other, URL):
        return self._parts_port >= other._parts_port
    else:
        return NotImplemented
| (self, other) |
35,940 | apeye_core | __gt__ | null | def __gt__(self, other):
    if isinstance(other, URL):
        return self._parts_port > other._parts_port
    else:
        return NotImplemented
| (self, other) |
35,941 | apeye_core | __hash__ |
Returns the hash of the :class:`~apeye.url.URL`.
| def __hash__(self) -> int:
    """
    Returns the hash of the :class:`~apeye.url.URL`.
    """
    return hash((self.scheme, self.netloc, self.path))
| (self) -> int |
35,942 | apeye_core | __init__ | null | def __init__(self, url: Union[str, "URL"] = ''):
    if isinstance(url, URL):
        url = str(url)
    if not re.match("([A-Za-z-.]+:)?//", url):
        url = "//" + str(url)
    scheme, netloc, parts, params, query, fragment = urlparse(url)
    self.scheme: str = scheme
    self.netloc: str = netloc
    self.path = URLPath(parts)
    self.query = parse_qs(query or '')
    self.fragment = fragment or None
| (self, url: Union[str, apeye.url.URL] = '') |
35,943 | apeye_core | __le__ | null | def __le__(self, other):
    if isinstance(other, URL):
        return self._parts_port <= other._parts_port
    else:
        return NotImplemented
| (self, other) |
35,944 | apeye_core | __lt__ | null | def __lt__(self, other):
    if isinstance(other, URL):
        return self._parts_port < other._parts_port
    else:
        return NotImplemented
| (self, other) |
35,945 | apeye_core | __repr__ |
Returns the string representation of the :class:`~apeye.url.URL`.
| def __repr__(self) -> str:
    """
    Returns the string representation of the :class:`~apeye.url.URL`.
    """
    return f"{self.__class__.__name__}({str(self)!r})"
| (self) -> str |
35,946 | apeye_core | __str__ |
Returns the :class:`~apeye.url.URL` as a string.
| def __str__(self) -> str:
    """
    Returns the :class:`~apeye.url.URL` as a string.
    """
    query = urlencode(self.query, doseq=True)
    url = urlunparse([self.scheme, self.netloc, str(self.path), None, query, self.fragment])
    if url.startswith("//"):
        return url[2:]
    else:
        return url
| (self) -> str |
35,947 | apeye_core | __truediv__ |
Construct a new :class:`~apeye.url.URL` object for the given child of this :class:`~apeye.url.URL`.
:rtype:
.. versionchanged:: 0.7.0
* Added support for division by integers.
* Now officially supports the new path having a URL fragment and/or query parameters.
Any URL fragment or query parameters from the parent URL are not inherited by its children.
| def __truediv__(self: URLType, key: Union[PathLike, int]) -> URLType:
    """
    Construct a new :class:`~apeye.url.URL` object for the given child of this :class:`~apeye.url.URL`.
    :rtype:
    .. versionchanged:: 0.7.0
        * Added support for division by integers.
        * Now officially supports the new path having a URL fragment and/or query parameters.
          Any URL fragment or query parameters from the parent URL are not inherited by its children.
    """
    try:
        return self._make_child((key, ))
    except TypeError:
        return NotImplemented
| (self: ~URLType, key: Union[str, pathlib.Path, os.PathLike, int]) -> ~URLType |
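A sketch of the division operator, including the integer support noted in the changelog above:
```python
>>> from apeye.url import URL
>>> URL('https://example.com/api') / 'users' / 42
URL('https://example.com/api/users/42')
```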
35,948 | apeye_core | _make_child |
Construct a new :class:`~apeye.url.URL` object by combining the given arguments with this instance's path part.
.. versionadded:: 1.1.0 (private)
Except for the final path element any queries and fragments are ignored.
:returns: A new :class:`~.apeye.url.URL` representing either a subpath
    (if all arguments are relative paths) or a totally different path
    (if one of the arguments is absolute). | def _make_child(self: URLType, args: Iterable[Union[PathLike, int]]) -> URLType:
    """
    Construct a new :class:`~apeye.url.URL` object by combining the given arguments with this instance's path part.
    .. versionadded:: 1.1.0 (private)
    Except for the final path element any queries and fragments are ignored.
    :returns: A new :class:`~.apeye.url.URL` representing either a subpath
        (if all arguments are relative paths) or a totally different path
        (if one of the arguments is absolute).
    """
    parsed_args: List[ParseResult] = []
    for arg in args:
        raw_arg = arg
        if isinstance(arg, pathlib.PurePath):
            arg = arg.as_posix()
        elif isinstance(arg, os.PathLike):
            arg = os.fspath(arg)
        elif isinstance(arg, int):
            arg = str(arg)
        try:
            parse_result = urlparse(arg)
        except AttributeError as e:
            if str(e).endswith("'decode'"):
                msg = f"Cannot join {type(raw_arg).__name__!r} to a {type(self.path).__name__!r}"
                raise TypeError(msg) from None
            else:
                raise
        parsed_args.append(parse_result)
    try:
        new_path = self.from_parts(
            self.scheme,
            self.netloc,
            self.path.joinpath(*map(attrgetter("path"), parsed_args)),
        )
    except TypeError:
        return NotImplemented
    if parsed_args:
        new_path.query = parse_qs(parsed_args[-1].query)
        new_path.fragment = parsed_args[-1].fragment or None
    return new_path
 | (self: ~URLType, args: Iterable[Union[str, pathlib.Path, os.PathLike, int]]) -> ~URLType |
35,949 | apeye_core | joinurl |
Construct a new :class:`~apeye.url.URL` object by combining the given arguments with this instance's path part.
.. versionadded:: 1.1.0
Except for the final path element any queries and fragments are ignored.
:returns: A new :class:`~.apeye.url.URL` representing either a subpath
    (if all arguments are relative paths) or a totally different path
    (if one of the arguments is absolute). | def joinurl(self: URLType, *args) -> URLType:
    """
    Construct a new :class:`~apeye.url.URL` object by combining the given arguments with this instance's path part.
    .. versionadded:: 1.1.0
    Except for the final path element any queries and fragments are ignored.
    :returns: A new :class:`~.apeye.url.URL` representing either a subpath
        (if all arguments are relative paths) or a totally different path
        (if one of the arguments is absolute).
    """
    return self._make_child(args)
 | (self: ~URLType, *args) -> ~URLType |
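``joinurl`` is the n-ary counterpart of ``/``, forwarding all of its arguments to ``_make_child`` in one call; an illustrative sketch:

from apeye.url import URL

base = URL("https://api.example.com")
# All segments are joined in a single call
assert str(base.joinurl("v1", "users", 42)) == "https://api.example.com/v1/users/42"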
35,950 | apeye_core | relative_to |
Returns a version of this URL's path relative to ``other``.
.. versionadded:: 1.1.0
:param other: Either a :class:`~.apeye.url.URL`, or a string or :class:`~.apeye.url.URLPath` representing an *absolute* path.
    If a :class:`~.apeye.url.URL`, the :attr:`~.apeye.url.URL.netloc` must match this URL's.
:raises ValueError: if the operation is not possible
    (i.e. because this URL's path is not a subpath of the other path) | def relative_to(self, other: Union[str, "URL", URLPath]) -> URLPath:
    """
    Returns a version of this URL's path relative to ``other``.
    .. versionadded:: 1.1.0
    :param other: Either a :class:`~.apeye.url.URL`, or a string or :class:`~.apeye.url.URLPath` representing an *absolute* path.
        If a :class:`~.apeye.url.URL`, the :attr:`~.apeye.url.URL.netloc` must match this URL's.
    :raises ValueError: if the operation is not possible
        (i.e. because this URL's path is not a subpath of the other path)
    """
    if isinstance(other, URLPath):
        if not other.is_absolute():
            raise ValueError("'URL.relative_to' cannot be used with relative URLPath objects")
        other = URL('/') / other
    elif not isinstance(other, URL):
        # Parse other as a URL
        other = URL(other)
    # Compare netloc, if both have one
    if self.netloc and other.netloc and self.netloc.lower() != other.netloc.lower():
        raise ValueError(f"{self!r} does not start with {other!r}")
    # Make the paths absolute
    # If coming from a URL they must always be absolute
    our_path = '/' / self.path
    other_path = '/' / other.path
    relative_path = our_path.relative_to(other_path)
    return relative_path
 | (self, other: Union[str, apeye.url.URL, apeye.url.URLPath]) -> apeye.url.URLPath |
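A sketch of ``relative_to`` under the documented rules (the ``netloc`` values must match when both sides have one, and the result is a :class:`~apeye.url.URLPath`); values illustrative:

from apeye.url import URL

page = URL("https://example.com/docs/api/url.html")
base = URL("https://example.com/docs")
assert str(page.relative_to(base)) == "api/url.html"
# A different netloc would raise ValueError, per the docstring above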
35,951 | apeye_core | strict_compare |
Return ``self ≡ other``, comparing the scheme, netloc, path, fragment and query parameters.
.. versionadded:: 0.7.0 | def strict_compare(self, other) -> bool:
    """
    Return ``self ≡ other``, comparing the scheme, netloc, path, fragment and query parameters.
    .. versionadded:: 0.7.0
    """
    if isinstance(other, URL):
        return (
            self.netloc == other.netloc and self.scheme == other.scheme and self.path == other.path
            and self.query == other.query and self.fragment == other.fragment
        )
    return NotImplemented
 | (self, other) -> bool |
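In contrast to ``==``, ``strict_compare`` also checks the query and fragment, so URLs that compare equal under ``__eq__`` can still differ here; illustrative values:

from apeye.url import URL

a = URL("https://example.com/search?q=1")
b = URL("https://example.com/search")
assert a == b                    # query ignored by __eq__
assert not a.strict_compare(b)   # query considered here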
35,952 | apeye_core | with_name |
Return a new :class:`~apeye.url.URL` with the file name changed.
:param name:
:param inherit: Whether the new :class:`~apeye.url.URL` should inherit the query string
    and fragment from this :class:`~apeye.url.URL`.
:rtype:
.. versionchanged:: 0.7.0 Added the ``inherit`` parameter. | def with_name(self: URLType, name: str, inherit: bool = True) -> URLType:
    """
    Return a new :class:`~apeye.url.URL` with the file name changed.
    :param name:
    :param inherit: Whether the new :class:`~apeye.url.URL` should inherit the query string
        and fragment from this :class:`~apeye.url.URL`.
    :rtype:
    .. versionchanged:: 0.7.0 Added the ``inherit`` parameter.
    """
    if inherit:
        kwargs = {"query": self.query, "fragment": self.fragment}
    else:
        kwargs = {}
    return self.from_parts(
        self.scheme,
        self.netloc,
        self.path.with_name(name),
        **kwargs,  # type: ignore
    )
 | (self: ~URLType, name: str, inherit: bool = True) -> ~URLType |
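A brief sketch of ``with_name``: with ``inherit=True`` (the default) the query string carries over to the renamed URL; values illustrative:

from apeye.url import URL

url = URL("https://example.com/docs/index.html?lang=en")
assert str(url.with_name("about.html")) == "https://example.com/docs/about.html?lang=en"
assert str(url.with_name("about.html", inherit=False)) == "https://example.com/docs/about.html"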
35,953 | apeye_core | with_suffix |
Returns a new :class:`~apeye.url.URL` with the file suffix changed.
If the :class:`~apeye.url.URL` has no suffix, add the given suffix.
If the given suffix is an empty string, remove the suffix from the :class:`~apeye.url.URL`.
:param suffix:
:param inherit: Whether the new :class:`~apeye.url.URL` should inherit the query string
    and fragment from this :class:`~apeye.url.URL`.
:rtype:
.. versionchanged:: 0.7.0 Added the ``inherit`` parameter. | def with_suffix(self: URLType, suffix: str, inherit: bool = True) -> URLType:
    """
    Returns a new :class:`~apeye.url.URL` with the file suffix changed.
    If the :class:`~apeye.url.URL` has no suffix, add the given suffix.
    If the given suffix is an empty string, remove the suffix from the :class:`~apeye.url.URL`.
    :param suffix:
    :param inherit: Whether the new :class:`~apeye.url.URL` should inherit the query string
        and fragment from this :class:`~apeye.url.URL`.
    :rtype:
    .. versionchanged:: 0.7.0 Added the ``inherit`` parameter.
    """
    if inherit:
        kwargs = {"query": self.query, "fragment": self.fragment}
    else:
        kwargs = {}
    return self.from_parts(
        self.scheme,
        self.netloc,
        self.path.with_suffix(suffix),
        **kwargs,  # type: ignore
    )
 | (self: ~URLType, suffix: str, inherit: bool = True) -> ~URLType |
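``with_suffix`` follows the same pattern, adding, replacing or (given an empty string) removing the suffix; a minimal sketch with an illustrative URL:

from apeye.url import URL

url = URL("https://example.com/report.txt")
assert str(url.with_suffix(".json")) == "https://example.com/report.json"
assert str(url.with_suffix('')) == "https://example.com/report"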
35,954 | apeye.url | URLPath |
Represents the path part of a URL.
Subclass of :class:`pathlib.PurePosixPath` that provides a subset of its methods.
.. versionchanged:: 1.1.0
Implemented :meth:`~.apeye.url.URLPath.is_absolute`, :meth:`~.apeye.url.URLPath.joinpath`,
:meth:`~.apeye.url.URLPath.relative_to`, :meth:`~.pathlib.PurePath.match`,
``anchor``, ``drive``, and support for rich comparisons (``<``, ``<=``, ``>`` and ``>=``),
which previously raised :exc:`NotImplementedError`.
.. latex:clearpage::
| from apeye.url import URLPath
| (*args) |
35,956 | pathlib | __eq__ | Return ``self == other``. | def __eq__(self, other):
if not isinstance(other, PurePath):
return NotImplemented
return self._cparts == other._cparts and self._flavour is other._flavour
| (self, other) -> bool |
35,958 | pathlib | __ge__ | Return ``self >= other``. | def __ge__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts >= other._cparts
| (self, other) -> bool |
35,959 | pathlib | __gt__ | Return ``self > other``. | def __gt__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts > other._cparts
| (self, other) -> bool |