index | package | name | docstring | code | signature |
---|---|---|---|---|---|
37,094 | sphinx.util.inspect | stringify_signature | Stringify a :class:`~inspect.Signature` object.
:param show_annotation: If enabled, show annotations on the signature
:param show_return_annotation: If enabled, show annotation of the return value
:param unqualified_typehints: If enabled, show annotations as unqualified
(ex. io.StringIO -> StringIO)
| def stringify_signature(
sig: Signature,
show_annotation: bool = True,
show_return_annotation: bool = True,
unqualified_typehints: bool = False,
) -> str:
"""Stringify a :class:`~inspect.Signature` object.
:param show_annotation: If enabled, show annotations on the signature
:param show_return_annotation: If enabled, show annotation of the return value
:param unqualified_typehints: If enabled, show annotations as unqualified
(ex. io.StringIO -> StringIO)
"""
if unqualified_typehints:
mode = 'smart'
else:
mode = 'fully-qualified'
EMPTY = Parameter.empty
args = []
last_kind = None
for param in sig.parameters.values():
if param.kind != Parameter.POSITIONAL_ONLY and last_kind == Parameter.POSITIONAL_ONLY:
# PEP-570: Separator for Positional Only Parameter: /
args.append('/')
if param.kind == Parameter.KEYWORD_ONLY and last_kind in (
Parameter.POSITIONAL_OR_KEYWORD,
Parameter.POSITIONAL_ONLY,
None,
):
# PEP-3102: Separator for Keyword Only Parameter: *
args.append('*')
arg = StringIO()
if param.kind is Parameter.VAR_POSITIONAL:
arg.write('*' + param.name)
elif param.kind is Parameter.VAR_KEYWORD:
arg.write('**' + param.name)
else:
arg.write(param.name)
if show_annotation and param.annotation is not EMPTY:
arg.write(': ')
arg.write(stringify_annotation(param.annotation, mode))
if param.default is not EMPTY:
if show_annotation and param.annotation is not EMPTY:
arg.write(' = ')
else:
arg.write('=')
arg.write(object_description(param.default))
args.append(arg.getvalue())
last_kind = param.kind
if last_kind is Parameter.POSITIONAL_ONLY:
# PEP-570: Separator for Positional Only Parameter: /
args.append('/')
concatenated_args = ', '.join(args)
if sig.return_annotation is EMPTY or not show_annotation or not show_return_annotation:
return f'({concatenated_args})'
else:
retann = stringify_annotation(sig.return_annotation, mode)
return f'({concatenated_args}) -> {retann}'
| (sig: inspect.Signature, show_annotation: bool = True, show_return_annotation: bool = True, unqualified_typehints: bool = False) -> str |
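A brief illustration of the function above; the toy `greet` function is made up for the example, and the import assumes a standard Sphinx installation.

```python
import inspect

from sphinx.util.inspect import stringify_signature


def greet(name: str, *, excited: bool = False) -> str:
    """Toy function used only to obtain a Signature object."""
    return name + ("!" if excited else "")


sig = inspect.signature(greet)
print(stringify_signature(sig))
# -> (name: str, *, excited: bool = False) -> str
print(stringify_signature(sig, show_annotation=False))
# -> (name, *, excited=False)
```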
37,096 | sphinx_autodoc_typehints | tag_name | null | def tag_name(node: Node) -> str:
return node.tagname # type:ignore[attr-defined,no-any-return]
| (node: 'Node') -> 'str' |
37,099 | sphinx_autodoc_typehints | unescape | null | def unescape(escaped: str) -> str:
# For some reason the string we get has a bunch of null bytes in it??
# Remove them...
escaped = escaped.replace("\x00", "")
# For some reason the extra slash before spaces gets lost between the .rst
# source and when this directive is called. So don't replace "\<space>" =>
# "<space>"
return re.sub(r"\\([^ ])", r"\1", escaped)
| (escaped: str) -> str |
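A quick sanity check of `unescape`, assuming the definition above (and its `re` import) is in scope; the input strings are invented for illustration.

```python
import re  # unescape (above) relies on re.sub

assert unescape("foo\\_bar") == "foo_bar"    # backslash before a non-space character is dropped
assert unescape("foo\\ bar") == "foo\\ bar"  # backslash before a space is deliberately kept
assert unescape("fo\x00o") == "foo"          # null bytes are stripped first
```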
37,100 | sphinx_autodoc_typehints | validate_config | null | def validate_config(app: Sphinx, env: BuildEnvironment, docnames: list[str]) -> None: # noqa: ARG001
valid = {None, "comma", "braces", "braces-after"}
if app.config.typehints_defaults not in valid | {False}:
msg = f"typehints_defaults needs to be one of {valid!r}, not {app.config.typehints_defaults!r}"
raise ValueError(msg)
formatter = app.config.typehints_formatter
if formatter is not None and not callable(formatter):
msg = f"typehints_formatter needs to be callable or `None`, not {formatter}"
raise ValueError(msg)
| (app: 'Sphinx', env: 'BuildEnvironment', docnames: 'list[str]') -> 'None' |
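For context, a hypothetical `conf.py` excerpt showing the two options this validator inspects; the chosen values are examples, not defaults.

```python
# Hypothetical Sphinx conf.py excerpt (sphinx-autodoc-typehints must be installed).
extensions = ["sphinx.ext.autodoc", "sphinx_autodoc_typehints"]

# Must be one of None, "comma", "braces", "braces-after" (False is also tolerated),
# otherwise validate_config raises ValueError at build time.
typehints_defaults = "comma"

# Must be a callable or None, otherwise validate_config raises ValueError.
typehints_formatter = None
```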
37,102 | youtube_transcript_api._errors | CookiePathInvalid | null | class CookiePathInvalid(CouldNotRetrieveTranscript):
CAUSE_MESSAGE = 'The provided cookie file was unable to be loaded'
| (video_id) |
37,103 | youtube_transcript_api._errors | __init__ | null | def __init__(self, video_id):
self.video_id = video_id
super(CouldNotRetrieveTranscript, self).__init__(self._build_error_message())
| (self, video_id) |
37,104 | youtube_transcript_api._errors | _build_error_message | null | def _build_error_message(self):
cause = self.cause
error_message = self.ERROR_MESSAGE.format(video_url=WATCH_URL.format(video_id=self.video_id))
if cause:
error_message += self.CAUSE_MESSAGE_INTRO.format(cause=cause) + self.GITHUB_REFERRAL
return error_message
| (self) |
37,105 | youtube_transcript_api._errors | CookiesInvalid | null | class CookiesInvalid(CouldNotRetrieveTranscript):
CAUSE_MESSAGE = 'The cookies provided are not valid (may have expired)'
| (video_id) |
37,108 | youtube_transcript_api._errors | CouldNotRetrieveTranscript |
Raised if a transcript could not be retrieved.
| class CouldNotRetrieveTranscript(Exception):
"""
Raised if a transcript could not be retrieved.
"""
ERROR_MESSAGE = '\nCould not retrieve a transcript for the video {video_url}!'
CAUSE_MESSAGE_INTRO = ' This is most likely caused by:\n\n{cause}'
CAUSE_MESSAGE = ''
GITHUB_REFERRAL = (
'\n\nIf you are sure that the described cause is not responsible for this error '
'and that a transcript should be retrievable, please create an issue at '
'https://github.com/jdepoix/youtube-transcript-api/issues. '
'Please add which version of youtube_transcript_api you are using '
'and provide the information needed to replicate the error. '
'Also make sure that there are no open issues which already describe your problem!'
)
def __init__(self, video_id):
self.video_id = video_id
super(CouldNotRetrieveTranscript, self).__init__(self._build_error_message())
def _build_error_message(self):
cause = self.cause
error_message = self.ERROR_MESSAGE.format(video_url=WATCH_URL.format(video_id=self.video_id))
if cause:
error_message += self.CAUSE_MESSAGE_INTRO.format(cause=cause) + self.GITHUB_REFERRAL
return error_message
@property
def cause(self):
return self.CAUSE_MESSAGE
| (video_id) |
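A hedged usage sketch: since every error below subclasses `CouldNotRetrieveTranscript` and only overrides `CAUSE_MESSAGE` (or the `cause` property), catching the base class is enough to get the full formatted message. The video id is a placeholder.

```python
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api._errors import CouldNotRetrieveTranscript

try:
    YouTubeTranscriptApi.get_transcript("some_video_id")  # placeholder id
except CouldNotRetrieveTranscript as error:
    # The printed message is ERROR_MESSAGE plus the subclass' CAUSE_MESSAGE
    # (and the GitHub referral) as assembled by _build_error_message().
    print(type(error).__name__, error.video_id)
    print(error)
```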
37,111 | youtube_transcript_api._errors | FailedToCreateConsentCookie | null | class FailedToCreateConsentCookie(CouldNotRetrieveTranscript):
CAUSE_MESSAGE = 'Failed to automatically give consent to saving cookies'
| (video_id) |
37,114 | youtube_transcript_api._errors | InvalidVideoId | null | class InvalidVideoId(CouldNotRetrieveTranscript):
CAUSE_MESSAGE = (
'You provided an invalid video id. Make sure you are using the video id and NOT the url!\n\n'
'Do NOT run: `YouTubeTranscriptApi.get_transcript("https://www.youtube.com/watch?v=1234")`\n'
'Instead run: `YouTubeTranscriptApi.get_transcript("1234")`'
)
| (video_id) |
37,117 | youtube_transcript_api._errors | NoTranscriptAvailable | null | class NoTranscriptAvailable(CouldNotRetrieveTranscript):
CAUSE_MESSAGE = 'No transcripts are available for this video'
| (video_id) |
37,120 | youtube_transcript_api._errors | NoTranscriptFound | null | class NoTranscriptFound(CouldNotRetrieveTranscript):
CAUSE_MESSAGE = (
'No transcripts were found for any of the requested language codes: {requested_language_codes}\n\n'
'{transcript_data}'
)
def __init__(self, video_id, requested_language_codes, transcript_data):
self._requested_language_codes = requested_language_codes
self._transcript_data = transcript_data
super(NoTranscriptFound, self).__init__(video_id)
@property
def cause(self):
return self.CAUSE_MESSAGE.format(
requested_language_codes=self._requested_language_codes,
transcript_data=str(self._transcript_data),
)
| (video_id, requested_language_codes, transcript_data) |
37,121 | youtube_transcript_api._errors | __init__ | null | def __init__(self, video_id, requested_language_codes, transcript_data):
self._requested_language_codes = requested_language_codes
self._transcript_data = transcript_data
super(NoTranscriptFound, self).__init__(video_id)
| (self, video_id, requested_language_codes, transcript_data) |
37,123 | youtube_transcript_api._errors | NotTranslatable | null | class NotTranslatable(CouldNotRetrieveTranscript):
CAUSE_MESSAGE = 'The requested language is not translatable'
| (video_id) |
37,126 | youtube_transcript_api._errors | TooManyRequests | null | class TooManyRequests(CouldNotRetrieveTranscript):
CAUSE_MESSAGE = (
'YouTube is receiving too many requests from this IP and now requires solving a captcha to continue. '
'One of the following things can be done to work around this:\n'
'- Manually solve the captcha in a browser and export the cookie. '
'Read here how to use that cookie with '
'youtube-transcript-api: https://github.com/jdepoix/youtube-transcript-api#cookies\n'
'- Use a different IP address\n'
'- Wait until the ban on your IP has been lifted'
)
| (video_id) |
37,129 | youtube_transcript_api._transcripts | Transcript | null | class Transcript(object):
def __init__(self, http_client, video_id, url, language, language_code, is_generated, translation_languages):
"""
You probably don't want to initialize this directly. Usually you'll access Transcript objects using a
TranscriptList.
:param http_client: http client which is used to make the transcript retrieving http calls
:type http_client: requests.Session
:param video_id: the id of the video this transcript is for
:type video_id: str
:param url: the url which needs to be called to fetch the transcript
:param language: the name of the language this transcript uses
:param language_code: the language code of this transcript (e.g. "en")
:param is_generated: whether this transcript was automatically generated by YouTube
:param translation_languages: list of languages this transcript can be translated to
"""
self._http_client = http_client
self.video_id = video_id
self._url = url
self.language = language
self.language_code = language_code
self.is_generated = is_generated
self.translation_languages = translation_languages
self._translation_languages_dict = {
translation_language['language_code']: translation_language['language']
for translation_language in translation_languages
}
def fetch(self, preserve_formatting=False):
"""
Loads the actual transcript data.
:param preserve_formatting: whether to keep select HTML text formatting
:type preserve_formatting: bool
:return: a list of dictionaries containing the 'text', 'start' and 'duration' keys
:rtype [{'text': str, 'start': float, 'duration': float}]:
"""
response = self._http_client.get(self._url, headers={'Accept-Language': 'en-US'})
return _TranscriptParser(preserve_formatting=preserve_formatting).parse(
_raise_http_errors(response, self.video_id).text,
)
def __str__(self):
return '{language_code} ("{language}"){translation_description}'.format(
language=self.language,
language_code=self.language_code,
translation_description='[TRANSLATABLE]' if self.is_translatable else ''
)
@property
def is_translatable(self):
return len(self.translation_languages) > 0
def translate(self, language_code):
if not self.is_translatable:
raise NotTranslatable(self.video_id)
if language_code not in self._translation_languages_dict:
raise TranslationLanguageNotAvailable(self.video_id)
return Transcript(
self._http_client,
self.video_id,
'{url}&tlang={language_code}'.format(url=self._url, language_code=language_code),
self._translation_languages_dict[language_code],
language_code,
True,
[],
)
| (http_client, video_id, url, language, language_code, is_generated, translation_languages) |
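A hedged end-to-end sketch of the class above; `Transcript` objects are normally obtained from a `TranscriptList` rather than constructed directly, and the video id is a placeholder.

```python
from youtube_transcript_api import YouTubeTranscriptApi

transcript_list = YouTubeTranscriptApi.list_transcripts("some_video_id")  # placeholder id
transcript = transcript_list.find_transcript(["de", "en"])
print(transcript.language_code, transcript.is_generated, transcript.is_translatable)

snippets = transcript.fetch()  # [{'text': ..., 'start': ..., 'duration': ...}, ...]
if transcript.is_translatable:
    snippets = transcript.translate("en").fetch()  # translate() returns a new Transcript
print(snippets[:2])
```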
37,130 | youtube_transcript_api._transcripts | __init__ |
You probably don't want to initialize this directly. Usually you'll access Transcript objects using a
TranscriptList.
:param http_client: http client which is used to make the transcript retrieving http calls
:type http_client: requests.Session
:param video_id: the id of the video this transcript is for
:type video_id: str
:param url: the url which needs to be called to fetch the transcript
:param language: the name of the language this transcript uses
:param language_code: the language code of this transcript (e.g. "en")
:param is_generated: whether this transcript was automatically generated by YouTube
:param translation_languages: list of languages this transcript can be translated to
| def __init__(self, http_client, video_id, url, language, language_code, is_generated, translation_languages):
"""
You probably don't want to initialize this directly. Usually you'll access Transcript objects using a
TranscriptList.
:param http_client: http client which is used to make the transcript retrieving http calls
:type http_client: requests.Session
:param video_id: the id of the video this transcript is for
:type video_id: str
:param url: the url which needs to be called to fetch the transcript
:param language: the name of the language this transcript uses
:param language_code: the language code of this transcript (e.g. "en")
:param is_generated: whether this transcript was automatically generated by YouTube
:param translation_languages: list of languages this transcript can be translated to
"""
self._http_client = http_client
self.video_id = video_id
self._url = url
self.language = language
self.language_code = language_code
self.is_generated = is_generated
self.translation_languages = translation_languages
self._translation_languages_dict = {
translation_language['language_code']: translation_language['language']
for translation_language in translation_languages
}
| (self, http_client, video_id, url, language, language_code, is_generated, translation_languages) |
37,131 | youtube_transcript_api._transcripts | __str__ | null | def __str__(self):
return '{language_code} ("{language}"){translation_description}'.format(
language=self.language,
language_code=self.language_code,
translation_description='[TRANSLATABLE]' if self.is_translatable else ''
)
| (self) |
37,132 | youtube_transcript_api._transcripts | fetch |
Loads the actual transcript data.
:param preserve_formatting: whether to keep select HTML text formatting
:type preserve_formatting: bool
:return: a list of dictionaries containing the 'text', 'start' and 'duration' keys
:rtype [{'text': str, 'start': float, 'duration': float}]:
| def fetch(self, preserve_formatting=False):
"""
Loads the actual transcript data.
:param preserve_formatting: whether to keep select HTML text formatting
:type preserve_formatting: bool
:return: a list of dictionaries containing the 'text', 'start' and 'duration' keys
:rtype [{'text': str, 'start': float, 'duration': float}]:
"""
response = self._http_client.get(self._url, headers={'Accept-Language': 'en-US'})
return _TranscriptParser(preserve_formatting=preserve_formatting).parse(
_raise_http_errors(response, self.video_id).text,
)
| (self, preserve_formatting=False) |
37,133 | youtube_transcript_api._transcripts | translate | null | def translate(self, language_code):
if not self.is_translatable:
raise NotTranslatable(self.video_id)
if language_code not in self._translation_languages_dict:
raise TranslationLanguageNotAvailable(self.video_id)
return Transcript(
self._http_client,
self.video_id,
'{url}&tlang={language_code}'.format(url=self._url, language_code=language_code),
self._translation_languages_dict[language_code],
language_code,
True,
[],
)
| (self, language_code) |
37,134 | youtube_transcript_api._transcripts | TranscriptList |
This object represents a list of transcripts. It can be iterated over to list all transcripts which are available
for a given YouTube video. Also it provides functionality to search for a transcript in a given language.
| class TranscriptList(object):
"""
This object represents a list of transcripts. It can be iterated over to list all transcripts which are available
for a given YouTube video. Also it provides functionality to search for a transcript in a given language.
"""
def __init__(self, video_id, manually_created_transcripts, generated_transcripts, translation_languages):
"""
The constructor is only for internal use. Use the static build method instead.
:param video_id: the id of the video this TranscriptList is for
:type video_id: str
:param manually_created_transcripts: dict mapping language codes to the manually created transcripts
:type manually_created_transcripts: dict[str, Transcript]
:param generated_transcripts: dict mapping language codes to the generated transcripts
:type generated_transcripts: dict[str, Transcript]
:param translation_languages: list of languages which can be used for translatable languages
:type translation_languages: list[dict[str, str]]
"""
self.video_id = video_id
self._manually_created_transcripts = manually_created_transcripts
self._generated_transcripts = generated_transcripts
self._translation_languages = translation_languages
@staticmethod
def build(http_client, video_id, captions_json):
"""
Factory method for TranscriptList.
:param http_client: http client which is used to make the transcript retrieving http calls
:type http_client: requests.Session
:param video_id: the id of the video this TranscriptList is for
:type video_id: str
:param captions_json: the JSON parsed from the YouTube pages static HTML
:type captions_json: dict
:return: the created TranscriptList
:rtype TranscriptList:
"""
translation_languages = [
{
'language': translation_language['languageName']['simpleText'],
'language_code': translation_language['languageCode'],
} for translation_language in captions_json.get('translationLanguages', [])
]
manually_created_transcripts = {}
generated_transcripts = {}
for caption in captions_json['captionTracks']:
if caption.get('kind', '') == 'asr':
transcript_dict = generated_transcripts
else:
transcript_dict = manually_created_transcripts
transcript_dict[caption['languageCode']] = Transcript(
http_client,
video_id,
caption['baseUrl'],
caption['name']['simpleText'],
caption['languageCode'],
caption.get('kind', '') == 'asr',
translation_languages if caption.get('isTranslatable', False) else [],
)
return TranscriptList(
video_id,
manually_created_transcripts,
generated_transcripts,
translation_languages,
)
def __iter__(self):
return iter(list(self._manually_created_transcripts.values()) + list(self._generated_transcripts.values()))
def find_transcript(self, language_codes):
"""
Finds a transcript for a given language code. Manually created transcripts are returned first and only if none
are found, generated transcripts are used. If you only want generated transcripts, use
`find_generated_transcript` instead.
:param language_codes: A list of language codes in a descending priority. For example, if this is set to
['de', 'en'] it will first try to fetch the german transcript (de) and then fetch the english transcript (en) if
it fails to do so.
:type languages: list[str]
:return: the found Transcript
:rtype Transcript:
:raises: NoTranscriptFound
"""
return self._find_transcript(language_codes, [self._manually_created_transcripts, self._generated_transcripts])
def find_generated_transcript(self, language_codes):
"""
Finds an automatically generated transcript for a given language code.
:param language_codes: A list of language codes in a descending priority. For example, if this is set to
['de', 'en'] it will first try to fetch the german transcript (de) and then fetch the english transcript (en) if
it fails to do so.
:type languages: list[str]
:return: the found Transcript
:rtype Transcript:
:raises: NoTranscriptFound
"""
return self._find_transcript(language_codes, [self._generated_transcripts])
def find_manually_created_transcript(self, language_codes):
"""
Finds a manually created transcript for a given language code.
:param language_codes: A list of language codes in a descending priority. For example, if this is set to
['de', 'en'] it will first try to fetch the german transcript (de) and then fetch the english transcript (en) if
it fails to do so.
:type languages: list[str]
:return: the found Transcript
:rtype Transcript:
:raises: NoTranscriptFound
"""
return self._find_transcript(language_codes, [self._manually_created_transcripts])
def _find_transcript(self, language_codes, transcript_dicts):
for language_code in language_codes:
for transcript_dict in transcript_dicts:
if language_code in transcript_dict:
return transcript_dict[language_code]
raise NoTranscriptFound(
self.video_id,
language_codes,
self
)
def __str__(self):
return (
'For this video ({video_id}) transcripts are available in the following languages:\n\n'
'(MANUALLY CREATED)\n'
'{available_manually_created_transcript_languages}\n\n'
'(GENERATED)\n'
'{available_generated_transcripts}\n\n'
'(TRANSLATION LANGUAGES)\n'
'{available_translation_languages}'
).format(
video_id=self.video_id,
available_manually_created_transcript_languages=self._get_language_description(
str(transcript) for transcript in self._manually_created_transcripts.values()
),
available_generated_transcripts=self._get_language_description(
str(transcript) for transcript in self._generated_transcripts.values()
),
available_translation_languages=self._get_language_description(
'{language_code} ("{language}")'.format(
language=translation_language['language'],
language_code=translation_language['language_code'],
) for translation_language in self._translation_languages
)
)
def _get_language_description(self, transcript_strings):
description = '\n'.join(' - {transcript}'.format(transcript=transcript) for transcript in transcript_strings)
return description if description else 'None'
| (video_id, manually_created_transcripts, generated_transcripts, translation_languages) |
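A hedged sketch of the lookup helpers above; the video id is again a placeholder.

```python
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api._errors import NoTranscriptFound

transcript_list = YouTubeTranscriptApi.list_transcripts("some_video_id")  # placeholder id
print(transcript_list)  # human-readable overview from __str__ (manual / generated / translatable)

for transcript in transcript_list:  # manually created transcripts are yielded before generated ones
    print(transcript)               # e.g. en ("English")[TRANSLATABLE]

try:
    chosen = transcript_list.find_manually_created_transcript(["de", "en"])
except NoTranscriptFound:
    chosen = transcript_list.find_generated_transcript(["de", "en"])
```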
37,135 | youtube_transcript_api._transcripts | __init__ |
The constructor is only for internal use. Use the static build method instead.
:param video_id: the id of the video this TranscriptList is for
:type video_id: str
:param manually_created_transcripts: dict mapping language codes to the manually created transcripts
:type manually_created_transcripts: dict[str, Transcript]
:param generated_transcripts: dict mapping language codes to the generated transcripts
:type generated_transcripts: dict[str, Transcript]
:param translation_languages: list of languages which can be used for translatable languages
:type translation_languages: list[dict[str, str]]
| def __init__(self, video_id, manually_created_transcripts, generated_transcripts, translation_languages):
"""
The constructor is only for internal use. Use the static build method instead.
:param video_id: the id of the video this TranscriptList is for
:type video_id: str
:param manually_created_transcripts: dict mapping language codes to the manually created transcripts
:type manually_created_transcripts: dict[str, Transcript]
:param generated_transcripts: dict mapping language codes to the generated transcripts
:type generated_transcripts: dict[str, Transcript]
:param translation_languages: list of languages which can be used for translatable languages
:type translation_languages: list[dict[str, str]]
"""
self.video_id = video_id
self._manually_created_transcripts = manually_created_transcripts
self._generated_transcripts = generated_transcripts
self._translation_languages = translation_languages
| (self, video_id, manually_created_transcripts, generated_transcripts, translation_languages) |
37,136 | youtube_transcript_api._transcripts | __iter__ | null | def __iter__(self):
return iter(list(self._manually_created_transcripts.values()) + list(self._generated_transcripts.values()))
| (self) |
37,137 | youtube_transcript_api._transcripts | __str__ | null | def __str__(self):
return (
'For this video ({video_id}) transcripts are available in the following languages:\n\n'
'(MANUALLY CREATED)\n'
'{available_manually_created_transcript_languages}\n\n'
'(GENERATED)\n'
'{available_generated_transcripts}\n\n'
'(TRANSLATION LANGUAGES)\n'
'{available_translation_languages}'
).format(
video_id=self.video_id,
available_manually_created_transcript_languages=self._get_language_description(
str(transcript) for transcript in self._manually_created_transcripts.values()
),
available_generated_transcripts=self._get_language_description(
str(transcript) for transcript in self._generated_transcripts.values()
),
available_translation_languages=self._get_language_description(
'{language_code} ("{language}")'.format(
language=translation_language['language'],
language_code=translation_language['language_code'],
) for translation_language in self._translation_languages
)
)
| (self) |
37,138 | youtube_transcript_api._transcripts | _find_transcript | null | def _find_transcript(self, language_codes, transcript_dicts):
for language_code in language_codes:
for transcript_dict in transcript_dicts:
if language_code in transcript_dict:
return transcript_dict[language_code]
raise NoTranscriptFound(
self.video_id,
language_codes,
self
)
| (self, language_codes, transcript_dicts) |
37,139 | youtube_transcript_api._transcripts | _get_language_description | null | def _get_language_description(self, transcript_strings):
description = '\n'.join(' - {transcript}'.format(transcript=transcript) for transcript in transcript_strings)
return description if description else 'None'
| (self, transcript_strings) |
37,140 | youtube_transcript_api._transcripts | build |
Factory method for TranscriptList.
:param http_client: http client which is used to make the transcript retrieving http calls
:type http_client: requests.Session
:param video_id: the id of the video this TranscriptList is for
:type video_id: str
:param captions_json: the JSON parsed from the YouTube pages static HTML
:type captions_json: dict
:return: the created TranscriptList
:rtype TranscriptList:
| @staticmethod
def build(http_client, video_id, captions_json):
"""
Factory method for TranscriptList.
:param http_client: http client which is used to make the transcript retrieving http calls
:type http_client: requests.Session
:param video_id: the id of the video this TranscriptList is for
:type video_id: str
:param captions_json: the JSON parsed from the YouTube pages static HTML
:type captions_json: dict
:return: the created TranscriptList
:rtype TranscriptList:
"""
translation_languages = [
{
'language': translation_language['languageName']['simpleText'],
'language_code': translation_language['languageCode'],
} for translation_language in captions_json.get('translationLanguages', [])
]
manually_created_transcripts = {}
generated_transcripts = {}
for caption in captions_json['captionTracks']:
if caption.get('kind', '') == 'asr':
transcript_dict = generated_transcripts
else:
transcript_dict = manually_created_transcripts
transcript_dict[caption['languageCode']] = Transcript(
http_client,
video_id,
caption['baseUrl'],
caption['name']['simpleText'],
caption['languageCode'],
caption.get('kind', '') == 'asr',
translation_languages if caption.get('isTranslatable', False) else [],
)
return TranscriptList(
video_id,
manually_created_transcripts,
generated_transcripts,
translation_languages,
)
| (http_client, video_id, captions_json) |
37,141 | youtube_transcript_api._transcripts | find_generated_transcript |
Finds an automatically generated transcript for a given language code.
:param language_codes: A list of language codes in a descending priority. For example, if this is set to
['de', 'en'] it will first try to fetch the german transcript (de) and then fetch the english transcript (en) if
it fails to do so.
:type languages: list[str]
:return: the found Transcript
:rtype Transcript:
:raises: NoTranscriptFound
| def find_generated_transcript(self, language_codes):
"""
Finds an automatically generated transcript for a given language code.
:param language_codes: A list of language codes in a descending priority. For example, if this is set to
['de', 'en'] it will first try to fetch the german transcript (de) and then fetch the english transcript (en) if
it fails to do so.
:type languages: list[str]
:return: the found Transcript
:rtype Transcript:
:raises: NoTranscriptFound
"""
return self._find_transcript(language_codes, [self._generated_transcripts])
| (self, language_codes) |
37,142 | youtube_transcript_api._transcripts | find_manually_created_transcript |
Finds a manually created transcript for a given language code.
:param language_codes: A list of language codes in a descending priority. For example, if this is set to
['de', 'en'] it will first try to fetch the german transcript (de) and then fetch the english transcript (en) if
it fails to do so.
:type languages: list[str]
:return: the found Transcript
:rtype Transcript:
:raises: NoTranscriptFound
| def find_manually_created_transcript(self, language_codes):
"""
Finds a manually created transcript for a given language code.
:param language_codes: A list of language codes in a descending priority. For example, if this is set to
['de', 'en'] it will first try to fetch the german transcript (de) and then fetch the english transcript (en) if
it fails to do so.
:type languages: list[str]
:return: the found Transcript
:rtype Transcript:
:raises: NoTranscriptFound
"""
return self._find_transcript(language_codes, [self._manually_created_transcripts])
| (self, language_codes) |
37,143 | youtube_transcript_api._transcripts | find_transcript |
Finds a transcript for a given language code. Manually created transcripts are returned first and only if none
are found, generated transcripts are used. If you only want generated transcripts, use
`find_generated_transcript` instead.
:param language_codes: A list of language codes in a descending priority. For example, if this is set to
['de', 'en'] it will first try to fetch the german transcript (de) and then fetch the english transcript (en) if
it fails to do so.
:type languages: list[str]
:return: the found Transcript
:rtype Transcript:
:raises: NoTranscriptFound
| def find_transcript(self, language_codes):
"""
Finds a transcript for a given language code. Manually created transcripts are returned first and only if none
are found, generated transcripts are used. If you only want generated transcripts, use
`find_generated_transcript` instead.
:param language_codes: A list of language codes in a descending priority. For example, if this is set to
['de', 'en'] it will first try to fetch the german transcript (de) and then fetch the english transcript (en) if
it fails to do so.
:type languages: list[str]
:return: the found Transcript
:rtype Transcript:
:raises: NoTranscriptFound
"""
return self._find_transcript(language_codes, [self._manually_created_transcripts, self._generated_transcripts])
| (self, language_codes) |
37,144 | youtube_transcript_api._errors | TranscriptsDisabled | null | class TranscriptsDisabled(CouldNotRetrieveTranscript):
CAUSE_MESSAGE = 'Subtitles are disabled for this video'
| (video_id) |
37,147 | youtube_transcript_api._errors | TranslationLanguageNotAvailable | null | class TranslationLanguageNotAvailable(CouldNotRetrieveTranscript):
CAUSE_MESSAGE = 'The requested translation language is not available'
| (video_id) |
37,150 | youtube_transcript_api._errors | VideoUnavailable | null | class VideoUnavailable(CouldNotRetrieveTranscript):
CAUSE_MESSAGE = 'The video is no longer available'
| (video_id) |
37,153 | youtube_transcript_api._errors | YouTubeRequestFailed | null | class YouTubeRequestFailed(CouldNotRetrieveTranscript):
CAUSE_MESSAGE = 'Request to YouTube failed: {reason}'
def __init__(self, video_id, http_error):
self.reason = str(http_error)
super(YouTubeRequestFailed, self).__init__(video_id)
@property
def cause(self):
return self.CAUSE_MESSAGE.format(
reason=self.reason,
)
| (video_id, http_error) |
37,154 | youtube_transcript_api._errors | __init__ | null | def __init__(self, video_id, http_error):
self.reason = str(http_error)
super(YouTubeRequestFailed, self).__init__(video_id)
| (self, video_id, http_error) |
37,156 | youtube_transcript_api._api | YouTubeTranscriptApi | null | class YouTubeTranscriptApi(object):
@classmethod
def list_transcripts(cls, video_id, proxies=None, cookies=None):
"""
Retrieves the list of transcripts which are available for a given video. It returns a `TranscriptList` object
which is iterable and provides methods to filter the list of transcripts for specific languages. While iterating
over the `TranscriptList` the individual transcripts are represented by `Transcript` objects, which provide
metadata and can either be fetched by calling `transcript.fetch()` or translated by calling
`transcript.translate('en')`. Example::
# retrieve the available transcripts
transcript_list = YouTubeTranscriptApi.list_transcripts('video_id')
# iterate over all available transcripts
for transcript in transcript_list:
# the Transcript object provides metadata properties
print(
transcript.video_id,
transcript.language,
transcript.language_code,
# whether it has been manually created or generated by YouTube
transcript.is_generated,
# a list of languages the transcript can be translated to
transcript.translation_languages,
)
# fetch the actual transcript data
print(transcript.fetch())
# translating the transcript will return another transcript object
print(transcript.translate('en').fetch())
# you can also directly filter for the language you are looking for, using the transcript list
transcript = transcript_list.find_transcript(['de', 'en'])
# or just filter for manually created transcripts
transcript = transcript_list.find_manually_created_transcript(['de', 'en'])
# or automatically generated ones
transcript = transcript_list.find_generated_transcript(['de', 'en'])
:param video_id: the youtube video id
:type video_id: str
:param proxies: a dictionary mapping of http and https proxies to be used for the network requests
:type proxies: {'http': str, 'https': str} - http://docs.python-requests.org/en/master/user/advanced/#proxies
:param cookies: a string of the path to a text file containing youtube authorization cookies
:type cookies: str
:return: the list of available transcripts
:rtype TranscriptList:
"""
with requests.Session() as http_client:
if cookies:
http_client.cookies = cls._load_cookies(cookies, video_id)
http_client.proxies = proxies if proxies else {}
return TranscriptListFetcher(http_client).fetch(video_id)
@classmethod
def get_transcripts(cls, video_ids, languages=('en',), continue_after_error=False, proxies=None,
cookies=None, preserve_formatting=False):
"""
Retrieves the transcripts for a list of videos.
:param video_ids: a list of youtube video ids
:type video_ids: list[str]
:param languages: A list of language codes in a descending priority. For example, if this is set to ['de', 'en']
it will first try to fetch the german transcript (de) and then fetch the english transcript (en) if it fails to
do so.
:type languages: list[str]
:param continue_after_error: if this is set, execution won't be stopped if an error occurs while retrieving
one of the video transcripts
:type continue_after_error: bool
:param proxies: a dictionary mapping of http and https proxies to be used for the network requests
:type proxies: {'http': str, 'https': str} - http://docs.python-requests.org/en/master/user/advanced/#proxies
:param cookies: a string of the path to a text file containing youtube authorization cookies
:type cookies: str
:param preserve_formatting: whether to keep select HTML text formatting
:type preserve_formatting: bool
:return: a tuple containing a dictionary mapping video ids onto their corresponding transcripts, and a list of
video ids which could not be retrieved
:rtype ({str: [{'text': str, 'start': float, 'duration': float}]}, [str]):
"""
assert isinstance(video_ids, list), "`video_ids` must be a list of strings"
data = {}
unretrievable_videos = []
for video_id in video_ids:
try:
data[video_id] = cls.get_transcript(video_id, languages, proxies, cookies, preserve_formatting)
except Exception as exception:
if not continue_after_error:
raise exception
unretrievable_videos.append(video_id)
return data, unretrievable_videos
@classmethod
def get_transcript(cls, video_id, languages=('en',), proxies=None, cookies=None, preserve_formatting=False):
"""
Retrieves the transcript for a single video. This is just a shortcut for calling::
YouTubeTranscriptApi.list_transcripts(video_id, proxies).find_transcript(languages).fetch()
:param video_id: the youtube video id
:type video_id: str
:param languages: A list of language codes in a descending priority. For example, if this is set to ['de', 'en']
it will first try to fetch the german transcript (de) and then fetch the english transcript (en) if it fails to
do so.
:type languages: list[str]
:param proxies: a dictionary mapping of http and https proxies to be used for the network requests
:type proxies: {'http': str, 'https': str} - http://docs.python-requests.org/en/master/user/advanced/#proxies
:param cookies: a string of the path to a text file containing youtube authorization cookies
:type cookies: str
:param preserve_formatting: whether to keep select HTML text formatting
:type preserve_formatting: bool
:return: a list of dictionaries containing the 'text', 'start' and 'duration' keys
:rtype [{'text': str, 'start': float, 'duration': float}]:
"""
assert isinstance(video_id, str), "`video_id` must be a string"
return cls.list_transcripts(video_id, proxies, cookies).find_transcript(languages).fetch(preserve_formatting=preserve_formatting)
@classmethod
def _load_cookies(cls, cookies, video_id):
try:
cookie_jar = cookiejar.MozillaCookieJar()
cookie_jar.load(cookies)
if not cookie_jar:
raise CookiesInvalid(video_id)
return cookie_jar
except CookieLoadError:
raise CookiePathInvalid(video_id)
| () |
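A hedged sketch of the batch helper `get_transcripts` described above; the video ids, proxy URL, and cookie path are placeholders.

```python
from youtube_transcript_api import YouTubeTranscriptApi

video_ids = ["video_id_1", "video_id_2"]  # placeholder ids
transcripts, failed = YouTubeTranscriptApi.get_transcripts(
    video_ids,
    languages=("de", "en"),
    continue_after_error=True,  # collect failures instead of raising on the first error
    # proxies={"https": "https://user:pass@proxy.example:3128"},  # optional placeholder
    # cookies="/path/to/cookies.txt",                             # optional placeholder
)
for video_id, snippets in transcripts.items():
    print(video_id, len(snippets), "snippets")
print("could not retrieve:", failed)
```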
37,162 | ipinfo.handler_async | AsyncHandler |
Allows client to request data for specified IP address asynchronously.
Instantiates and maintains access to cache.
| class AsyncHandler:
"""
Allows client to request data for specified IP address asynchronously.
Instantiates and maintains access to cache.
"""
def __init__(self, access_token=None, **kwargs):
"""
Initialize the Handler object with country name list and the
cache initialized.
"""
self.access_token = access_token
# load countries file
self.countries = kwargs.get("countries") or countries
# load eu countries file
self.eu_countries = kwargs.get("eu_countries") or eu_countries
# load countries flags file
self.countries_flags = kwargs.get("countries_flags") or countries_flags
# load countries currency file
self.countries_currencies = (
kwargs.get("countries_currencies") or countries_currencies
)
# load continent file
self.continents = kwargs.get("continent") or continents
# setup req opts
self.request_options = kwargs.get("request_options", {})
if "timeout" not in self.request_options:
self.request_options["timeout"] = REQUEST_TIMEOUT_DEFAULT
# setup aiohttp
self.httpsess = None
# setup cache
if "cache" in kwargs:
self.cache = kwargs["cache"]
else:
cache_options = kwargs.get("cache_options", {})
if "maxsize" not in cache_options:
cache_options["maxsize"] = CACHE_MAXSIZE
if "ttl" not in cache_options:
cache_options["ttl"] = CACHE_TTL
self.cache = DefaultCache(**cache_options)
# setup custom headers
self.headers = kwargs.get("headers", None)
async def init(self):
"""
Initializes internal aiohttp connection pool.
This isn't _required_, as the pool is initialized lazily when needed.
But in case you require non-lazy initialization, you may await this.
This is idempotent.
"""
await self._ensure_aiohttp_ready()
async def deinit(self):
"""
Deinitialize the async handler.
This is required in case you need to let go of the memory/state
associated with the async handler in a long-running process.
This is idempotent.
"""
if self.httpsess:
await self.httpsess.close()
self.httpsess = None
async def getDetails(self, ip_address=None, timeout=None):
"""Get details for specified IP address as a Details object."""
self._ensure_aiohttp_ready()
# If the supplied IP address uses the objects defined in the built-in
# module ipaddress, extract the appropriate string notation before
# formatting the URL.
if isinstance(ip_address, IPv4Address) or isinstance(
ip_address, IPv6Address
):
ip_address = ip_address.exploded
# check if bogon.
if ip_address and is_bogon(ip_address):
details = {"ip": ip_address, "bogon": True}
return Details(details)
# check cache first.
try:
cached_ipaddr = self.cache[cache_key(ip_address)]
return Details(cached_ipaddr)
except KeyError:
pass
# not in cache; do http req
url = API_URL
if ip_address:
url += "/" + ip_address
headers = handler_utils.get_headers(self.access_token, self.headers)
req_opts = {}
if timeout is not None:
req_opts["timeout"] = timeout
async with self.httpsess.get(url, headers=headers, **req_opts) as resp:
if resp.status == 429:
raise RequestQuotaExceededError()
if resp.status >= 400:
error_response = await resp.json()
error_code = resp.status
raise APIError(error_code, error_response)
details = await resp.json()
# format & cache
handler_utils.format_details(
details,
self.countries,
self.eu_countries,
self.countries_flags,
self.countries_currencies,
self.continents,
)
self.cache[cache_key(ip_address)] = details
return Details(details)
async def getBatchDetails(
self,
ip_addresses,
batch_size=None,
timeout_per_batch=BATCH_REQ_TIMEOUT_DEFAULT,
timeout_total=None,
raise_on_fail=True,
):
"""
Get details for a batch of IP addresses at once.
There is no specified limit to the number of IPs this function can
accept; it can handle as much as the user can fit in RAM (along with
all of the response data, which is at least a magnitude larger than the
input list).
The input list is broken up into batches to abide by API requirements.
The batch size can be adjusted with `batch_size` but is clipped to
`BATCH_MAX_SIZE`.
Defaults to `BATCH_MAX_SIZE`.
For each batch, `timeout_per_batch` indicates the maximum seconds to
spend waiting for the HTTP request to complete. If any batch fails with
this timeout, the whole operation fails.
Defaults to `BATCH_REQ_TIMEOUT_DEFAULT` seconds.
`timeout_total` is a seconds-denominated hard-timeout for the time
spent in HTTP operations; regardless of whether all batches have
succeeded so far, if `timeout_total` is reached, the whole operation
will fail by raising `TimeoutExceededError`.
Defaults to being turned off.
`raise_on_fail`, if turned off, will return any result retrieved so far
rather than raise an exception when errors occur, including timeout and
quota errors.
Defaults to on.
The concurrency level is currently unadjustable; coroutines will be
created and consumed for all batches at once.
"""
self._ensure_aiohttp_ready()
if batch_size is None:
batch_size = BATCH_MAX_SIZE
result = {}
# Pre-populate with anything we've got in the cache, and keep around
# the IPs not in the cache.
lookup_addresses = []
for ip_address in ip_addresses:
# If the supplied IP address uses the objects defined in the
# built-in module ipaddress extract the appropriate string notation
# before formatting the URL.
if isinstance(ip_address, IPv4Address) or isinstance(
ip_address, IPv6Address
):
ip_address = ip_address.exploded
try:
cached_ipaddr = self.cache[cache_key(ip_address)]
result[ip_address] = cached_ipaddr
except KeyError:
lookup_addresses.append(ip_address)
# all in cache - return early.
if not lookup_addresses:
return result
# do start timer if necessary
if timeout_total is not None:
start_time = time.time()
# loop over batch chunks and prepare coroutines for each.
url = API_URL + "/batch"
headers = handler_utils.get_headers(self.access_token, self.headers)
headers["content-type"] = "application/json"
# prepare coroutines that will make reqs and update results.
reqs = [
self._do_batch_req(
lookup_addresses[i : i + batch_size],
url,
headers,
timeout_per_batch,
raise_on_fail,
result,
)
for i in range(0, len(lookup_addresses), batch_size)
]
try:
_, pending = await asyncio.wait(
{*reqs},
timeout=timeout_total,
return_when=asyncio.FIRST_EXCEPTION,
)
# if all done, return result.
if not pending:
return result
# if some had a timeout, first cancel timed out stuff and wait for
# cleanup. then exit with return_or_fail.
for co in pending:
try:
co.cancel()
await co
except asyncio.CancelledError:
pass
return handler_utils.return_or_fail(
raise_on_fail, TimeoutExceededError(), result
)
except Exception as e:
return handler_utils.return_or_fail(raise_on_fail, e, result)
return result
async def _do_batch_req(
self, chunk, url, headers, timeout_per_batch, raise_on_fail, result
):
"""
Coroutine which will do the actual POST request for getBatchDetails.
"""
try:
resp = await self.httpsess.post(
url,
data=json.dumps(chunk),
headers=headers,
timeout=timeout_per_batch,
)
except Exception as e:
return handler_utils.return_or_fail(raise_on_fail, e, None)
# gather data
try:
if resp.status == 429:
raise RequestQuotaExceededError()
resp.raise_for_status()
except Exception as e:
return handler_utils.return_or_fail(raise_on_fail, e, None)
json_resp = await resp.json()
# format & fill up cache
for ip_address, details in json_resp.items():
if isinstance(details, dict):
handler_utils.format_details(
details,
self.countries,
self.eu_countries,
self.countries_flags,
self.countries_currencies,
self.continents,
)
self.cache[cache_key(ip_address)] = details
# merge cached results with new lookup
result.update(json_resp)
def _ensure_aiohttp_ready(self):
"""Ensures aiohttp internal state is initialized."""
if self.httpsess:
return
timeout = aiohttp.ClientTimeout(total=self.request_options["timeout"])
self.httpsess = aiohttp.ClientSession(timeout=timeout)
async def getBatchDetailsIter(
self,
ip_addresses,
batch_size=None,
raise_on_fail=True,
):
if batch_size is None:
batch_size = BATCH_MAX_SIZE
results = {}
lookup_addresses = []
for ip_address in ip_addresses:
if isinstance(ip_address, IPv4Address) or isinstance(
ip_address, IPv6Address
):
ip_address = ip_address.exploded
if ip_address and is_bogon(ip_address):
details = {"ip": ip_address, "bogon": True}
yield Details(details)
else:
try:
cached_ipaddr = self.cache[cache_key(ip_address)]
results[ip_address] = cached_ipaddr
except KeyError:
lookup_addresses.append(ip_address)
if not lookup_addresses:
yield results.items()
url = API_URL + "/batch"
headers = handler_utils.get_headers(self.access_token, self.headers)
headers["content-type"] = "application/json"
async def process_batch(batch):
async with aiohttp.ClientSession(headers=headers) as session:
response = await session.post(url, json=batch)
response.raise_for_status()
json_response = await response.json()
for ip_address, details in json_response.items():
self.cache[cache_key(ip_address)] = details
results[ip_address] = details
for i in range(0, len(lookup_addresses), batch_size):
batch = lookup_addresses[i : i + batch_size]
await process_batch(batch)
for ip_address, details in results.items():
if isinstance(details, dict):
handler_utils.format_details(
details,
self.countries,
self.eu_countries,
self.countries_flags,
self.countries_currencies,
self.continents,
)
yield ip_address, details
| (access_token=None, **kwargs) |
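A hedged usage sketch of the async handler above, assuming the package-level `ipinfo.getHandlerAsync()` factory and the `Details.all` accessor are available as in recent ipinfo releases; the token and IP are placeholders.

```python
import asyncio

import ipinfo  # assumes the package-level ipinfo.getHandlerAsync() factory is available


async def main():
    handler = ipinfo.getHandlerAsync("YOUR_TOKEN")  # placeholder token
    await handler.init()    # optional: the aiohttp session is otherwise created lazily
    try:
        details = await handler.getDetails("8.8.8.8")
        print(details.all)  # formatted dict (country name, flag, currency, continent, ...)
    finally:
        await handler.deinit()  # release the aiohttp session in long-running processes


asyncio.run(main())
```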
37,163 | ipinfo.handler_async | __init__ |
Initialize the Handler object with country name list and the
cache initialized.
| def __init__(self, access_token=None, **kwargs):
"""
Initialize the Handler object with country name list and the
cache initialized.
"""
self.access_token = access_token
# load countries file
self.countries = kwargs.get("countries") or countries
# load eu countries file
self.eu_countries = kwargs.get("eu_countries") or eu_countries
# load countries flags file
self.countries_flags = kwargs.get("countries_flags") or countries_flags
# load countries currency file
self.countries_currencies = (
kwargs.get("countries_currencies") or countries_currencies
)
# load continent file
self.continents = kwargs.get("continent") or continents
# setup req opts
self.request_options = kwargs.get("request_options", {})
if "timeout" not in self.request_options:
self.request_options["timeout"] = REQUEST_TIMEOUT_DEFAULT
# setup aiohttp
self.httpsess = None
# setup cache
if "cache" in kwargs:
self.cache = kwargs["cache"]
else:
cache_options = kwargs.get("cache_options", {})
if "maxsize" not in cache_options:
cache_options["maxsize"] = CACHE_MAXSIZE
if "ttl" not in cache_options:
cache_options["ttl"] = CACHE_TTL
self.cache = DefaultCache(**cache_options)
# setup custom headers
self.headers = kwargs.get("headers", None)
| (self, access_token=None, **kwargs) |
37,164 | ipinfo.handler_async | _do_batch_req |
Coroutine which will do the actual POST request for getBatchDetails.
| async def _do_batch_req(
    self, chunk, url, headers, timeout_per_batch, raise_on_fail, result
):
    """
    Coroutine which will do the actual POST request for getBatchDetails.
    """
    try:
        resp = await self.httpsess.post(
            url,
            data=json.dumps(chunk),
            headers=headers,
            timeout=timeout_per_batch,
        )
    except Exception as e:
        return handler_utils.return_or_fail(raise_on_fail, e, None)
    # gather data
    try:
        if resp.status == 429:
            raise RequestQuotaExceededError()
        resp.raise_for_status()
    except Exception as e:
        return handler_utils.return_or_fail(raise_on_fail, e, None)
    json_resp = await resp.json()
    # format & fill up cache
    for ip_address, details in json_resp.items():
        if isinstance(details, dict):
            handler_utils.format_details(
                details,
                self.countries,
                self.eu_countries,
                self.countries_flags,
                self.countries_currencies,
                self.continents,
            )
            self.cache[cache_key(ip_address)] = details
    # merge cached results with new lookup
    result.update(json_resp)
| (self, chunk, url, headers, timeout_per_batch, raise_on_fail, result) |
37,165 | ipinfo.handler_async | _ensure_aiohttp_ready | Ensures aiohttp internal state is initialized. | def _ensure_aiohttp_ready(self):
"""Ensures aiohttp internal state is initialized."""
if self.httpsess:
return
timeout = aiohttp.ClientTimeout(total=self.request_options["timeout"])
self.httpsess = aiohttp.ClientSession(timeout=timeout)
| (self) |
37,166 | ipinfo.handler_async | deinit |
Deinitialize the async handler.
This is required in case you need to let go of the memory/state
associated with the async handler in a long-running process.
This is idempotent.
| async def deinit(self):
    """
    Deinitialize the async handler.
    This is required in case you need to let go of the memory/state
    associated with the async handler in a long-running process.
    This is idempotent.
    """
    if self.httpsess:
        await self.httpsess.close()
        self.httpsess = None
| (self) |
37,167 | ipinfo.handler_async | getBatchDetails |
Get details for a batch of IP addresses at once.
There is no specified limit to the number of IPs this function can
accept; it can handle as much as the user can fit in RAM (along with
all of the response data, which is at least a magnitude larger than the
input list).
The input list is broken up into batches to abide by API requirements.
The batch size can be adjusted with `batch_size` but is clipped to
`BATCH_MAX_SIZE`.
Defaults to `BATCH_MAX_SIZE`.
For each batch, `timeout_per_batch` indicates the maximum seconds to
spend waiting for the HTTP request to complete. If any batch fails with
this timeout, the whole operation fails.
Defaults to `BATCH_REQ_TIMEOUT_DEFAULT` seconds.
`timeout_total` is a seconds-denominated hard-timeout for the time
spent in HTTP operations; regardless of whether all batches have
succeeded so far, if `timeout_total` is reached, the whole operation
will fail by raising `TimeoutExceededError`.
Defaults to being turned off.
`raise_on_fail`, if turned off, will return any result retrieved so far
rather than raise an exception when errors occur, including timeout and
quota errors.
Defaults to on.
The concurrency level is currently unadjustable; coroutines will be
created and consumed for all batches at once.
| async def getBatchDetails(
    self,
    ip_addresses,
    batch_size=None,
    timeout_per_batch=BATCH_REQ_TIMEOUT_DEFAULT,
    timeout_total=None,
    raise_on_fail=True,
):
    """
    Get details for a batch of IP addresses at once.
    There is no specified limit to the number of IPs this function can
    accept; it can handle as much as the user can fit in RAM (along with
    all of the response data, which is at least a magnitude larger than the
    input list).
    The input list is broken up into batches to abide by API requirements.
    The batch size can be adjusted with `batch_size` but is clipped to
    `BATCH_MAX_SIZE`.
    Defaults to `BATCH_MAX_SIZE`.
    For each batch, `timeout_per_batch` indicates the maximum seconds to
    spend waiting for the HTTP request to complete. If any batch fails with
    this timeout, the whole operation fails.
    Defaults to `BATCH_REQ_TIMEOUT_DEFAULT` seconds.
    `timeout_total` is a seconds-denominated hard-timeout for the time
    spent in HTTP operations; regardless of whether all batches have
    succeeded so far, if `timeout_total` is reached, the whole operation
    will fail by raising `TimeoutExceededError`.
    Defaults to being turned off.
    `raise_on_fail`, if turned off, will return any result retrieved so far
    rather than raise an exception when errors occur, including timeout and
    quota errors.
    Defaults to on.
    The concurrency level is currently unadjustable; coroutines will be
    created and consumed for all batches at once.
    """
    self._ensure_aiohttp_ready()
    if batch_size is None:
        batch_size = BATCH_MAX_SIZE
    result = {}
    # Pre-populate with anything we've got in the cache, and keep around
    # the IPs not in the cache.
    lookup_addresses = []
    for ip_address in ip_addresses:
        # If the supplied IP address uses the objects defined in the
        # built-in module ipaddress extract the appropriate string notation
        # before formatting the URL.
        if isinstance(ip_address, IPv4Address) or isinstance(
            ip_address, IPv6Address
        ):
            ip_address = ip_address.exploded
        try:
            cached_ipaddr = self.cache[cache_key(ip_address)]
            result[ip_address] = cached_ipaddr
        except KeyError:
            lookup_addresses.append(ip_address)
    # all in cache - return early.
    if not lookup_addresses:
        return result
    # do start timer if necessary
    if timeout_total is not None:
        start_time = time.time()
    # loop over batch chunks and prepare coroutines for each.
    url = API_URL + "/batch"
    headers = handler_utils.get_headers(self.access_token, self.headers)
    headers["content-type"] = "application/json"
    # prepare coroutines that will make reqs and update results.
    reqs = [
        self._do_batch_req(
            lookup_addresses[i : i + batch_size],
            url,
            headers,
            timeout_per_batch,
            raise_on_fail,
            result,
        )
        for i in range(0, len(lookup_addresses), batch_size)
    ]
    try:
        _, pending = await asyncio.wait(
            {*reqs},
            timeout=timeout_total,
            return_when=asyncio.FIRST_EXCEPTION,
        )
        # if all done, return result.
        if not pending:
            return result
        # if some had a timeout, first cancel timed out stuff and wait for
        # cleanup. then exit with return_or_fail.
        for co in pending:
            try:
                co.cancel()
                await co
            except asyncio.CancelledError:
                pass
        return handler_utils.return_or_fail(
            raise_on_fail, TimeoutExceededError(), result
        )
    except Exception as e:
        return handler_utils.return_or_fail(raise_on_fail, e, result)
    return result
| (self, ip_addresses, batch_size=None, timeout_per_batch=5, timeout_total=None, raise_on_fail=True) |
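A hedged sketch of calling `getBatchDetails` as documented above; the token and IPs are placeholders, and `ipinfo.getHandlerAsync()` is assumed to exist.

```python
import asyncio

import ipinfo  # assumes ipinfo.getHandlerAsync() is available


async def main():
    handler = ipinfo.getHandlerAsync("YOUR_TOKEN")  # placeholder token
    result = await handler.getBatchDetails(
        ["1.1.1.1", "8.8.8.8"],   # placeholder IPs
        timeout_per_batch=5,
        timeout_total=60,
        raise_on_fail=False,      # on timeout/quota errors, return whatever was retrieved so far
    )
    for ip, details in result.items():
        print(ip, details.get("country"))
    await handler.deinit()


asyncio.run(main())
```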
37,168 | ipinfo.handler_async | getBatchDetailsIter | null | async def getBatchDetailsIter(
    self,
    ip_addresses,
    batch_size=None,
    raise_on_fail=True,
):
    if batch_size is None:
        batch_size = BATCH_MAX_SIZE
    results = {}
    lookup_addresses = []
    for ip_address in ip_addresses:
        if isinstance(ip_address, IPv4Address) or isinstance(
            ip_address, IPv6Address
        ):
            ip_address = ip_address.exploded
        if ip_address and is_bogon(ip_address):
            details = {"ip": ip_address, "bogon": True}
            yield Details(details)
        else:
            try:
                cached_ipaddr = self.cache[cache_key(ip_address)]
                results[ip_address] = cached_ipaddr
            except KeyError:
                lookup_addresses.append(ip_address)
    if not lookup_addresses:
        yield results.items()
    url = API_URL + "/batch"
    headers = handler_utils.get_headers(self.access_token, self.headers)
    headers["content-type"] = "application/json"
    async def process_batch(batch):
        async with aiohttp.ClientSession(headers=headers) as session:
            response = await session.post(url, json=batch)
            response.raise_for_status()
            json_response = await response.json()
            for ip_address, details in json_response.items():
                self.cache[cache_key(ip_address)] = details
                results[ip_address] = details
    for i in range(0, len(lookup_addresses), batch_size):
        batch = lookup_addresses[i : i + batch_size]
        await process_batch(batch)
    for ip_address, details in results.items():
        if isinstance(details, dict):
            handler_utils.format_details(
                details,
                self.countries,
                self.eu_countries,
                self.countries_flags,
                self.countries_currencies,
                self.continents,
            )
        yield ip_address, details
| (self, ip_addresses, batch_size=None, raise_on_fail=True) |
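A hedged sketch of consuming the async generator `getBatchDetailsIter`; note the mixed yield types visible in the code above. Token and IPs are placeholders, and `ipinfo.getHandlerAsync()` is assumed to exist.

```python
import asyncio

import ipinfo  # assumes ipinfo.getHandlerAsync() is available


async def main():
    handler = ipinfo.getHandlerAsync("YOUR_TOKEN")  # placeholder token
    async for item in handler.getBatchDetailsIter(["127.0.0.1", "8.8.8.8"]):
        # Bogon addresses are yielded as Details objects, while API/cache
        # results are yielded as (ip, details) tuples.
        print(item)
    await handler.deinit()


asyncio.run(main())
```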
37,169 | ipinfo.handler_async | getDetails | Get details for specified IP address as a Details object. | async def getDetails(self, ip_address=None, timeout=None):
    """Get details for specified IP address as a Details object."""
    self._ensure_aiohttp_ready()
    # If the supplied IP address uses the objects defined in the built-in
    # module ipaddress, extract the appropriate string notation before
    # formatting the URL.
    if isinstance(ip_address, IPv4Address) or isinstance(
        ip_address, IPv6Address
    ):
        ip_address = ip_address.exploded
    # check if bogon.
    if ip_address and is_bogon(ip_address):
        details = {"ip": ip_address, "bogon": True}
        return Details(details)
    # check cache first.
    try:
        cached_ipaddr = self.cache[cache_key(ip_address)]
        return Details(cached_ipaddr)
    except KeyError:
        pass
    # not in cache; do http req
    url = API_URL
    if ip_address:
        url += "/" + ip_address
    headers = handler_utils.get_headers(self.access_token, self.headers)
    req_opts = {}
    if timeout is not None:
        req_opts["timeout"] = timeout
    async with self.httpsess.get(url, headers=headers, **req_opts) as resp:
        if resp.status == 429:
            raise RequestQuotaExceededError()
        if resp.status >= 400:
            error_response = await resp.json()
            error_code = resp.status
            raise APIError(error_code, error_response)
        details = await resp.json()
    # format & cache
    handler_utils.format_details(
        details,
        self.countries,
        self.eu_countries,
        self.countries_flags,
        self.countries_currencies,
        self.continents,
    )
    self.cache[cache_key(ip_address)] = details
    return Details(details)
| (self, ip_address=None, timeout=None) |
37,170 | ipinfo.handler_async | init |
Initializes internal aiohttp connection pool.
This isn't _required_, as the pool is initialized lazily when needed.
But in case you require non-lazy initialization, you may await this.
This is idempotent.
| async def init(self):
    """
    Initializes internal aiohttp connection pool.
    This isn't _required_, as the pool is initialized lazily when needed.
    But in case you require non-lazy initialization, you may await this.
    This is idempotent.
    """
    await self._ensure_aiohttp_ready()
| (self) |
37,171 | ipinfo.handler | Handler |
Allows client to request data for specified IP address.
Instantiates and maintains access to cache.
| class Handler:
"""
Allows client to request data for specified IP address.
Instantiates and maintains access to cache.
"""
def __init__(self, access_token=None, **kwargs):
"""
Initialize the Handler object with country name list and the
cache initialized.
"""
self.access_token = access_token
# load countries file
self.countries = kwargs.get("countries") or countries
# load eu countries file
self.eu_countries = kwargs.get("eu_countries") or eu_countries
# load countries flags file
self.countries_flags = kwargs.get("countries_flags") or countries_flags
# load countries currency file
self.countries_currencies = (
kwargs.get("countries_currencies") or countries_currencies
)
# load continent file
self.continents = kwargs.get("continent") or continents
# setup req opts
self.request_options = kwargs.get("request_options", {})
if "timeout" not in self.request_options:
self.request_options["timeout"] = REQUEST_TIMEOUT_DEFAULT
# setup cache
if "cache" in kwargs:
self.cache = kwargs["cache"]
else:
cache_options = kwargs.get("cache_options", {})
if "maxsize" not in cache_options:
cache_options["maxsize"] = CACHE_MAXSIZE
if "ttl" not in cache_options:
cache_options["ttl"] = CACHE_TTL
self.cache = DefaultCache(**cache_options)
# setup custom headers
self.headers = kwargs.get("headers", None)
def getDetails(self, ip_address=None, timeout=None):
"""
Get details for specified IP address as a Details object.
If `timeout` is not `None`, it will override the client-level timeout
just for this operation.
"""
# If the supplied IP address uses the objects defined in the built-in
# module ipaddress extract the appropriate string notation before
# formatting the URL.
if isinstance(ip_address, IPv4Address) or isinstance(
ip_address, IPv6Address
):
ip_address = ip_address.exploded
# check if bogon.
if ip_address and is_bogon(ip_address):
details = {}
details["ip"] = ip_address
details["bogon"] = True
return Details(details)
# check cache first.
try:
cached_ipaddr = self.cache[cache_key(ip_address)]
return Details(cached_ipaddr)
except KeyError:
pass
# prepare req http opts
req_opts = {**self.request_options}
if timeout is not None:
req_opts["timeout"] = timeout
# not in cache; do http req
url = API_URL
if ip_address:
url += "/" + ip_address
headers = handler_utils.get_headers(self.access_token, self.headers)
response = requests.get(url, headers=headers, **req_opts)
if response.status_code == 429:
raise RequestQuotaExceededError()
if response.status_code >= 400:
error_response = response.json()
error_code = response.status_code
raise APIError(error_code, error_response)
details = response.json()
# format & cache
handler_utils.format_details(
details,
self.countries,
self.eu_countries,
self.countries_flags,
self.countries_currencies,
self.continents,
)
self.cache[cache_key(ip_address)] = details
return Details(details)
def getBatchDetails(
self,
ip_addresses,
batch_size=None,
timeout_per_batch=BATCH_REQ_TIMEOUT_DEFAULT,
timeout_total=None,
raise_on_fail=True,
):
"""
Get details for a batch of IP addresses at once.
There is no specified limit to the number of IPs this function can
accept; it can handle as much as the user can fit in RAM (along with
all of the response data, which is at least a magnitude larger than the
input list).
The input list is broken up into batches to abide by API requirements.
The batch size can be adjusted with `batch_size` but is clipped to
`BATCH_MAX_SIZE`.
Defaults to `BATCH_MAX_SIZE`.
For each batch, `timeout_per_batch` indicates the maximum seconds to
spend waiting for the HTTP request to complete. If any batch fails with
this timeout, the whole operation fails.
Defaults to `BATCH_REQ_TIMEOUT_DEFAULT` seconds.
`timeout_total` is a seconds-denominated hard-timeout for the time
spent in HTTP operations; regardless of whether all batches have
succeeded so far, if `timeout_total` is reached, the whole operation
will fail by raising `TimeoutExceededError`.
Defaults to being turned off.
`raise_on_fail`, if turned off, will return any result retrieved so far
rather than raise an exception when errors occur, including timeout and
quota errors.
Defaults to on.
"""
if batch_size is None:
batch_size = BATCH_MAX_SIZE
result = {}
lookup_addresses = []
# pre-populate with anything we've got in the cache, and keep around
# the IPs not in the cache.
for ip_address in ip_addresses:
# if the supplied IP address uses the objects defined in the
# built-in module ipaddress extract the appropriate string notation
# before formatting the URL.
if isinstance(ip_address, IPv4Address) or isinstance(
ip_address, IPv6Address
):
ip_address = ip_address.exploded
if ip_address and is_bogon(ip_address):
details = {}
details["ip"] = ip_address
details["bogon"] = True
result[ip_address] = Details(details)
else:
try:
cached_ipaddr = self.cache[cache_key(ip_address)]
result[ip_address] = cached_ipaddr
except KeyError:
lookup_addresses.append(ip_address)
# all in cache - return early.
if len(lookup_addresses) == 0:
return result
# do start timer if necessary
if timeout_total is not None:
start_time = time.time()
# prepare req http options
req_opts = {**self.request_options, "timeout": timeout_per_batch}
# loop over batch chunks and do lookup for each.
url = API_URL + "/batch"
headers = handler_utils.get_headers(self.access_token, self.headers)
headers["content-type"] = "application/json"
for i in range(0, len(lookup_addresses), batch_size):
# quit if total timeout is reached.
if (
timeout_total is not None
and time.time() - start_time > timeout_total
):
return handler_utils.return_or_fail(
raise_on_fail, TimeoutExceededError(), result
)
chunk = lookup_addresses[i : i + batch_size]
# lookup
try:
response = requests.post(
url, json=chunk, headers=headers, **req_opts
)
except Exception as e:
return handler_utils.return_or_fail(raise_on_fail, e, result)
# fail on bad status codes
try:
if response.status_code == 429:
raise RequestQuotaExceededError()
response.raise_for_status()
except Exception as e:
return handler_utils.return_or_fail(raise_on_fail, e, result)
# fill cache
json_response = response.json()
for ip_address, details in json_response.items():
self.cache[cache_key(ip_address)] = details
# merge cached results with new lookup
result.update(json_response)
# format all
for detail in result.values():
if isinstance(detail, dict):
handler_utils.format_details(
detail,
self.countries,
self.eu_countries,
self.countries_flags,
self.countries_currencies,
self.continents,
)
return result
def getMap(self, ips):
"""
Gets a URL to a map on https://ipinfo.io/map given a list of IPs (max
500,000).
"""
ip_strs = []
for ip in ips:
# if the supplied IP address uses the objects defined in the
# built-in module ipaddress extract the appropriate string notation
# before formatting the URL.
if isinstance(ip, IPv4Address) or isinstance(ip, IPv6Address):
ip = ip.exploded
ip_strs.append(ip)
req_opts = {**self.request_options}
url = f"{API_URL}/map?cli=1"
headers = handler_utils.get_headers(None, self.headers)
headers["content-type"] = "application/json"
response = requests.post(
url, json=ip_strs, headers=headers, **req_opts
)
response.raise_for_status()
return response.json()["reportUrl"]
def getBatchDetailsIter(
self,
ip_addresses,
batch_size=None,
raise_on_fail=True,
):
if batch_size is None:
batch_size = BATCH_MAX_SIZE
result = {}
lookup_addresses = []
for ip_address in ip_addresses:
if isinstance(ip_address, IPv4Address) or isinstance(
ip_address, IPv6Address
):
ip_address = ip_address.exploded
if ip_address and is_bogon(ip_address):
details = {}
details["ip"] = ip_address
details["bogon"] = True
yield Details(details)
else:
try:
cached_ipaddr = self.cache[cache_key(ip_address)]
result[ip_address] = cached_ipaddr
except KeyError:
lookup_addresses.append(ip_address)
# all in cache - exit early.
# (PEP 479: raising StopIteration inside a generator surfaces as a
# RuntimeError, so the generator is ended with a plain return instead.)
if len(lookup_addresses) == 0:
    return
url = API_URL + "/batch"
headers = handler_utils.get_headers(self.access_token, self.headers)
headers["content-type"] = "application/json"
for i in range(0, len(lookup_addresses), batch_size):
batch = lookup_addresses[i : i + batch_size]
try:
response = requests.post(url, json=batch, headers=headers)
except Exception as e:
raise e
try:
if response.status_code == 429:
raise RequestQuotaExceededError()
response.raise_for_status()
except Exception as e:
return handler_utils.return_or_fail(raise_on_fail, e)
details = response.json()
# format & cache
handler_utils.format_details(
details,
self.countries,
self.eu_countries,
self.countries_flags,
self.countries_currencies,
self.continents,
)
for ip in batch:
detail = details.get(ip)
self.cache[cache_key(ip)] = detail
yield detail
| (access_token=None, **kwargs) |
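A hedged construction sketch follows, exercising the keyword options the constructor above reads (`request_options`, `cache_options`, `headers`); the token and option values are illustrative only.

```python
# Sketch of constructing the synchronous Handler with the kwargs read above;
# the access token and option values are placeholders.
from ipinfo.handler import Handler

handler = Handler(
    "YOUR_TOKEN",
    request_options={"timeout": 3},               # per-request timeout (seconds)
    cache_options={"maxsize": 1024, "ttl": 600},  # passed to DefaultCache
    headers={"user-agent": "my-app/1.0"},         # extra HTTP headers
)
print(handler.getDetails("8.8.8.8").all)
```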
37,172 | ipinfo.handler | __init__ |
Initialize the Handler object with country name list and the
cache initialized.
| def __init__(self, access_token=None, **kwargs):
"""
Initialize the Handler object with country name list and the
cache initialized.
"""
self.access_token = access_token
# load countries file
self.countries = kwargs.get("countries") or countries
# load eu countries file
self.eu_countries = kwargs.get("eu_countries") or eu_countries
# load countries flags file
self.countries_flags = kwargs.get("countries_flags") or countries_flags
# load countries currency file
self.countries_currencies = (
kwargs.get("countries_currencies") or countries_currencies
)
# load continent file
self.continents = kwargs.get("continent") or continents
# setup req opts
self.request_options = kwargs.get("request_options", {})
if "timeout" not in self.request_options:
self.request_options["timeout"] = REQUEST_TIMEOUT_DEFAULT
# setup cache
if "cache" in kwargs:
self.cache = kwargs["cache"]
else:
cache_options = kwargs.get("cache_options", {})
if "maxsize" not in cache_options:
cache_options["maxsize"] = CACHE_MAXSIZE
if "ttl" not in cache_options:
cache_options["ttl"] = CACHE_TTL
self.cache = DefaultCache(**cache_options)
# setup custom headers
self.headers = kwargs.get("headers", None)
| (self, access_token=None, **kwargs) |
37,173 | ipinfo.handler | getBatchDetails |
Get details for a batch of IP addresses at once.
There is no specified limit to the number of IPs this function can
accept; it can handle as much as the user can fit in RAM (along with
all of the response data, which is at least a magnitude larger than the
input list).
The input list is broken up into batches to abide by API requirements.
The batch size can be adjusted with `batch_size` but is clipped to
`BATCH_MAX_SIZE`.
Defaults to `BATCH_MAX_SIZE`.
For each batch, `timeout_per_batch` indicates the maximum seconds to
spend waiting for the HTTP request to complete. If any batch fails with
this timeout, the whole operation fails.
Defaults to `BATCH_REQ_TIMEOUT_DEFAULT` seconds.
`timeout_total` is a seconds-denominated hard-timeout for the time
spent in HTTP operations; regardless of whether all batches have
succeeded so far, if `timeout_total` is reached, the whole operation
will fail by raising `TimeoutExceededError`.
Defaults to being turned off.
`raise_on_fail`, if turned off, will return any result retrieved so far
rather than raise an exception when errors occur, including timeout and
quota errors.
Defaults to on.
| def getBatchDetails(
self,
ip_addresses,
batch_size=None,
timeout_per_batch=BATCH_REQ_TIMEOUT_DEFAULT,
timeout_total=None,
raise_on_fail=True,
):
"""
Get details for a batch of IP addresses at once.
There is no specified limit to the number of IPs this function can
accept; it can handle as much as the user can fit in RAM (along with
all of the response data, which is at least a magnitude larger than the
input list).
The input list is broken up into batches to abide by API requirements.
The batch size can be adjusted with `batch_size` but is clipped to
`BATCH_MAX_SIZE`.
Defaults to `BATCH_MAX_SIZE`.
For each batch, `timeout_per_batch` indicates the maximum seconds to
spend waiting for the HTTP request to complete. If any batch fails with
this timeout, the whole operation fails.
Defaults to `BATCH_REQ_TIMEOUT_DEFAULT` seconds.
`timeout_total` is a seconds-denominated hard-timeout for the time
spent in HTTP operations; regardless of whether all batches have
succeeded so far, if `timeout_total` is reached, the whole operation
will fail by raising `TimeoutExceededError`.
Defaults to being turned off.
`raise_on_fail`, if turned off, will return any result retrieved so far
rather than raise an exception when errors occur, including timeout and
quota errors.
Defaults to on.
"""
if batch_size is None:
batch_size = BATCH_MAX_SIZE
result = {}
lookup_addresses = []
# pre-populate with anything we've got in the cache, and keep around
# the IPs not in the cache.
for ip_address in ip_addresses:
# if the supplied IP address uses the objects defined in the
# built-in module ipaddress extract the appropriate string notation
# before formatting the URL.
if isinstance(ip_address, IPv4Address) or isinstance(
ip_address, IPv6Address
):
ip_address = ip_address.exploded
if ip_address and is_bogon(ip_address):
details = {}
details["ip"] = ip_address
details["bogon"] = True
result[ip_address] = Details(details)
else:
try:
cached_ipaddr = self.cache[cache_key(ip_address)]
result[ip_address] = cached_ipaddr
except KeyError:
lookup_addresses.append(ip_address)
# all in cache - return early.
if len(lookup_addresses) == 0:
return result
# do start timer if necessary
if timeout_total is not None:
start_time = time.time()
# prepare req http options
req_opts = {**self.request_options, "timeout": timeout_per_batch}
# loop over batch chunks and do lookup for each.
url = API_URL + "/batch"
headers = handler_utils.get_headers(self.access_token, self.headers)
headers["content-type"] = "application/json"
for i in range(0, len(lookup_addresses), batch_size):
# quit if total timeout is reached.
if (
timeout_total is not None
and time.time() - start_time > timeout_total
):
return handler_utils.return_or_fail(
raise_on_fail, TimeoutExceededError(), result
)
chunk = lookup_addresses[i : i + batch_size]
# lookup
try:
response = requests.post(
url, json=chunk, headers=headers, **req_opts
)
except Exception as e:
return handler_utils.return_or_fail(raise_on_fail, e, result)
# fail on bad status codes
try:
if response.status_code == 429:
raise RequestQuotaExceededError()
response.raise_for_status()
except Exception as e:
return handler_utils.return_or_fail(raise_on_fail, e, result)
# fill cache
json_response = response.json()
for ip_address, details in json_response.items():
self.cache[cache_key(ip_address)] = details
# merge cached results with new lookup
result.update(json_response)
# format all
for detail in result.values():
if isinstance(detail, dict):
handler_utils.format_details(
detail,
self.countries,
self.eu_countries,
self.countries_flags,
self.countries_currencies,
self.continents,
)
return result
| (self, ip_addresses, batch_size=None, timeout_per_batch=5, timeout_total=None, raise_on_fail=True) |
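A short example of the batch lookup documented above; the IPs and token are placeholders, and the keyword arguments mirror the parameters described in the docstring.

```python
# Hedged example of getBatchDetails; values are illustrative only.
from ipinfo.handler import Handler

handler = Handler("YOUR_TOKEN")
results = handler.getBatchDetails(
    ["8.8.8.8", "1.1.1.1", "127.0.0.1"],  # the bogon is answered locally
    batch_size=1000,          # documented as being clipped to BATCH_MAX_SIZE
    timeout_per_batch=5,      # seconds per HTTP request
    timeout_total=30,         # hard cap across all batches
    raise_on_fail=False,      # return partial results instead of raising
)
for ip, details in results.items():
    print(ip, details)
```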
37,174 | ipinfo.handler | getBatchDetailsIter | null | def getBatchDetailsIter(
self,
ip_addresses,
batch_size=None,
raise_on_fail=True,
):
if batch_size is None:
batch_size = BATCH_MAX_SIZE
result = {}
lookup_addresses = []
for ip_address in ip_addresses:
if isinstance(ip_address, IPv4Address) or isinstance(
ip_address, IPv6Address
):
ip_address = ip_address.exploded
if ip_address and is_bogon(ip_address):
details = {}
details["ip"] = ip_address
details["bogon"] = True
yield Details(details)
else:
try:
cached_ipaddr = self.cache[cache_key(ip_address)]
result[ip_address] = cached_ipaddr
except KeyError:
lookup_addresses.append(ip_address)
# all in cache - exit early.
# (PEP 479: raising StopIteration inside a generator surfaces as a
# RuntimeError, so the generator is ended with a plain return instead.)
if len(lookup_addresses) == 0:
    return
url = API_URL + "/batch"
headers = handler_utils.get_headers(self.access_token, self.headers)
headers["content-type"] = "application/json"
for i in range(0, len(lookup_addresses), batch_size):
batch = lookup_addresses[i : i + batch_size]
try:
response = requests.post(url, json=batch, headers=headers)
except Exception as e:
raise e
try:
if response.status_code == 429:
raise RequestQuotaExceededError()
response.raise_for_status()
except Exception as e:
return handler_utils.return_or_fail(raise_on_fail, e)
details = response.json()
# format & cache
handler_utils.format_details(
details,
self.countries,
self.eu_countries,
self.countries_flags,
self.countries_currencies,
self.continents,
)
for ip in batch:
detail = details.get(ip)
self.cache[cache_key(ip)] = detail
yield detail
| (self, ip_addresses, batch_size=None, raise_on_fail=True) |
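The generator variant above streams results batch by batch rather than returning one dict at the end; a small sketch with a placeholder token:

```python
# Streaming sketch for getBatchDetailsIter; it yields per-IP details as each
# batch completes.
from ipinfo.handler import Handler

handler = Handler("YOUR_TOKEN")
for detail in handler.getBatchDetailsIter(["8.8.8.8", "1.1.1.1"], batch_size=1000):
    print(detail)
```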
37,175 | ipinfo.handler | getDetails |
Get details for specified IP address as a Details object.
If `timeout` is not `None`, it will override the client-level timeout
just for this operation.
| def getDetails(self, ip_address=None, timeout=None):
"""
Get details for specified IP address as a Details object.
If `timeout` is not `None`, it will override the client-level timeout
just for this operation.
"""
# If the supplied IP address uses the objects defined in the built-in
# module ipaddress extract the appropriate string notation before
# formatting the URL.
if isinstance(ip_address, IPv4Address) or isinstance(
ip_address, IPv6Address
):
ip_address = ip_address.exploded
# check if bogon.
if ip_address and is_bogon(ip_address):
details = {}
details["ip"] = ip_address
details["bogon"] = True
return Details(details)
# check cache first.
try:
cached_ipaddr = self.cache[cache_key(ip_address)]
return Details(cached_ipaddr)
except KeyError:
pass
# prepare req http opts
req_opts = {**self.request_options}
if timeout is not None:
req_opts["timeout"] = timeout
# not in cache; do http req
url = API_URL
if ip_address:
url += "/" + ip_address
headers = handler_utils.get_headers(self.access_token, self.headers)
response = requests.get(url, headers=headers, **req_opts)
if response.status_code == 429:
raise RequestQuotaExceededError()
if response.status_code >= 400:
error_response = response.json()
error_code = response.status_code
raise APIError(error_code, error_response)
details = response.json()
# format & cache
handler_utils.format_details(
details,
self.countries,
self.eu_countries,
self.countries_flags,
self.countries_currencies,
self.continents,
)
self.cache[cache_key(ip_address)] = details
return Details(details)
| (self, ip_address=None, timeout=None) |
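Two illustrative calls matching the behaviour documented above: a per-call timeout override and the local bogon short-circuit (placeholder token).

```python
# getDetails usage sketch: per-call timeout override and bogon short-circuit.
from ipinfo.handler import Handler

handler = Handler("YOUR_TOKEN")
public = handler.getDetails("8.8.8.8", timeout=2)  # overrides the client timeout
bogon = handler.getDetails("10.0.0.1")             # answered locally, no HTTP call
print(public.ip, bogon.bogon)
```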
37,176 | ipinfo.handler | getMap |
Gets a URL to a map on https://ipinfo.io/map given a list of IPs (max
500,000).
| def getMap(self, ips):
"""
Gets a URL to a map on https://ipinfo.io/map given a list of IPs (max
500,000).
"""
ip_strs = []
for ip in ips:
# if the supplied IP address uses the objects defined in the
# built-in module ipaddress extract the appropriate string notation
# before formatting the URL.
if isinstance(ip, IPv4Address) or isinstance(ip, IPv6Address):
ip = ip.exploded
ip_strs.append(ip)
req_opts = {**self.request_options}
url = f"{API_URL}/map?cli=1"
headers = handler_utils.get_headers(None, self.headers)
headers["content-type"] = "application/json"
response = requests.post(
url, json=ip_strs, headers=headers, **req_opts
)
response.raise_for_status()
return response.json()["reportUrl"]
| (self, ips) |
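A minimal sketch of requesting a map report URL as described above; the IPs are examples and, per the code, no access token is sent for this endpoint.

```python
# getMap sketch: returns a report URL on https://ipinfo.io/map.
from ipinfo.handler import Handler

handler = Handler()
report_url = handler.getMap(["8.8.8.8", "1.1.1.1", "9.9.9.9"])
print(report_url)
```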
37,183 | ipinfo | getHandler | Create and return a Handler object. | def getHandler(access_token=None, **kwargs):
"""Create and return a Handler object."""
return Handler(access_token, **kwargs)
| (access_token=None, **kwargs) |
37,184 | ipinfo | getHandlerAsync | Create and return an asynchronous Handler object. | def getHandlerAsync(access_token=None, **kwargs):
"""Create and return an asynchronous Handler object."""
return AsyncHandler(access_token, **kwargs)
| (access_token=None, **kwargs) |
37,189 | apeye_core | Domain |
:class:`typing.NamedTuple` of a URL's subdomain, domain, and suffix.
| class Domain(NamedTuple):
"""
:class:`typing.NamedTuple` of a URL's subdomain, domain, and suffix.
"""
subdomain: str
domain: str
suffix: str
@property
def registered_domain(self):
"""
Joins the domain and suffix fields with a dot, if they're both set.
.. code-block:: python
>>> URL('https://forums.bbc.co.uk').domain.registered_domain
'bbc.co.uk'
>>> URL('https://localhost:8080').domain.registered_domain
''
"""
if self.domain and self.suffix:
return self.domain + '.' + self.suffix
return ''
@property
def fqdn(self):
"""
Returns a Fully Qualified Domain Name, if there is a proper domain/suffix.
.. code-block:: python
>>> URL('https://forums.bbc.co.uk/path/to/file').domain.fqdn
'forums.bbc.co.uk'
>>> URL('https://localhost:8080').domain.fqdn
''
"""
if self.domain and self.suffix:
# self is the namedtuple (subdomain domain suffix)
return '.'.join(i for i in self if i)
return ''
@property
def ipv4(self) -> Optional[ipaddress.IPv4Address]:
"""
Returns the IPv4 address if that is what the presented domain/URL is.
.. code-block:: python
>>> URL('https://127.0.0.1/path/to/file').domain.ipv4
IPv4Address('127.0.0.1')
>>> URL('https://127.0.0.1.1/path/to/file').domain.ipv4
>>> URL('https://256.1.1.1').domain.ipv4
"""
if not (self.suffix or self.subdomain) and _tld.IP_RE.match(self.domain):
return cast(ipaddress.IPv4Address, ipaddress.ip_address(self.domain))
return None
def __repr__(self) -> str:
"""
Return a string representation of the :class:`~.Domain`.
"""
# This is necessary to get the custom docstring
repr_fmt = f"({', '.join(f'{name}=%r' for name in self._fields)})"
return f"{self.__class__.__name__}{repr_fmt % self}"
| (subdomain: str, domain: str, suffix: str) |
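The derived properties of the `Domain` named tuple can be exercised directly from a URL, mirroring the doctest examples above (assuming `apeye_core` is importable as shown in these rows).

```python
# Domain sketch based on the docstring examples above.
from apeye_core import URL

domain = URL("https://forums.bbc.co.uk/path").domain
print(domain.subdomain, domain.domain, domain.suffix)  # forums bbc co.uk
print(domain.registered_domain)                        # bbc.co.uk
print(domain.fqdn)                                     # forums.bbc.co.uk
print(URL("https://127.0.0.1/x").domain.ipv4)          # IPv4Address('127.0.0.1')
```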
37,196 | urllib.parse | ParseResult | null | class ParseResult(_ParseResultBase, _NetlocResultMixinStr):
__slots__ = ()
def geturl(self):
return urlunparse(self)
| (scheme, netloc, path, params, query, fragment) |
37,198 | namedtuple_ParseResult | __new__ | Create new instance of ParseResult(scheme, netloc, path, params, query, fragment) | from builtins import function
| (_cls, scheme, netloc, path, params, query, fragment) |
37,201 | collections | _replace | Return a new ParseResult object replacing specified fields with new values | def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = _sys.intern(str(typename))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f'_{index}'
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
f'identifiers: {name!r}')
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
f'keyword: {name!r}')
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
f'{name!r}')
if name in seen:
raise ValueError(f'Encountered duplicate field name: {name!r}')
seen.add(name)
field_defaults = {}
if defaults is not None:
defaults = tuple(defaults)
if len(defaults) > len(field_names):
raise TypeError('Got more default values than field names')
field_defaults = dict(reversed(list(zip(reversed(field_names),
reversed(defaults)))))
# Variables used in the methods and docstrings
field_names = tuple(map(_sys.intern, field_names))
num_fields = len(field_names)
arg_list = ', '.join(field_names)
if num_fields == 1:
arg_list += ','
repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
tuple_new = tuple.__new__
_dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip
# Create all the named tuple methods to be added to the class namespace
namespace = {
'_tuple_new': tuple_new,
'__builtins__': {},
'__name__': f'namedtuple_{typename}',
}
code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
__new__ = eval(code, namespace)
__new__.__name__ = '__new__'
__new__.__doc__ = f'Create new instance of {typename}({arg_list})'
if defaults is not None:
__new__.__defaults__ = defaults
@classmethod
def _make(cls, iterable):
result = tuple_new(cls, iterable)
if _len(result) != num_fields:
raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
return result
_make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
'or iterable')
def _replace(self, /, **kwds):
result = self._make(_map(kwds.pop, field_names, self))
if kwds:
raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
return result
_replace.__doc__ = (f'Return a new {typename} object replacing specified '
'fields with new values')
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + repr_fmt % self
def _asdict(self):
'Return a new dict which maps field names to their values.'
return _dict(_zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return _tuple(self)
# Modify function metadata to help with introspection and debugging
for method in (
__new__,
_make.__func__,
_replace,
__repr__,
_asdict,
__getnewargs__,
):
method.__qualname__ = f'{typename}.{method.__name__}'
# Build-up the class namespace dictionary
# and use type() to build the result class
class_namespace = {
'__doc__': f'{typename}({arg_list})',
'__slots__': (),
'_fields': field_names,
'_field_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
'_replace': _replace,
'__repr__': __repr__,
'_asdict': _asdict,
'__getnewargs__': __getnewargs__,
'__match_args__': field_names,
}
for index, name in enumerate(field_names):
doc = _sys.intern(f'Alias for field number {index}')
class_namespace[name] = _tuplegetter(index, doc)
result = type(typename, (tuple,), class_namespace)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
| (self, /, **kwds) |
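The generated `_replace` documented in this row behaves like the `Point` doctest above; applied to `ParseResult` it gives an easy way to rebuild a URL with one component swapped (standard library only).

```python
# _replace on ParseResult: swap one field and re-serialise the URL.
from urllib.parse import urlparse

parsed = urlparse("https://example.com/path?q=1")
relocated = parsed._replace(netloc="example.org")
print(relocated.geturl())  # https://example.org/path?q=1
```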
37,203 | urllib.parse | geturl | null | def geturl(self):
return urlunparse(self)
| (self) |
37,212 | apeye_core | URL |
:mod:`pathlib`-like class for URLs.
:param url: The URL to construct the :class:`~apeye.url.URL` object from.
.. versionchanged:: 0.3.0 The ``url`` parameter can now be a string or a :class:`~.apeye.url.URL`.
.. versionchanged:: 1.1.0
Added support for sorting and rich comparisons (``<``, ``<=``, ``>`` and ``>=``).
.. autoclasssumm:: URL
:autosummary-sections: Methods
:autosummary-exclude-members: __lt__,__le__,__gt__,__ge__,__init__,__hash__
.. autosummary-widths:: 1/5
.. autoclasssumm:: URL
:autosummary-sections: Attributes
| class URL(os.PathLike):
r"""
:mod:`pathlib`-like class for URLs.
:param url: The URL to construct the :class:`~apeye.url.URL` object from.
.. versionchanged:: 0.3.0 The ``url`` parameter can now be a string or a :class:`~.apeye.url.URL`.
.. versionchanged:: 1.1.0
Added support for sorting and rich comparisons (``<``, ``<=``, ``>`` and ``>=``).
.. autoclasssumm:: URL
:autosummary-sections: Methods
:autosummary-exclude-members: __lt__,__le__,__gt__,__ge__,__init__,__hash__
.. autosummary-widths:: 1/5
.. autoclasssumm:: URL
:autosummary-sections: Attributes
"""
#: URL scheme specifier
scheme: str
#: Network location part of the URL
netloc: str
#: The hierarchical path of the URL
path: URLPath
query: Dict[str, List[str]]
"""
The query parameters of the URL, if present.
.. versionadded:: 0.7.0
"""
fragment: Optional[str]
"""
The URL fragment, used to identify a part of the document. :py:obj:`None` if absent from the URL.
.. versionadded:: 0.7.0
"""
def __init__(self, url: Union[str, "URL"] = ''):
if isinstance(url, URL):
url = str(url)
if not re.match("([A-Za-z-.]+:)?//", url):
url = "//" + str(url)
scheme, netloc, parts, params, query, fragment = urlparse(url)
self.scheme: str = scheme
self.netloc: str = netloc
self.path = URLPath(parts)
self.query = parse_qs(query or '')
self.fragment = fragment or None
@property
def port(self) -> Optional[int]:
"""
The port number of the URL as an integer, if present. Default :py:obj:`None`.
.. versionadded:: 0.7.0
"""
if ':' not in self.netloc:
return None
else:
return int(self.netloc.split(':')[-1])
@classmethod
def from_parts(
cls: Type[URLType],
scheme: str,
netloc: str,
path: PathLike,
query: Optional[Mapping[Any, List]] = None,
fragment: Optional[str] = None,
) -> URLType:
"""
Construct a :class:`~apeye.url.URL` from a scheme, netloc and path.
:param scheme: The scheme of the URL, e.g ``'http'``.
:param netloc: The netloc of the URL, e.g. ``'bbc.co.uk:80'``.
:param path: The path of the URL, e.g. ``'/news'``.
:param query: The query parameters of the URL, if present.
:param fragment: The URL fragment, used to identify a part of the document.
:py:obj:`None` if absent from the URL.
Put together, the resulting path would be ``'http://bbc.co.uk:80/news'``
:rtype:
.. versionchanged:: 0.7.0 Added the ``query`` and ``fragment`` arguments.
"""
obj = cls('')
obj.scheme = scheme
obj.netloc = netloc
obj.query = dict(query or {})
obj.fragment = fragment or None
path = URLPath(path)
if path.root == '/':
obj.path = path
else:
obj.path = URLPath('/' + str(path))
return obj
def __str__(self) -> str:
"""
Returns the :class:`~apeye.url.URL` as a string.
"""
query = urlencode(self.query, doseq=True)
url = urlunparse([self.scheme, self.netloc, str(self.path), None, query, self.fragment])
if url.startswith("//"):
return url[2:]
else:
return url
def __repr__(self) -> str:
"""
Returns the string representation of the :class:`~apeye.url.URL`.
"""
return f"{self.__class__.__name__}({str(self)!r})"
def __truediv__(self: URLType, key: Union[PathLike, int]) -> URLType:
"""
Construct a new :class:`~apeye.url.URL` object for the given child of this :class:`~apeye.url.URL`.
:rtype:
.. versionchanged:: 0.7.0
* Added support for division by integers.
* Now officially supports the new path having a URL fragment and/or query parameters.
Any URL fragment or query parameters from the parent URL are not inherited by its children.
"""
try:
return self._make_child((key, ))
except TypeError:
return NotImplemented
def _make_child(self: URLType, args: Iterable[Union[PathLike, int]]) -> URLType:
"""
Construct a new :class:`~apeye.url.URL` object by combining the given arguments with this instance's path part.
.. versionadded:: 1.1.0 (private)
Except for the final path element any queries and fragments are ignored.
:returns: A new :class:`~.apeye.url.URL` representing either a subpath
(if all arguments are relative paths) or a totally different path
(if one of the arguments is absolute).
"""
parsed_args: List[ParseResult] = []
for arg in args:
raw_arg = arg
if isinstance(arg, pathlib.PurePath):
arg = arg.as_posix()
elif isinstance(arg, os.PathLike):
arg = os.fspath(arg)
elif isinstance(arg, int):
arg = str(arg)
try:
parse_result = urlparse(arg)
except AttributeError as e:
if str(e).endswith("'decode'"):
msg = f"Cannot join {type(raw_arg).__name__!r} to a {type(self.path).__name__!r}"
raise TypeError(msg) from None
else:
raise
parsed_args.append(parse_result)
try:
new_path = self.from_parts(
self.scheme,
self.netloc,
self.path.joinpath(*map(attrgetter("path"), parsed_args)),
)
except TypeError:
return NotImplemented
if parsed_args:
new_path.query = parse_qs(parsed_args[-1].query)
new_path.fragment = parsed_args[-1].fragment or None
return new_path
def joinurl(self: URLType, *args) -> URLType:
"""
Construct a new :class:`~apeye.url.URL` object by combining the given arguments with this instance's path part.
.. versionadded:: 1.1.0
Except for the final path element any queries and fragments are ignored.
:returns: A new :class:`~.apeye.url.URL` representing either a subpath
(if all arguments are relative paths) or a totally different path
(if one of the arguments is absolute).
"""
return self._make_child(args)
def __fspath__(self) -> str:
"""
Returns the file system path representation of the :class:`~.apeye.url.URL`.
This is comprised of the ``netloc`` and ``path`` attributes.
"""
return f"{self.netloc}{self.path}"
def __eq__(self, other) -> bool:
"""
Return ``self == other``.
.. latex:vspace:: -10px
.. attention::
URL fragments and query parameters are not compared.
.. seealso:: :meth:`.URL.strict_compare`, which *does* consider those attributes.
.. latex:vspace:: -20px
"""
if isinstance(other, URL):
return self.netloc == other.netloc and self.scheme == other.scheme and self.path == other.path
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, URL):
return self._parts_port < other._parts_port
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, URL):
return self._parts_port <= other._parts_port
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, URL):
return self._parts_port > other._parts_port
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, URL):
return self._parts_port >= other._parts_port
else:
return NotImplemented
def strict_compare(self, other) -> bool:
"""
Return ``self ≡ other``, comparing the scheme, netloc, path, fragment and query parameters.
.. versionadded:: 0.7.0
"""
if isinstance(other, URL):
return (
self.netloc == other.netloc and self.scheme == other.scheme and self.path == other.path
and self.query == other.query and self.fragment == other.fragment
)
else:
return NotImplemented
def __hash__(self) -> int:
"""
Returns the hash of the :class:`~apeye.url.URL`.
"""
return hash((self.scheme, self.netloc, self.path))
@property
def name(self) -> str:
"""
The final path component, if any.
"""
return self.path.name
@property
def suffix(self) -> str:
"""
The final component's last suffix, if any.
This includes the leading period. For example: ``'.txt'``.
"""
return self.path.suffix
@property
def suffixes(self) -> List[str]:
"""
A list of the final component's suffixes, if any.
These include the leading periods. For example: ``['.tar', '.gz']``.
"""
return self.path.suffixes
@property
def stem(self) -> str:
"""
The final path component, minus its last suffix.
"""
return self.path.stem
def with_name(self: URLType, name: str, inherit: bool = True) -> URLType:
"""
Return a new :class:`~apeye.url.URL` with the file name changed.
:param name:
:param inherit: Whether the new :class:`~apeye.url.URL` should inherit the query string
and fragment from this :class:`~apeye.url.URL`.
:rtype:
.. versionchanged:: 0.7.0 Added the ``inherit`` parameter.
"""
if inherit:
kwargs = {"query": self.query, "fragment": self.fragment}
else:
kwargs = {}
return self.from_parts(
self.scheme,
self.netloc,
self.path.with_name(name),
**kwargs, # type: ignore
)
def with_suffix(self: URLType, suffix: str, inherit: bool = True) -> URLType:
"""
Returns a new :class:`~apeye.url.URL` with the file suffix changed.
If the :class:`~apeye.url.URL` has no suffix, add the given suffix.
If the given suffix is an empty string, remove the suffix from the :class:`~apeye.url.URL`.
:param suffix:
:param inherit: Whether the new :class:`~apeye.url.URL` should inherit the query string
and fragment from this :class:`~apeye.url.URL`.
:rtype:
.. versionchanged:: 0.7.0 Added the ``inherit`` parameter.
"""
if inherit:
kwargs = {"query": self.query, "fragment": self.fragment}
else:
kwargs = {}
return self.from_parts(
self.scheme,
self.netloc,
self.path.with_suffix(suffix),
**kwargs, # type: ignore
)
@property
def parts(self) -> Tuple[str, ...]:
"""
An object providing sequence-like access to the components in the URL.
To retrieve only the parts of the path, use :meth:`URL.path.parts <URLPath.parts>`.
"""
return (
self.scheme,
self.domain.subdomain,
self.domain.domain,
self.domain.suffix,
*('/' / self.path).parts[1:],
)
@property
def _parts_port(self) -> Tuple:
"""
An object providing sequence-like access to the components in the URL.
Unlike ``.parts`` this includes the port.
To retrieve only the parts of the path, use :meth:`URL.path.parts <URLPath.parts>`.
.. versionadded:: 1.1.0 (private)
"""
return (
self.scheme,
self.domain.subdomain,
self.domain.domain,
self.domain.suffix,
self.port or 0,
*('/' / self.path).parts[1:],
)
@property
def parent(self: URLType) -> URLType:
"""
The logical parent of the :class:`~apeye.url.URL`.
"""
return self.from_parts(self.scheme, self.netloc, self.path.parent)
@property
def parents(self: URLType) -> Tuple[URLType, ...]:
"""
An immutable sequence providing access to the logical ancestors of the :class:`~apeye.url.URL`.
"""
return tuple(self.from_parts(self.scheme, self.netloc, path) for path in self.path.parents)
@property
def fqdn(self) -> str:
"""
Returns the Fully Qualified Domain Name of the :class:`~apeye.url.URL` .
"""
return self.domain.fqdn
@property
def domain(self) -> "Domain":
"""
Returns a :class:`apeye.url.Domain` object representing the domain part of the URL.
"""
return Domain._make(_tld.extract_tld(self.netloc))
@property
def base_url(self: URLType) -> URLType:
"""
Returns a :class:`apeye.url.URL` object representing the URL without query strings or URL fragments.
.. versionadded:: 0.7.0
"""
return self.from_parts(
self.scheme,
self.netloc,
self.path,
)
def relative_to(self, other: Union[str, "URL", URLPath]) -> URLPath:
"""
Returns a version of this URL's path relative to ``other``.
.. versionadded:: 1.1.0
:param other: Either a :class:`~.apeye.url.URL`, or a string or :class:`~.apeye.url.URLPath` representing an *absolute* path.
If a :class:`~.apeye.url.URL`, the :attr:`~.apeye.url.URL.netloc` must match this URL's.
:raises ValueError: if the operation is not possible
(i.e. because this URL's path is not a subpath of the other path)
"""
if isinstance(other, URLPath):
if not other.is_absolute():
raise ValueError("'URL.relative_to' cannot be used with relative URLPath objects")
else:
other = URL('/') / other
elif not isinstance(other, URL):
# Parse other as a URL
other = URL(other)
# Compare netloc, if both have one
if self.netloc and other.netloc and self.netloc.lower() != other.netloc.lower():
raise ValueError(f"{self!r} does not start with {other!r}")
# Make the paths absolute
# If coming from a URL they must always be absolute
our_path = '/' / self.path
other_path = '/' / other.path
relative_path = our_path.relative_to(other_path)
return relative_path
| (url: Union[str, ForwardRef('URL')] = '') |
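A hedged walk-through of the `URL` class documented above: construction, the `/` operator, `from_parts`, and `relative_to`; all values are illustrative.

```python
# URL usage sketch; the GitHub/BBC values are examples only.
from apeye_core import URL

url = URL("https://api.github.com/repos") / "octocat" / "Hello-World"
print(url)         # https://api.github.com/repos/octocat/Hello-World
print(url.path)    # /repos/octocat/Hello-World
print(url.fqdn)    # api.github.com
print(url.parent)  # one path component up

built = URL.from_parts("https", "bbc.co.uk:80", "/news", query={"page": ["2"]})
print(built.port)  # 80

print(URL("https://github.com/domdfcoding").relative_to(URL("https://github.com")))
# URLPath('domdfcoding')
```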
37,230 | apeye_core | URLPath |
Represents the path part of a URL.
Subclass of :class:`pathlib.PurePosixPath` that provides a subset of its methods.
.. versionchanged:: 1.1.0
Implemented :meth:`~.apeye.url.URLPath.is_absolute`, :meth:`~.apeye.url.URLPath.joinpath`,
:meth:`~.apeye.url.URLPath.relative_to`, :meth:`~.pathlib.PurePath.match`,
``anchor``, ``drive``, and support for rich comparisons (``<``, ``<=``, ``>`` and ``>=``),
which previously raised :exc:`NotImplementedError`.
.. latex:clearpage::
| class URLPath(pathlib.PurePosixPath):
"""
Represents the path part of a URL.
Subclass of :class:`pathlib.PurePosixPath` that provides a subset of its methods.
.. versionchanged:: 1.1.0
Implemented :meth:`~.apeye.url.URLPath.is_absolute`, :meth:`~.apeye.url.URLPath.joinpath`,
:meth:`~.apeye.url.URLPath.relative_to`, :meth:`~.pathlib.PurePath.match`,
``anchor``, ``drive``, and support for rich comparisons (``<``, ``<=``, ``>`` and ``>=``),
which previously raised :exc:`NotImplementedError`.
.. latex:clearpage::
"""
def __str__(self) -> str:
"""
Return the string representation of the path, suitable for passing to system calls.
"""
if not hasattr(self, "_root") and hasattr(self, "_load_parts"):
self._load_parts() # type: ignore[attr-defined]
try:
return self._str # type: ignore
except AttributeError:
if hasattr(self, "_parts"):
parts = self._parts # type: ignore[attr-defined]
else:
parts = self._tail # type: ignore[attr-defined]
self._str = self._format_parsed_parts(self._drv, self._root, parts) or '' # type: ignore
return self._str
def __repr__(self) -> str:
return super().__repr__()
@classmethod
def _format_parsed_parts(cls, drv, root, parts):
if drv or root:
if sys.version_info > (3, 12):
return drv + root + '/'.join(parts)
else:
return drv + root + '/'.join(parts[1:])
else:
return '/'.join(parts)
def is_absolute(self) -> bool:
"""
Returns whether the path is absolute (i.e. starts with ``/``).
.. versionadded:: 1.1.0 previously raised :exc:`NotImplementedError`.
"""
return self.root == '/'
def joinpath(self: URLPathType, *args) -> URLPathType:
"""
Combine this :class:`~.apeye.url.URLPath` with one or several arguments.
.. versionadded:: 1.1.0 previously raised :exc:`NotImplementedError`.
:returns: A new :class:`~.apeye.url.URLPath` representing either a subpath
(if all arguments are relative paths) or a totally different path
(if one of the arguments is absolute).
"""
return super().joinpath(*args)
def relative_to(self: URLPathType, *other: PathLike) -> URLPathType:
r"""
Returns the relative path to another path identified by the passed arguments.
The arguments are joined together to form a single path, and therefore the following behave identically:
.. code-block:: pycon
>>> URLPath("/news/sport").relative_to("/", "news")
URLPath('sport')
>>> URLPath("/news/sport").relative_to("/news")
URLPath('sport')
.. versionadded:: 1.1.0 previously raised :exc:`NotImplementedError`.
:param \*other:
:raises ValueError: if the operation is not possible (because this is not a subpath of the other path)
.. latex:clearpage::
.. seealso::
:meth:`~.apeye.url.URL.relative_to`, which is recommended when constructing a relative path from a :class:`~URL`.
This method cannot correctly handle some cases, such as:
.. code-block:: pycon
>>> URL("https://github.com/domdfcoding").path.relative_to(URL("https://github.com").path)
Traceback (most recent call last):
ValueError: '/domdfcoding' does not start with ''
Since ``URL("https://github.com").path`` is ``URLPath('')``.
Instead, use:
>>> URL("https://github.com/domdfcoding").relative_to(URL("https://github.com"))
URLPath('domdfcoding')
"""
return super().relative_to(*other)
def as_uri(self, *args, **kwargs) -> "NoReturn": # noqa: D102
raise NotImplementedError
| (*args) |
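A brief sketch of the `URLPath` behaviour described above, mirroring the `relative_to` doctest.

```python
# URLPath sketch: absolute check, relative_to with joined arguments, joinpath.
from apeye_core import URLPath

path = URLPath("/news/sport/football")
print(path.is_absolute())                       # True
print(path.relative_to("/", "news"))            # URLPath('sport/football')
print(URLPath("news").joinpath("sport", "uk"))  # URLPath('news/sport/uk')
```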
37,264 | domdf_python_tools.doctools | prettify_docstrings |
Decorator to prettify the default :class:`object` docstrings for use in Sphinx documentation.
.. versionadded:: 0.8.0
:param obj: The object to prettify the method docstrings for.
| def prettify_docstrings(obj: _T) -> _T:
"""
Decorator to prettify the default :class:`object` docstrings for use in Sphinx documentation.
.. versionadded:: 0.8.0
:param obj: The object to prettify the method docstrings for.
"""
repr_docstring = f"Return a string representation of the :class:`~{obj.__module__}.{obj.__name__}`."
new_docstrings = {**base_new_docstrings, "__repr__": repr_docstring}
_do_prettify(obj, object, new_docstrings)
_do_prettify(obj, dict, container_docstrings)
_do_prettify(obj, int, operator_docstrings)
_do_prettify(obj, int, base_int_docstrings)
for attribute in new_return_types:
if hasattr(obj, attribute):
annotations: Dict = getattr(getattr(obj, attribute), "__annotations__", {})
if "return" not in annotations or annotations["return"] is Any:
annotations["return"] = new_return_types[attribute]
with suppress(AttributeError, TypeError):
getattr(obj, attribute).__annotations__ = annotations
if issubclass(obj, tuple) and obj.__repr__.__doc__ == "Return a nicely formatted representation string":
obj.__repr__.__doc__ = repr_docstring
return obj
| (obj: ~_T) -> ~_T |
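A hedged sketch of applying the decorator above; exactly which dunder docstrings are rewritten depends on the `_do_prettify` helper, which is not included in this row, so the printed text is indicative rather than guaranteed.

```python
# Sketch only: assumes domdf_python_tools is installed and that _do_prettify
# fills in docstrings for dunder methods that lack their own.
from domdf_python_tools.doctools import prettify_docstrings

@prettify_docstrings
class Version(tuple):
    """A (major, minor) version number."""

    def __repr__(self) -> str:
        return f"Version{tuple(self)!r}"

# __repr__ defines no docstring of its own, so the decorator can supply the
# Sphinx-friendly "Return a string representation of the :class:`...`" text.
print(Version.__repr__.__doc__)
```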
37,269 | urllib.parse | urlunparse | Put a parsed URL back together again. This may result in a
slightly different, but equivalent URL, if the URL that was parsed
originally had redundant delimiters, e.g. a ? with an empty query
(the draft states that these are equivalent). | def urlunparse(components):
"""Put a parsed URL back together again. This may result in a
slightly different, but equivalent URL, if the URL that was parsed
originally had redundant delimiters, e.g. a ? with an empty query
(the draft states that these are equivalent)."""
scheme, netloc, url, params, query, fragment, _coerce_result = (
_coerce_args(*components))
if params:
url = "%s;%s" % (url, params)
return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment)))
| (components) |
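A round-trip example for `urlunparse`, matching the docstring's note that the result is an equivalent (if not byte-identical) URL.

```python
# urlparse/urlunparse round trip (standard library).
from urllib.parse import urlparse, urlunparse

parts = urlparse("http://example.com/path;type=a?q=1#frag")
print(urlunparse(parts))  # http://example.com/path;type=a?q=1#frag
```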
37,270 | dataclass_csv.exceptions | CsvValueError | Error when a value in the CSV file cannot be parsed. | class CsvValueError(Exception):
"""Error when a value in the CSV file cannot be parsed."""
def __init__(self, error: Any, line_number: int):
self.error: Any = error
self.line_number: int = line_number
def __str__(self):
return f"{self.error} [CSV Line number: {self.line_number}]"
| (error: Any, line_number: int) |
37,271 | dataclass_csv.exceptions | __init__ | null | def __init__(self, error: Any, line_number: int):
self.error: Any = error
self.line_number: int = line_number
| (self, error: Any, line_number: int) |
37,272 | dataclass_csv.exceptions | __str__ | null | def __str__(self):
return f"{self.error} [CSV Line number: {self.line_number}]"
| (self) |
37,273 | dataclass_csv.dataclass_reader | DataclassReader | null | class DataclassReader:
def __init__(
self,
f: Any,
cls: Type[object],
fieldnames: Optional[Sequence[str]] = None,
restkey: Optional[str] = None,
restval: Optional[Any] = None,
dialect: str = "excel",
*args: Any,
**kwds: Any,
):
if not f:
raise ValueError("The f argument is required.")
if cls is None or not dataclasses.is_dataclass(cls):
raise ValueError("cls argument needs to be a dataclass.")
self._cls = cls
self._optional_fields = self._get_optional_fields()
self._field_mapping: Dict[str, Dict[str, Any]] = {}
validate_header = kwds.pop("validate_header", True)
self._reader = csv.DictReader(
f, fieldnames, restkey, restval, dialect, *args, **kwds
)
if validate_header:
_verify_duplicate_header_items(self._reader.fieldnames)
self.type_hints = typing.get_type_hints(cls)
def _get_optional_fields(self):
return [
field.name
for field in dataclasses.fields(self._cls)
if not isinstance(field.default, dataclasses._MISSING_TYPE)
or not isinstance(field.default_factory, dataclasses._MISSING_TYPE)
]
def _add_to_mapping(self, property_name, csv_fieldname):
self._field_mapping[property_name] = csv_fieldname
def _get_metadata_option(self, field, key):
option = field.metadata.get(key, getattr(self._cls, f"__{key}__", None))
return option
def _get_default_value(self, field):
return (
field.default
if not isinstance(field.default, dataclasses._MISSING_TYPE)
else field.default_factory()
)
def _get_possible_keys(self, fieldname, row):
possible_keys = list(filter(lambda x: x.strip() == fieldname, row.keys()))
if possible_keys:
return possible_keys[0]
def _get_value(self, row, field):
is_field_mapped = False
try:
if field.name in self._field_mapping.keys():
is_field_mapped = True
key = self._field_mapping.get(field.name)
else:
key = field.name
if key in row.keys():
value = row[key]
else:
possible_key = self._get_possible_keys(field.name, row)
key = possible_key if possible_key else key
value = row[key]
except KeyError:
if field.name in self._optional_fields:
return self._get_default_value(field)
else:
keyerror_message = f"The value for the column `{field.name}`"
if is_field_mapped:
keyerror_message = f"The value for the mapped column `{key}`"
raise KeyError(f"{keyerror_message} is missing in the CSV file")
else:
if not value and field.name in self._optional_fields:
return self._get_default_value(field)
elif not value and field.name not in self._optional_fields:
raise ValueError(f"The field `{field.name}` is required.")
elif (
value
and field.type is str
and not len(value.strip())
and not self._get_metadata_option(field, "accept_whitespaces")
):
raise ValueError(
(
f"It seems like the value of `{field.name}` contains "
"only white spaces. To allow white spaces to all "
"string fields, use the @accept_whitespaces "
"decorator. "
"To allow white spaces specifically for the field "
f"`{field.name}` change its definition to: "
f"`{field.name}: str = field(metadata="
"{'accept_whitespaces': True})`."
)
)
else:
return value
def _parse_date_value(self, field, date_value, field_type):
dateformat = self._get_metadata_option(field, "dateformat")
if not isinstance(date_value, str):
return date_value
if not dateformat:
raise AttributeError(
(
"Unable to parse the datetime string value. Date format "
"not specified. To specify a date format for all "
"datetime fields in the class, use the @dateformat "
"decorator. To define a date format specifically for this "
"field, change its definition to: "
f"`{field.name}: datetime = field(metadata="
"{'dateformat': <date_format>})`."
)
)
datetime_obj = datetime.strptime(date_value, dateformat)
if field_type == date:
return datetime_obj.date()
else:
return datetime_obj
def _process_row(self, row):
values = dict()
for field in dataclasses.fields(self._cls):
if not field.init:
continue
try:
value = self._get_value(row, field)
except ValueError as ex:
raise CsvValueError(ex, line_number=self._reader.line_num) from None
if not value and field.default is None:
values[field.name] = None
continue
field_type = self.type_hints[field.name]
if is_union_type(field_type):
type_args = [x for x in get_args(field_type) if x is not type(None)]
if len(type_args) == 1:
field_type = type_args[0]
if field_type is datetime or field_type is date:
try:
transformed_value = self._parse_date_value(field, value, field_type)
except ValueError as ex:
raise CsvValueError(ex, line_number=self._reader.line_num) from None
else:
values[field.name] = transformed_value
continue
if field_type is bool:
try:
transformed_value = (
value
if isinstance(value, bool)
else strtobool(str(value).strip()) == 1
)
except ValueError as ex:
raise CsvValueError(ex, line_number=self._reader.line_num) from None
else:
values[field.name] = transformed_value
continue
try:
transformed_value = field_type(value)
except ValueError as e:
raise CsvValueError(
(
f"The field `{field.name}` is defined as {field.type} "
f"but received a value of type {type(value)}."
),
line_number=self._reader.line_num,
) from e
else:
values[field.name] = transformed_value
return self._cls(**values)
def __next__(self):
row = next(self._reader)
return self._process_row(row)
def __iter__(self):
return self
def map(self, csv_fieldname: str) -> FieldMapper:
"""Used to map a field in the CSV file to a `dataclass` field
:param csv_fieldname: The name of the CSV field
"""
return FieldMapper(
lambda property_name: self._add_to_mapping(property_name, csv_fieldname)
)
| (f: Any, cls: Type[object], fieldnames: Optional[Sequence[str]] = None, restkey: Optional[str] = None, restval: Optional[Any] = None, dialect: str = 'excel', *args: Any, **kwds: Any) |
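An end-to-end sketch of `DataclassReader` follows, using the package's documented `dateformat` decorator and the `map()` helper shown above; the CSV content and field names are invented for illustration.

```python
# DataclassReader sketch: header mapping plus a dateformat-decorated dataclass.
import io
from dataclasses import dataclass
from datetime import datetime
from dataclass_csv import DataclassReader, dateformat

@dataclass
@dateformat("%Y-%m-%d")
class User:
    name: str
    created: datetime
    email: str = ""

csv_text = "name,e-mail,created\nAlice,a@example.com,2021-01-02\n"
reader = DataclassReader(io.StringIO(csv_text), User)
reader.map("e-mail").to("email")  # CSV header -> dataclass field
for user in reader:
    print(user)
```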
37,274 | dataclass_csv.dataclass_reader | __init__ | null | def __init__(
self,
f: Any,
cls: Type[object],
fieldnames: Optional[Sequence[str]] = None,
restkey: Optional[str] = None,
restval: Optional[Any] = None,
dialect: str = "excel",
*args: Any,
**kwds: Any,
):
if not f:
raise ValueError("The f argument is required.")
if cls is None or not dataclasses.is_dataclass(cls):
raise ValueError("cls argument needs to be a dataclass.")
self._cls = cls
self._optional_fields = self._get_optional_fields()
self._field_mapping: Dict[str, Dict[str, Any]] = {}
validate_header = kwds.pop("validate_header", True)
self._reader = csv.DictReader(
f, fieldnames, restkey, restval, dialect, *args, **kwds
)
if validate_header:
_verify_duplicate_header_items(self._reader.fieldnames)
self.type_hints = typing.get_type_hints(cls)
| (self, f: Any, cls: Type[object], fieldnames: Optional[Sequence[str]] = None, restkey: Optional[str] = None, restval: Optional[Any] = None, dialect: str = 'excel', *args: Any, **kwds: Any) |
37,276 | dataclass_csv.dataclass_reader | __next__ | null | def __next__(self):
row = next(self._reader)
return self._process_row(row)
| (self) |
37,277 | dataclass_csv.dataclass_reader | _add_to_mapping | null | def _add_to_mapping(self, property_name, csv_fieldname):
self._field_mapping[property_name] = csv_fieldname
| (self, property_name, csv_fieldname) |
37,278 | dataclass_csv.dataclass_reader | _get_default_value | null | def _get_default_value(self, field):
return (
field.default
if not isinstance(field.default, dataclasses._MISSING_TYPE)
else field.default_factory()
)
| (self, field) |
37,279 | dataclass_csv.dataclass_reader | _get_metadata_option | null | def _get_metadata_option(self, field, key):
option = field.metadata.get(key, getattr(self._cls, f"__{key}__", None))
return option
| (self, field, key) |
37,280 | dataclass_csv.dataclass_reader | _get_optional_fields | null | def _get_optional_fields(self):
return [
field.name
for field in dataclasses.fields(self._cls)
if not isinstance(field.default, dataclasses._MISSING_TYPE)
or not isinstance(field.default_factory, dataclasses._MISSING_TYPE)
]
| (self) |
37,281 | dataclass_csv.dataclass_reader | _get_possible_keys | null | def _get_possible_keys(self, fieldname, row):
possible_keys = list(filter(lambda x: x.strip() == fieldname, row.keys()))
if possible_keys:
return possible_keys[0]
| (self, fieldname, row) |
37,282 | dataclass_csv.dataclass_reader | _get_value | null | def _get_value(self, row, field):
is_field_mapped = False
try:
if field.name in self._field_mapping.keys():
is_field_mapped = True
key = self._field_mapping.get(field.name)
else:
key = field.name
if key in row.keys():
value = row[key]
else:
possible_key = self._get_possible_keys(field.name, row)
key = possible_key if possible_key else key
value = row[key]
except KeyError:
if field.name in self._optional_fields:
return self._get_default_value(field)
else:
keyerror_message = f"The value for the column `{field.name}`"
if is_field_mapped:
keyerror_message = f"The value for the mapped column `{key}`"
raise KeyError(f"{keyerror_message} is missing in the CSV file")
else:
if not value and field.name in self._optional_fields:
return self._get_default_value(field)
elif not value and field.name not in self._optional_fields:
raise ValueError(f"The field `{field.name}` is required.")
elif (
value
and field.type is str
and not len(value.strip())
and not self._get_metadata_option(field, "accept_whitespaces")
):
raise ValueError(
(
f"It seems like the value of `{field.name}` contains "
"only white spaces. To allow white spaces to all "
"string fields, use the @accept_whitespaces "
"decorator. "
"To allow white spaces specifically for the field "
f"`{field.name}` change its definition to: "
f"`{field.name}: str = field(metadata="
"{'accept_whitespaces': True})`."
)
)
else:
return value
| (self, row, field) |
37,283 | dataclass_csv.dataclass_reader | _parse_date_value | null | def _parse_date_value(self, field, date_value, field_type):
dateformat = self._get_metadata_option(field, "dateformat")
if not isinstance(date_value, str):
return date_value
if not dateformat:
raise AttributeError(
(
"Unable to parse the datetime string value. Date format "
"not specified. To specify a date format for all "
"datetime fields in the class, use the @dateformat "
"decorator. To define a date format specifically for this "
"field, change its definition to: "
f"`{field.name}: datetime = field(metadata="
"{'dateformat': <date_format>})`."
)
)
datetime_obj = datetime.strptime(date_value, dateformat)
if field_type == date:
return datetime_obj.date()
else:
return datetime_obj
| (self, field, date_value, field_type) |
37,284 | dataclass_csv.dataclass_reader | _process_row | null | def _process_row(self, row):
values = dict()
for field in dataclasses.fields(self._cls):
if not field.init:
continue
try:
value = self._get_value(row, field)
except ValueError as ex:
raise CsvValueError(ex, line_number=self._reader.line_num) from None
if not value and field.default is None:
values[field.name] = None
continue
field_type = self.type_hints[field.name]
if is_union_type(field_type):
type_args = [x for x in get_args(field_type) if x is not type(None)]
if len(type_args) == 1:
field_type = type_args[0]
if field_type is datetime or field_type is date:
try:
transformed_value = self._parse_date_value(field, value, field_type)
except ValueError as ex:
raise CsvValueError(ex, line_number=self._reader.line_num) from None
else:
values[field.name] = transformed_value
continue
if field_type is bool:
try:
transformed_value = (
value
if isinstance(value, bool)
else strtobool(str(value).strip()) == 1
)
except ValueError as ex:
raise CsvValueError(ex, line_number=self._reader.line_num) from None
else:
values[field.name] = transformed_value
continue
try:
transformed_value = field_type(value)
except ValueError as e:
raise CsvValueError(
(
f"The field `{field.name}` is defined as {field.type} "
f"but received a value of type {type(value)}."
),
line_number=self._reader.line_num,
) from e
else:
values[field.name] = transformed_value
return self._cls(**values)
| (self, row) |
37,285 | dataclass_csv.dataclass_reader | map | Used to map a field in the CSV file to a `dataclass` field
:param csv_fieldname: The name of the CSV field
| def map(self, csv_fieldname: str) -> FieldMapper:
"""Used to map a field in the CSV file to a `dataclass` field
:param csv_fieldname: The name of the CSV field
"""
return FieldMapper(
lambda property_name: self._add_to_mapping(property_name, csv_fieldname)
)
| (self, csv_fieldname: str) -> dataclass_csv.field_mapper.FieldMapper |
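A hedged usage sketch of `map`: it returns a `FieldMapper`, which is assumed here to expose a `to()` method for naming the target dataclass property (that method is not shown in this excerpt); the CSV file and dataclass are illustrative:
from dataclasses import dataclass
from dataclass_csv import DataclassReader

@dataclass
class User:
    firstname: str
    lastname: str

with open('users.csv') as f:  # hypothetical input file
    reader = DataclassReader(f, User)
    # assumes FieldMapper.to() binds the CSV header to the dataclass field
    reader.map('First Name').to('firstname')
    for user in reader:
        print(user.firstname, user.lastname)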
37,286 | dataclass_csv.dataclass_writer | DataclassWriter | null | class DataclassWriter:
def __init__(
self,
f: Any,
data: List[Any],
cls: Type[object],
dialect: str = "excel",
**fmtparams: Dict[str, Any],
):
if not f:
raise ValueError("The f argument is required")
if not isinstance(data, list):
raise ValueError("Invalid 'data' argument. It must be a list")
if not dataclasses.is_dataclass(cls):
raise ValueError("Invalid 'cls' argument. It must be a dataclass")
self._data = data
self._cls = cls
self._field_mapping: Dict[str, str] = dict()
self._fieldnames = [x.name for x in dataclasses.fields(cls)]
self._writer = csv.writer(f, dialect=dialect, **fmtparams)
def _add_to_mapping(self, header: str, propname: str):
self._field_mapping[propname] = header
def _apply_mapping(self):
mapped_fields = []
for field in self._fieldnames:
mapped_item = self._field_mapping.get(field, field)
mapped_fields.append(mapped_item)
return mapped_fields
def write(self, skip_header: bool = False):
if not skip_header:
if self._field_mapping:
self._fieldnames = self._apply_mapping()
self._writer.writerow(self._fieldnames)
for item in self._data:
if not isinstance(item, self._cls):
raise TypeError(
(
f"The item [{item}] is not an instance of "
f"{self._cls.__name__}. All items on the list must be "
"instances of the same type"
)
)
row = dataclasses.astuple(item)
self._writer.writerow(row)
def map(self, propname: str) -> HeaderMapper:
"""Used to map a field in the dataclass to header item in the CSV file
:param propname: The name of the property of the dataclass to be mapped
"""
return HeaderMapper(lambda header: self._add_to_mapping(header, propname))
| (f: Any, data: List[Any], cls: Type[object], dialect: str = 'excel', **fmtparams: Dict[str, Any]) |
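A usage sketch for the writer above, assuming `HeaderMapper` exposes a `to()` method for renaming the emitted header (not shown in this excerpt); the file name and dataclass are illustrative:
from dataclasses import dataclass
from dataclass_csv import DataclassWriter

@dataclass
class User:
    firstname: str
    lastname: str

users = [User('John', 'Smith'), User('Jane', 'Doe')]
with open('users.csv', 'w', newline='') as f:
    writer = DataclassWriter(f, users, User)
    # assumes HeaderMapper.to() sets the CSV header for the mapped property
    writer.map('firstname').to('First name')
    writer.write()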
37,287 | dataclass_csv.dataclass_writer | __init__ | null | def __init__(
self,
f: Any,
data: List[Any],
cls: Type[object],
dialect: str = "excel",
**fmtparams: Dict[str, Any],
):
if not f:
raise ValueError("The f argument is required")
if not isinstance(data, list):
raise ValueError("Invalid 'data' argument. It must be a list")
if not dataclasses.is_dataclass(cls):
raise ValueError("Invalid 'cls' argument. It must be a dataclass")
self._data = data
self._cls = cls
self._field_mapping: Dict[str, str] = dict()
self._fieldnames = [x.name for x in dataclasses.fields(cls)]
self._writer = csv.writer(f, dialect=dialect, **fmtparams)
| (self, f: Any, data: List[Any], cls: Type[object], dialect: str = 'excel', **fmtparams: Dict[str, Any]) |
37,288 | dataclass_csv.dataclass_writer | _add_to_mapping | null | def _add_to_mapping(self, header: str, propname: str):
self._field_mapping[propname] = header
| (self, header: str, propname: str) |
37,289 | dataclass_csv.dataclass_writer | _apply_mapping | null | def _apply_mapping(self):
mapped_fields = []
for field in self._fieldnames:
mapped_item = self._field_mapping.get(field, field)
mapped_fields.append(mapped_item)
return mapped_fields
| (self) |
37,290 | dataclass_csv.dataclass_writer | map | Used to map a field in the dataclass to a header item in the CSV file
:param propname: The name of the property of the dataclass to be mapped
| def map(self, propname: str) -> HeaderMapper:
"""Used to map a field in the dataclass to header item in the CSV file
:param propname: The name of the property of the dataclass to be mapped
"""
return HeaderMapper(lambda header: self._add_to_mapping(header, propname))
| (self, propname: str) -> dataclass_csv.header_mapper.HeaderMapper |
37,291 | dataclass_csv.dataclass_writer | write | null | def write(self, skip_header: bool = False):
if not skip_header:
if self._field_mapping:
self._fieldnames = self._apply_mapping()
self._writer.writerow(self._fieldnames)
for item in self._data:
if not isinstance(item, self._cls):
raise TypeError(
(
f"The item [{item}] is not an instance of "
f"{self._cls.__name__}. All items on the list must be "
"instances of the same type"
)
)
row = dataclasses.astuple(item)
self._writer.writerow(row)
| (self, skip_header: bool = False) |
37,292 | dataclass_csv.decorators | accept_whitespaces | The accept_whitespaces decorator tells the `DataclassReader`
that `str` fields defined in the `dataclass` should accept
values containing only white spaces.
Usage:
>>> from dataclasses import dataclass
>>> from dataclass_csv import accept_whitespaces
>>> @dataclass
>>> @accept_whitespaces
>>> class User:
>>> firstname: str
>>> lastname: str
>>> birthday: datetime
| def accept_whitespaces(_cls: Type[Any] = None) -> Callable[[F], F]:
"""The accept_whitespaces decorator tells the `DataclassReader`
that `str` fields defined in the `dataclass` should accept
values containing only white spaces.
Usage:
>>> from dataclasses import dataclass
>>> from dataclass_csv import accept_whitespaces
>>> @dataclass
>>> @accept_whitespaces
>>> class User:
>>> firstname: str
>>> lastname: str
>>> birthday: datetime
"""
def func(cls):
cls.__accept_whitespaces__ = True
return cls
if _cls:
return func(_cls)
return func
| (_cls: Optional[Type[Any]] = None) -> Callable[[~F], ~F] |
37,295 | dataclass_csv.decorators | dateformat | The dateformat decorator is used to specify the format
the `DataclassReader` should use when parsing datetime strings.
Usage:
>>> from dataclasses import dataclass
>>> from datetime import datetime
>>> from dataclass_csv import dateformat
>>> @dataclass
>>> @dateformat('%Y-%m-%d')
>>> class User:
>>> firstname: str
>>> lastname: str
>>> birthday: datetime
| def dateformat(date_format: str) -> Callable[[F], F]:
"""The dateformat decorator is used to specify the format
the `DataclassReader` should use when parsing datetime strings.
Usage:
>>> from dataclasses import dataclass
>>> from datetime import datetime
>>> from dataclass_csv import dateformat
>>> @dataclass
>>> @dateformat('%Y-%m-%d')
>>> class User:
>>> firstname: str
>>> lastname: str
>>> birthday: datetime
"""
if not date_format or not isinstance(date_format, str):
raise ValueError("Invalid value for the date_format argument")
def func(cls):
cls.__dateformat__ = date_format
return cls
return func
| (date_format: str) -> Callable[[~F], ~F] |
37,306 | micropip._commands.mock_package | add_mock_package |
Add a mock version of a package to the package dictionary.
This means that if it is a dependency, it is skipped on install.
By default a single empty module is installed with the same
name as the package. You can alternatively give one or more modules to make a
set of named modules.
The modules parameter is usually a dictionary mapping module name to module text.
.. code-block:: python
{
"mylovely_module":'''
def module_method(an_argument):
print("This becomes a module level argument")
module_value = "this value becomes a module level variable"
print("This is run on import of module")
'''
}
If you are adding the module in non-persistent mode, you can also pass functions
which are used to initialize the module on loading (as in `importlib.abc.loader.exec_module` ).
This allows you to do things like use `unittest.mock.MagicMock` classes for modules.
.. code-block:: python
def init_fn(module):
module.dict["WOO"]="hello"
print("Initing the module now!")
...
{
"mylovely_module": init_fn
}
Parameters
----------
name :
Package name to add
version :
Version of the package. This should be a semantic version string,
e.g. 1.2.3
modules :
Dictionary of module_name:string pairs.
The string contains the source of the mock module or is blank for
an empty module.
persistent :
If this is True, modules will be written to the file system, so they
persist between runs of python (assuming the file system persists).
If it is False, modules will be stored inside micropip in memory only.
| def add_mock_package(
name: str,
version: str,
*,
modules: dict[str, str | None] | None = None,
persistent: bool = False,
) -> None:
"""
Add a mock version of a package to the package dictionary.
This means that if it is a dependency, it is skipped on install.
By default a single empty module is installed with the same
name as the package. You can alternatively give one or more modules to make a
set of named modules.
The modules parameter is usually a dictionary mapping module name to module text.
.. code-block:: python
{
"mylovely_module":'''
def module_method(an_argument):
print("This becomes a module level argument")
module_value = "this value becomes a module level variable"
print("This is run on import of module")
'''
}
If you are adding the module in non-persistent mode, you can also pass functions
which are used to initialize the module on loading (as in `importlib.abc.loader.exec_module` ).
This allows you to do things like use `unittest.mock.MagicMock` classes for modules.
.. code-block:: python
def init_fn(module):
module.dict["WOO"]="hello"
print("Initing the module now!")
...
{
"mylovely_module": init_fn
}
Parameters
----------
name :
Package name to add
version :
Version of the package. This should be a semantic version string,
e.g. 1.2.3
modules :
Dictionary of module_name:string pairs.
The string contains the source of the mock module or is blank for
an empty module.
persistent :
If this is True, modules will be written to the file system, so they
persist between runs of python (assuming the file system persists).
If it is False, modules will be stored inside micropip in memory only.
"""
if modules is None:
# make a single mock module with this name
modules = {name: ""}
# make the metadata
METADATA = f"""Metadata-Version: 1.1
Name: {name}
Version: {version}
Summary: {name} mock package generated by micropip
Author-email: {name}@micro.pip.non-working-fake-host
"""
for module_name in modules.keys():
METADATA += f"Provides: {module_name}\n"
if persistent:
# make empty mock modules with the requested names in user site packages
site_packages = Path(site.getsitepackages()[0])
# should exist already, but just in case
site_packages.mkdir(parents=True, exist_ok=True)
dist_dir = site_packages / f"{name}-{version}.dist-info"
dist_dir.mkdir(parents=True, exist_ok=False)
metadata_file = dist_dir / "METADATA"
record_file = dist_dir / "RECORD"
installer_file = dist_dir / "INSTALLER"
file_list = [metadata_file, installer_file]
metadata_file.write_text(METADATA)
installer_file.write_text(MOCK_INSTALL_NAME_PERSISTENT)
for module_name, content in modules.items():
if not content:
content = ""
content = dedent(content)
path_parts = module_name.split(".")
dir_path = Path(site_packages, *path_parts)
dir_path.mkdir(exist_ok=True, parents=True)
init_file = dir_path / "__init__.py"
file_list.append(init_file)
init_file.write_text(content)
with open(record_file, "w") as f:
for file in file_list:
f.write(f"{file},,{file.stat().st_size}\n")
f.write(f"{record_file},,\n")
else:
# make memory mocks of files
INSTALLER = MOCK_INSTALL_NAME_MEMORY
metafiles = {"METADATA": METADATA, "INSTALLER": INSTALLER}
_add_in_memory_distribution(name, metafiles, modules)
importlib.invalidate_caches()
| (name: str, version: str, *, modules: Optional[dict[str, str | None]] = None, persistent: bool = False) -> NoneType |
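A short, hedged sketch of the mock-package helpers as they might be called inside a Pyodide session (the package and module names are made up):
import micropip

# Register an in-memory stand-in so dependency resolution skips the real package.
micropip.add_mock_package(
    "my-mock-pkg",  # hypothetical package name
    "1.0.0",
    modules={"my_mock_pkg": "value = 42"},
    persistent=False,
)
print(micropip.list_mock_packages())  # should now include "my-mock-pkg"
micropip.remove_mock_package("my-mock-pkg")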
37,309 | micropip._commands.freeze | freeze | Produce a json string which can be used as the contents of the
``repodata.json`` lock file.
If you later load Pyodide with this lock file, you can use
:js:func:`pyodide.loadPackage` to load packages that were loaded with :py:mod:`micropip`
this time. Loading packages with :js:func:`~pyodide.loadPackage` is much faster
and you will always get consistent versions of all your dependencies.
You can use your custom lock file by passing an appropriate url to the
``lockFileURL`` of :js:func:`~globalThis.loadPyodide`.
| def freeze() -> str:
"""Produce a json string which can be used as the contents of the
``repodata.json`` lock file.
If you later load Pyodide with this lock file, you can use
:js:func:`pyodide.loadPackage` to load packages that were loaded with :py:mod:`micropip`
this time. Loading packages with :js:func:`~pyodide.loadPackage` is much faster
and you will always get consistent versions of all your dependencies.
You can use your custom lock file by passing an appropriate url to the
``lockFileURL`` of :js:func:`~globalThis.loadPyodide`.
"""
packages = deepcopy(REPODATA_PACKAGES)
for dist in importlib.metadata.distributions():
name = dist.name
version = dist.version
url = dist.read_text("PYODIDE_URL")
if url is None:
continue
sha256 = dist.read_text("PYODIDE_SHA256")
assert sha256
imports = (dist.read_text("top_level.txt") or "").split()
requires = dist.read_text("PYODIDE_REQUIRES")
if not requires:
fix_package_dependencies(name)
requires = dist.read_text("PYODIDE_REQUIRES")
if requires:
depends = json.loads(requires)
else:
depends = []
pkg_entry: dict[str, Any] = dict(
name=name,
version=version,
file_name=url,
install_dir="site",
sha256=sha256,
imports=imports,
depends=depends,
)
packages[canonicalize_name(name)] = pkg_entry
# Sort
packages = dict(sorted(packages.items()))
package_data = {
"info": REPODATA_INFO,
"packages": packages,
}
return json.dumps(package_data)
| () -> str |
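A sketch of capturing the output of `freeze()` for later use as a lock file (the file name is illustrative); it would run inside Pyodide after installing everything the application needs:
import micropip

lock_contents = micropip.freeze()
# Serve this file and pass its URL as lockFileURL to loadPyodide later.
with open("repodata.json", "w") as f:
    f.write(lock_contents)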
37,310 | micropip._commands.install | install | Install the given package and all of its dependencies.
If a package is not found in the Pyodide repository it will be loaded from
PyPI. Micropip can only load pure Python wheels or wasm32/emscripten wheels
built by Pyodide.
When used in web browsers, downloads from PyPI will be cached. When run in
Node.js, packages are currently not cached, and will be re-downloaded each
time ``micropip.install`` is run.
Parameters
----------
requirements :
A requirement or list of requirements to install. Each requirement is a
string, which should be either a package name or a wheel URI:
- If the requirement does not end in ``.whl``, it will be interpreted as
a package name. A package with this name must either be present
in the Pyodide lock file or on PyPI.
- If the requirement ends in ``.whl``, it is a wheel URI. The part of
the requirement after the last ``/`` must be a valid wheel name in
compliance with the `PEP 427 naming convention
<https://www.python.org/dev/peps/pep-0427/#file-format>`_.
- If a wheel URI starts with ``emfs:``, it will be interpreted as a path
in the Emscripten file system (Pyodide's file system). E.g.,
``emfs:../relative/path/wheel.whl`` or ``emfs:/absolute/path/wheel.whl``.
In this case, only .whl files are supported.
- If a wheel URI requirement starts with ``http:`` or ``https:`` it will
be interpreted as a URL.
- In node, you can access the native file system using a URI that starts
with ``file:``. In the browser this will not work.
keep_going :
This parameter decides the behavior of the micropip when it encounters a
Python package without a pure Python wheel while doing dependency
resolution:
- If ``False``, an error will be raised on first package with a missing
wheel.
- If ``True``, the micropip will keep going after the first error, and
report a list of errors at the end.
deps :
If ``True``, install dependencies specified in METADATA file for each
package. Otherwise do not install dependencies.
credentials :
This parameter specifies the value of ``credentials`` when calling the
`fetch() <https://developer.mozilla.org/en-US/docs/Web/API/fetch>`__
function which is used to download the package.
When not specified, ``fetch()`` is called without ``credentials``.
pre :
If ``True``, include pre-release and development versions. By default,
micropip only finds stable versions.
index_urls :
A list of URLs or a single URL to use as the package index when looking
up packages. If None, *https://pypi.org/pypi/{package_name}/json* is used.
- The index URL should support the
`JSON API <https://warehouse.pypa.io/api-reference/json/>`__ .
- The index URL may contain the placeholder {package_name} which will be
replaced with the package name when looking up a package. If it does not
contain the placeholder, the package name will be appended to the URL.
- If a list of URLs is provided, micropip will try each URL in order until
it finds a package. If no package is found, an error will be raised.
verbose :
Print more information about the process.
By default, micropip is silent. Setting ``verbose=True`` will print
similar information as pip.
| import asyncio
import importlib
from pathlib import Path
from packaging.markers import default_environment
from .. import package_index
from .._compat import loadPackage, to_js
from ..constants import FAQ_URLS
from ..logging import setup_logging
from ..transaction import Transaction
async def install(
requirements: str | list[str],
keep_going: bool = False,
deps: bool = True,
credentials: str | None = None,
pre: bool = False,
index_urls: list[str] | str | None = None,
*,
verbose: bool | int = False,
) -> None:
"""Install the given package and all of its dependencies.
If a package is not found in the Pyodide repository it will be loaded from
PyPI. Micropip can only load pure Python wheels or wasm32/emscripten wheels
built by Pyodide.
When used in web browsers, downloads from PyPI will be cached. When run in
Node.js, packages are currently not cached, and will be re-downloaded each
time ``micropip.install`` is run.
Parameters
----------
requirements :
A requirement or list of requirements to install. Each requirement is a
string, which should be either a package name or a wheel URI:
- If the requirement does not end in ``.whl``, it will be interpreted as
a package name. A package with this name must either be present
in the Pyodide lock file or on PyPI.
- If the requirement ends in ``.whl``, it is a wheel URI. The part of
the requirement after the last ``/`` must be a valid wheel name in
compliance with the `PEP 427 naming convention
<https://www.python.org/dev/peps/pep-0427/#file-format>`_.
- If a wheel URI starts with ``emfs:``, it will be interpreted as a path
in the Emscripten file system (Pyodide's file system). E.g.,
``emfs:../relative/path/wheel.whl`` or ``emfs:/absolute/path/wheel.whl``.
In this case, only .whl files are supported.
- If a wheel URI requirement starts with ``http:`` or ``https:`` it will
be interpreted as a URL.
- In node, you can access the native file system using a URI that starts
with ``file:``. In the browser this will not work.
keep_going :
This parameter decides the behavior of the micropip when it encounters a
Python package without a pure Python wheel while doing dependency
resolution:
- If ``False``, an error will be raised on first package with a missing
wheel.
- If ``True``, the micropip will keep going after the first error, and
report a list of errors at the end.
deps :
If ``True``, install dependencies specified in METADATA file for each
package. Otherwise do not install dependencies.
credentials :
This parameter specifies the value of ``credentials`` when calling the
`fetch() <https://developer.mozilla.org/en-US/docs/Web/API/fetch>`__
function which is used to download the package.
When not specified, ``fetch()`` is called without ``credentials``.
pre :
If ``True``, include pre-release and development versions. By default,
micropip only finds stable versions.
index_urls :
A list of URLs or a single URL to use as the package index when looking
up packages. If None, *https://pypi.org/pypi/{package_name}/json* is used.
- The index URL should support the
`JSON API <https://warehouse.pypa.io/api-reference/json/>`__ .
- The index URL may contain the placeholder {package_name} which will be
replaced with the package name when looking up a package. If it does not
contain the placeholder, the package name will be appended to the URL.
- If a list of URLs is provided, micropip will try each URL in order until
it finds a package. If no package is found, an error will be raised.
verbose :
Print more information about the process.
By default, micropip is silent. Setting ``verbose=True`` will print
similar information as pip.
"""
logger = setup_logging(verbose)
ctx = default_environment()
if isinstance(requirements, str):
requirements = [requirements]
fetch_kwargs = dict()
if credentials:
fetch_kwargs["credentials"] = credentials
# Note: getsitepackages is not available in a virtual environment...
# See https://github.com/pypa/virtualenv/issues/228 (issue is closed but
# problem is not fixed)
from site import getsitepackages
wheel_base = Path(getsitepackages()[0])
if index_urls is None:
index_urls = package_index.INDEX_URLS[:]
transaction = Transaction(
ctx=ctx,
ctx_extras=[],
keep_going=keep_going,
deps=deps,
pre=pre,
fetch_kwargs=fetch_kwargs,
verbose=verbose,
index_urls=index_urls,
)
await transaction.gather_requirements(requirements)
if transaction.failed:
failed_requirements = ", ".join([f"'{req}'" for req in transaction.failed])
raise ValueError(
f"Can't find a pure Python 3 wheel for: {failed_requirements}\n"
f"See: {FAQ_URLS['cant_find_wheel']}\n"
)
package_names = [pkg.name for pkg in transaction.pyodide_packages] + [
pkg.name for pkg in transaction.wheels
]
if package_names:
logger.info("Installing collected packages: " + ", ".join(package_names))
wheel_promises = []
# Install built-in packages
pyodide_packages = transaction.pyodide_packages
if len(pyodide_packages):
# Note: branch never happens in out-of-browser testing because in
# that case REPODATA_PACKAGES is empty.
wheel_promises.append(
asyncio.ensure_future(
loadPackage(to_js([name for [name, _, _] in pyodide_packages]))
)
)
# Now install PyPI packages
for wheel in transaction.wheels:
# detect whether the wheel metadata is from PyPI or from custom location
# wheel metadata from PyPI has SHA256 checksum digest.
wheel_promises.append(wheel.install(wheel_base))
await asyncio.gather(*wheel_promises)
packages = [f"{pkg.name}-{pkg.version}" for pkg in transaction.pyodide_packages] + [
f"{pkg.name}-{pkg.version}" for pkg in transaction.wheels
]
if packages:
logger.info("Successfully installed " + ", ".join(packages))
importlib.invalidate_caches()
| (requirements: str | list[str], keep_going: bool = False, deps: bool = True, credentials: Optional[str] = None, pre: bool = False, index_urls: Union[list[str], str, NoneType] = None, *, verbose: bool | int = False) -> NoneType |
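A hedged sketch of typical calls, as they might be issued from a Pyodide console where top-level `await` is available; the package names and the `emfs:` wheel path are placeholders:
import micropip

# Plain package name resolved against the lock file or PyPI.
await micropip.install("snowballstemmer")

# A list mixing a PyPI name and a wheel already present in the Emscripten FS;
# keep_going=True collects all missing-wheel errors instead of stopping early.
await micropip.install(
    ["regex", "emfs:/wheels/mypkg-0.1-py3-none-any.whl"],
    keep_going=True,
)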
37,311 | micropip._commands.list | _list | Get the dictionary of installed packages.
Returns
-------
``PackageDict``
A dictionary of installed packages.
>>> import micropip
>>> await micropip.install('regex') # doctest: +SKIP
>>> package_list = micropip.list()
>>> print(package_list) # doctest: +SKIP
Name | Version | Source
----------------- | -------- | -------
regex | 2021.7.6 | pyodide
>>> "regex" in package_list # doctest: +SKIP
True
| def _list() -> PackageDict:
"""Get the dictionary of installed packages.
Returns
-------
``PackageDict``
A dictionary of installed packages.
>>> import micropip
>>> await micropip.install('regex') # doctest: +SKIP
>>> package_list = micropip.list()
>>> print(package_list) # doctest: +SKIP
Name | Version | Source
----------------- | -------- | -------
regex | 2021.7.6 | pyodide
>>> "regex" in package_list # doctest: +SKIP
True
"""
# Add packages that are loaded through pyodide.loadPackage
packages = PackageDict()
for dist in importlib.metadata.distributions():
name = dist.name
version = dist.version
source = dist.read_text("PYODIDE_SOURCE")
if source is None:
# source is None if PYODIDE_SOURCE does not exist. In this case the
# wheel was installed manually, not via `pyodide.loadPackage` or
# `micropip`.
continue
packages[name] = PackageMetadata(
name=name,
version=version,
source=source,
)
for name, pkg_source in loadedPackages.to_py().items():
if name in packages:
continue
if name in REPODATA_PACKAGES:
version = REPODATA_PACKAGES[name]["version"]
source_ = "pyodide"
if pkg_source != "default channel":
# Pyodide package loaded from a custom URL
source_ = pkg_source
else:
# TODO: calculate version from wheel metadata
version = "unknown"
source_ = pkg_source
packages[name] = PackageMetadata(name=name, version=version, source=source_)
return packages
| () -> micropip.package.PackageDict |
37,312 | micropip._commands.mock_package | list_mock_packages |
List all mock packages currently installed.
| def list_mock_packages() -> list[str]:
"""
List all mock packages currently installed.
"""
mock_packages = [
dist.name
for dist in importlib.metadata.distributions()
if dist.read_text("INSTALLER")
in (MOCK_INSTALL_NAME_PERSISTENT, MOCK_INSTALL_NAME_MEMORY)
]
return mock_packages
| () -> list[str] |
37,317 | micropip._commands.mock_package | remove_mock_package |
Remove a mock package.
| def remove_mock_package(name: str) -> None:
"""
Remove a mock package.
"""
d = importlib.metadata.distribution(name)
installer = d.read_text("INSTALLER")
if installer == MOCK_INSTALL_NAME_MEMORY:
_remove_in_memory_distribution(name)
return
elif installer is None or installer != MOCK_INSTALL_NAME_PERSISTENT:
raise ValueError(
f"Package {name} doesn't seem to be a micropip mock. \n"
"Are you sure it was installed with micropip?"
)
# a real mock package - kill it
# remove all files
folders: set[Path] = set()
if d.files is not None:
for file in d.files:
p = Path(file.locate())
p.unlink()
folders.add(p.parent)
# delete all folders except site_packages
# (that check is just to avoid killing
# undesirable things in case of weird micropip errors)
site_packages = Path(site.getsitepackages()[0])
for f in folders:
if f != site_packages:
shutil.rmtree(f)
| (name: str) -> NoneType |
37,318 | micropip._commands.index_urls | set_index_urls |
Set the index URLs to use when looking up packages.
- The index URL should support the
`JSON API <https://warehouse.pypa.io/api-reference/json/>`__ .
- The index URL may contain the placeholder {package_name} which will be
replaced with the package name when looking up a package. If it does not
contain the placeholder, the package name will be appended to the URL.
- If a list of URLs is provided, micropip will try each URL in order until
it finds a package. If no package is found, an error will be raised.
Parameters
----------
urls
A list of URLs or a single URL to use as the package index.
| def set_index_urls(urls: list[str] | str) -> None:
"""
Set the index URLs to use when looking up packages.
- The index URL should support the
`JSON API <https://warehouse.pypa.io/api-reference/json/>`__ .
- The index URL may contain the placeholder {package_name} which will be
replaced with the package name when looking up a package. If it does not
contain the placeholder, the package name will be appended to the URL.
- If a list of URLs is provided, micropip will try each URL in order until
it finds a package. If no package is found, an error will be raised.
Parameters
----------
urls
A list of URLs or a single URL to use as the package index.
"""
if isinstance(urls, str):
urls = [urls]
package_index.INDEX_URLS = urls[:]
| (urls: list[str] | str) -> NoneType |
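A small sketch of pointing micropip at a custom index before falling back to PyPI (the mirror URL is a placeholder):
import micropip

micropip.set_index_urls([
    "https://mirror.example.com/pypi/{package_name}/json",  # hypothetical mirror
    "https://pypi.org/pypi/{package_name}/json",
])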
37,320 | micropip._commands.uninstall | uninstall | Uninstall the given packages.
This function only supports uninstalling packages that are installed
using a wheel file, i.e. packages that have distribution metadata.
It is possible to reinstall a package after uninstalling it, but
note that modules / functions that are already imported will not be
automatically removed from the namespace. So make sure to reload
the module after reinstalling by e.g. running `importlib.reload(module)`.
Parameters
----------
packages
Packages to uninstall.
verbose
Print more information about the process.
By default, micropip is silent. Setting ``verbose=True`` will print
similar information as pip.
| def uninstall(packages: str | list[str], *, verbose: bool | int = False) -> None:
"""Uninstall the given packages.
This function only supports uninstalling packages that are installed
using a wheel file, i.e. packages that have distribution metadata.
It is possible to reinstall a package after uninstalling it, but
note that modules / functions that are already imported will not be
automatically removed from the namespace. So make sure to reload
the module after reinstalling by e.g. running `importlib.reload(module)`.
Parameters
----------
packages
Packages to uninstall.
verbose
Print more information about the process.
By default, micropip is silent. Setting ``verbose=True`` will print
similar information as pip.
"""
logger = setup_logging(verbose)
if isinstance(packages, str):
packages = [packages]
distributions: list[Distribution] = []
for package in packages:
try:
dist = importlib.metadata.distribution(package)
distributions.append(dist)
except importlib.metadata.PackageNotFoundError:
logger.warning(f"Skipping '{package}' as it is not installed.")
for dist in distributions:
# Note: this value needs to be retrieved before removing files, as
# dist.name uses metadata file to get the name
name = dist.name
version = dist.version
logger.info(f"Found existing installation: {name} {version}")
root = get_root(dist)
files = get_files_in_distribution(dist)
directories = set()
for file in files:
if not file.is_file():
if not file.is_relative_to(root):
# This file is not in the site-packages directory. Probably one of:
# - data_files
# - scripts
# - entry_points
# Since we don't support these, we can ignore them (except for data_files (TODO))
continue
logger.warning(
f"A file '{file}' listed in the metadata of '{name}' does not exist.",
)
continue
file.unlink()
if file.parent != root:
directories.add(file.parent)
# Remove directories in reverse hierarchical order
for directory in sorted(directories, key=lambda x: len(x.parts), reverse=True):
try:
directory.rmdir()
except OSError:
logger.warning(
f"A directory '{directory}' is not empty after uninstallation of '{name}'. "
"This might cause problems when installing a new version of the package. ",
)
if hasattr(loadedPackages, name):
delattr(loadedPackages, name)
else:
# This should not happen, but just in case
logger.warning(
f"a package '{name}' was not found in loadedPackages.",
)
logger.info(f"Successfully uninstalled {name}-{version}")
importlib.invalidate_caches()
| (packages: str | list[str], *, verbose: bool | int = False) -> NoneType |
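A minimal usage sketch; per the docstring above, modules already imported stay in memory, so a reinstall should be followed by `importlib.reload`:
import importlib
import micropip

micropip.uninstall("regex", verbose=True)
# ...after reinstalling, refresh an already-imported module, e.g.:
# importlib.reload(regex)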
37,322 | async_payok.exceptions | AsyncPayOkError | null | class AsyncPayOkError(Exception):
pass
| null |
37,323 | async_payok.asyncpayok | AsyncPayok |
PayOk API class
An asynchronous class for working with the PayOk API
More about the API: https://payok.io/cabinet/documentation/doc_main.php
:param api_key: authorization key. Required for working with the API
:type api_key: str
:param secret_key: secret authorization key. Required for creating invoices.
:type secret_key: Optional[str]
:param shop_id: shop (cash desk) ID. Required for interacting with a specific shop
:type shop_id: Optional[int]
| class AsyncPayok:
"""
PayOk API class
An asynchronous class for working with the PayOk API
More about the API: https://payok.io/cabinet/documentation/doc_main.php
:param api_key: authorization key. Required for working with the API
:type api_key: str
:param secret_key: secret authorization key. Required for creating invoices.
:type secret_key: Optional[str]
:param shop_id: shop (cash desk) ID. Required for interacting with a specific shop
:type shop_id: Optional[int]
"""
def __init__(
self,
api_id: int,
api_key: str,
secret_key: Optional[str],
shop_id: Optional[int]
) -> None:
self.__API_URL__: str = "https://payok.io"
self.__API_ID__: int = api_id
self.__API_KEY__: str = api_key
self.__SECRET_KEY__: Optional[str] = secret_key
self.__SHOP_ID__: Optional[int] = shop_id
self._SSL_CONTEXT_ = ssl.create_default_context(cafile=certifi.where())
self.session: ClientSession = ClientSession()
async def getBalance(
self
) -> Balance:
"""
Get the account balance
:return: main balance / referral balance
:rtype: Balance
"""
params: dict = {
"API_ID": self.__API_ID__,
"API_KEY": self.__API_KEY__
}
method: str = "POST"
url: str = self.__API_URL__ + "/api/balance"
resp = await self._request(
method=method,
url=url,
params=params
)
return Balance(**resp)
async def getPayments(
self,
offset: Optional[int] = None
) -> Union[Invoice, List[Invoice]]:
"""
Get all transactions, or a single transaction
:param offset: offset / skip the given number of rows
:type offset: Optional[int]
:return: data about the transaction(s)
:rtype: Union[Invoice, List[Invoice]]
"""
method: str = "POST"
url: str = self.__API_URL__ + "/api/transaction"
data: dict = {
"API_ID": self.__API_ID__,
"API_KEY": self.__API_KEY__,
"shop": self.__SHOP_ID__,
}
if offset:
data["offset"] = offset
response = await self._request(
method=method,
url=url,
params=data
)
transactions = []
for transaction in response.values():
if not transaction["method"]:
transaction['method'] = "Не выбран"
transactions.append(Invoice(**transaction))
return transactions
async def createInvoice(
self,
payment: Union[int, str],
amount: float,
currency: Optional[str] = Currency.RUB,
desc: Optional[str] = 'Description',
email: Optional[str] = None,
success_url: Optional[str] = None,
method: Optional[str] = None,
lang: Optional[str] = None,
custom: Optional[str] = None
) -> str:
"""
Create a payment form (invoice) link
:param payment: unique payment ID
:type payment: Union[int, str]
:param amount: payment amount
:type amount: float
:param currency: payment currency
:type currency: Optional[str]
:param desc: payment description
:type desc: Optional[str]
:param email: payer's email
:type email: Optional[str]
:param success_url: redirect URL after payment
:type success_url: Optional[str]
:param method: payment method (see the Method object)
:type method: Optional[str]
:param lang: form interface language (RU or ENG)
:type lang: Optional[str]
:param custom: parameter passed back in the payment notification
:type custom: Optional[str]
:return: link to the payment form
:rtype: str
"""
if not self.__SECRET_KEY__:
raise AsyncPayOkError("Invalid Secret Key - is empty!")
params = {
'amount': amount,
'payment': payment,
'shop': self.__SHOP_ID__,
'currency': currency,
'desc': desc,
'email': email,
'success_url': success_url,
'method': method,
'lang': lang,
'custom': custom
}
for key, value in params.copy().items():
if value is None:
del params[key]
sign_params = '|'.join(
map(str, [
amount, payment,
self.__SHOP_ID__,
currency, desc,
self.__SECRET_KEY__
]
)).encode('utf-8')
sign = md5(sign_params).hexdigest()
params['sign'] = sign
url = f'{self.__API_URL__}/pay?' + urlencode(params)
return url
async def _request(
self,
method: Optional[str],
url: Optional[str],
params: dict
) -> Dict:
"""
Make a request to the API
:param method: request method (POST, GET)
:type method: Optional[str]
:param url: API request URL
:type url: Optional[str]
:param params: request parameters
:type params: dict
:return:
"""
request = await self.session.request(
method=method,
url=url,
ssl_context=self._SSL_CONTEXT_,
data=params,
)
if request.status == 401:
raise AsyncPayOkError(
"Invalid API KEY! You can get it here!"
)
answer: dict = await request.json(
content_type="text/plain"
)
if answer.get("status") and answer.pop("status") == "error":
desc = answer.get("text", answer.get("error_text"))
code = answer["error_code"]
raise PayOkAPIError(
f"ERROR: {code} | {desc} \nCheck docs: https://payok.io/cabinet/documentation/doc_api_errors"
)
return answer
| (api_id: int, api_key: str, secret_key: Optional[str], shop_id: Optional[int]) -> None |
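A hedged end-to-end sketch of the class above; the import path, IDs and keys are assumptions, and the explicit `session.close()` reflects that `__init__` opens an aiohttp `ClientSession` the class never closes itself:
import asyncio

from async_payok import AsyncPayok  # assumed import path

async def main() -> None:
    # placeholder credentials
    client = AsyncPayok(api_id=1, api_key="API_KEY", secret_key="SECRET", shop_id=1)
    balance = await client.getBalance()
    pay_url = await client.createInvoice(payment=1001, amount=99.0, desc="Test order")
    print(balance, pay_url)
    await client.session.close()  # close the ClientSession opened in __init__

asyncio.run(main())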
37,324 | async_payok.asyncpayok | __init__ | null | def __init__(
self,
api_id: int,
api_key: str,
secret_key: Optional[str],
shop_id: Optional[int]
) -> None:
self.__API_URL__: str = "https://payok.io"
self.__API_ID__: int = api_id
self.__API_KEY__: str = api_key
self.__SECRET_KEY__: Optional[str] = secret_key
self.__SHOP_ID__: Optional[int] = shop_id
self._SSL_CONTEXT_ = ssl.create_default_context(cafile=certifi.where())
self.session: ClientSession = ClientSession()
| (self, api_id: int, api_key: str, secret_key: Optional[str], shop_id: Optional[int]) -> NoneType |
37,325 | async_payok.asyncpayok | _request |
Make a request to the API
:param method: request method (POST, GET)
:type method: Optional[str]
:param url: API request URL
:type url: Optional[str]
:param params: request parameters
:type params: dict
:return:
| async def _request(
self,
method: Optional[str],
url: Optional[str],
params: dict
) -> Dict:
"""
Make a request to the API
:param method: request method (POST, GET)
:type method: Optional[str]
:param url: API request URL
:type url: Optional[str]
:param params: request parameters
:type params: dict
:return:
"""
request = await self.session.request(
method=method,
url=url,
ssl_context=self._SSL_CONTEXT_,
data=params,
)
if request.status == 401:
raise AsyncPayOkError(
"Invalid API KEY! You can get it here!"
)
answer: dict = await request.json(
content_type="text/plain"
)
if answer.get("status") and answer.pop("status") == "error":
desc = answer.get("text", answer.get("error_text"))
code = answer["error_code"]
raise PayOkAPIError(
f"ERROR: {code} | {desc} \nCheck docs: https://payok.io/cabinet/documentation/doc_api_errors"
)
return answer
| (self, method: Optional[str], url: Optional[str], params: dict) -> Dict |
37,326 | async_payok.asyncpayok | createInvoice |
Create a payment form (invoice) link
:param payment: unique payment ID
:type payment: Union[int, str]
:param amount: payment amount
:type amount: float
:param currency: payment currency
:type currency: Optional[str]
:param desc: payment description
:type desc: Optional[str]
:param email: payer's email
:type email: Optional[str]
:param success_url: redirect URL after payment
:type success_url: Optional[str]
:param method: payment method (see the Method object)
:type method: Optional[str]
:param lang: form interface language (RU or ENG)
:type lang: Optional[str]
:param custom: parameter passed back in the payment notification
:type custom: Optional[str]
:return: link to the payment form
:rtype: str
| async def createInvoice(
self,
payment: Union[int, str],
amount: float,
currency: Optional[str] = Currency.RUB,
desc: Optional[str] = 'Description',
email: Optional[str] = None,
success_url: Optional[str] = None,
method: Optional[str] = None,
lang: Optional[str] = None,
custom: Optional[str] = None
) -> str:
"""
Create a payment form (invoice) link
:param payment: unique payment ID
:type payment: Union[int, str]
:param amount: payment amount
:type amount: float
:param currency: payment currency
:type currency: Optional[str]
:param desc: payment description
:type desc: Optional[str]
:param email: payer's email
:type email: Optional[str]
:param success_url: redirect URL after payment
:type success_url: Optional[str]
:param method: payment method (see the Method object)
:type method: Optional[str]
:param lang: form interface language (RU or ENG)
:type lang: Optional[str]
:param custom: parameter passed back in the payment notification
:type custom: Optional[str]
:return: link to the payment form
:rtype: str
"""
if not self.__SECRET_KEY__:
raise AsyncPayOkError("Invalid Secret Key - is empty!")
params = {
'amount': amount,
'payment': payment,
'shop': self.__SHOP_ID__,
'currency': currency,
'desc': desc,
'email': email,
'success_url': success_url,
'method': method,
'lang': lang,
'custom': custom
}
for key, value in params.copy().items():
if value is None:
del params[key]
sign_params = '|'.join(
map(str, [
amount, payment,
self.__SHOP_ID__,
currency, desc,
self.__SECRET_KEY__
]
)).encode('utf-8')
sign = md5(sign_params).hexdigest()
params['sign'] = sign
url = f'{self.__API_URL__}/pay?' + urlencode(params)
return url
| (self, payment: Union[int, str], amount: float, currency: Optional[str] = 'RUB', desc: Optional[str] = 'Description', email: Optional[str] = None, success_url: Optional[str] = None, method: Optional[str] = None, lang: Optional[str] = None, custom: Optional[str] = None) -> str |
37,327 | async_payok.asyncpayok | getBalance |
Get the account balance
:return: main balance / referral balance
:rtype: Balance
| async def getBalance(
self
) -> Balance:
"""
Get the account balance
:return: main balance / referral balance
:rtype: Balance
"""
params: dict = {
"API_ID": self.__API_ID__,
"API_KEY": self.__API_KEY__
}
method: str = "POST"
url: str = self.__API_URL__ + "/api/balance"
resp = await self._request(
method=method,
url=url,
params=params
)
return Balance(**resp)
| (self) -> async_payok.models.balance.Balance |