index (int64) | package (string) | name (string) | docstring (string) | code (string) | signature (string) |
---|---|---|---|---|---|
52,917 |
argparse_ext
|
_format_args
| null |
def _format_args(self, action, default_metavar):
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs is None:
result = '{%s}' % get_metavar(1)
elif action.nargs == OPTIONAL:
result = '[%s]' % get_metavar(1)
elif action.nargs == ZERO_OR_MORE:
result = '[%s [%s ...]]' % get_metavar(2)
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % get_metavar(2)
elif action.nargs == REMAINDER:
result = '...'
elif action.nargs == PARSER:
result = '%s ...' % get_metavar(1)
else:
formats = ['{%s}' for _ in range(action.nargs)]
result = ' '.join(formats) % get_metavar(action.nargs)
return result
|
(self, action, default_metavar)
|
52,919 |
argparse_ext
|
_format_usage
| null |
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = _('usage: ')
# if usage is specified, use that
if usage is not None:
usage = usage % dict(prog=self._prog)
# if no optionals or positionals are available, usage is just prog
elif usage is None and not actions:
usage = '%(prog)s' % dict(prog=self._prog)
# if optionals and positionals are available, calculate usage
elif usage is None:
prog = '%(prog)s' % dict(prog=self._prog)
# split optionals from positionals
optionals = []
positionals = []
for action in actions:
if action.option_strings:
optionals.append(action)
else:
positionals.append(action)
# build full usage string
format = self._format_actions_usage
action_usage = format(positionals, groups)
usage = ' '.join([s for s in [
prog, '[options]' if optionals else '', action_usage
] if s])
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
|
(self, usage, actions, groups, prefix)
|
52,920 |
argparse_ext
|
_get_default_metavar_for_optional
| null |
def _get_default_metavar_for_optional(self, action):
return action.dest
|
(self, action)
|
52,926 |
argparse_ext
|
_metavar_formatter
| null |
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
# elif action.choices is not None:
# choice_strs = [str(choice) for choice in action.choices]
# result = '%s' % ','.join(choice_strs)
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
|
(self, action, default_metavar)
|
52,935 |
argparse_ext
|
RawDescriptionHelpFormatter
|
retain any formatting in descriptions;
|
class RawDescriptionHelpFormatter(HelpFormatter):
'''
retain any formatting in descriptions;
'''
def _fill_text(self, text, width, indent):
return ''.join(indent + line for line in text.splitlines(keepends=True))
|
(prog, indent_increment=4, max_help_position=48, width=None)
|
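Below is a minimal usage sketch for a HelpFormatter subclass like the one above, wired into a parser via formatter_class. It uses the standard-library argparse.RawDescriptionHelpFormatter, which behaves the same way; substituting argparse_ext's class of the same name is an assumption about that package's exports.

# Hedged sketch: preserve description formatting by passing a raw-description
# formatter as formatter_class; swap in argparse_ext's subclass if it is exported.
import argparse

parser = argparse.ArgumentParser(
    prog='demo',
    description='Line one.\n  Line two keeps its indentation.',
    formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--verbose', action='store_true', help='enable verbose output')
parser.print_help()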
52,940 |
argparse_ext
|
_fill_text
| null |
# actions = sorted(actions, key=lambda x: x.option_strings[::-1])
# super().add_arguments(actions)
class RawDescriptionHelpFormatter(HelpFormatter):
'''
retain any formatting in descriptions;
'''
def _fill_text(self, text, width, indent):
return ''.join(indent + line for line in text.splitlines(keepends=True))
|
(self, text, width, indent)
|
52,964 |
notebook_shim
|
_jupyter_server_extension_points
| null |
def _jupyter_server_extension_points():
return [
{
'module': 'notebook_shim.nbserver',
}
]
|
()
|
52,965 |
latest_user_agents
|
LatestUserAgentsError
|
Custom exception used by this module.
|
class LatestUserAgentsError(Exception):
"""Custom exception used by this module."""
pass
| null |
52,968 |
latest_user_agents
|
_download
| null |
def _download():
with _cache_lock:
with _cache_db_connection() as connection:
with _cache_db_transaction(connection) as cursor:
now = int(time.time())
# Record the time of the last download attempt
cursor.execute(
"""
INSERT OR REPLACE INTO "last_download_attempt"
("id", "last_download_attempt") VALUES (0, ?)
""",
(now,),
)
# Download the latest user agents
response = requests.get(_download_url, timeout=5)
response.raise_for_status()
user_agents = response.json()
with _cache_db_transaction(connection) as cursor:
now = int(time.time())
# Insert new user agents
cursor.executemany(
"""
INSERT OR REPLACE INTO "user_agents"
("last_seen", "user_agent") VALUES (?, ?)
""",
[(now, ua) for ua in user_agents],
)
# Delete user agents older than 14 days
cursor.execute(
'DELETE FROM "user_agents" WHERE "last_seen" < ?',
(now - (14 * 86400),),
)
return user_agents
|
()
|
52,969 |
latest_user_agents
|
_get_cache_age
| null |
def _get_cache_age():
with _cache_lock:
with _cache_db_connection() as connection:
cursor = connection.cursor()
cursor.execute('SELECT MAX("last_seen") FROM "user_agents"')
row = cursor.fetchone()
if row is None or row[0] is None:
return None
return int(time.time() - row[0])
|
()
|
52,970 |
latest_user_agents
|
_get_last_request_time
| null |
def _get_last_request_time():
with _cache_lock:
with _cache_db_connection() as connection:
cursor = connection.cursor()
cursor.execute(
"""
SELECT "last_download_attempt"
FROM "last_download_attempt"
WHERE "id" = 0
""")
# The one row should always be present
return cursor.fetchone()[0]
|
()
|
52,971 |
latest_user_agents
|
_read_cache
| null |
def _read_cache():
with _cache_lock:
with _cache_db_connection() as connection:
return [
row[0] for row in connection.execute(
'SELECT "user_agent" from "user_agents"')
]
|
()
|
52,972 |
latest_user_agents
|
clear_user_agent_cache
|
Clear the local cache of user agents.
|
def clear_user_agent_cache():
"""Clear the local cache of user agents."""
global _cached_user_agents
with _cache_lock:
_cached_user_agents = None
try:
shutil.rmtree(_cache_dir)
except FileNotFoundError:
pass
|
()
|
52,974 |
latest_user_agents
|
get_latest_user_agents
|
Get the latest user agent strings for major browsers and OSs.
|
def get_latest_user_agents():
"""Get the latest user agent strings for major browsers and OSs."""
global _cached_user_agents
if _cached_user_agents is not None:
# Cached in memory
return _cached_user_agents
with _cache_lock:
if _cached_user_agents is not None:
# Another thread must have filled the cache while we were
# waiting for the lock
return _cached_user_agents
cache_age = _get_cache_age()
if cache_age is not None:
if (cache_age < 86400
or time.time() - _get_last_request_time() < 3600):
# Cache is less than a day old, or the last download attempt was under an hour ago
_cached_user_agents = _read_cache()
return _cached_user_agents
# Cache is at least a day old, and the last request
# was over an hour ago
try:
_cached_user_agents = _download()
except Exception:
if cache_age >= 7 * 86400:
raise LatestUserAgentsError((
'User agent cache is {:.1f} days old, '
'and attempted update failed').format(cache_age / 86400))
else:
# Just keep using the cache for now
_cached_user_agents = _read_cache()
return _cached_user_agents
_cached_user_agents = _download()
return _cached_user_agents
|
()
|
52,975 |
latest_user_agents
|
get_random_user_agent
|
Get a random user agent string.
|
def get_random_user_agent():
"""Get a random user agent string."""
return random.choice(get_latest_user_agents())
|
()
|
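A short usage sketch for the two public helpers above, assuming the module is importable under the package name shown in this table (latest_user_agents):

# Hedged sketch: read the cached user-agent list and pick one entry at random.
import latest_user_agents

all_agents = latest_user_agents.get_latest_user_agents()  # list of UA strings, cached on disk
one_agent = latest_user_agents.get_random_user_agent()    # random.choice over the same list
print(len(all_agents), one_agent)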
52,983 |
appdirs
|
user_cache_dir
|
Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
|
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
|
(appname=None, appauthor=None, version=None, opinion=True)
|
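A usage sketch for user_cache_dir as documented above; the application name, author and version are illustrative values, and the resulting path depends on the operating system:

# Hedged sketch: resolve a per-user cache directory for an app.
from appdirs import user_cache_dir

path = user_cache_dir(appname='SuperApp', appauthor='Acme', version='1.0')
# e.g. ~/.cache/SuperApp/1.0 on Linux, ~/Library/Caches/SuperApp/1.0 on macOS,
# C:\Users\<user>\AppData\Local\Acme\SuperApp\Cache\1.0 on Windows
print(path)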
52,985 |
phylib
|
_Formatter
| null |
class _Formatter(logging.Formatter):
color_codes = {'L': '94', 'D': '90', 'I': '0', 'W': '33', 'E': '31'}
def format(self, record):
# Only keep the first character in the level name.
record.levelname = record.levelname[0]
filename = op.splitext(op.basename(record.pathname))[0]
record.caller = '{:s}:{:d}'.format(filename, record.lineno).ljust(20)
message = super(_Formatter, self).format(record)
color_code = self.color_codes.get(record.levelname, '90')
message = '\33[%sm%s\33[0m' % (color_code, message)
return message
|
(fmt=None, datefmt=None, style='%', validate=True, *, defaults=None)
|
52,986 |
logging
|
__init__
|
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument. If datefmt is omitted, you get an
ISO8601-like (or RFC 3339-like) format.
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
.. versionchanged:: 3.2
Added the ``style`` parameter.
|
def __init__(self, fmt=None, datefmt=None, style='%', validate=True, *,
defaults=None):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument. If datefmt is omitted, you get an
ISO8601-like (or RFC 3339-like) format.
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
.. versionchanged:: 3.2
Added the ``style`` parameter.
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
self._style = _STYLES[style][0](fmt, defaults=defaults)
if validate:
self._style.validate()
self._fmt = self._style._fmt
self.datefmt = datefmt
|
(self, fmt=None, datefmt=None, style='%', validate=True, *, defaults=None)
|
52,987 |
phylib
|
format
| null |
def format(self, record):
# Only keep the first character in the level name.
record.levelname = record.levelname[0]
filename = op.splitext(op.basename(record.pathname))[0]
record.caller = '{:s}:{:d}'.format(filename, record.lineno).ljust(20)
message = super(_Formatter, self).format(record)
color_code = self.color_codes.get(record.levelname, '90')
message = '\33[%sm%s\33[0m' % (color_code, message)
return message
|
(self, record)
|
52,988 |
logging
|
formatException
|
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
|
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = io.StringIO()
tb = ei[2]
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
traceback.print_exception(ei[0], ei[1], tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
|
(self, ei)
|
52,989 |
logging
|
formatMessage
| null |
def formatMessage(self, record):
return self._style.format(record)
|
(self, record)
|
52,990 |
logging
|
formatStack
|
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
|
def formatStack(self, stack_info):
"""
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
"""
return stack_info
|
(self, stack_info)
|
52,991 |
logging
|
formatTime
|
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used.
The resulting string is returned. This function uses a user-configurable
function to convert the creation time to a tuple. By default,
time.localtime() is used; to change this for a particular formatter
instance, set the 'converter' attribute to a function with the same
signature as time.localtime() or time.gmtime(). To change it for all
formatters, for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
|
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used.
The resulting string is returned. This function uses a user-configurable
function to convert the creation time to a tuple. By default,
time.localtime() is used; to change this for a particular formatter
instance, set the 'converter' attribute to a function with the same
signature as time.localtime() or time.gmtime(). To change it for all
formatters, for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
s = time.strftime(self.default_time_format, ct)
if self.default_msec_format:
s = self.default_msec_format % (s, record.msecs)
return s
|
(self, record, datefmt=None)
|
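A small sketch of the behaviour described in the formatTime docstring above, using only documented logging.Formatter attributes; the format strings are illustrative choices:

# Hedged sketch: control formatTime() output via datefmt and the converter attribute.
import logging
import time

formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s',
                              datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime  # render all timestamps in GMT instead of local time

handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.getLogger('demo').addHandler(handler)
logging.getLogger('demo').warning('timestamps now use GMT')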
52,992 |
logging
|
usesTime
|
Check if the format uses the creation time of the record.
|
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._style.usesTime()
|
(self)
|
52,993 |
phylib
|
_add_log_file
|
Create a log file with DEBUG level.
|
def _add_log_file(filename): # pragma: no cover
"""Create a log file with DEBUG level."""
handler = logging.FileHandler(str(filename))
handler.setLevel(logging.DEBUG)
formatter = _Formatter(fmt=_logger_fmt, datefmt=_logger_date_fmt)
handler.setFormatter(formatter)
logging.getLogger('phy').addHandler(handler)
|
(filename)
|
52,994 |
phylib.utils._misc
|
_git_version
|
Return the git version.
|
def _git_version():
"""Return the git version."""
curdir = os.getcwd()
os.chdir(str(Path(__file__).parent))
try:
with open(os.devnull, 'w') as fnull:
version = ('-git-' + subprocess.check_output(
['git', 'describe', '--abbrev=8', '--dirty', '--always', '--tags'],
stderr=fnull).strip().decode('ascii'))
return version
except (OSError, subprocess.CalledProcessError): # pragma: no cover
return ""
finally:
os.chdir(curdir)
|
()
|
52,995 |
phylib
|
add_default_handler
| null |
def add_default_handler(level='INFO', logger=logger):
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = _Formatter(fmt=_logger_fmt, datefmt=_logger_date_fmt)
handler.setFormatter(formatter)
logger.addHandler(handler)
|
(level='INFO', logger=<Logger phylib (DEBUG)>)
|
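A usage sketch for add_default_handler as defined above; importing it from the top-level phylib package is an assumption based on the package column:

# Hedged sketch: attach the colored stream handler and emit a record through it.
import logging
from phylib import add_default_handler  # assumed to be exported at package level

add_default_handler(level='DEBUG')  # defaults to the module-level 'phylib' logger
logging.getLogger('phylib').info('phylib logging is configured')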
52,998 |
phylib
|
on_exit
| null |
@atexit.register
def on_exit(): # pragma: no cover
# Close the logging handlers.
for handler in logger.handlers:
handler.close()
logger.removeHandler(handler)
|
()
|
53,001 |
phylib
|
test
|
Run the full testing suite of phylib.
|
def test(): # pragma: no cover
"""Run the full testing suite of phylib."""
import pytest
pytest.main()
|
()
|
53,003 |
meteostat.interface.base
|
Base
|
Base class that provides features which are used across the package
|
class Base:
"""
Base class that provides features which are used across the package
"""
# Base URL of the Meteostat bulk data interface
endpoint: str = "https://bulk.meteostat.net/v2/"
# Location of the cache directory
cache_dir: str = os.path.expanduser("~") + os.sep + ".meteostat" + os.sep + "cache"
# Auto clean cache directories?
autoclean: bool = True
# Maximum age of a cached file in seconds
max_age: int = 24 * 60 * 60
# Number of processes used for processing files
processes: int = 1
# Number of threads used for processing files
threads: int = 1
|
()
|
53,004 |
meteostat.interface.daily
|
Daily
|
Retrieve daily weather observations for one or multiple weather stations or
a single geographical point
|
class Daily(TimeSeries):
"""
Retrieve daily weather observations for one or multiple weather stations or
a single geographical point
"""
# The cache subdirectory
cache_subdir: str = "daily"
# Granularity
granularity = Granularity.DAILY
# Default frequency
_freq: str = "1D"
# Flag which represents model data
_model_flag = "G"
# Columns
_columns: list = [
"date",
"tavg",
"tmin",
"tmax",
"prcp",
"snow",
"wdir",
"wspd",
"wpgt",
"pres",
"tsun",
]
# Index of first meteorological column
_first_met_col = 1
# Data types
_types: dict = {
"tavg": "float64",
"tmin": "float64",
"tmax": "float64",
"prcp": "float64",
"snow": "float64",
"wdir": "float64",
"wspd": "float64",
"wpgt": "float64",
"pres": "float64",
"tsun": "float64",
}
# Columns for date parsing
_parse_dates: dict = {"time": [0]}
# Default aggregation functions
aggregations: dict = {
"tavg": "mean",
"tmin": "min",
"tmax": "max",
"prcp": "sum",
"snow": "max",
"wdir": degree_mean,
"wspd": "mean",
"wpgt": "max",
"pres": "mean",
"tsun": "sum",
}
def __init__(
self,
loc: Union[pd.DataFrame, Point, list, str], # Station(s) or geo point
start: datetime = None,
end: datetime = None,
model: bool = True, # Include model data?
flags: bool = False, # Load source flags?
) -> None:
# Initialize time series
self._init_time_series(loc, start, end, model, flags)
def expected_rows(self) -> int:
"""
Return the number of rows expected for the defined date range
"""
return (self._end - self._start).days + 1
|
(loc: Union[pandas.core.frame.DataFrame, meteostat.interface.point.Point, list, str], start: datetime.datetime = None, end: datetime.datetime = None, model: bool = True, flags: bool = False) -> None
|
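A usage sketch for the Daily interface above; the station ID and date range are illustrative values:

# Hedged sketch: fetch daily observations for a single weather station.
from datetime import datetime
from meteostat import Daily

data = Daily('10637', start=datetime(2020, 1, 1), end=datetime(2020, 12, 31))
df = data.fetch()  # pandas DataFrame indexed by time
print(df.head())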
53,005 |
meteostat.interface.daily
|
__init__
| null |
def __init__(
self,
loc: Union[pd.DataFrame, Point, list, str], # Station(s) or geo point
start: datetime = None,
end: datetime = None,
model: bool = True, # Include model data?
flags: bool = False, # Load source flags?
) -> None:
# Initialize time series
self._init_time_series(loc, start, end, model, flags)
|
(self, loc: Union[pandas.core.frame.DataFrame, meteostat.interface.point.Point, list, str], start: Optional[datetime.datetime] = None, end: Optional[datetime.datetime] = None, model: bool = True, flags: bool = False) -> NoneType
|
53,006 |
meteostat.interface.timeseries
|
_filter_model
|
Remove model data from time series
|
def _filter_model(self) -> None:
"""
Remove model data from time series
"""
columns = self._columns[self._first_met_col :]
for col_name in columns:
self._data.loc[
(pd.isna(self._data[f"{col_name}_flag"]))
| (self._data[f"{col_name}_flag"].str.contains(self._model_flag)),
col_name,
] = np.NaN
# Conditionally, remove flags from DataFrame
if not self._flags:
self._data.drop(
map(lambda col_name: f"{col_name}_flag", columns), axis=1, inplace=True
)
# Drop NaN-only rows
self._data.dropna(how="all", subset=columns, inplace=True)
|
(self) -> NoneType
|
53,007 |
meteostat.interface.meteodata
|
_get_data
|
Get all required data dumps
|
def _get_data(self) -> None:
"""
Get all required data dumps
"""
if len(self._stations) > 0:
# Get list of datasets
datasets = self._get_datasets()
# Data Processings
return processing_handler(
datasets, self._load_data, self.processes, self.threads
)
# Empty DataFrame
return pd.DataFrame(columns=[*self._types])
|
(self) -> NoneType
|
53,008 |
meteostat.interface.meteodata
|
_get_datasets
|
Get list of datasets
|
def _get_datasets(self) -> list:
"""
Get list of datasets
"""
if self.granularity == Granularity.HOURLY and self.chunked:
datasets = [
(str(station), year)
for station in self._stations
for year in self._annual_steps
]
else:
datasets = [(str(station),) for station in self._stations]
return datasets
|
(self) -> list
|
53,009 |
meteostat.interface.timeseries
|
_get_flags
|
Get all source flags
|
def _get_flags(self) -> None:
"""
Get all source flags
"""
if len(self._stations) > 0:
# Get list of datasets
datasets = self._get_datasets()
# Data Processings
return processing_handler(
datasets, self._load_flags, self.processes, self.threads
)
# Empty DataFrame
return pd.DataFrame(columns=[*self._types])
|
(self) -> NoneType
|
53,010 |
meteostat.interface.timeseries
|
_init_time_series
|
Common initialization for all time series, regardless
of its granularity
|
def _init_time_series(
self,
loc: Union[pd.DataFrame, Point, list, str], # Station(s) or geo point
start: datetime = None,
end: datetime = None,
model: bool = True, # Include model data?
flags: bool = False, # Load source flags?
) -> None:
"""
Common initialization for all time series, regardless
of its granularity
"""
# Set list of weather stations based on user
# input or retrieve list of stations programmatically
# if location is a geographical point
if isinstance(loc, pd.DataFrame):
self._stations = loc.index
elif isinstance(loc, Point):
stations = loc.get_stations("daily", start, end, model)
self._stations = stations.index
else:
if not isinstance(loc, list):
loc = [loc]
self._stations = pd.Index(loc)
# Preserve settings
self._start = start if self._start is None else self._start
self._end = end if self._end is None else self._end
self._model = model
self._flags = flags
# Get data for all weather stations
self._data = self._get_data()
# Load source flags through map file
# if flags are explicitly requested or
# model data is excluded
if flags or not model:
flags = self._get_flags()
self._data = self._data.merge(
flags, on=["station", "time"], how="left", suffixes=[None, "_flag"]
)
# Remove model data from DataFrame and
# drop flags if not specified otherwise
if not model:
self._filter_model()
# Interpolate data spatially if requested
# location is a geographical point
if isinstance(loc, Point):
self._resolve_point(loc.method, stations, loc.alt, loc.adapt_temp)
# Clear cache if auto cleaning is enabled
if self.max_age > 0 and self.autoclean:
self.clear_cache()
|
(self, loc: Union[pandas.core.frame.DataFrame, meteostat.interface.point.Point, list, str], start: Optional[datetime.datetime] = None, end: Optional[datetime.datetime] = None, model: bool = True, flags: bool = False) -> NoneType
|
53,011 |
meteostat.interface.meteodata
|
_load_data
|
Load file for a single station from Meteostat
|
def _load_data(self, station: str, year: Union[int, None] = None) -> None:
"""
Load file for a single station from Meteostat
"""
# File name
file = generate_endpoint_path(self.granularity, station, year)
# Get local file path
path = get_local_file_path(self.cache_dir, self.cache_subdir, file)
# Check if file in cache
if self.max_age > 0 and file_in_cache(path, self.max_age):
# Read cached data
df = pd.read_pickle(path)
else:
# Get data from Meteostat
df = load_handler(
self.endpoint, file, self._columns, self._types, self._parse_dates
)
# Validate and prepare data for further processing
if self.granularity == Granularity.NORMALS and df.index.size > 0:
# Add weather station ID
# pylint: disable=unsupported-assignment-operation
df["station"] = station
# Set index
df = df.set_index(["station", "start", "end", "month"])
else:
df = validate_series(df, station)
# Save as Pickle
if self.max_age > 0:
df.to_pickle(path)
# Localize time column
if (
self.granularity == Granularity.HOURLY
and self._timezone is not None
and len(df.index) > 0
):
df = localize(df, self._timezone)
# Filter time period and append to DataFrame
# pylint: disable=no-else-return
if self.granularity == Granularity.NORMALS and df.index.size > 0 and self._end:
# Get time index
end = df.index.get_level_values("end")
# Filter & return
return df.loc[end == self._end]
elif not self.granularity == Granularity.NORMALS:
df = filter_time(df, self._start, self._end)
# Return
return df
|
(self, station: str, year: Optional[int] = None) -> NoneType
|
53,012 |
meteostat.interface.timeseries
|
_load_flags
|
Load flag file for a single station from Meteostat
|
def _load_flags(self, station: str, year: Union[int, None] = None) -> None:
"""
Load flag file for a single station from Meteostat
"""
# File name
file = generate_endpoint_path(self.granularity, station, year, True)
# Get local file path
path = get_local_file_path(self.cache_dir, self.cache_subdir, file)
# Check if file in cache
if self.max_age > 0 and file_in_cache(path, self.max_age):
# Read cached data
df = pd.read_pickle(path)
else:
# Get data from Meteostat
df = load_handler(
self.endpoint,
file,
self._columns,
{key: "string" for key in self._columns[self._first_met_col :]},
self._parse_dates,
)
# Validate Series
df = validate_series(df, station)
# Save as Pickle
if self.max_age > 0:
df.to_pickle(path)
# Localize time column
if (
self.granularity == Granularity.HOURLY
and self._timezone is not None
and len(df.index) > 0
):
df = localize(df, self._timezone)
# Filter time period and append to DataFrame
if self._start and self._end:
df = filter_time(df, self._start, self._end)
return df
|
(self, station: str, year: Optional[int] = None) -> NoneType
|
53,013 |
meteostat.interface.meteodata
|
_resolve_point
|
Project weather station data onto a single point
|
def _resolve_point(
self, method: str, stations: pd.DataFrame, alt: int, adapt_temp: bool
) -> None:
"""
Project weather station data onto a single point
"""
if self._stations.size == 0 or self._data.size == 0:
return None
if method == "nearest":
if adapt_temp:
# Join elevation of involved weather stations
data = self._data.join(stations["elevation"], on="station")
# Adapt temperature-like data based on altitude
data = adjust_temp(data, alt)
# Drop elevation & round
data = data.drop("elevation", axis=1).round(1)
else:
data = self._data
if self.granularity == Granularity.NORMALS:
self._data = data.groupby(level=["start", "end", "month"]).agg("first")
else:
self._data = data.groupby(
pd.Grouper(level="time", freq=self._freq)
).agg("first")
else:
# Join score and elevation of involved weather stations
data = self._data.join(stations[["score", "elevation"]], on="station")
# Adapt temperature-like data based on altitude
if adapt_temp:
data = adjust_temp(data, alt)
# Exclude non-mean data & perform aggregation
if not self.granularity == Granularity.NORMALS:
excluded = data["wdir"]
excluded = excluded.groupby(
pd.Grouper(level="time", freq=self._freq)
).agg("first")
# Aggregate mean data
if self.granularity == Granularity.NORMALS:
data = data.groupby(level=["start", "end", "month"]).apply(
weighted_average
)
# Remove obsolete index column
try:
data = data.reset_index(level=3, drop=True)
except IndexError:
pass
else:
data = data.groupby(pd.Grouper(level="time", freq=self._freq)).apply(
weighted_average
)
# Drop RangeIndex
data.index = data.index.droplevel(1)
# Merge excluded fields
data["wdir"] = excluded
# Drop score and elevation
self._data = data.drop(["score", "elevation"], axis=1).round(1)
# Set placeholder station ID
self._data["station"] = "XXXXX"
# Set index
if self.granularity == Granularity.NORMALS:
self._data = self._data.set_index("station", append=True)
self._data = self._data.reorder_levels(["station", "start", "end", "month"])
else:
self._data = self._data.set_index(
["station", self._data.index.get_level_values("time")]
)
# Set station index
self._stations = pd.Index(["XXXXX"])
|
(self, method: str, stations: pandas.core.frame.DataFrame, alt: int, adapt_temp: bool) -> NoneType
|
53,014 |
meteostat.series.aggregate
|
aggregate
|
Aggregate observations
|
def aggregate(self, freq: str = None, spatial: bool = False):
"""
Aggregate observations
"""
if self.count() > 0 and not self._data.isnull().values.all():
# Create temporal instance
temp = copy(self)
# Set default frequency if not set
if freq is None:
freq = self._freq
# Time aggregation
temp._data = temp._data.groupby(
["station", pd.Grouper(level="time", freq=freq)]
).agg(temp.aggregations)
# Spatial aggregation
if spatial:
temp._data = temp._data.groupby(
[pd.Grouper(level="time", freq=freq)]
).mean()
# Round
temp._data = temp._data.round(1)
# Return class instance
return temp
# Show warning & return self
warn("Skipping aggregation on empty DataFrame")
return self
|
(self, freq: Optional[str] = None, spatial: bool = False)
|
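A usage sketch for aggregate() as defined above, resampling a daily series to weekly buckets with the class-level aggregation functions; station and period are illustrative:

# Hedged sketch: time aggregation of a daily series.
from datetime import datetime
from meteostat import Daily

weekly = (
    Daily('10637', datetime(2020, 1, 1), datetime(2020, 12, 31))
    .aggregate('1W')  # groups by station and weekly time buckets
    .fetch()
)
print(weekly.head())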
53,015 |
meteostat.series.convert
|
convert
|
Convert columns to a different unit
|
def convert(self, units: dict):
"""
Convert columns to a different unit
"""
# Create temporal instance
temp = copy(self)
# Change data units
for parameter, unit in units.items():
if parameter in temp._columns:
temp._data[parameter] = temp._data[parameter].apply(unit)
# Return class instance
return temp
|
(self, units: dict)
|
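A usage sketch for convert() as defined above. Each value in the units dict is applied element-wise to its column, so any plain callable works; the Celsius-to-Fahrenheit lambda below is an illustrative converter, not part of the package:

# Hedged sketch: convert one column with a custom unit callable.
from datetime import datetime
from meteostat import Daily

data = Daily('10637', datetime(2020, 1, 1), datetime(2020, 12, 31))
data = data.convert({'tavg': lambda celsius: celsius * 9 / 5 + 32})
print(data.fetch()['tavg'].head())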
53,016 |
meteostat.series.count
|
count
|
Return number of rows in DataFrame
|
def count(self) -> int:
"""
Return number of rows in DataFrame
"""
return len(self._data.index)
|
(self) -> int
|
53,017 |
meteostat.series.coverage
|
coverage
|
Calculate data coverage (overall or by parameter)
|
def coverage(self, parameter: str = None) -> float:
"""
Calculate data coverage (overall or by parameter)
"""
if parameter is None:
return len(self._data.index) / self.expected_rows()
return round(self._data[parameter].count() / self.expected_rows(), 2)
|
(self, parameter: Optional[str] = None) -> float
|
53,018 |
meteostat.interface.daily
|
expected_rows
|
Return the number of rows expected for the defined date range
|
def expected_rows(self) -> int:
"""
Return the number of rows expected for the defined date range
"""
return (self._end - self._start).days + 1
|
(self) -> int
|
53,019 |
meteostat.series.fetch
|
fetch
|
Fetch DataFrame
|
def fetch(self) -> pd.DataFrame:
"""
Fetch DataFrame
"""
# Copy DataFrame
temp = copy(self._data)
# Remove station index if it's a single station
if len(self._stations) == 1 and "station" in temp.index.names:
temp = temp.reset_index(level="station", drop=True)
# Return data frame
return temp
|
(self) -> pandas.core.frame.DataFrame
|
53,020 |
meteostat.series.interpolate
|
interpolate
|
Interpolate NULL values
|
def interpolate(self, limit: int = 3):
"""
Interpolate NULL values
"""
if self.count() > 0 and not self._data.isnull().values.all():
# Create temporal instance
temp = copy(self)
# Apply interpolation
temp._data = temp._data.groupby("station", group_keys=False).apply(
lambda group: group.interpolate(
method="linear", limit=limit, limit_direction="both", axis=0
)
)
# Return class instance
return temp
# Show warning & return self
warn("Skipping interpolation on empty DataFrame")
return self
|
(self, limit: int = 3)
|
53,021 |
meteostat.series.normalize
|
normalize
|
Normalize the DataFrame
|
def normalize(self):
"""
Normalize the DataFrame
"""
if self.count() == 0:
warn("Pointless normalization of empty DataFrame")
# Create temporal instance
temp = copy(self)
if temp._start and temp._end and temp.coverage() < 1:
# Create result DataFrame
result = pd.DataFrame(columns=temp._columns[temp._first_met_col :])
# Handle tz-aware date ranges
if hasattr(temp, "_timezone") and temp._timezone is not None:
timezone = pytz.timezone(temp._timezone)
start = temp._start.astimezone(timezone)
end = temp._end.astimezone(timezone)
else:
start = temp._start
end = temp._end
# Go through list of weather stations
for station in temp._stations:
# Create data frame
df = pd.DataFrame(columns=temp._columns[temp._first_met_col :])
# Add time series
df["time"] = pd.date_range(
start,
end,
freq=self._freq,
tz=temp._timezone if hasattr(temp, "_timezone") else None,
)
# Add station ID
df["station"] = station
# Add columns
for column in temp._columns[temp._first_met_col :]:
# Add column to DataFrame
df[column] = NaN
result = pd.concat([result, df], axis=0)
# Set index
result = result.set_index(["station", "time"])
# Merge data
temp._data = (
pd.concat([temp._data, result], axis=0)
.groupby(["station", "time"], as_index=True)
.first()
)
# None -> NaN
temp._data = temp._data.fillna(NaN)
# Return class instance
return temp
|
(self)
|
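A usage sketch chaining normalize() and interpolate() from the two rows above, filling gaps in a daily series; station and period are illustrative:

# Hedged sketch: normalize to the full date range, then interpolate short gaps.
from datetime import datetime
from meteostat import Daily

data = Daily('10637', datetime(2020, 1, 1), datetime(2020, 12, 31))
df = data.normalize().interpolate(limit=3).fetch()
print(int(df['tavg'].isna().sum()), 'missing tavg values remain')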
53,022 |
meteostat.interface.hourly
|
Hourly
|
Retrieve hourly weather observations for one or multiple weather stations or
a single geographical point
|
class Hourly(TimeSeries):
"""
Retrieve hourly weather observations for one or multiple weather stations or
a single geographical point
"""
# The cache subdirectory
cache_subdir: str = "hourly"
# Granularity
granularity = Granularity.HOURLY
# Download data as annual chunks
chunked: bool = True
# The time zone
_timezone: str = None
# Default frequency
_freq: str = "1H"
# Flag which represents model data
_model_flag = "E"
# Raw data columns
_columns: list = [
"date",
"hour",
"temp",
"dwpt",
"rhum",
"prcp",
"snow",
"wdir",
"wspd",
"wpgt",
"pres",
"tsun",
"coco",
]
# Index of first meteorological column
_first_met_col = 2
# Data types
_types: dict = {
"temp": "float64",
"dwpt": "float64",
"rhum": "float64",
"prcp": "float64",
"snow": "float64",
"wdir": "float64",
"wspd": "float64",
"wpgt": "float64",
"pres": "float64",
"tsun": "float64",
"coco": "float64",
}
# Columns for date parsing
_parse_dates: dict = {"time": [0, 1]}
# Default aggregation functions
aggregations: dict = {
"temp": "mean",
"dwpt": "mean",
"rhum": "mean",
"prcp": "sum",
"snow": "max",
"wdir": degree_mean,
"wspd": "mean",
"wpgt": "max",
"pres": "mean",
"tsun": "sum",
"coco": "max",
}
def _set_time(
self, start: datetime = None, end: datetime = None, timezone: str = None
) -> None:
"""
Set & adapt the period's time zone
"""
# Don't use chunks if full dataset is requested
if start is None:
self.chunked = False
if timezone:
# Save timezone
self._timezone = timezone
if start and end:
# Initialize time zone
timezone = pytz.timezone(self._timezone)
# Set start date
start = timezone.localize(start, is_dst=None).astimezone(pytz.utc)
# Set end date
end = timezone.localize(end, is_dst=None).astimezone(pytz.utc)
if self.chunked:
self._annual_steps = [start.year + i for i in range(end.year - start.year + 1)]
self._start = start
self._end = end
def __init__(
self,
loc: Union[pd.DataFrame, Point, list, str], # Station(s) or geo point
start: datetime = None,
end: datetime = None,
timezone: str = None,
model: bool = True, # Include model data?
flags: bool = False, # Load source flags?
) -> None:
# Set time zone and adapt period
self._set_time(start, end, timezone)
# Initialize time series
self._init_time_series(loc, start, end, model, flags)
def expected_rows(self) -> int:
"""
Return the number of rows expected for the defined date range
"""
return floor((self._end - self._start).total_seconds() / 3600) + 1
|
(loc: Union[pandas.core.frame.DataFrame, meteostat.interface.point.Point, list, str], start: datetime.datetime = None, end: datetime.datetime = None, timezone: str = None, model: bool = True, flags: bool = False) -> None
|
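A usage sketch for the Hourly interface above, including the time zone handling from _set_time; station, period and zone are illustrative:

# Hedged sketch: fetch hourly observations localized to a time zone.
from datetime import datetime
from meteostat import Hourly

data = Hourly('10637',
              start=datetime(2021, 6, 1),
              end=datetime(2021, 6, 2),
              timezone='Europe/Berlin')
df = data.fetch()  # 'time' index localized to Europe/Berlin
print(df.head())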
53,023 |
meteostat.interface.hourly
|
__init__
| null |
def __init__(
self,
loc: Union[pd.DataFrame, Point, list, str], # Station(s) or geo point
start: datetime = None,
end: datetime = None,
timezone: str = None,
model: bool = True, # Include model data?
flags: bool = False, # Load source flags?
) -> None:
# Set time zone and adapt period
self._set_time(start, end, timezone)
# Initialize time series
self._init_time_series(loc, start, end, model, flags)
|
(self, loc: Union[pandas.core.frame.DataFrame, meteostat.interface.point.Point, list, str], start: Optional[datetime.datetime] = None, end: Optional[datetime.datetime] = None, timezone: Optional[str] = None, model: bool = True, flags: bool = False) -> NoneType
|
53,032 |
meteostat.interface.hourly
|
_set_time
|
Set & adapt the period's time zone
|
def _set_time(
self, start: datetime = None, end: datetime = None, timezone: str = None
) -> None:
"""
Set & adapt the period's time zone
"""
# Don't use chunks if full dataset is requested
if start is None:
self.chunked = False
if timezone:
# Save timezone
self._timezone = timezone
if start and end:
# Initialize time zone
timezone = pytz.timezone(self._timezone)
# Set start date
start = timezone.localize(start, is_dst=None).astimezone(pytz.utc)
# Set end date
end = timezone.localize(end, is_dst=None).astimezone(pytz.utc)
if self.chunked:
self._annual_steps = [start.year + i for i in range(end.year - start.year + 1)]
self._start = start
self._end = end
|
(self, start: Optional[datetime.datetime] = None, end: Optional[datetime.datetime] = None, timezone: Optional[str] = None) -> NoneType
|
53,037 |
meteostat.interface.hourly
|
expected_rows
|
Return the number of rows expected for the defined date range
|
def expected_rows(self) -> int:
"""
Return the number of rows expected for the defined date range
"""
return floor((self._end - self._start).total_seconds() / 3600) + 1
|
(self) -> int
|
53,041 |
meteostat.interface.monthly
|
Monthly
|
Retrieve monthly weather data for one or multiple weather stations or
a single geographical point
|
class Monthly(TimeSeries):
"""
Retrieve monthly weather data for one or multiple weather stations or
a single geographical point
"""
# The cache subdirectory
cache_subdir: str = "monthly"
# Granularity
granularity = Granularity.MONTHLY
# Default frequency
_freq: str = "1MS"
# Flag which represents model data
_model_flag = "I"
# Columns
_columns: list = [
"year",
"month",
"tavg",
"tmin",
"tmax",
"prcp",
"wspd",
"pres",
"tsun",
]
# Index of first meteorological column
_first_met_col = 2
# Data types
_types: dict = {
"tavg": "float64",
"tmin": "float64",
"tmax": "float64",
"prcp": "float64",
"wspd": "float64",
"pres": "float64",
"tsun": "float64",
}
# Columns for date parsing
_parse_dates: dict = {"time": [0, 1]}
# Default aggregation functions
aggregations: dict = {
"tavg": "mean",
"tmin": "mean",
"tmax": "mean",
"prcp": "sum",
"wspd": "mean",
"pres": "mean",
"tsun": "sum",
}
def __init__(
self,
loc: Union[pd.DataFrame, Point, list, str], # Station(s) or geo point
start: datetime = None,
end: datetime = None,
model: bool = True, # Include model data?
flags: bool = False, # Load source flags?
) -> None:
# Set start date
if start is not None:
start = start.replace(day=1)
# Initialize time series
self._init_time_series(loc, start, end, model, flags)
def expected_rows(self) -> int:
"""
Return the number of rows expected for the defined date range
"""
return (
(self._end.year - self._start.year) * 12
+ self._end.month
- self._start.month
) + 1
|
(loc: Union[pandas.core.frame.DataFrame, meteostat.interface.point.Point, list, str], start: datetime.datetime = None, end: datetime.datetime = None, model: bool = True, flags: bool = False) -> None
|
53,042 |
meteostat.interface.monthly
|
__init__
| null |
def __init__(
self,
loc: Union[pd.DataFrame, Point, list, str], # Station(s) or geo point
start: datetime = None,
end: datetime = None,
model: bool = True, # Include model data?
flags: bool = False, # Load source flags?
) -> None:
# Set start date
if start is not None:
start = start.replace(day=1)
# Initialize time series
self._init_time_series(loc, start, end, model, flags)
|
(self, loc: Union[pandas.core.frame.DataFrame, meteostat.interface.point.Point, list, str], start: Optional[datetime.datetime] = None, end: Optional[datetime.datetime] = None, model: bool = True, flags: bool = False) -> NoneType
|
53,055 |
meteostat.interface.monthly
|
expected_rows
|
Return the number of rows expected for the defined date range
|
def expected_rows(self) -> int:
"""
Return the number of rows expected for the defined date range
"""
return (
(self._end.year - self._start.year) * 12
+ self._end.month
- self._start.month
) + 1
|
(self) -> int
|
53,059 |
meteostat.interface.normals
|
Normals
|
Retrieve climate normals for one or multiple weather stations or
a single geographical point
|
class Normals(MeteoData):
"""
Retrieve climate normals for one or multiple weather stations or
a single geographical point
"""
# The cache subdirectory
cache_subdir: str = "normals"
# Granularity
granularity = Granularity.NORMALS
# The list of weather Stations
_stations: pd.Index = None
# The first year of the period
_start: int = None
# The last year of the period
_end: int = None
# The data frame
_data: pd.DataFrame = pd.DataFrame()
# Columns
_columns: list = [
"start",
"end",
"month",
"tmin",
"tmax",
"prcp",
"wspd",
"pres",
"tsun",
]
# Index of first meteorological column
_first_met_col = 3
# Data types
_types: dict = {
"tmin": "float64",
"tmax": "float64",
"prcp": "float64",
"wspd": "float64",
"pres": "float64",
"tsun": "float64",
}
# Which columns should be parsed as dates?
_parse_dates = None
def __init__(
self,
loc: Union[pd.DataFrame, Point, list, str],
start: int = None,
end: int = None,
) -> None:
# Set list of weather stations
if isinstance(loc, pd.DataFrame):
self._stations = loc.index
elif isinstance(loc, Point):
if start and end:
stations = loc.get_stations(
"monthly", datetime(start, 1, 1), datetime(end, 12, 31)
)
else:
stations = loc.get_stations()
self._stations = stations.index
else:
if not isinstance(loc, list):
loc = [loc]
self._stations = pd.Index(loc)
# Check period
if (start and end) and (
end - start != 29 or end % 10 != 0 or end >= datetime.now().year
):
raise ValueError("Invalid reference period")
# Set period
self._start = start
self._end = end
# Get data for all weather stations
self._data = self._get_data()
# Interpolate data
if isinstance(loc, Point):
self._resolve_point(loc.method, stations, loc.alt, loc.adapt_temp)
# Clear cache
if self.max_age > 0 and self.autoclean:
self.clear_cache()
def normalize(self):
"""
Normalize the DataFrame
"""
# Create temporal instance
temp = copy(self)
if self.count() == 0:
warn("Pointless normalization of empty DataFrame")
# Go through list of weather stations
for station in temp._stations:
# The list of periods
periods: pd.Index = pd.Index([])
# Get periods
if self.count() > 0:
periods = temp._data[
temp._data.index.get_level_values("station") == station
].index.unique("end")
elif periods.size == 0 and self._end:
periods = pd.Index([self._end])
# Go through all periods
for period in periods:
# Create DataFrame
df = pd.DataFrame(columns=temp._columns[temp._first_met_col :])
# Populate index columns
df["month"] = range(1, 13)
df["station"] = station
df["start"] = period - 29
df["end"] = period
# Set index
df.set_index(["station", "start", "end", "month"], inplace=True)
# Merge data
temp._data = (
pd.concat([temp._data, df], axis=0)
.groupby(["station", "start", "end", "month"], as_index=True)
.first()
if temp._data.index.size > 0
else df
)
# None -> NaN
temp._data = temp._data.fillna(np.NaN)
# Return class instance
return temp
def fetch(self) -> pd.DataFrame:
"""
Fetch DataFrame
"""
# Copy DataFrame
temp = copy(self._data)
# Add avg. temperature column
temp.insert(
0, "tavg", temp[["tmin", "tmax"]].dropna(how="any").mean(axis=1).round(1)
)
# Remove station index if it's a single station
if len(self._stations) == 1 and "station" in temp.index.names:
temp = temp.reset_index(level="station", drop=True)
# Remove start & end year if period is set
if self._start and self._end and self.count() > 0:
temp = temp.reset_index(level="start", drop=True)
temp = temp.reset_index(level="end", drop=True)
# Return data frame
return temp
# Import methods
from meteostat.series.convert import convert
from meteostat.series.count import count
from meteostat.core.cache import clear_cache
|
(loc: Union[pandas.core.frame.DataFrame, meteostat.interface.point.Point, list, str], start: int = None, end: int = None) -> None
|
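A usage sketch for the Normals interface above; per the check in __init__, the reference period must span 30 years and end on a decade boundary:

# Hedged sketch: 1991-2020 climate normals for one (illustrative) station.
from meteostat import Normals

normals = Normals('10637', 1991, 2020)
df = normals.fetch()  # twelve monthly rows, with a derived 'tavg' column
print(df)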
53,060 |
meteostat.interface.normals
|
__init__
| null |
def __init__(
self,
loc: Union[pd.DataFrame, Point, list, str],
start: int = None,
end: int = None,
) -> None:
# Set list of weather stations
if isinstance(loc, pd.DataFrame):
self._stations = loc.index
elif isinstance(loc, Point):
if start and end:
stations = loc.get_stations(
"monthly", datetime(start, 1, 1), datetime(end, 12, 31)
)
else:
stations = loc.get_stations()
self._stations = stations.index
else:
if not isinstance(loc, list):
loc = [loc]
self._stations = pd.Index(loc)
# Check period
if (start and end) and (
end - start != 29 or end % 10 != 0 or end >= datetime.now().year
):
raise ValueError("Invalid reference period")
# Set period
self._start = start
self._end = end
# Get data for all weather stations
self._data = self._get_data()
# Interpolate data
if isinstance(loc, Point):
self._resolve_point(loc.method, stations, loc.alt, loc.adapt_temp)
# Clear cache
if self.max_age > 0 and self.autoclean:
self.clear_cache()
|
(self, loc: Union[pandas.core.frame.DataFrame, meteostat.interface.point.Point, list, str], start: Optional[int] = None, end: Optional[int] = None) -> NoneType
|
53,067 |
meteostat.interface.normals
|
fetch
|
Fetch DataFrame
|
def fetch(self) -> pd.DataFrame:
"""
Fetch DataFrame
"""
# Copy DataFrame
temp = copy(self._data)
# Add avg. temperature column
temp.insert(
0, "tavg", temp[["tmin", "tmax"]].dropna(how="any").mean(axis=1).round(1)
)
# Remove station index if it's a single station
if len(self._stations) == 1 and "station" in temp.index.names:
temp = temp.reset_index(level="station", drop=True)
# Remove start & end year if period is set
if self._start and self._end and self.count() > 0:
temp = temp.reset_index(level="start", drop=True)
temp = temp.reset_index(level="end", drop=True)
# Return data frame
return temp
|
(self) -> pandas.core.frame.DataFrame
|
53,068 |
meteostat.interface.normals
|
normalize
|
Normalize the DataFrame
|
def normalize(self):
"""
Normalize the DataFrame
"""
# Create temporal instance
temp = copy(self)
if self.count() == 0:
warn("Pointless normalization of empty DataFrame")
# Go through list of weather stations
for station in temp._stations:
# The list of periods
periods: pd.Index = pd.Index([])
# Get periods
if self.count() > 0:
periods = temp._data[
temp._data.index.get_level_values("station") == station
].index.unique("end")
elif periods.size == 0 and self._end:
periods = pd.Index([self._end])
# Go through all periods
for period in periods:
# Create DataFrame
df = pd.DataFrame(columns=temp._columns[temp._first_met_col :])
# Populate index columns
df["month"] = range(1, 13)
df["station"] = station
df["start"] = period - 29
df["end"] = period
# Set index
df.set_index(["station", "start", "end", "month"], inplace=True)
# Merge data
temp._data = (
pd.concat([temp._data, df], axis=0)
.groupby(["station", "start", "end", "month"], as_index=True)
.first()
if temp._data.index.size > 0
else df
)
# None -> NaN
temp._data = temp._data.fillna(np.NaN)
# Return class instance
return temp
|
(self)
|
53,069 |
meteostat.interface.point
|
Point
|
Automatically select weather stations by geographic location
|
class Point:
"""
Automatically select weather stations by geographic location
"""
# The interpolation method (weighted or nearest)
method: str = "nearest"
# Maximum radius for nearby stations
radius: int = 35000
# Maximum difference in altitude
alt_range: int = 350
# Maximum number of stations
max_count: int = 4
# Adapt temperature data based on altitude
adapt_temp: bool = True
# Distance Weight
weight_dist: float = 0.6
# Altitude Weight
weight_alt: float = 0.4
# The list of weather stations
_stations: pd.Index = None
# The latitude
_lat: float = None
# The longitude
_lon: float = None
# The altitude
_alt: int = None
def __init__(self, lat: float, lon: float, alt: int = None) -> None:
self._lat = lat
self._lon = lon
self._alt = alt
if alt is None:
self.adapt_temp = False
def get_stations(
self,
freq: str = None,
start: datetime = None,
end: datetime = None,
model: bool = True,
) -> pd.DataFrame:
"""
Get list of nearby weather stations
"""
# Get nearby weather stations
stations = Stations()
stations = stations.nearby(self._lat, self._lon, self.radius)
# Guess altitude if not set
if self._alt is None:
self._alt = stations.fetch().head(self.max_count)["elevation"].mean()
# Capture unfiltered weather stations
unfiltered = stations.fetch()
if self.alt_range:
unfiltered = unfiltered[
abs(self._alt - unfiltered["elevation"]) <= self.alt_range
]
# Apply inventory filter
if freq and start and end:
age = (datetime.now() - end).days
if model == False or age > 180:
stations = stations.inventory(freq, (start, end))
# Apply altitude filter
stations = stations.fetch()
if self.alt_range:
stations = stations[
abs(self._alt - stations["elevation"]) <= self.alt_range
]
# Fill up stations
selected: int = len(stations.index)
if selected < self.max_count:
# Remove already included stations from unfiltered
unfiltered = unfiltered.loc[~unfiltered.index.isin(stations.index)]
# Append to existing DataFrame
stations = pd.concat((stations, unfiltered.head(self.max_count - selected)))
# Score values
if self.radius:
# Calculate score values
stations["score"] = (
(1 - (stations["distance"] / self.radius)) * self.weight_dist
) + (
(1 - (abs(self._alt - stations["elevation"]) / self.alt_range))
* self.weight_alt
)
# Sort by score (descending)
stations = stations.sort_values("score", ascending=False)
# Capture result
self._stations = stations.index[: self.max_count]
return stations.head(self.max_count)
@property
def alt(self) -> int:
"""
Returns the point's altitude
"""
# Return altitude
return self._alt
@property
def stations(self) -> pd.Index:
"""
Returns the point's weather stations
"""
# Return weather stations
return self._stations
|
(lat: float, lon: float, alt: int = None) -> None
|
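A usage sketch combining Point with the Daily interface shown earlier; the coordinates and altitude are illustrative values:

# Hedged sketch: spatially resolved daily data for a geographic point.
from datetime import datetime
from meteostat import Point, Daily

vancouver = Point(49.2497, -123.1193, 70)  # latitude, longitude, altitude in metres
df = Daily(vancouver, datetime(2020, 1, 1), datetime(2020, 12, 31)).fetch()
print(df.head())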
53,070 |
meteostat.interface.point
|
__init__
| null |
def __init__(self, lat: float, lon: float, alt: int = None) -> None:
self._lat = lat
self._lon = lon
self._alt = alt
if alt is None:
self.adapt_temp = False
|
(self, lat: float, lon: float, alt: Optional[int] = None) -> NoneType
|
53,071 |
meteostat.interface.point
|
get_stations
|
Get list of nearby weather stations
|
def get_stations(
self,
freq: str = None,
start: datetime = None,
end: datetime = None,
model: bool = True,
) -> pd.DataFrame:
"""
Get list of nearby weather stations
"""
# Get nearby weather stations
stations = Stations()
stations = stations.nearby(self._lat, self._lon, self.radius)
# Guess altitude if not set
if self._alt is None:
self._alt = stations.fetch().head(self.max_count)["elevation"].mean()
# Capture unfiltered weather stations
unfiltered = stations.fetch()
if self.alt_range:
unfiltered = unfiltered[
abs(self._alt - unfiltered["elevation"]) <= self.alt_range
]
# Apply inventory filter
if freq and start and end:
age = (datetime.now() - end).days
if model == False or age > 180:
stations = stations.inventory(freq, (start, end))
# Apply altitude filter
stations = stations.fetch()
if self.alt_range:
stations = stations[
abs(self._alt - stations["elevation"]) <= self.alt_range
]
# Fill up stations
selected: int = len(stations.index)
if selected < self.max_count:
# Remove already included stations from unfiltered
unfiltered = unfiltered.loc[~unfiltered.index.isin(stations.index)]
# Append to existing DataFrame
stations = pd.concat((stations, unfiltered.head(self.max_count - selected)))
# Score values
if self.radius:
# Calculate score values
stations["score"] = (
(1 - (stations["distance"] / self.radius)) * self.weight_dist
) + (
(1 - (abs(self._alt - stations["elevation"]) / self.alt_range))
* self.weight_alt
)
# Sort by score (descending)
stations = stations.sort_values("score", ascending=False)
# Capture result
self._stations = stations.index[: self.max_count]
return stations.head(self.max_count)
|
(self, freq: Optional[str] = None, start: Optional[datetime.datetime] = None, end: Optional[datetime.datetime] = None, model: bool = True) -> pandas.core.frame.DataFrame
|
53,072 |
meteostat.interface.stations
|
Stations
|
Select weather stations from the full list of stations
|
class Stations(Base):
"""
Select weather stations from the full list of stations
"""
# The cache subdirectory
cache_subdir: str = "stations"
# The list of selected weather Stations
_data: pd.DataFrame = None
# Raw data columns
_columns: list = [
"id",
"name",
"country",
"region",
"wmo",
"icao",
"latitude",
"longitude",
"elevation",
"timezone",
"hourly_start",
"hourly_end",
"daily_start",
"daily_end",
"monthly_start",
"monthly_end",
]
# Processed data columns with types
_types: dict = {
"id": "string",
"name": "object",
"country": "string",
"region": "string",
"wmo": "string",
"icao": "string",
"latitude": "float64",
"longitude": "float64",
"elevation": "float64",
"timezone": "string",
}
# Columns for date parsing
_parse_dates: list = [10, 11, 12, 13, 14, 15]
def _load(self) -> None:
"""
Load file from Meteostat
"""
# File name
file = "stations/slim.csv.gz"
# Get local file path
path = get_local_file_path(self.cache_dir, self.cache_subdir, file)
# Check if file in cache
if self.max_age > 0 and file_in_cache(path, self.max_age):
# Read cached data
df = pd.read_pickle(path)
else:
# Get data from Meteostat
df = load_handler(
self.endpoint, file, self._columns, self._types, self._parse_dates, True
)
# Add index
df = df.set_index("id")
# Save as Pickle
if self.max_age > 0:
df.to_pickle(path)
# Set data
self._data = df
def __init__(self) -> None:
# Get all weather stations
self._load()
def nearby(self, lat: float, lon: float, radius: int = None) -> "Stations":
"""
Sort/filter weather stations by physical distance
"""
# Create temporal instance
temp = copy(self)
# Get distance for each station
temp._data["distance"] = get_distance(
lat, lon, temp._data["latitude"], temp._data["longitude"]
)
# Filter by radius
if radius:
temp._data = temp._data[temp._data["distance"] <= radius]
# Sort stations by distance
temp._data.columns = temp._data.columns.str.strip()
temp._data = temp._data.sort_values("distance")
# Return self
return temp
def region(self, country: str, state: str = None) -> "Stations":
"""
Filter weather stations by country/region code
"""
# Create temporal instance
temp = copy(self)
# Country code
temp._data = temp._data[temp._data["country"] == country]
# State code
if state is not None:
temp._data = temp._data[temp._data["region"] == state]
# Return self
return temp
def bounds(self, top_left: tuple, bottom_right: tuple) -> "Stations":
"""
Filter weather stations by geographical bounds
"""
# Create temporal instance
temp = copy(self)
# Return stations in boundaries
temp._data = temp._data[
(temp._data["latitude"] <= top_left[0])
& (temp._data["latitude"] >= bottom_right[0])
& (temp._data["longitude"] <= bottom_right[1])
& (temp._data["longitude"] >= top_left[1])
]
# Return self
return temp
def inventory(
self, freq: str, required: Union[datetime, tuple, bool] = True
) -> "Stations":
"""
Filter weather stations by inventory data
"""
# Create temporal instance
temp = copy(self)
if required is True:
# Make sure data exists at all
temp._data = temp._data[(pd.isna(temp._data[freq + "_start"]) == False)]
elif isinstance(required, tuple):
# Make sure data exists across period
temp._data = temp._data[
(pd.isna(temp._data[freq + "_start"]) == False)
& (temp._data[freq + "_start"] <= required[0])
& (
temp._data[freq + "_end"] + timedelta(seconds=temp.max_age)
>= required[1]
)
]
else:
# Make sure data exists on a certain day
temp._data = temp._data[
(pd.isna(temp._data[freq + "_start"]) == False)
& (temp._data[freq + "_start"] <= required)
& (
temp._data[freq + "_end"] + timedelta(seconds=temp.max_age)
>= required
)
]
return temp
def convert(self, units: dict) -> "Stations":
"""
Convert columns to a different unit
"""
# Create temporal instance
temp = copy(self)
# Change data units
for parameter, unit in units.items():
if parameter in temp._data.columns.values:
temp._data[parameter] = temp._data[parameter].apply(unit)
# Return class instance
return temp
def count(self) -> int:
"""
Return number of weather stations in current selection
"""
return len(self._data.index)
def fetch(self, limit: int = None, sample: bool = False) -> pd.DataFrame:
"""
Fetch all weather stations or a (sampled) subset
"""
# Copy DataFrame
temp = copy(self._data)
# Return limited number of sampled entries
if sample and limit:
return temp.sample(limit)
# Return limited number of entries
if limit:
return temp.head(limit)
# Return all entries
return temp
# Import additional methods
from meteostat.core.cache import clear_cache
|
() -> None
|
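A hedged chaining example for the Stations class above; each filter returns a modified copy, so calls compose left to right. The import path, country/state codes and dates are illustrative assumptions.

from datetime import datetime
from meteostat import Stations

stations = Stations()                      # loads the full station list (cached)
stations = stations.region("CA", "BC")     # filter by country and state code
stations = stations.inventory(             # keep stations with daily data across the period
    "daily", (datetime(2010, 1, 1), datetime(2020, 12, 31))
)
print(stations.count(), "matching stations")
print(stations.fetch(5))                   # first five rows of the selection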
53,073 |
meteostat.interface.stations
|
__init__
| null |
def __init__(self) -> None:
# Get all weather stations
self._load()
|
(self) -> NoneType
|
53,074 |
meteostat.interface.stations
|
_load
|
Load file from Meteostat
|
def _load(self) -> None:
"""
Load file from Meteostat
"""
# File name
file = "stations/slim.csv.gz"
# Get local file path
path = get_local_file_path(self.cache_dir, self.cache_subdir, file)
# Check if file in cache
if self.max_age > 0 and file_in_cache(path, self.max_age):
# Read cached data
df = pd.read_pickle(path)
else:
# Get data from Meteostat
df = load_handler(
self.endpoint, file, self._columns, self._types, self._parse_dates, True
)
# Add index
df = df.set_index("id")
# Save as Pickle
if self.max_age > 0:
df.to_pickle(path)
# Set data
self._data = df
|
(self) -> NoneType
|
53,075 |
meteostat.interface.stations
|
bounds
|
Filter weather stations by geographical bounds
|
def bounds(self, top_left: tuple, bottom_right: tuple) -> "Stations":
"""
Filter weather stations by geographical bounds
"""
# Create temporal instance
temp = copy(self)
# Return stations in boundaries
temp._data = temp._data[
(temp._data["latitude"] <= top_left[0])
& (temp._data["latitude"] >= bottom_right[0])
& (temp._data["longitude"] <= bottom_right[1])
& (temp._data["longitude"] >= top_left[1])
]
# Return self
return temp
|
(self, top_left: tuple, bottom_right: tuple) -> meteostat.interface.stations.Stations
|
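A hedged sketch for bounds(): top_left and bottom_right are (latitude, longitude) tuples, with latitude decreasing and longitude increasing from the first corner to the second. The coordinates below are illustrative.

from meteostat import Stations

# Roughly the Frankfurt area: north-west corner first, south-east corner second
stations = Stations().bounds((50.2, 8.4), (49.9, 8.9))
print(stations.fetch())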
53,076 |
meteostat.interface.stations
|
convert
|
Convert columns to a different unit
|
def convert(self, units: dict) -> "Stations":
"""
Convert columns to a different unit
"""
# Create temporal instance
temp = copy(self)
# Change data units
for parameter, unit in units.items():
if parameter in temp._data.columns.values:
temp._data[parameter] = temp._data[parameter].apply(unit)
# Return class instance
return temp
|
(self, units: dict) -> meteostat.interface.stations.Stations
|
53,077 |
meteostat.interface.stations
|
count
|
Return number of weather stations in current selection
|
def count(self) -> int:
"""
Return number of weather stations in current selection
"""
return len(self._data.index)
|
(self) -> int
|
53,078 |
meteostat.interface.stations
|
fetch
|
Fetch all weather stations or a (sampled) subset
|
def fetch(self, limit: int = None, sample: bool = False) -> pd.DataFrame:
"""
Fetch all weather stations or a (sampled) subset
"""
# Copy DataFrame
temp = copy(self._data)
# Return limited number of sampled entries
if sample and limit:
return temp.sample(limit)
# Return limited number of entries
if limit:
return temp.head(limit)
# Return all entries
return temp
|
(self, limit: Optional[int] = None, sample: bool = False) -> pandas.core.frame.DataFrame
|
53,079 |
meteostat.interface.stations
|
inventory
|
Filter weather stations by inventory data
|
def inventory(
self, freq: str, required: Union[datetime, tuple, bool] = True
) -> "Stations":
"""
Filter weather stations by inventory data
"""
# Create temporal instance
temp = copy(self)
if required is True:
# Make sure data exists at all
temp._data = temp._data[(pd.isna(temp._data[freq + "_start"]) == False)]
elif isinstance(required, tuple):
# Make sure data exists across period
temp._data = temp._data[
(pd.isna(temp._data[freq + "_start"]) == False)
& (temp._data[freq + "_start"] <= required[0])
& (
temp._data[freq + "_end"] + timedelta(seconds=temp.max_age)
>= required[1]
)
]
else:
# Make sure data exists on a certain day
temp._data = temp._data[
(pd.isna(temp._data[freq + "_start"]) == False)
& (temp._data[freq + "_start"] <= required)
& (
temp._data[freq + "_end"] + timedelta(seconds=temp.max_age)
>= required
)
]
return temp
|
(self, freq: str, required: Union[datetime.datetime, tuple, bool] = True) -> meteostat.interface.stations.Stations
|
53,080 |
meteostat.interface.stations
|
nearby
|
Sort/filter weather stations by physical distance
|
def nearby(self, lat: float, lon: float, radius: int = None) -> "Stations":
"""
Sort/filter weather stations by physical distance
"""
# Create temporal instance
temp = copy(self)
# Get distance for each station
temp._data["distance"] = get_distance(
lat, lon, temp._data["latitude"], temp._data["longitude"]
)
# Filter by radius
if radius:
temp._data = temp._data[temp._data["distance"] <= radius]
# Sort stations by distance
temp._data.columns = temp._data.columns.str.strip()
temp._data = temp._data.sort_values("distance")
# Return self
return temp
|
(self, lat: float, lon: float, radius: Optional[int] = None) -> meteostat.interface.stations.Stations
|
53,081 |
meteostat.interface.stations
|
region
|
Filter weather stations by country/region code
|
def region(self, country: str, state: str = None) -> "Stations":
"""
Filter weather stations by country/region code
"""
# Create temporal instance
temp = copy(self)
# Country code
temp._data = temp._data[temp._data["country"] == country]
# State code
if state is not None:
temp._data = temp._data[temp._data["region"] == state]
# Return self
return temp
|
(self, country: str, state: Optional[str] = None) -> meteostat.interface.stations.Stations
|
53,082 |
meteostat.interface.timeseries
|
TimeSeries
|
TimeSeries class that provides features used across all time series classes
|
class TimeSeries(MeteoData):
"""
TimeSeries class that provides features used across all time series classes
"""
# The list of origin weather Stations
_origin_stations: Union[pd.Index, None] = None
# The start date
_start: Union[datetime, None] = None
# The end date
_end: Union[datetime, None] = None
# Include model data?
_model: bool = True
# Fetch source flags?
_flags: bool = False
def _load_flags(self, station: str, year: Union[int, None] = None) -> None:
"""
Load flag file for a single station from Meteostat
"""
# File name
file = generate_endpoint_path(self.granularity, station, year, True)
# Get local file path
path = get_local_file_path(self.cache_dir, self.cache_subdir, file)
# Check if file in cache
if self.max_age > 0 and file_in_cache(path, self.max_age):
# Read cached data
df = pd.read_pickle(path)
else:
# Get data from Meteostat
df = load_handler(
self.endpoint,
file,
self._columns,
{key: "string" for key in self._columns[self._first_met_col :]},
self._parse_dates,
)
# Validate Series
df = validate_series(df, station)
# Save as Pickle
if self.max_age > 0:
df.to_pickle(path)
# Localize time column
if (
self.granularity == Granularity.HOURLY
and self._timezone is not None
and len(df.index) > 0
):
df = localize(df, self._timezone)
# Filter time period and append to DataFrame
if self._start and self._end:
df = filter_time(df, self._start, self._end)
return df
def _get_flags(self) -> None:
"""
Get all source flags
"""
if len(self._stations) > 0:
# Get list of datasets
datasets = self._get_datasets()
# Data Processings
return processing_handler(
datasets, self._load_flags, self.processes, self.threads
)
# Empty DataFrame
return pd.DataFrame(columns=[*self._types])
def _filter_model(self) -> None:
"""
Remove model data from time series
"""
columns = self._columns[self._first_met_col :]
for col_name in columns:
self._data.loc[
(pd.isna(self._data[f"{col_name}_flag"]))
| (self._data[f"{col_name}_flag"].str.contains(self._model_flag)),
col_name,
] = np.NaN
# Conditionally, remove flags from DataFrame
if not self._flags:
self._data.drop(
map(lambda col_name: f"{col_name}_flag", columns), axis=1, inplace=True
)
# Drop NaN-only rows
self._data.dropna(how="all", subset=columns, inplace=True)
def _init_time_series(
self,
loc: Union[pd.DataFrame, Point, list, str], # Station(s) or geo point
start: datetime = None,
end: datetime = None,
model: bool = True, # Include model data?
flags: bool = False, # Load source flags?
) -> None:
"""
Common initialization for all time series, regardless
of their granularity
"""
# Set list of weather stations based on user
# input or retrieve list of stations programmatically
# if location is a geographical point
if isinstance(loc, pd.DataFrame):
self._stations = loc.index
elif isinstance(loc, Point):
stations = loc.get_stations("daily", start, end, model)
self._stations = stations.index
else:
if not isinstance(loc, list):
loc = [loc]
self._stations = pd.Index(loc)
# Preserve settings
self._start = start if self._start is None else self._start
self._end = end if self._end is None else self._end
self._model = model
self._flags = flags
# Get data for all weather stations
self._data = self._get_data()
# Load source flags through map file
# if flags are explicitly requested or
# model data is excluded
if flags or not model:
flags = self._get_flags()
self._data = self._data.merge(
flags, on=["station", "time"], how="left", suffixes=[None, "_flag"]
)
# Remove model data from DataFrame and
# drop flags if not specified otherwise
if not model:
self._filter_model()
# Interpolate data spatially if requested
# location is a geographical point
if isinstance(loc, Point):
self._resolve_point(loc.method, stations, loc.alt, loc.adapt_temp)
# Clear cache if auto cleaning is enabled
if self.max_age > 0 and self.autoclean:
self.clear_cache()
# Import methods
from meteostat.series.normalize import normalize
from meteostat.series.interpolate import interpolate
from meteostat.series.aggregate import aggregate
from meteostat.series.convert import convert
from meteostat.series.coverage import coverage
from meteostat.series.count import count
from meteostat.series.fetch import fetch
from meteostat.series.stations import stations
from meteostat.core.cache import clear_cache
|
()
|
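A hedged sketch of using the TimeSeries machinery above through a concrete subclass. Assumptions: meteostat.Hourly is such a subclass and accepts a station id (or Point) plus start/end datetimes, as in meteostat's public interface; the station id below is illustrative.

from datetime import datetime
from meteostat import Hourly

# "10637" is an example weather station id; model=False drops modelled values
# via the _filter_model() path shown above, flags=True keeps the source flags
data = Hourly("10637", datetime(2021, 1, 1), datetime(2021, 1, 2), model=False, flags=True)
df = data.fetch()
print(df.head())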
53,103 |
saucenao_api.saucenao_api
|
AIOSauceNao
| null |
class AIOSauceNao(SauceNao):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._session = None
async def __aenter__(self):
self._session = aiohttp.ClientSession()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if self._session:
await self._session.close()
async def from_file(self, file: BinaryIO) -> SauceResponse:
return await self._search(self.params, {'file': file})
async def from_url(self, url: str) -> SauceResponse:
params = self.params.copy()
params['url'] = url
return await self._search(params)
async def _search(self, params, files=None):
session = self._session or aiohttp.ClientSession()
async with session.post(self.SAUCENAO_URL, params=params, data=files) as resp:
status_code = resp.status
# close only if not called via 'async with AIOSauceNao(...)'
if not self._session:
await session.close()
if status_code == 200:
parsed_resp = await resp.json()
raw = self._verify_response(parsed_resp, params)
return SauceResponse(raw)
# Taken from https://saucenao.com/tools/examples/api/identify_images_v1.1.py
# Actually server returns 200 and user_id=0 if key is bad
elif status_code == 403:
raise BadKeyError('Invalid API key')
elif status_code == 413:
raise BadFileSizeError('File is too large')
elif status_code == 429:
parsed_resp = await resp.json()
if 'Daily' in parsed_resp['header']['message']:
raise LongLimitReachedError('24 hours limit reached')
raise ShortLimitReachedError('30 seconds limit reached')
raise UnknownApiError(f'Server returned status code {status_code}')
@staticmethod
def _verify_response(parsed_resp, params):
resp_header = parsed_resp['header']
status = resp_header['status']
user_id = int(resp_header['user_id'])
# Taken from https://saucenao.com/tools/examples/api/identify_images_v1.1.py
if status < 0:
raise UnknownClientError('Unknown client error, status < 0')
elif status > 0:
raise UnknownServerError('Unknown API error, status > 0')
elif user_id < 0:
raise UnknownServerError('Unknown API error, user_id < 0')
# Request passed, but api_key was ignored
elif user_id == 0 and 'api_key' in params:
raise BadKeyError('Invalid API key')
long_remaining = resp_header['long_remaining']
short_remaining = resp_header['short_remaining']
# Taken from https://saucenao.com/tools/examples/api/identify_images_v1.1.py
if short_remaining < 0:
raise ShortLimitReachedError('30 seconds limit reached')
elif long_remaining < 0:
raise LongLimitReachedError('24 hours limit reached')
return parsed_resp
|
(*args, **kwargs)
|
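A hedged usage sketch for the async client above. The API key and image URL are placeholders; the import path and the assumption that SauceResponse can be indexed into its results follow the upstream examples rather than anything shown in this row.

import asyncio
from saucenao_api import AIOSauceNao

async def main() -> None:
    # 'async with' keeps one aiohttp session open for several lookups
    async with AIOSauceNao("your_api_key") as aio:
        results = await aio.from_url("https://example.com/image.jpg")
        best = results[0]                        # assumed: SauceResponse supports indexing
        print(best.title, best.similarity, best.urls)

asyncio.run(main())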
53,104 |
saucenao_api.saucenao_api
|
__aenter__
| null |
async def __aenter__(self):
self._session = aiohttp.ClientSession()
return self
|
(self)
|
53,108 |
saucenao_api.saucenao_api
|
_verify_response
| null |
@staticmethod
def _verify_response(parsed_resp, params):
resp_header = parsed_resp['header']
status = resp_header['status']
user_id = int(resp_header['user_id'])
# Taken from https://saucenao.com/tools/examples/api/identify_images_v1.1.py
if status < 0:
raise UnknownClientError('Unknown client error, status < 0')
elif status > 0:
raise UnknownServerError('Unknown API error, status > 0')
elif user_id < 0:
raise UnknownServerError('Unknown API error, user_id < 0')
# Request passed, but api_key was ignored
elif user_id == 0 and 'api_key' in params:
raise BadKeyError('Invalid API key')
long_remaining = resp_header['long_remaining']
short_remaining = resp_header['short_remaining']
# Taken from https://saucenao.com/tools/examples/api/identify_images_v1.1.py
if short_remaining < 0:
raise ShortLimitReachedError('30 seconds limit reached')
elif long_remaining < 0:
raise LongLimitReachedError('24 hours limit reached')
return parsed_resp
|
(parsed_resp, params)
|
53,111 |
saucenao_api.containers
|
BasicSauce
| null |
class BasicSauce:
def __init__(self, raw):
result_header = raw['header']
self.raw: dict = raw
self.similarity: float = float(result_header['similarity'])
self.thumbnail: str = result_header['thumbnail']
self.index_id: int = result_header['index_id']
self.index_name: str = result_header['index_name']
self.title: Optional[str] = self._get_title(raw['data'])
self.urls: List[str] = self._get_urls(raw['data'])
self.author: Optional[str] = self._get_author(raw['data'])
@staticmethod
def _get_title(data):
# Order is important!
if 'title' in data:
return data['title']
elif 'eng_name' in data:
return data['eng_name']
elif 'material' in data:
return data['material']
elif 'source' in data:
return data['source']
elif 'created_at' in data:
return data['created_at']
@staticmethod
def _get_urls(data):
if 'ext_urls' in data:
return data['ext_urls']
elif 'getchu_id' in data:
return [f'http://www.getchu.com/soft.phtml?id={data["getchu_id"]}']
return []
@staticmethod
def _get_author(data):
# Order is important!
if 'author' in data:
return data['author']
elif 'author_name' in data:
return data['author_name']
elif 'member_name' in data:
return data['member_name']
elif 'pawoo_user_username' in data:
return data['pawoo_user_username']
elif 'twitter_user_handle' in data:
return data['twitter_user_handle']
elif 'company' in data:
return data['company']
elif 'creator' in data:
if isinstance(data['creator'], list):
return data['creator'][0]
return data['creator']
def __repr__(self):
return f'<BasicSauce(title={repr(self.title)}, similarity={self.similarity:.2f})>'
|
(raw)
|
53,112 |
saucenao_api.containers
|
__init__
| null |
def __init__(self, raw):
result_header = raw['header']
self.raw: dict = raw
self.similarity: float = float(result_header['similarity'])
self.thumbnail: str = result_header['thumbnail']
self.index_id: int = result_header['index_id']
self.index_name: str = result_header['index_name']
self.title: Optional[str] = self._get_title(raw['data'])
self.urls: List[str] = self._get_urls(raw['data'])
self.author: Optional[str] = self._get_author(raw['data'])
|
(self, raw)
|
53,113 |
saucenao_api.containers
|
__repr__
| null |
def __repr__(self):
return f'<BasicSauce(title={repr(self.title)}, similarity={self.similarity:.2f})>'
|
(self)
|
53,114 |
saucenao_api.containers
|
_get_author
| null |
@staticmethod
def _get_author(data):
# Order is important!
if 'author' in data:
return data['author']
elif 'author_name' in data:
return data['author_name']
elif 'member_name' in data:
return data['member_name']
elif 'pawoo_user_username' in data:
return data['pawoo_user_username']
elif 'twitter_user_handle' in data:
return data['twitter_user_handle']
elif 'company' in data:
return data['company']
elif 'creator' in data:
if isinstance(data['creator'], list):
return data['creator'][0]
return data['creator']
|
(data)
|
53,115 |
saucenao_api.containers
|
_get_title
| null |
@staticmethod
def _get_title(data):
# Order is important!
if 'title' in data:
return data['title']
elif 'eng_name' in data:
return data['eng_name']
elif 'material' in data:
return data['material']
elif 'source' in data:
return data['source']
elif 'created_at' in data:
return data['created_at']
|
(data)
|
53,116 |
saucenao_api.containers
|
_get_urls
| null |
@staticmethod
def _get_urls(data):
if 'ext_urls' in data:
return data['ext_urls']
elif 'getchu_id' in data:
return [f'http://www.getchu.com/soft.phtml?id={data["getchu_id"]}']
return []
|
(data)
|
53,117 |
saucenao_api.containers
|
BookSauce
| null |
class BookSauce(BasicSauce):
def __init__(self, raw):
super().__init__(raw)
data = raw['data']
self.part: str = data['part']
def __repr__(self):
return f'<BookSauce(title={repr(self.title)}, part={repr(self.part)}, similarity={self.similarity:.2f})>'
|
(raw)
|
53,118 |
saucenao_api.containers
|
__init__
| null |
def __init__(self, raw):
super().__init__(raw)
data = raw['data']
self.part: str = data['part']
|
(self, raw)
|
53,119 |
saucenao_api.containers
|
__repr__
| null |
def __repr__(self):
return f'<BookSauce(title={repr(self.title)}, part={repr(self.part)}, similarity={self.similarity:.2f})>'
|
(self)
|
53,123 |
saucenao_api.saucenao_api
|
SauceNao
| null |
class SauceNao:
SAUCENAO_URL = 'https://saucenao.com/search.php'
def __init__(self,
api_key: Optional[str] = None,
*,
testmode: int = 0,
dbmask: Optional[int] = None,
dbmaski: Optional[int] = None,
db: int = DB.ALL,
numres: int = 6,
frame: int = 1,
hide: int = Hide.NONE,
bgcolor: int = BgColor.NONE,
) -> None:
params = dict()
if api_key is not None:
params['api_key'] = api_key
if dbmask is not None:
params['dbmask'] = dbmask
if dbmaski is not None:
params['dbmaski'] = dbmaski
params['testmode'] = testmode
params['db'] = db
params['numres'] = numres
params['hide'] = hide
params['frame'] = frame
params['bgcolor'] = bgcolor # from https://saucenao.com/testing/
params['output_type'] = _OutputType.JSON
self.params = params
def from_file(self, file: BinaryIO) -> SauceResponse:
return self._search(self.params, {'file': file})
def from_url(self, url: str) -> SauceResponse:
params = self.params.copy()
params['url'] = url
return self._search(params)
def _search(self, params, files=None):
resp = requests.post(self.SAUCENAO_URL, params=params, files=files)
status_code = resp.status_code
if status_code == 200:
raw = self._verify_response(resp, params)
return SauceResponse(raw)
# Taken from https://saucenao.com/tools/examples/api/identify_images_v1.1.py
# Actually server returns 200 and user_id=0 if key is bad
elif status_code == 403:
raise BadKeyError('Invalid API key')
elif status_code == 413:
raise BadFileSizeError('File is too large')
elif status_code == 429:
if 'Daily' in resp.json()['header']['message']:
raise LongLimitReachedError('24 hours limit reached')
raise ShortLimitReachedError('30 seconds limit reached')
raise UnknownApiError(f'Server returned status code {status_code}')
@staticmethod
def _verify_response(resp, params):
parsed_resp = resp.json()
resp_header = parsed_resp['header']
status = resp_header['status']
user_id = int(resp_header['user_id'])
# Taken from https://saucenao.com/tools/examples/api/identify_images_v1.1.py
if status < 0:
raise UnknownClientError('Unknown client error, status < 0')
elif status > 0:
raise UnknownServerError('Unknown API error, status > 0')
elif user_id < 0:
raise UnknownServerError('Unknown API error, user_id < 0')
# Request passed, but api_key was ignored
elif user_id == 0 and 'api_key' in params:
raise BadKeyError('Invalid API key')
long_remaining = resp_header['long_remaining']
short_remaining = resp_header['short_remaining']
# Taken from https://saucenao.com/tools/examples/api/identify_images_v1.1.py
if short_remaining < 0:
raise ShortLimitReachedError('30 seconds limit reached')
elif long_remaining < 0:
raise LongLimitReachedError('24 hours limit reached')
return parsed_resp
|
(api_key: Optional[str] = None, *, testmode: int = 0, dbmask: Optional[int] = None, dbmaski: Optional[int] = None, db: int = 999, numres: int = 6, frame: int = 1, hide: int = 0, bgcolor: int = 'none') -> None
|
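A hedged sketch of the synchronous client above; the key and URL are placeholders, and accessing a .results list on SauceResponse is an assumption about the containers module, not something shown in this row.

from saucenao_api import SauceNao

sauce = SauceNao("your_api_key", numres=3)       # request at most three results
response = sauce.from_url("https://example.com/image.jpg")
for result in response.results:                  # assumed: parsed BasicSauce items
    print(f"{result.similarity:6.2f}  {result.index_name}  {result.title}")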
53,124 |
saucenao_api.saucenao_api
|
__init__
| null |
def __init__(self,
api_key: Optional[str] = None,
*,
testmode: int = 0,
dbmask: Optional[int] = None,
dbmaski: Optional[int] = None,
db: int = DB.ALL,
numres: int = 6,
frame: int = 1,
hide: int = Hide.NONE,
bgcolor: int = BgColor.NONE,
) -> None:
params = dict()
if api_key is not None:
params['api_key'] = api_key
if dbmask is not None:
params['dbmask'] = dbmask
if dbmaski is not None:
params['dbmaski'] = dbmaski
params['testmode'] = testmode
params['db'] = db
params['numres'] = numres
params['hide'] = hide
params['frame'] = frame
params['bgcolor'] = bgcolor # from https://saucenao.com/testing/
params['output_type'] = _OutputType.JSON
self.params = params
|
(self, api_key: Optional[str] = None, *, testmode: int = 0, dbmask: Optional[int] = None, dbmaski: Optional[int] = None, db: int = 999, numres: int = 6, frame: int = 1, hide: int = 0, bgcolor: int = 'none') -> NoneType
|
53,125 |
saucenao_api.saucenao_api
|
_search
| null |
def _search(self, params, files=None):
resp = requests.post(self.SAUCENAO_URL, params=params, files=files)
status_code = resp.status_code
if status_code == 200:
raw = self._verify_response(resp, params)
return SauceResponse(raw)
# Taken from https://saucenao.com/tools/examples/api/identify_images_v1.1.py
# Actually server returns 200 and user_id=0 if key is bad
elif status_code == 403:
raise BadKeyError('Invalid API key')
elif status_code == 413:
raise BadFileSizeError('File is too large')
elif status_code == 429:
if 'Daily' in resp.json()['header']['message']:
raise LongLimitReachedError('24 hours limit reached')
raise ShortLimitReachedError('30 seconds limit reached')
raise UnknownApiError(f'Server returned status code {status_code}')
|
(self, params, files=None)
|
53,126 |
saucenao_api.saucenao_api
|
_verify_response
| null |
@staticmethod
def _verify_response(resp, params):
parsed_resp = resp.json()
resp_header = parsed_resp['header']
status = resp_header['status']
user_id = int(resp_header['user_id'])
# Taken from https://saucenao.com/tools/examples/api/identify_images_v1.1.py
if status < 0:
raise UnknownClientError('Unknown client error, status < 0')
elif status > 0:
raise UnknownServerError('Unknown API error, status > 0')
elif user_id < 0:
raise UnknownServerError('Unknown API error, user_id < 0')
# Request passed, but api_key was ignored
elif user_id == 0 and 'api_key' in params:
raise BadKeyError('Invalid API key')
long_remaining = resp_header['long_remaining']
short_remaining = resp_header['short_remaining']
# Taken from https://saucenao.com/tools/examples/api/identify_images_v1.1.py
if short_remaining < 0:
raise ShortLimitReachedError('30 seconds limit reached')
elif long_remaining < 0:
raise LongLimitReachedError('24 hours limit reached')
return parsed_resp
|
(resp, params)
|
53,127 |
saucenao_api.saucenao_api
|
from_file
| null |
def from_file(self, file: BinaryIO) -> SauceResponse:
return self._search(self.params, {'file': file})
|
(self, file: <class 'BinaryIO'>) -> saucenao_api.containers.SauceResponse
|
53,128 |
saucenao_api.saucenao_api
|
from_url
| null |
def from_url(self, url: str) -> SauceResponse:
params = self.params.copy()
params['url'] = url
return self._search(params)
|
(self, url: str) -> saucenao_api.containers.SauceResponse
|
53,129 |
saucenao_api.containers
|
VideoSauce
| null |
class VideoSauce(BasicSauce):
def __init__(self, raw):
super().__init__(raw)
data = raw['data']
self.part: str = data['part']
self.year: str = data['year']
self.est_time: str = data['est_time']
def __repr__(self):
return f'<VideoSauce(title={repr(self.title)}, part={repr(self.part)}, similarity={self.similarity:.2f})>'
|
(raw)
|
53,130 |
saucenao_api.containers
|
__init__
| null |
def __init__(self, raw):
super().__init__(raw)
data = raw['data']
self.part: str = data['part']
self.year: str = data['year']
self.est_time: str = data['est_time']
|
(self, raw)
|
53,131 |
saucenao_api.containers
|
__repr__
| null |
def __repr__(self):
return f'<VideoSauce(title={repr(self.title)}, part={repr(self.part)}, similarity={self.similarity:.2f})>'
|
(self)
|
53,139 |
tmdbapis.objs.reload
|
Account
|
Represents a single User Account.
Attributes:
avatar_hash (str): Avatar Hash Value.
avatar_path (str): Avatar Path.
avatar_url (str): Avatar Full URL.
country (:class:`~tmdbapis.objs.simple.Country`): Country object for the ISO 3166-1 Country Code.
id (str): v3 User Account ID.
include_adult (bool): Default include adult items in search results
iso_3166_1 (str): Default ISO 3166-1 Alpha-2 Country Code of the User Account.
iso_639_1 (str): Default ISO 639-1 Language Code of the User Account.
language (:class:`~tmdbapis.objs.simple.Language`): Language object for the ISO 639-1 Language Code.
name (str): User Account Name.
username (str): User Account Username.
|
class Account(TMDbReload):
""" Represents a single User Account.
Attributes:
avatar_hash (str): Avatar Hash Value.
avatar_path (str): Avatar Path.
avatar_url (str): Avatar Full URL.
country (:class:`~tmdbapis.objs.simple.Country`): Country object for the ISO 3166-1 Country Code.
id (str): v3 User Account ID.
include_adult (bool): Default include adult items in search results
iso_3166_1 (str): Default ISO 3166-1 Alpha-2 Country Code of the User Account.
iso_639_1 (str): Default ISO 639-1 Language Code of the User Account.
language (:class:`~tmdbapis.objs.simple.Language`): Language object for the ISO 639-1 Language Code.
name (str): User Account Name.
username (str): User Account Username.
"""
def __init__(self, tmdb):
super().__init__(tmdb, None)
def _load(self, data, partial=False):
super()._load(None, partial=partial)
self.avatar_hash = self._parse(attrs=["avatar", "gravatar", "hash"])
self.avatar_path = self._parse(attrs=["avatar", "tmdb", "avatar_path"])
self.avatar_url = self._image_url(self.avatar_path)
self.country = self._tmdb._get_object(self._data, "country")
self.id = self._parse(attrs="id", value_type="int")
self.include_adult = self._parse(attrs="include_adult")
self.iso_3166_1 = self._parse(attrs="iso_3166_1")
self.iso_639_1 = self._parse(attrs="iso_639_1")
self.language = self._tmdb._get_object(self._data, "language")
self.name = self._parse(attrs="name")
self.username = self._parse(attrs="username")
self._finish(self.name)
def _full_load(self, partial=None):
return self._api.account_get_details()
def created_lists(self, v3: bool = False):
""" Alias for :meth:`~.tmdb.TMDbAPIs.created_lists` """
return self._tmdb.created_lists(v3=v3)
def favorite_movies(self, sort_by: str = None, v3: bool = False):
""" Alias for :meth:`~.tmdb.TMDbAPIs.favorite_movies` """
return self._tmdb.favorite_movies(sort_by=sort_by, v3=v3)
def favorite_tv_shows(self, sort_by: Optional[str] = None, v3: bool = False):
""" Alias for :meth:`~.tmdb.TMDbAPIs.favorite_tv_shows` """
return self._tmdb.favorite_tv_shows(sort_by=sort_by, v3=v3)
def movie_recommendations(self, sort_by: Optional[str] = None):
""" Alias for :meth:`~.tmdb.TMDbAPIs.movie_recommendations` """
return self._tmdb.movie_recommendations(sort_by=sort_by)
def movie_watchlist(self, sort_by: Optional[str] = None, v3: bool = False):
""" Alias for :meth:`~.tmdb.TMDbAPIs.movie_watchlist` """
return self._tmdb.movie_watchlist(sort_by=sort_by, v3=v3)
def rated_episodes(self, sort_by: Optional[str] = None):
""" Alias for :meth:`~.tmdb.TMDbAPIs.rated_episodes` """
return self._tmdb.rated_episodes(sort_by=sort_by)
def rated_movies(self, sort_by: Optional[str] = None, v3: bool = False):
""" Alias for :meth:`~.tmdb.TMDbAPIs.rated_movies` """
return self._tmdb.rated_movies(sort_by=sort_by, v3=v3)
def rated_tv_shows(self, sort_by: Optional[str] = None, v3: bool = False):
""" Alias for :meth:`~.tmdb.TMDbAPIs.rated_tv_shows` """
return self._tmdb.rated_tv_shows(sort_by=sort_by, v3=v3)
def tv_show_recommendations(self, sort_by: Optional[str] = None):
""" Alias for :meth:`~.tmdb.TMDbAPIs.tv_show_recommendations` """
return self._tmdb.tv_show_recommendations(sort_by=sort_by)
def tv_show_watchlist(self, sort_by: Optional[str] = None, v3: bool = False):
""" Alias for :meth:`~.tmdb.TMDbAPIs.tv_show_watchlist` """
return self._tmdb.tv_show_watchlist(sort_by=sort_by, v3=v3)
|
(tmdb)
|
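A hedged sketch grounded in the Account constructor and alias methods shown above. The TMDbAPIs client construction, its import path, and the need for an authenticated session are assumptions about the surrounding library; the API key is a placeholder.

from tmdbapis import TMDbAPIs
from tmdbapis.objs.reload import Account

tmdb = TMDbAPIs("your_v3_api_key")   # assumed entry point; account data needs an authenticated session
account = Account(tmdb)              # _full_load() calls account_get_details() on first access
print(account.username, account.iso_3166_1)

favorites = account.favorite_movies()            # alias for TMDbAPIs.favorite_movies
watchlist = account.movie_watchlist(v3=True)     # alias for TMDbAPIs.movie_watchlist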
53,140 |
tmdbapis.objs.base
|
__delattr__
| null |
def __delattr__(self, key):
raise AttributeError("Attributes cannot be deleted")
|
(self, key)
|
53,141 |
tmdbapis.objs.base
|
__eq__
| null |
def __eq__(self, other):
if type(self) is type(other):
if self.id is None and other.id is None:
return self._name == other._name
elif self.id is not None and other.id is not None:
return self.id == other.id
else:
return False
elif isinstance(other, int) and self.id is not None:
return self.id == other
else:
return str(self._name) == str(other)
|
(self, other)
|
53,142 |
tmdbapis.objs.base
|
__getattribute__
| null |
def __getattribute__(self, item):
value = super().__getattribute__(item)
if item.startswith("_") or self._loading or not self._partial or \
(isinstance(value, (list, dict, int, float)) and value) or \
(not isinstance(value, (list, dict, int, float)) and value is not None):
return value
self._load(None)
return super().__getattribute__(item)
|
(self, item)
|
53,143 |
tmdbapis.objs.reload
|
__init__
| null |
def __init__(self, tmdb):
super().__init__(tmdb, None)
|
(self, tmdb)
|
53,145 |
tmdbapis.objs.base
|
__setattr__
| null |
def __setattr__(self, key, value):
if key.startswith("_") or self._loading:
super().__setattr__(key, value)
else:
raise AttributeError("Attributes cannot be edited")
|
(self, key, value)
|