| Column | Type |
|---|---|
| repository_name | string (length 7–55) |
| func_path_in_repository | string (length 4–223) |
| func_name | string (length 1–134) |
| whole_func_string | string (length 75–104k) |
| language | string (1 class) |
| func_code_string | string (length 75–104k) |
| func_code_tokens | sequence (length 19–28.4k) |
| func_documentation_string | string (length 1–46.9k) |
| func_documentation_tokens | sequence (length 1–1.97k) |
| split_name | string (1 class) |
| func_code_url | string (length 87–315) |
RudolfCardinal/pythonlib | cardinal_pythonlib/rnc_text.py | dictlist_convert_to_float | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_text.py#L540-L549

```python
def dictlist_convert_to_float(dict_list: Iterable[Dict], key: str) -> None:
    """
    Process an iterable of dictionaries. For each dictionary ``d``, convert
    (in place) ``d[key]`` to a float. If that fails, convert it to ``None``.
    """
    for d in dict_list:
        try:
            d[key] = float(d[key])
        except ValueError:
            d[key] = None
```
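A minimal usage sketch (assuming the function above is in scope; the field name is made up): the conversion happens in place, and an unparseable string becomes ``None``.

```python
rows = [{"score": "3.5"}, {"score": "n/a"}]
dictlist_convert_to_float(rows, "score")  # modifies rows in place
print(rows)  # [{'score': 3.5}, {'score': None}]
```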
RudolfCardinal/pythonlib | cardinal_pythonlib/rnc_text.py | dictlist_convert_to_bool | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_text.py#L552-L559

```python
def dictlist_convert_to_bool(dict_list: Iterable[Dict], key: str) -> None:
    """
    Process an iterable of dictionaries. For each dictionary ``d``, convert
    (in place) ``d[key]`` to an integer Boolean: 1 if the value is ``"Y"``,
    otherwise 0. (Despite the name, this never produces ``None``.)
    """
    for d in dict_list:
        # d[key] = True if d[key] == "Y" else False
        d[key] = 1 if d[key] == "Y" else 0
```
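A similar sketch for the Boolean converter (again assuming the function above is in scope, with a made-up field name):

```python
rows = [{"consented": "Y"}, {"consented": "N"}]
dictlist_convert_to_bool(rows, "consented")
print(rows)  # [{'consented': 1}, {'consented': 0}]
```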
RudolfCardinal/pythonlib | cardinal_pythonlib/rnc_text.py | dictlist_replace | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_text.py#L562-L568

```python
def dictlist_replace(dict_list: Iterable[Dict], key: str, value: Any) -> None:
    """
    Process an iterable of dictionaries. For each dictionary ``d``, change
    (in place) ``d[key]`` to ``value``.
    """
    for d in dict_list:
        d[key] = value
```
RudolfCardinal/pythonlib | cardinal_pythonlib/rnc_text.py | dictlist_wipe_key | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_text.py#L571-L577

```python
def dictlist_wipe_key(dict_list: Iterable[Dict], key: str) -> None:
    """
    Process an iterable of dictionaries. For each dictionary ``d``, delete
    ``d[key]`` if it exists.
    """
    for d in dict_list:
        d.pop(key, None)
```
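The two helpers above compose naturally; a hedged sketch with hypothetical field names, assuming both functions are in scope:

```python
rows = [{"name": "Alice", "nhs_number": "123"}, {"name": "Bob"}]
dictlist_replace(rows, "name", "REDACTED")  # overwrite every value of the key
dictlist_wipe_key(rows, "nhs_number")       # drop the key where present
print(rows)  # [{'name': 'REDACTED'}, {'name': 'REDACTED'}]
```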
RudolfCardinal/pythonlib | cardinal_pythonlib/django/function_cache.py | get_call_signature | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/function_cache.py#L47-L70

```python
def get_call_signature(fn: FunctionType,
                       args: ArgsType,
                       kwargs: KwargsType,
                       debug_cache: bool = False) -> str:
    """
    Takes a function and its args/kwargs, and produces a string description
    of the function call (the call signature) suitable for use indirectly as
    a cache key. The string is a JSON representation. See ``make_cache_key``
    for a more suitable actual cache key.
    """
    # Note that the function won't have the __self__ argument (as in
    # fn.__self__) at this point, even if it's a member function.
    try:
        call_sig = json_encode((fn.__qualname__, args, kwargs))
    except TypeError:
        log.critical(
            "\nTo decorate using @django_cache_function without specifying "
            "cache_key, the decorated function's owning class and its "
            "parameters must be JSON-serializable (see jsonfunc.py, "
            "django_cache_fn.py).\n")
        raise
    if debug_cache:
        log.debug("Making call signature {!r}", call_sig)
    return call_sig
```
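The idea can be sketched with the standard ``json`` module; the stand-in below is an assumption for illustration (the library's own ``json_encode`` handles a wider range of types):

```python
import json

def my_func(a, b=2):
    return a + b

# JSON-encode (qualified name, args, kwargs) to describe the call
call_sig = json.dumps((my_func.__qualname__, (1,), {"b": 2}))
print(call_sig)  # ["my_func", [1], {"b": 2}]
```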
RudolfCardinal/pythonlib | cardinal_pythonlib/django/function_cache.py | make_cache_key | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/function_cache.py#L73-L110

```python
def make_cache_key(call_signature: str,
                   debug_cache: bool = False) -> str:
    """
    Takes a function and its args/kwargs, and produces a string description
    of the function call (the call signature) suitable for use as a cache
    key. The string is an MD5 hash of the JSON-encoded call signature.

    The logic behind these decisions is as follows:

    - We have a bunch of components of arbitrary type, and we need to get
      a unique string out.
    - We shouldn't use ``str()``, because that is often poorly specified;
      e.g. is ``'a.b.c'`` a ``TableId``, or is it a ``ColumnId`` with no
      ``'db'`` field?
    - We could use ``repr()``: sometimes that gives us helpful things that
      could in principle be passed to ``eval()``, in which case ``repr()``
      would be fine, but sometimes it doesn't, and gives unhelpful things
      like ``'<__main__.Thing object at 0x7ff3093ebda0>'``.
    - However, if something encodes to JSON, that representation should
      be reversible and thus contain the right sort of information.
    - Note also that bound methods will come with a ``self`` argument, for
      which the address may be very relevant...
    - Let's go with ``repr()``. Users of the cache decorator should not pass
      objects whose ``repr()`` includes the memory address of the object
      unless they want those objects to be treated as distinct.
    - Ah, no. The cache itself will pickle and unpickle things, and this
      will change memory addresses of objects. So we can't store a reference
      to an object using ``repr()`` and using ``cache.add()``/``pickle()``
      and hope they'll come out the same.
    - Use the JSON after all.
    - And do it in ``get_call_signature()``, not here.
    - That means that any class we wish to decorate WITHOUT specifying a
      cache key manually must support JSON.
    """
    key = hashlib.md5(call_signature.encode("utf-8")).hexdigest()
    if debug_cache:
        log.debug("Making cache key {} from call_signature {!r}",
                  key, call_signature)
    return key
```
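The hashing step itself is plain ``hashlib``; a runnable sketch:

```python
import hashlib

call_sig = '["my_func", [1], {"b": 2}]'
key = hashlib.md5(call_sig.encode("utf-8")).hexdigest()
print(key)  # a stable 32-character hex digest, usable as a cache key
```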
RudolfCardinal/pythonlib | cardinal_pythonlib/django/function_cache.py | django_cache_function | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/function_cache.py#L113-L179

```python
def django_cache_function(timeout: int = 5 * 60,
                          cache_key: str = '',
                          debug_cache: bool = False):
    """
    Decorator to add caching to a function in Django.
    Uses the Django default cache.

    Args:
        timeout: timeout in seconds; use None for "never expire", as 0 means
            "do not cache".
        cache_key: optional cache key to use (if falsy, we'll invent one)
        debug_cache: show hits/misses?
    """
    cache_key = cache_key or None

    def decorator(fn):
        def wrapper(*args, **kwargs):
            # - NOTE that Django returns None from cache.get() for "not in
            #   cache", so can't cache a None value;
            #   https://docs.djangoproject.com/en/1.10/topics/cache/#basic-usage  # noqa
            # - We need to store a bit more than just the function result
            #   anyway, to detect hash collisions when the user doesn't
            #   specify the cache_key, so we may as well use that format even
            #   if the user does specify the cache_key, and then we can store
            #   a None result properly as well.
            if cache_key:
                # User specified a cache key. This is easy.
                call_sig = ''
                _cache_key = cache_key
                check_stored_call_sig = False
            else:
                # User didn't specify a cache key, so we'll do one
                # automatically. Since we do this via a hash, there is a
                # small but non-zero chance of a hash collision.
                call_sig = get_call_signature(fn, args, kwargs)
                _cache_key = make_cache_key(call_sig)
                check_stored_call_sig = True
            if debug_cache:
                log.critical("Checking cache for key: " + _cache_key)
            cache_result_tuple = cache.get(_cache_key)  # TALKS TO CACHE HERE
            if cache_result_tuple is None:
                if debug_cache:
                    log.debug("Cache miss")
            else:
                if debug_cache:
                    log.debug("Cache hit")
                cached_call_sig, func_result = cache_result_tuple
                if (not check_stored_call_sig) or cached_call_sig == call_sig:
                    return func_result
                log.warning(
                    "... Cache hit was due to hash collision; "
                    "cached_call_sig {} != call_sig {}".format(
                        repr(cached_call_sig), repr(call_sig)))
            # If we get here, either it wasn't in the cache, or something
            # was in the cache that matched by cache_key but was actually a
            # hash collision. Either way, we must do the real work.
            func_result = fn(*args, **kwargs)
            cache_result_tuple = (call_sig, func_result)
            cache.set(key=_cache_key, value=cache_result_tuple,
                      timeout=timeout)  # TALKS TO CACHE HERE
            return func_result
        return wrapper
    return decorator
```
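The decorator's (call signature, result) storage pattern can be demonstrated without Django. The sketch below is an illustration of the design, not the real implementation: it swaps the Django cache for a plain dict and the library's ``json_encode`` for ``json.dumps``.

```python
import hashlib
import json

_cache = {}  # stand-in for Django's cache, for illustration only

def cached(fn):
    def wrapper(*args, **kwargs):
        call_sig = json.dumps((fn.__qualname__, args, kwargs), default=str)
        key = hashlib.md5(call_sig.encode("utf-8")).hexdigest()
        hit = _cache.get(key)
        if hit is not None:
            cached_sig, result = hit
            if cached_sig == call_sig:  # guards against hash collisions
                return result
        result = fn(*args, **kwargs)
        _cache[key] = (call_sig, result)  # store signature alongside result
        return result
    return wrapper

@cached
def square(x):
    print("computing...")
    return x * x

print(square(3))  # computing... then 9
print(square(3))  # 9, served from the cache
```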
davenquinn/Attitude | attitude/coordinates/rotations.py | transform | python | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/coordinates/rotations.py#L11-L28

```python
from scipy.linalg import expm  # matrix exponential

def transform(v1, v2):
    """
    Create a rotation matrix that maps vector 1 onto vector 2
    https://math.stackexchange.com/questions/293116/rotating-one-3d-vector-to-another
    """
    theta = angle(v1, v2)
    # Unit vector along the rotation axis
    x = N.cross(v1, v2)
    x = x / N.linalg.norm(x)
    # Skew-symmetric cross-product matrix of the axis
    A = N.array([
        [0, -x[2], x[1]],
        [x[2], 0, -x[0]],
        [-x[1], x[0], 0]])
    # The matrix exponential of the scaled axis gives the rotation.
    # NB: N.exp exponentiates element-wise and would be incorrect here.
    R = expm(A * theta)
    return R
```
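A quick runnable check of the axis-angle construction; the ``angle`` helper below is an assumption standing in for the library's own:

```python
import numpy as N
from scipy.linalg import expm

def angle(v1, v2):
    # Stand-in for the library's angle(): angle between two vectors
    c = N.dot(v1, v2) / (N.linalg.norm(v1) * N.linalg.norm(v2))
    return N.arccos(c)

v1 = N.array([1.0, 0.0, 0.0])
v2 = N.array([0.0, 1.0, 0.0])
theta = angle(v1, v2)
x = N.cross(v1, v2)
x = x / N.linalg.norm(x)
A = N.array([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
R = expm(A * theta)
print(N.allclose(R @ v1, v2))  # True: R rotates v1 onto v2
```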
davenquinn/Attitude | attitude/coordinates/rotations.py | cartesian | python | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/coordinates/rotations.py#L40-L56

```python
def cartesian(lon, lat):
    """
    Converts spherical positions in (lon, lat) to cartesian coordinates
    [x, y, z].

    For the purposes of this library's focus on orientations, this operates
    in a *north = vertical* framework. That is, positions around the equator
    are in the [x, y] plane, and dipping planes occur with higher latitudes.

    This is intuitive for strike and dip representations, as it maps
    (strike, dip) to (lon, lat). However, we note that it is distinct from
    the traditional stereonet representation, which puts the X-Y plane along
    the prime meridian.
    """
    return N.array([
        N.cos(lat) * N.cos(lon),
        N.cos(lat) * N.sin(lon),
        N.sin(lat)
    ])
```
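A one-line sanity check of the convention (assuming ``cartesian`` above is in scope): longitude 90° at the equator lands on the +y axis.

```python
import numpy as N

print(N.allclose(cartesian(N.radians(90), 0), [0, 1, 0]))  # True
```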
ivanprjcts/sdklib | sdklib/http/authorization.py | x_11paths_authorization | python | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/http/authorization.py#L35-L63

```python
def x_11paths_authorization(app_id, secret, context, utc=None):
    """
    Calculate the authentication headers to be sent with a request to the
    API.

    :param app_id:
    :param secret:
    :param context:
    :param utc:
    :return: array a map with the Authorization and Date headers needed to
        sign a Latch API request
    """
    utc = utc or context.headers[X_11PATHS_DATE_HEADER_NAME]
    url_path = ensure_url_path_starts_with_slash(context.url_path)
    url_path_query = url_path
    if context.query_params:
        url_path_query += "?%s" % (url_encode(context.query_params, sort=True))
    string_to_sign = (context.method.upper().strip() + "\n" +
                      utc + "\n" +
                      _get_11paths_serialized_headers(context.headers) + "\n" +
                      url_path_query.strip())
    if context.body_params and isinstance(context.renderer, FormRenderer):
        string_to_sign = string_to_sign + "\n" + \
            url_encode(context.body_params, sort=True).replace("&", "")
    authorization_header_value = (
        AUTHORIZATION_METHOD + AUTHORIZATION_HEADER_FIELD_SEPARATOR + app_id +
        AUTHORIZATION_HEADER_FIELD_SEPARATOR +
        _sign_data(secret, string_to_sign))
    return authorization_header_value
```
ivanprjcts/sdklib | sdklib/http/authorization.py | _sign_data | python | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/http/authorization.py#L66-L74

```python
def _sign_data(secret, data):
    """
    Sign data.

    :param data: the string to sign
    :return: string base64 encoding of the HMAC-SHA1 hash of the data
        parameter using {@code secretKey} as cipher key.
    """
    sha1_hash = hmac.new(secret.encode(), data.encode(), sha1)
    return binascii.b2a_base64(sha1_hash.digest())[:-1].decode('utf8')
```
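The signing step uses only the standard library; a runnable sketch with a made-up secret and string-to-sign:

```python
import binascii
import hmac
from hashlib import sha1

secret = "my-secret"                                 # hypothetical
data = "GET\n2024-01-01 00:00:00\n\n/api/v1/status"  # hypothetical
digest = hmac.new(secret.encode(), data.encode(), sha1).digest()
print(binascii.b2a_base64(digest)[:-1].decode("utf8"))  # base64 signature
```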
ivanprjcts/sdklib | sdklib/http/authorization.py | _get_11paths_serialized_headers | python | train | https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/http/authorization.py#L96-L113

```python
def _get_11paths_serialized_headers(x_headers):
    """
    Prepares and returns a string ready to be signed from the 11-paths
    specific HTTP headers received.

    :param x_headers: a (not necessarily ordered) map (array without
        duplicates) of the HTTP headers to be ordered.
    :return: string The serialized headers, an empty string if no headers
        are passed, or None if there's a problem such as non 11paths
        specific headers
    """
    if x_headers:
        headers = to_key_val_list(x_headers, sort=True, insensitive=True)
        serialized_headers = ""
        for key, value in headers:
            if key.lower().startswith(X_11PATHS_HEADER_PREFIX.lower()) and \
                    key.lower() != X_11PATHS_DATE_HEADER_NAME.lower():
                serialized_headers += (key.lower() +
                                       X_11PATHS_HEADER_SEPARATOR +
                                       value.replace("\n", " ") + " ")
        return serialized_headers.strip()
    else:
        return ""
```
davenquinn/Attitude | attitude/geom/transform.py | rotate_2D | python | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/geom/transform.py#L3-L9

```python
def rotate_2D(angle):
    """
    Returns a 2x2 transformation matrix to rotate
    by an angle in two dimensions
    """
    return N.array([[N.cos(angle), -N.sin(angle)],
                    [N.sin(angle), N.cos(angle)]])
```
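A quick check (assuming ``rotate_2D`` above is in scope): a 90° rotation maps the x unit vector onto y.

```python
import numpy as N

R = rotate_2D(N.pi / 2)
print(N.allclose(R @ [1, 0], [0, 1]))  # True
```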
davenquinn/Attitude | attitude/display/hyperbola.py | apparent_dip_correction | python | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/display/hyperbola.py#L9-L31

```python
def apparent_dip_correction(axes):
    """
    Produces a two-dimensional rotation matrix that
    rotates a projected dataset to correct for apparent dip
    """
    a1 = axes[0].copy()
    a1[-1] = 0
    cosa = angle(axes[0], a1, cos=True)
    _ = 1 - cosa**2
    if _ > 1e-12:
        sina = N.sqrt(_)
        if cosa < 0:
            sina *= -1
        # Construct rotation matrix
        R = N.array([[cosa, sina], [-sina, cosa]])
    else:
        # Small angle, don't bother
        # (small angles can lead to spurious results)
        R = N.identity(2)
    #if axes[0,0] < 0:
    #    return R.T
    #else:
    return R
```
davenquinn/Attitude | attitude/display/hyperbola.py | hyperbolic_errors | python | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/display/hyperbola.py#L33-L103

```python
def hyperbolic_errors(hyp_axes, xvals,
                      transformation=None, axes=None,
                      means=None, correct_apparent_dip=True,
                      reverse=False):
    """
    Creates a view of the hyperbolic error ellipse from a specific
    direction. This creates a hyperbolic quadric and slices it to form a
    conic on a 2d cartesian plane aligned with the requested direction.
    Takes x values (distance along the nominal line) and returns the
    nominal line plus the bottom and top bounds of the error hyperbola.

    kwargs:
        transformation  rotation to apply to quadric prior to slicing
                        (e.g. transformation into 'world' coordinates)
        axes            axes on which to slice the data
    """
    if means is None:
        means = N.array([0, 0])

    arr = augment_tensor(N.diag(hyp_axes))
    # Transform ellipsoid to dual hyperboloid
    hyp = conic(arr).dual()

    if len(hyp_axes) == 3:
        # Three-dimensional case
        if transformation is None:
            transformation = N.identity(3)
        if axes is None:
            axes = N.array([[0, 1, 0], [0, 0, 1]])

        hyp = hyp.transform(augment_tensor(transformation))
        n_ = N.cross(axes[0], axes[1])
        # Create a plane containing the two axes specified
        # in the function call
        p = plane(n_)  # no offset (goes through origin)
        h1 = hyp.slice(p, axes=axes)[0]
    else:
        # We have a 2d geometry
        h1 = hyp

    # Major axes of the conic sliced in the requested viewing geometry
    A = N.sqrt(h1.semiaxes())
    yvals = A[1] * N.cosh(N.arcsinh(xvals / A[0]))
    vals = N.array([xvals, yvals]).transpose()
    nom = N.array([xvals, N.zeros(xvals.shape)]).transpose()

    # Rotate the whole result if the PCA axes aren't aligned to the
    # major axes of the view coordinate system
    ax1 = apparent_dip_correction(axes)

    # This is a dirty hack to flip things left to right
    if reverse:
        ax1 = ax1.T

    # Top
    t = dot(vals, ax1).T + means[:, N.newaxis]
    # Bottom
    vals[:, -1] *= -1
    b = dot(vals, ax1).T + means[:, N.newaxis]
    nom = dot(nom, ax1).T + means[:, N.newaxis]

    return nom, b, t[:, ::-1]
```
davenquinn/Attitude | attitude/display/hyperbola.py | hyperbola_values | python | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/display/hyperbola.py#L193-L201

```python
def hyperbola_values(hyp_axes, xvals):
    """
    Returns the widths of the error hyperbola with squared semiaxes
    ``hyp_axes`` at the given x values (distance along the nominal line).
    """
    A = N.sqrt(hyp_axes)
    return A[1] * N.cosh(N.arcsinh(xvals / A[0]))
```
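A runnable sketch (assuming ``hyperbola_values`` above is in scope, with made-up semiaxes): the returned widths are smallest at x = 0 and grow along the nominal line.

```python
import numpy as N

hyp_axes = N.array([1.0, 0.25])  # hypothetical squared semiaxes
xvals = N.linspace(-2, 2, 5)
print(hyperbola_values(hyp_axes, xvals))  # minimum width 0.5 at x = 0
```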
RudolfCardinal/pythonlib | cardinal_pythonlib/sizeformatter.py | sizeof_fmt | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sizeformatter.py#L29-L38

```python
def sizeof_fmt(num: float, suffix: str = 'B') -> str:
    """
    Formats a number of bytes in a human-readable binary format (e.g.
    ``2048`` becomes ``'2.0KiB'``); from
    http://stackoverflow.com/questions/1094841.
    """
    for unit in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(num) < 1024.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f%s%s" % (num, 'Yi', suffix)
```
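Example outputs for the formatter above (assuming the function is in scope):

```python
print(sizeof_fmt(2048))       # '2.0KiB'
print(sizeof_fmt(123456789))  # '117.7MiB'
```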
RudolfCardinal/pythonlib | cardinal_pythonlib/sizeformatter.py | bytes2human | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sizeformatter.py#L52-L113

```python
def bytes2human(n: Union[int, float],
                format: str = '%(value).1f %(symbol)s',
                symbols: str = 'customary') -> str:
    """
    Converts a number of bytes into a human-readable format.
    From http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/.

    Args:
        n: number of bytes
        format: a format specification string
        symbols: can be one of ``"customary"``, ``"customary_ext"``,
            ``"iec"`` or ``"iec_ext"``; see http://goo.gl/kTQMs

    Returns:
        the formatted number

    Examples:

        >>> bytes2human(0)
        '0.0 B'
        >>> bytes2human(0.9)
        '0.0 B'
        >>> bytes2human(1)
        '1.0 B'
        >>> bytes2human(1.9)
        '1.0 B'
        >>> bytes2human(1024)
        '1.0 K'
        >>> bytes2human(1048576)
        '1.0 M'
        >>> bytes2human(1099511627776127398123789121)
        '909.5 Y'
        >>> bytes2human(9856, symbols="customary")
        '9.6 K'
        >>> bytes2human(9856, symbols="customary_ext")
        '9.6 kilo'
        >>> bytes2human(9856, symbols="iec")
        '9.6 Ki'
        >>> bytes2human(9856, symbols="iec_ext")
        '9.6 kibi'
        >>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
        '9.8 K/sec'
        >>> # precision can be adjusted by playing with %f operator
        >>> bytes2human(10000, format="%(value).5f %(symbol)s")
        '9.76562 K'
    """  # noqa
    n = int(n)
    if n < 0:
        raise ValueError("n < 0")
    symbols = SYMBOLS[symbols]
    prefix = {}
    for i, s in enumerate(symbols[1:]):
        prefix[s] = 1 << (i + 1) * 10
    for symbol in reversed(symbols[1:]):
        if n >= prefix[symbol]:
            value = float(n) / prefix[symbol]
            return format % locals()
    return format % dict(symbol=symbols[0], value=n)
```
RudolfCardinal/pythonlib | cardinal_pythonlib/sizeformatter.py | human2bytes | python

```python
def human2bytes(s: str) -> int:
    """
    Modified from
    http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/.

    Attempts to guess the string format based on default symbols
    set and return the corresponding bytes as an integer.
    When unable to recognize the format, :exc:`ValueError` is raised.

        >>> human2bytes('0 B')
        0
        >>> human2bytes('1 K')
        1024
        >>> human2bytes('1 M')
        1048576
        >>> human2bytes('1 Gi')
        1073741824
        >>> human2bytes('1 tera')
        1099511627776
        >>> human2bytes('0.5kilo')
        512
        >>> human2bytes('0.1 byte')
        0
        >>> human2bytes('1 k')  # k is an alias for K
        1024
        >>> human2bytes('12 foo')
        Traceback (most recent call last):
            ...
        ValueError: can't interpret '12 foo'
    """  # noqa
    if not s:
        raise ValueError("Can't interpret {!r} as integer".format(s))
    try:
        return int(s)
    except ValueError:
        pass
    init = s
    num = ""
    while s and s[0:1].isdigit() or s[0:1] == '.':
        num += s[0]
        s = s[1:]
    num = float(num)
    letter = s.strip()
    for name, sset in SYMBOLS.items():
        if letter in sset:
            break
    else:
        if letter == 'k':
            # treat 'k' as an alias for 'K' as per
            # https://en.wikipedia.org/wiki/Binary_prefix  # noqa
            sset = SYMBOLS['customary']
            letter = letter.upper()
        else:
            raise ValueError("can't interpret %r" % init)
    prefix = {sset[0]: 1}
    for i, s in enumerate(sset[1:]):
        prefix[s] = 1 << (i + 1) * 10
    return int(num * prefix[letter])
```
"[",
"0",
"]",
":",
"1",
"}",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"sset",
"[",
"1",
":",
"]",
")",
":",
"prefix",
"[",
"s",
"]",
"=",
"1",
"<<",
"(",
"i",
"+",
"1",
")",
"*",
"10",
"return",
"int",
"(",
"num",
"*",
"prefix",
"[",
"letter",
"]",
")"
] | Modified from
http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/.
Attempts to guess the string format based on default symbols
set and return the corresponding bytes as an integer.
When unable to recognize the format, :exc:`ValueError` is raised.
>>> human2bytes('0 B')
0
>>> human2bytes('1 K')
1024
>>> human2bytes('1 M')
1048576
>>> human2bytes('1 Gi')
1073741824
>>> human2bytes('1 tera')
1099511627776
>>> human2bytes('0.5kilo')
512
>>> human2bytes('0.1 byte')
0
>>> human2bytes('1 k') # k is an alias for K
1024
>>> human2bytes('12 foo')
Traceback (most recent call last):
...
ValueError: can't interpret '12 foo' | [
"Modified",
"from",
"http",
":",
"//",
"code",
".",
"activestate",
".",
"com",
"/",
"recipes",
"/",
"578019",
"-",
"bytes",
"-",
"to",
"-",
"human",
"-",
"human",
"-",
"to",
"-",
"bytes",
"-",
"converter",
"/",
".",
"Attempts",
"to",
"guess",
"the",
"string",
"format",
"based",
"on",
"default",
"symbols",
"set",
"and",
"return",
"the",
"corresponding",
"bytes",
"as",
"an",
"integer",
".",
"When",
"unable",
"to",
"recognize",
"the",
"format",
":",
"exc",
":",
"ValueError",
"is",
"raised",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sizeformatter.py#L116-L173 |
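A round-trip sketch pairing ``human2bytes`` with ``bytes2human`` (same assumed import path). Note that ``bytes2human``'s default one-decimal formatting makes the round trip approximate rather than exact:

from cardinal_pythonlib.sizeformatter import bytes2human, human2bytes  # assumed path

assert human2bytes('1 K') == 1024
assert human2bytes('1 Gi') == 1024 ** 3
assert human2bytes('1 tera') == 1024 ** 4

n = 10000
s = bytes2human(n)     # '9.8 K', per the docstring above
print(human2bytes(s))  # 10035 == int(9.8 * 1024): close to, but not, 10000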
RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/merge_db.py | get_all_dependencies | def get_all_dependencies(metadata: MetaData,
extra_dependencies: List[TableDependency] = None,
sort: bool = True) \
-> List[TableDependency]:
"""
Describes how the tables found in the metadata depend on each other.
(If table B contains a foreign key to table A, for example, then B depends
on A.)
Args:
metadata: the metadata to inspect
extra_dependencies: additional table dependencies to specify manually
sort: sort into alphabetical order of (parent, child) table names?
Returns:
a list of :class:`TableDependency` objects
See :func:`sort_tables_and_constraints` for the method.
"""
extra_dependencies = extra_dependencies or [] # type: List[TableDependency] # noqa
for td in extra_dependencies:
td.set_metadata_if_none(metadata)
dependencies = set([td.sqla_tuple() for td in extra_dependencies])
tables = list(metadata.tables.values()) # type: List[Table]
for table in tables:
for fkc in table.foreign_key_constraints:
if fkc.use_alter is True:
# http://docs.sqlalchemy.org/en/latest/core/constraints.html#sqlalchemy.schema.ForeignKeyConstraint.params.use_alter # noqa
continue
dependent_on = fkc.referred_table
if dependent_on is not table:
dependencies.add((dependent_on, table))
if hasattr(table, "_extra_dependencies"):
# noinspection PyProtectedMember
dependencies.update(
(parent, table) for parent in table._extra_dependencies
)
dependencies = [
TableDependency(parent_table=parent, child_table=child)
for parent, child in dependencies
]
if sort:
dependencies.sort(key=lambda td_: (td_.parent_tablename,
td_.child_tablename))
return dependencies | python | def get_all_dependencies(metadata: MetaData,
extra_dependencies: List[TableDependency] = None,
sort: bool = True) \
-> List[TableDependency]:
"""
Describes how the tables found in the metadata depend on each other.
(If table B contains a foreign key to table A, for example, then B depends
on A.)
Args:
metadata: the metadata to inspect
extra_dependencies: additional table dependencies to specify manually
sort: sort into alphabetical order of (parent, child) table names?
Returns:
a list of :class:`TableDependency` objects
See :func:`sort_tables_and_constraints` for the method.
"""
extra_dependencies = extra_dependencies or [] # type: List[TableDependency] # noqa
for td in extra_dependencies:
td.set_metadata_if_none(metadata)
dependencies = set([td.sqla_tuple() for td in extra_dependencies])
tables = list(metadata.tables.values()) # type: List[Table]
for table in tables:
for fkc in table.foreign_key_constraints:
if fkc.use_alter is True:
# http://docs.sqlalchemy.org/en/latest/core/constraints.html#sqlalchemy.schema.ForeignKeyConstraint.params.use_alter # noqa
continue
dependent_on = fkc.referred_table
if dependent_on is not table:
dependencies.add((dependent_on, table))
if hasattr(table, "_extra_dependencies"):
# noinspection PyProtectedMember
dependencies.update(
(parent, table) for parent in table._extra_dependencies
)
dependencies = [
TableDependency(parent_table=parent, child_table=child)
for parent, child in dependencies
]
if sort:
dependencies.sort(key=lambda td_: (td_.parent_tablename,
td_.child_tablename))
return dependencies | [
"def",
"get_all_dependencies",
"(",
"metadata",
":",
"MetaData",
",",
"extra_dependencies",
":",
"List",
"[",
"TableDependency",
"]",
"=",
"None",
",",
"sort",
":",
"bool",
"=",
"True",
")",
"->",
"List",
"[",
"TableDependency",
"]",
":",
"extra_dependencies",
"=",
"extra_dependencies",
"or",
"[",
"]",
"# type: List[TableDependency] # noqa",
"for",
"td",
"in",
"extra_dependencies",
":",
"td",
".",
"set_metadata_if_none",
"(",
"metadata",
")",
"dependencies",
"=",
"set",
"(",
"[",
"td",
".",
"sqla_tuple",
"(",
")",
"for",
"td",
"in",
"extra_dependencies",
"]",
")",
"tables",
"=",
"list",
"(",
"metadata",
".",
"tables",
".",
"values",
"(",
")",
")",
"# type: List[Table]",
"for",
"table",
"in",
"tables",
":",
"for",
"fkc",
"in",
"table",
".",
"foreign_key_constraints",
":",
"if",
"fkc",
".",
"use_alter",
"is",
"True",
":",
"# http://docs.sqlalchemy.org/en/latest/core/constraints.html#sqlalchemy.schema.ForeignKeyConstraint.params.use_alter # noqa",
"continue",
"dependent_on",
"=",
"fkc",
".",
"referred_table",
"if",
"dependent_on",
"is",
"not",
"table",
":",
"dependencies",
".",
"add",
"(",
"(",
"dependent_on",
",",
"table",
")",
")",
"if",
"hasattr",
"(",
"table",
",",
"\"_extra_dependencies\"",
")",
":",
"# noinspection PyProtectedMember",
"dependencies",
".",
"update",
"(",
"(",
"parent",
",",
"table",
")",
"for",
"parent",
"in",
"table",
".",
"_extra_dependencies",
")",
"dependencies",
"=",
"[",
"TableDependency",
"(",
"parent_table",
"=",
"parent",
",",
"child_table",
"=",
"child",
")",
"for",
"parent",
",",
"child",
"in",
"dependencies",
"]",
"if",
"sort",
":",
"dependencies",
".",
"sort",
"(",
"key",
"=",
"lambda",
"td_",
":",
"(",
"td_",
".",
"parent_tablename",
",",
"td_",
".",
"child_tablename",
")",
")",
"return",
"dependencies"
] | Describes how the tables found in the metadata depend on each other.
(If table B contains a foreign key to table A, for example, then B depends
on A.)
Args:
metadata: the metadata to inspect
extra_dependencies: additional table dependencies to specify manually
sort: sort into alphabetical order of (parent, child) table names?
Returns:
a list of :class:`TableDependency` objects
See :func:`sort_tables_and_constraints` for the method. | [
"Describes",
"how",
"the",
"tables",
"found",
"in",
"the",
"metadata",
"depend",
"on",
"each",
"other",
".",
"(",
"If",
"table",
"B",
"contains",
"a",
"foreign",
"key",
"to",
"table",
"A",
"for",
"example",
"then",
"B",
"depends",
"on",
"A",
".",
")"
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/merge_db.py#L207-L256 |
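A small sketch of ``get_all_dependencies`` on a two-table schema. The import path is inferred from the URL above, and the printed form of each ``TableDependency`` is whatever its ``__str__`` provides:

from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table
from cardinal_pythonlib.sqlalchemy.merge_db import get_all_dependencies  # assumed path

metadata = MetaData()
parent = Table("parent", metadata,
               Column("id", Integer, primary_key=True))
child = Table("child", metadata,
              Column("id", Integer, primary_key=True),
              Column("parent_id", Integer, ForeignKey("parent.id")))

# child carries a ForeignKey to parent, so child depends on parent; we
# therefore expect exactly one TableDependency, with "parent" as the
# parent table and "child" as the child table.
for dep in get_all_dependencies(metadata):
    print(dep)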
RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/merge_db.py | classify_tables_by_dependency_type | def classify_tables_by_dependency_type(
metadata: MetaData,
extra_dependencies: List[TableDependency] = None,
sort: bool = True) \
-> List[TableDependencyClassification]:
"""
Inspects a metadata object (optionally adding other specified dependencies)
and returns a list of objects describing their dependencies.
Args:
metadata: the :class:`MetaData` to inspect
extra_dependencies: additional dependencies
sort: sort the results by table name?
Returns:
list of :class:`TableDependencyClassification` objects, one for each
table
"""
tables = list(metadata.tables.values()) # type: List[Table]
all_deps = get_all_dependencies(metadata, extra_dependencies)
tdcmap = {} # type: Dict[Table, TableDependencyClassification]
for table in tables:
parents = [td.parent_table for td in all_deps
if td.child_table == table]
children = [td.child_table for td in all_deps
if td.parent_table == table]
tdcmap[table] = TableDependencyClassification(
table, parents=parents, children=children
)
# Check for circularity
def parents_contain(start: Table,
probe: Table) -> Tuple[bool, List[Table]]:
tdc_ = tdcmap[start]
if probe in tdc_.parents:
return True, [start, probe]
for parent in tdc_.parents:
contains_, chain_ = parents_contain(start=parent, probe=probe)
if contains_:
return True, [start] + chain_
return False, []
def children_contain(start: Table,
probe: Table) -> Tuple[bool, List[Table]]:
tdc_ = tdcmap[start]
if probe in tdc_.children:
return True, [start, probe]
for child in tdc_.children:
contains_, chain_ = children_contain(start=child, probe=probe)
if contains_:
return True, [start] + chain_
return False, []
for table in tables:
tdc = tdcmap[table]
contains, chain = parents_contain(start=table, probe=table)
if contains:
tdc.set_circular(contains, chain)
else:
contains, chain = children_contain(start=table, probe=table)
if contains:
tdc.set_circular(contains, chain)
else:
tdc.set_circular(False)
classifications = list(tdcmap.values())
if sort:
classifications.sort(key=lambda c: c.tablename)
return classifications | python | def classify_tables_by_dependency_type(
metadata: MetaData,
extra_dependencies: List[TableDependency] = None,
sort: bool = True) \
-> List[TableDependencyClassification]:
"""
Inspects a metadata object (optionally adding other specified dependencies)
and returns a list of objects describing their dependencies.
Args:
metadata: the :class:`MetaData` to inspect
extra_dependencies: additional dependencies
sort: sort the results by table name?
Returns:
list of :class:`TableDependencyClassification` objects, one for each
table
"""
tables = list(metadata.tables.values()) # type: List[Table]
all_deps = get_all_dependencies(metadata, extra_dependencies)
tdcmap = {} # type: Dict[Table, TableDependencyClassification]
for table in tables:
parents = [td.parent_table for td in all_deps
if td.child_table == table]
children = [td.child_table for td in all_deps
if td.parent_table == table]
tdcmap[table] = TableDependencyClassification(
table, parents=parents, children=children
)
# Check for circularity
def parents_contain(start: Table,
probe: Table) -> Tuple[bool, List[Table]]:
tdc_ = tdcmap[start]
if probe in tdc_.parents:
return True, [start, probe]
for parent in tdc_.parents:
contains_, chain_ = parents_contain(start=parent, probe=probe)
if contains_:
return True, [start] + chain_
return False, []
def children_contain(start: Table,
probe: Table) -> Tuple[bool, List[Table]]:
tdc_ = tdcmap[start]
if probe in tdc_.children:
return True, [start, probe]
for child in tdc_.children:
contains_, chain_ = children_contain(start=child, probe=probe)
if contains_:
return True, [start] + chain_
return False, []
for table in tables:
tdc = tdcmap[table]
contains, chain = parents_contain(start=table, probe=table)
if contains:
tdc.set_circular(contains, chain)
else:
contains, chain = children_contain(start=table, probe=table)
if contains:
tdc.set_circular(contains, chain)
else:
tdc.set_circular(False)
classifications = list(tdcmap.values())
if sort:
classifications.sort(key=lambda c: c.tablename)
return classifications | [
"def",
"classify_tables_by_dependency_type",
"(",
"metadata",
":",
"MetaData",
",",
"extra_dependencies",
":",
"List",
"[",
"TableDependency",
"]",
"=",
"None",
",",
"sort",
":",
"bool",
"=",
"True",
")",
"->",
"List",
"[",
"TableDependencyClassification",
"]",
":",
"tables",
"=",
"list",
"(",
"metadata",
".",
"tables",
".",
"values",
"(",
")",
")",
"# type: List[Table]",
"all_deps",
"=",
"get_all_dependencies",
"(",
"metadata",
",",
"extra_dependencies",
")",
"tdcmap",
"=",
"{",
"}",
"# type: Dict[Table, TableDependencyClassification]",
"for",
"table",
"in",
"tables",
":",
"parents",
"=",
"[",
"td",
".",
"parent_table",
"for",
"td",
"in",
"all_deps",
"if",
"td",
".",
"child_table",
"==",
"table",
"]",
"children",
"=",
"[",
"td",
".",
"child_table",
"for",
"td",
"in",
"all_deps",
"if",
"td",
".",
"parent_table",
"==",
"table",
"]",
"tdcmap",
"[",
"table",
"]",
"=",
"TableDependencyClassification",
"(",
"table",
",",
"parents",
"=",
"parents",
",",
"children",
"=",
"children",
")",
"# Check for circularity",
"def",
"parents_contain",
"(",
"start",
":",
"Table",
",",
"probe",
":",
"Table",
")",
"->",
"Tuple",
"[",
"bool",
",",
"List",
"[",
"Table",
"]",
"]",
":",
"tdc_",
"=",
"tdcmap",
"[",
"start",
"]",
"if",
"probe",
"in",
"tdc_",
".",
"parents",
":",
"return",
"True",
",",
"[",
"start",
",",
"probe",
"]",
"for",
"parent",
"in",
"tdc_",
".",
"parents",
":",
"contains_",
",",
"chain_",
"=",
"parents_contain",
"(",
"start",
"=",
"parent",
",",
"probe",
"=",
"probe",
")",
"if",
"contains_",
":",
"return",
"True",
",",
"[",
"start",
"]",
"+",
"chain_",
"return",
"False",
",",
"[",
"]",
"def",
"children_contain",
"(",
"start",
":",
"Table",
",",
"probe",
":",
"Table",
")",
"->",
"Tuple",
"[",
"bool",
",",
"List",
"[",
"Table",
"]",
"]",
":",
"tdc_",
"=",
"tdcmap",
"[",
"start",
"]",
"if",
"probe",
"in",
"tdc_",
".",
"children",
":",
"return",
"True",
",",
"[",
"start",
",",
"probe",
"]",
"for",
"child",
"in",
"tdc_",
".",
"children",
":",
"contains_",
",",
"chain_",
"=",
"children_contain",
"(",
"start",
"=",
"child",
",",
"probe",
"=",
"probe",
")",
"if",
"contains_",
":",
"return",
"True",
",",
"[",
"start",
"]",
"+",
"chain_",
"return",
"False",
",",
"[",
"]",
"for",
"table",
"in",
"tables",
":",
"tdc",
"=",
"tdcmap",
"[",
"table",
"]",
"contains",
",",
"chain",
"=",
"parents_contain",
"(",
"start",
"=",
"table",
",",
"probe",
"=",
"table",
")",
"if",
"contains",
":",
"tdc",
".",
"set_circular",
"(",
"contains",
",",
"chain",
")",
"else",
":",
"contains",
",",
"chain",
"=",
"children_contain",
"(",
"start",
"=",
"table",
",",
"probe",
"=",
"table",
")",
"if",
"contains",
":",
"tdc",
".",
"set_circular",
"(",
"contains",
",",
"chain",
")",
"else",
":",
"tdc",
".",
"set_circular",
"(",
"False",
")",
"classifications",
"=",
"list",
"(",
"tdcmap",
".",
"values",
"(",
")",
")",
"if",
"sort",
":",
"classifications",
".",
"sort",
"(",
"key",
"=",
"lambda",
"c",
":",
"c",
".",
"tablename",
")",
"return",
"classifications"
] | Inspects a metadata object (optionally adding other specified dependencies)
and returns a list of objects describing their dependencies.
Args:
metadata: the :class:`MetaData` to inspect
extra_dependencies: additional dependencies
sort: sort the results by table name?
Returns:
list of :class:`TableDependencyClassification` objects, one for each
table | [
"Inspects",
"a",
"metadata",
"object",
"(",
"optionally",
"adding",
"other",
"specified",
"dependencies",
")",
"and",
"returns",
"a",
"list",
"of",
"objects",
"describing",
"their",
"dependencies",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/merge_db.py#L369-L438 |
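Continuing the two-table sketch from above, ``classify_tables_by_dependency_type`` labels each table. The attribute names used below (``tablename``, ``parent_names``, ``child_names``, ``standalone``, ``circular``) are the ones the function's own code and ``merge_db`` (below) read:

from cardinal_pythonlib.sqlalchemy.merge_db import (  # assumed import path
    classify_tables_by_dependency_type,
)

# Reusing the parent/child metadata built in the previous sketch:
for tdc in classify_tables_by_dependency_type(metadata):
    print(tdc.tablename,
          "parents:", tdc.parent_names,
          "children:", tdc.child_names,
          "standalone:", tdc.standalone,
          "circular:", tdc.circular)
# Expected: "child" lists "parent" among its parents, "parent" lists
# "child" among its children, and neither is standalone or circular.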
RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/merge_db.py | merge_db | def merge_db(base_class: Type,
src_engine: Engine,
dst_session: Session,
allow_missing_src_tables: bool = True,
allow_missing_src_columns: bool = True,
translate_fn: Callable[[TranslationContext], None] = None,
skip_tables: List[TableIdentity] = None,
only_tables: List[TableIdentity] = None,
tables_to_keep_pks_for: List[TableIdentity] = None,
extra_table_dependencies: List[TableDependency] = None,
dummy_run: bool = False,
info_only: bool = False,
report_every: int = 1000,
flush_per_table: bool = True,
flush_per_record: bool = False,
commit_with_flush: bool = False,
commit_at_end: bool = True,
prevent_eager_load: bool = True,
trcon_info: Dict[str, Any] = None) -> None:
"""
Copies an entire database as far as it is described by ``metadata`` and
``base_class``, from the source database at ``src_engine`` (read via an
internally created ORM session) to ``dst_session``, and in the process:
- creates new primary keys at the destination, or raises an error if it
doesn't know how (typically something like: ``Field 'name' doesn't have a
default value``)
- maintains relationships, or raises an error if it doesn't know how
Basic method:
- Examines the metadata for the SQLAlchemy ORM base class you provide.
- Assumes that the tables exist (in the destination).
- For each table/ORM class found in the metadata:
- Queries (via the ORM) from the source.
- For each ORM instance retrieved:
- Writes information to the destination SQLAlchemy session.
- If that ORM object has relationships, process them too.
If a table is missing in the source, then that's OK if and only if
``allow_missing_src_tables`` is set. (Similarly with columns and
``allow_missing_src_columns``; we ask the ORM to perform a partial load
of a subset of attributes only.)
Args:
base_class:
your ORM base class, e.g. from ``Base = declarative_base()``
src_engine:
SQLAlchemy :class:`Engine` for the source database
dst_session:
SQLAlchemy :class:`Session` for the destination database
allow_missing_src_tables:
proceed if tables are missing from the source (allowing you to
import from older, incomplete databases)
allow_missing_src_columns:
proceed if columns are missing from the source (allowing you to
import from older, incomplete databases)
translate_fn:
optional function called with each instance, so you can modify
instances in the pipeline. Signature:
.. code-block:: python
def my_translate_fn(trcon: TranslationContext) -> None:
# We can modify trcon.newobj, or replace it (including
# setting trcon.newobj = None to omit this object).
pass
skip_tables:
tables to skip (specified as a list of :class:`TableIdentity`)
only_tables:
tables to restrict the processor to (specified as a list of
:class:`TableIdentity`)
tables_to_keep_pks_for:
tables for which PKs are guaranteed to be safe to insert into the
destination database, without modification (specified as a list of
:class:`TableIdentity`)
extra_table_dependencies:
optional list of :class:`TableDependency` objects (q.v.)
dummy_run:
don't alter the destination database
info_only:
show info, then stop
report_every:
provide a progress report every *n* records
flush_per_table:
flush the session after every table (reasonable)
flush_per_record:
flush the session after every instance (AVOID this if tables may
refer to themselves)
commit_with_flush:
``COMMIT`` with each flush?
commit_at_end:
``COMMIT`` when finished?
prevent_eager_load:
disable any eager loading (use lazy loading instead)
trcon_info:
additional dictionary passed to ``TranslationContext.info``
(see :class:`.TranslationContext`)
"""
log.info("merge_db(): starting")
if dummy_run:
log.warning("Dummy run only; destination will not be changed")
# Check parameters before we modify them
if only_tables is not None and not only_tables:
log.warning("... only_tables == []; nothing to do")
return
# Finalize parameters
skip_tables = skip_tables or [] # type: List[TableIdentity]
only_tables = only_tables or [] # type: List[TableIdentity]
tables_to_keep_pks_for = tables_to_keep_pks_for or [] # type: List[TableIdentity] # noqa
extra_table_dependencies = extra_table_dependencies or [] # type: List[TableDependency] # noqa
trcon_info = trcon_info or {} # type: Dict[str, Any]
# We need both Core and ORM for the source.
# noinspection PyUnresolvedReferences
metadata = base_class.metadata # type: MetaData
src_session = sessionmaker(bind=src_engine)() # type: Session
dst_engine = get_engine_from_session(dst_session)
tablename_to_ormclass = get_orm_classes_by_table_name_from_base(base_class)
# Tell all TableIdentity objects about their metadata
for tilist in [skip_tables, only_tables, tables_to_keep_pks_for]:
for ti in tilist:
ti.set_metadata_if_none(metadata)
for td in extra_table_dependencies:
td.set_metadata_if_none(metadata)
# Get all lists of tables as their names
skip_table_names = [ti.tablename for ti in skip_tables]
only_table_names = [ti.tablename for ti in only_tables]
tables_to_keep_pks_for = [ti.tablename for ti in tables_to_keep_pks_for] # type: List[str] # noqa
# ... now all are of type List[str]
# Safety check: this is an imperfect check for source == destination, but
# it is fairly easy to pass in the wrong URL, so let's try our best:
_src_url = get_safe_url_from_engine(src_engine)
_dst_url = get_safe_url_from_session(dst_session)
assert _src_url != _dst_url or _src_url == SQLITE_MEMORY_URL, (
"Source and destination databases are the same!"
)
# Check the right tables are present.
src_tables = sorted(get_table_names(src_engine))
dst_tables = sorted(list(tablename_to_ormclass.keys()))
log.debug("Source tables: {!r}", src_tables)
log.debug("Destination tables: {!r}", dst_tables)
if not allow_missing_src_tables:
missing_tables = sorted(
d for d in dst_tables
if d not in src_tables and d not in skip_table_names
)
if missing_tables:
raise RuntimeError("The following tables are missing from the "
"source database: " + repr(missing_tables))
table_num = 0
overall_record_num = 0
tables = list(metadata.tables.values()) # type: List[Table]
# Very helpfully, MetaData.sorted_tables produces tables in order of
# relationship dependency ("each table is preceded by all tables which
# it references");
# http://docs.sqlalchemy.org/en/latest/core/metadata.html
# HOWEVER, it only works if you specify ForeignKey relationships
# explicitly.
# We can also add in user-specified dependencies, and therefore can do the
# sorting in one step with sqlalchemy.schema.sort_tables:
ordered_tables = sort_tables(
tables,
extra_dependencies=[td.sqla_tuple() for td in extra_table_dependencies]
)
# Note that the ordering is NOT NECESSARILY CONSISTENT, though (in that
# the order of stuff it doesn't care about varies across runs).
all_dependencies = get_all_dependencies(metadata, extra_table_dependencies)
dep_classifications = classify_tables_by_dependency_type(
metadata, extra_table_dependencies)
circular = [tdc for tdc in dep_classifications if tdc.circular]
assert not circular, "Circular dependencies! {!r}".format(circular)
log.debug("All table dependencies: {}",
"; ".join(str(td) for td in all_dependencies))
log.debug("Table dependency classifications: {}",
"; ".join(str(c) for c in dep_classifications))
log.info("Processing tables in the order: {!r}",
[table.name for table in ordered_tables])
objmap = {}
def flush() -> None:
if not dummy_run:
log.debug("Flushing session")
dst_session.flush()
if commit_with_flush:
log.debug("Committing...")
dst_session.commit()
def translate(oldobj_: object, newobj_: object) -> object:
if translate_fn is None:
return newobj_
tc = TranslationContext(oldobj=oldobj_,
newobj=newobj_,
objmap=objmap,
table=table,
tablename=tablename,
src_session=src_session,
dst_session=dst_session,
src_engine=src_engine,
dst_engine=dst_engine,
missing_src_columns=missing_columns,
src_table_names=src_tables,
info=trcon_info)
translate_fn(tc)
if tc.newobj is None:
log.debug("Instance skipped by user-supplied translate_fn")
return tc.newobj
# -------------------------------------------------------------------------
# Now, per table/ORM class...
# -------------------------------------------------------------------------
for table in ordered_tables:
tablename = table.name
if tablename in skip_table_names:
log.info("... skipping table {!r} (as per skip_tables)", tablename)
continue
if only_table_names and tablename not in only_table_names:
log.info("... ignoring table {!r} (as per only_tables)", tablename)
continue
if allow_missing_src_tables and tablename not in src_tables:
log.info("... ignoring table {!r} (not in source database)",
tablename)
continue
table_num += 1
table_record_num = 0
src_columns = sorted(get_column_names(src_engine, tablename))
dst_columns = sorted([column.name for column in table.columns])
missing_columns = sorted(list(set(dst_columns) - set(src_columns)))
if not allow_missing_src_columns:
if missing_columns:
raise RuntimeError(
"The following columns are missing from source table "
"{!r}: {!r}".format(tablename, missing_columns))
orm_class = tablename_to_ormclass[tablename]
pk_attrs = get_pk_attrnames(orm_class)
c2a = colname_to_attrname_dict(orm_class)
missing_attrs = map_keys_to_values(missing_columns, c2a)
tdc = [tdc for tdc in dep_classifications if tdc.table == table][0]
log.info("Processing table {!r} via ORM class {!r}",
tablename, orm_class)
log.debug("PK attributes: {!r}", pk_attrs)
log.debug("Table: {!r}", table)
log.debug("Dependencies: parents = {!r}; children = {!r}",
tdc.parent_names, tdc.child_names)
if info_only:
log.debug("info_only; skipping table contents")
continue
def wipe_primary_key(inst: object) -> None:
for attrname in pk_attrs:
setattr(inst, attrname, None)
query = src_session.query(orm_class)
if allow_missing_src_columns and missing_columns:
src_attrs = map_keys_to_values(src_columns, c2a)
log.info("Table {} is missing columns {} in the source",
tablename, missing_columns)
log.debug("... using only columns {} via attributes {}",
src_columns, src_attrs)
query = query.options(load_only(*src_attrs))
# PROBLEM: it will not ignore the PK.
if prevent_eager_load:
query = query.options(lazyload("*"))
wipe_pk = tablename not in tables_to_keep_pks_for
# How best to deal with relationships?
#
# This doesn't work:
# - process tables in order of dependencies, eager-loading
# relationships with
# for relationship in insp.mapper.relationships: # type: RelationshipProperty # noqa
# related_col = getattr(orm_class, relationship.key)
# query = query.options(joinedload(related_col))
# - expunge from old session / make_transient / wipe_primary_key/ add
# to new session
# ... get errors like
# sqlalchemy.exc.InvalidRequestError: Object '<Parent at
# 0x7f99492440b8>' is already attached to session '7' (this is
# '6')
#
# ... at the point of dst_session.add(instance)
# ... when adding the object on the child side of the relationship
# ... I suspect that we move the Parent from session S to session D,
# but when we eager-load the Parent from the Child, that makes
# another in session S, so when we add the Child to session D, its
# parent is in session S, which is wrong.
#
# We must, therefore, take a more interventional approach, in which we
# maintain a copy of the old object, make a copy using
# copy_sqla_object, and re-assign relationships accordingly.
for instance in query.all():
# log.debug("Source instance: {!r}", instance)
table_record_num += 1
overall_record_num += 1
if table_record_num % report_every == 0:
log.info("... progress{}: on table {} ({}); record {} this "
"table; overall record {}",
" (DUMMY RUN)" if dummy_run else "",
table_num, tablename,
table_record_num, overall_record_num)
if tdc.standalone:
# Our table has neither parents nor children. We can therefore
# simply move the instance from one session to the other,
# blanking primary keys.
# https://stackoverflow.com/questions/14636192/sqlalchemy-modification-of-detached-object # noqa
src_session.expunge(instance)
make_transient(instance)
if wipe_pk:
wipe_primary_key(instance)
instance = translate(instance, instance)
if not instance:
continue # translate_fn elected to skip it
if not dummy_run:
dst_session.add(instance)
# new PK will be created when session is flushed
else:
# Our table has either parents or children. We therefore make
# a copy and place the COPY in the destination session. If
# this object may be a parent, we maintain a log (in objmap)
# of the old-to-new mapping. If this object is a child, we
# re-assign its relationships based on the old-to-new mapping
# (since we will have processed the parent table first, having
# carefully ordered them in advance).
oldobj = instance # rename for clarity
newobj = copy_sqla_object(
oldobj, omit_pk=wipe_pk, omit_fk=True,
omit_attrs=missing_attrs, debug=False
)
rewrite_relationships(oldobj, newobj, objmap, debug=False,
skip_table_names=skip_table_names)
newobj = translate(oldobj, newobj)
if not newobj:
continue # translate_fn elected to skip it
if not dummy_run:
dst_session.add(newobj)
# new PK will be created when session is flushed
if tdc.is_parent:
objmap[oldobj] = newobj # for its children's benefit
if flush_per_record:
flush()
if flush_per_table:
flush()
flush()
if commit_at_end:
log.debug("Committing...")
dst_session.commit()
log.info("merge_db(): finished") | python | def merge_db(base_class: Type,
src_engine: Engine,
dst_session: Session,
allow_missing_src_tables: bool = True,
allow_missing_src_columns: bool = True,
translate_fn: Callable[[TranslationContext], None] = None,
skip_tables: List[TableIdentity] = None,
only_tables: List[TableIdentity] = None,
tables_to_keep_pks_for: List[TableIdentity] = None,
extra_table_dependencies: List[TableDependency] = None,
dummy_run: bool = False,
info_only: bool = False,
report_every: int = 1000,
flush_per_table: bool = True,
flush_per_record: bool = False,
commit_with_flush: bool = False,
commit_at_end: bool = True,
prevent_eager_load: bool = True,
trcon_info: Dict[str, Any] = None) -> None:
"""
Copies an entire database as far as it is described by ``metadata`` and
``base_class``, from the source database at ``src_engine`` (read via an
internally created ORM session) to ``dst_session``, and in the process:
- creates new primary keys at the destination, or raises an error if it
doesn't know how (typically something like: ``Field 'name' doesn't have a
default value``)
- maintains relationships, or raises an error if it doesn't know how
Basic method:
- Examines the metadata for the SQLAlchemy ORM base class you provide.
- Assumes that the tables exist (in the destination).
- For each table/ORM class found in the metadata:
- Queries (via the ORM) from the source.
- For each ORM instance retrieved:
- Writes information to the destination SQLAlchemy session.
- If that ORM object has relationships, process them too.
If a table is missing in the source, then that's OK if and only if
``allow_missing_src_tables`` is set. (Similarly with columns and
``allow_missing_src_columns``; we ask the ORM to perform a partial load
of a subset of attributes only.)
Args:
base_class:
your ORM base class, e.g. from ``Base = declarative_base()``
src_engine:
SQLALchemy :class:`Engine` for the source database
dst_session:
SQLAlchemy :class:`Session` for the destination database
allow_missing_src_tables:
proceed if tables are missing from the source (allowing you to
import from older, incomplete databases)
allow_missing_src_columns:
proceed if columns are missing from the source (allowing you to
import from older, incomplete databases)
translate_fn:
optional function called with each instance, so you can modify
instances in the pipeline. Signature:
.. code-block:: python
def my_translate_fn(trcon: TranslationContext) -> None:
# We can modify trcon.newobj, or replace it (including
# setting trcon.newobj = None to omit this object).
pass
skip_tables:
tables to skip (specified as a list of :class:`TableIdentity`)
only_tables:
tables to restrict the processor to (specified as a list of
:class:`TableIdentity`)
tables_to_keep_pks_for:
tables for which PKs are guaranteed to be safe to insert into the
destination database, without modification (specified as a list of
:class:`TableIdentity`)
extra_table_dependencies:
optional list of :class:`TableDependency` objects (q.v.)
dummy_run:
don't alter the destination database
info_only:
show info, then stop
report_every:
provide a progress report every *n* records
flush_per_table:
flush the session after every table (reasonable)
flush_per_record:
flush the session after every instance (AVOID this if tables may
refer to themselves)
commit_with_flush:
``COMMIT`` with each flush?
commit_at_end:
``COMMIT`` when finished?
prevent_eager_load:
disable any eager loading (use lazy loading instead)
trcon_info:
additional dictionary passed to ``TranslationContext.info``
(see :class:`.TranslationContext`)
"""
log.info("merge_db(): starting")
if dummy_run:
log.warning("Dummy run only; destination will not be changed")
# Check parameters before we modify them
if only_tables is not None and not only_tables:
log.warning("... only_tables == []; nothing to do")
return
# Finalize parameters
skip_tables = skip_tables or [] # type: List[TableIdentity]
only_tables = only_tables or [] # type: List[TableIdentity]
tables_to_keep_pks_for = tables_to_keep_pks_for or [] # type: List[TableIdentity] # noqa
extra_table_dependencies = extra_table_dependencies or [] # type: List[TableDependency] # noqa
trcon_info = trcon_info or {} # type: Dict[str, Any]
# We need both Core and ORM for the source.
# noinspection PyUnresolvedReferences
metadata = base_class.metadata # type: MetaData
src_session = sessionmaker(bind=src_engine)() # type: Session
dst_engine = get_engine_from_session(dst_session)
tablename_to_ormclass = get_orm_classes_by_table_name_from_base(base_class)
# Tell all TableIdentity objects about their metadata
for tilist in [skip_tables, only_tables, tables_to_keep_pks_for]:
for ti in tilist:
ti.set_metadata_if_none(metadata)
for td in extra_table_dependencies:
td.set_metadata_if_none(metadata)
# Get all lists of tables as their names
skip_table_names = [ti.tablename for ti in skip_tables]
only_table_names = [ti.tablename for ti in only_tables]
tables_to_keep_pks_for = [ti.tablename for ti in tables_to_keep_pks_for] # type: List[str] # noqa
# ... now all are of type List[str]
# Safety check: this is an imperfect check for source == destination, but
# it is fairly easy to pass in the wrong URL, so let's try our best:
_src_url = get_safe_url_from_engine(src_engine)
_dst_url = get_safe_url_from_session(dst_session)
assert _src_url != _dst_url or _src_url == SQLITE_MEMORY_URL, (
"Source and destination databases are the same!"
)
# Check the right tables are present.
src_tables = sorted(get_table_names(src_engine))
dst_tables = sorted(list(tablename_to_ormclass.keys()))
log.debug("Source tables: {!r}", src_tables)
log.debug("Destination tables: {!r}", dst_tables)
if not allow_missing_src_tables:
missing_tables = sorted(
d for d in dst_tables
if d not in src_tables and d not in skip_table_names
)
if missing_tables:
raise RuntimeError("The following tables are missing from the "
"source database: " + repr(missing_tables))
table_num = 0
overall_record_num = 0
tables = list(metadata.tables.values()) # type: List[Table]
# Very helpfully, MetaData.sorted_tables produces tables in order of
# relationship dependency ("each table is preceded by all tables which
# it references");
# http://docs.sqlalchemy.org/en/latest/core/metadata.html
# HOWEVER, it only works if you specify ForeignKey relationships
# explicitly.
# We can also add in user-specified dependencies, and therefore can do the
# sorting in one step with sqlalchemy.schema.sort_tables:
ordered_tables = sort_tables(
tables,
extra_dependencies=[td.sqla_tuple() for td in extra_table_dependencies]
)
# Note that the ordering is NOT NECESSARILY CONSISTENT, though (in that
# the order of stuff it doesn't care about varies across runs).
all_dependencies = get_all_dependencies(metadata, extra_table_dependencies)
dep_classifications = classify_tables_by_dependency_type(
metadata, extra_table_dependencies)
circular = [tdc for tdc in dep_classifications if tdc.circular]
assert not circular, "Circular dependencies! {!r}".format(circular)
log.debug("All table dependencies: {}",
"; ".join(str(td) for td in all_dependencies))
log.debug("Table dependency classifications: {}",
"; ".join(str(c) for c in dep_classifications))
log.info("Processing tables in the order: {!r}",
[table.name for table in ordered_tables])
objmap = {}
def flush() -> None:
if not dummy_run:
log.debug("Flushing session")
dst_session.flush()
if commit_with_flush:
log.debug("Committing...")
dst_session.commit()
def translate(oldobj_: object, newobj_: object) -> object:
if translate_fn is None:
return newobj_
tc = TranslationContext(oldobj=oldobj_,
newobj=newobj_,
objmap=objmap,
table=table,
tablename=tablename,
src_session=src_session,
dst_session=dst_session,
src_engine=src_engine,
dst_engine=dst_engine,
missing_src_columns=missing_columns,
src_table_names=src_tables,
info=trcon_info)
translate_fn(tc)
if tc.newobj is None:
log.debug("Instance skipped by user-supplied translate_fn")
return tc.newobj
# -------------------------------------------------------------------------
# Now, per table/ORM class...
# -------------------------------------------------------------------------
for table in ordered_tables:
tablename = table.name
if tablename in skip_table_names:
log.info("... skipping table {!r} (as per skip_tables)", tablename)
continue
if only_table_names and tablename not in only_table_names:
log.info("... ignoring table {!r} (as per only_tables)", tablename)
continue
if allow_missing_src_tables and tablename not in src_tables:
log.info("... ignoring table {!r} (not in source database)",
tablename)
continue
table_num += 1
table_record_num = 0
src_columns = sorted(get_column_names(src_engine, tablename))
dst_columns = sorted([column.name for column in table.columns])
missing_columns = sorted(list(set(dst_columns) - set(src_columns)))
if not allow_missing_src_columns:
if missing_columns:
raise RuntimeError(
"The following columns are missing from source table "
"{!r}: {!r}".format(tablename, missing_columns))
orm_class = tablename_to_ormclass[tablename]
pk_attrs = get_pk_attrnames(orm_class)
c2a = colname_to_attrname_dict(orm_class)
missing_attrs = map_keys_to_values(missing_columns, c2a)
tdc = [tdc for tdc in dep_classifications if tdc.table == table][0]
log.info("Processing table {!r} via ORM class {!r}",
tablename, orm_class)
log.debug("PK attributes: {!r}", pk_attrs)
log.debug("Table: {!r}", table)
log.debug("Dependencies: parents = {!r}; children = {!r}",
tdc.parent_names, tdc.child_names)
if info_only:
log.debug("info_only; skipping table contents")
continue
def wipe_primary_key(inst: object) -> None:
for attrname in pk_attrs:
setattr(inst, attrname, None)
query = src_session.query(orm_class)
if allow_missing_src_columns and missing_columns:
src_attrs = map_keys_to_values(src_columns, c2a)
log.info("Table {} is missing columns {} in the source",
tablename, missing_columns)
log.debug("... using only columns {} via attributes {}",
src_columns, src_attrs)
query = query.options(load_only(*src_attrs))
# PROBLEM: it will not ignore the PK.
if prevent_eager_load:
query = query.options(lazyload("*"))
wipe_pk = tablename not in tables_to_keep_pks_for
# How best to deal with relationships?
#
# This doesn't work:
# - process tables in order of dependencies, eager-loading
# relationships with
# for relationship in insp.mapper.relationships: # type: RelationshipProperty # noqa
# related_col = getattr(orm_class, relationship.key)
# query = query.options(joinedload(related_col))
# - expunge from old session / make_transient / wipe_primary_key/ add
# to new session
# ... get errors like
# sqlalchemy.exc.InvalidRequestError: Object '<Parent at
# 0x7f99492440b8>' is already attached to session '7' (this is
# '6')
#
# ... at the point of dst_session.add(instance)
# ... when adding the object on the child side of the relationship
# ... I suspect that we move the Parent from session S to session D,
# but when we eager-load the Parent from the Child, that makes
# another in session S, so when we add the Child to session D, its
# parent is in session S, which is wrong.
#
# We must, therefore, take a more interventional approach, in which we
# maintain a copy of the old object, make a copy using
# copy_sqla_object, and re-assign relationships accordingly.
for instance in query.all():
# log.debug("Source instance: {!r}", instance)
table_record_num += 1
overall_record_num += 1
if table_record_num % report_every == 0:
log.info("... progress{}: on table {} ({}); record {} this "
"table; overall record {}",
" (DUMMY RUN)" if dummy_run else "",
table_num, tablename,
table_record_num, overall_record_num)
if tdc.standalone:
# Our table has neither parents nor children. We can therefore
# simply move the instance from one session to the other,
# blanking primary keys.
# https://stackoverflow.com/questions/14636192/sqlalchemy-modification-of-detached-object # noqa
src_session.expunge(instance)
make_transient(instance)
if wipe_pk:
wipe_primary_key(instance)
instance = translate(instance, instance)
if not instance:
continue # translate_fn elected to skip it
if not dummy_run:
dst_session.add(instance)
# new PK will be created when session is flushed
else:
# Our table has either parents or children. We therefore make
# a copy and place the COPY in the destination session. If
# this object may be a parent, we maintain a log (in objmap)
# of the old-to-new mapping. If this object is a child, we
# re-assign its relationships based on the old-to-new mapping
# (since we will have processed the parent table first, having
# carefully ordered them in advance).
oldobj = instance # rename for clarity
newobj = copy_sqla_object(
oldobj, omit_pk=wipe_pk, omit_fk=True,
omit_attrs=missing_attrs, debug=False
)
rewrite_relationships(oldobj, newobj, objmap, debug=False,
skip_table_names=skip_table_names)
newobj = translate(oldobj, newobj)
if not newobj:
continue # translate_fn elected to skip it
if not dummy_run:
dst_session.add(newobj)
# new PK will be created when session is flushed
if tdc.is_parent:
objmap[oldobj] = newobj # for its children's benefit
if flush_per_record:
flush()
if flush_per_table:
flush()
flush()
if commit_at_end:
log.debug("Committing...")
dst_session.commit()
log.info("merge_db(): finished") | [
"def",
"merge_db",
"(",
"base_class",
":",
"Type",
",",
"src_engine",
":",
"Engine",
",",
"dst_session",
":",
"Session",
",",
"allow_missing_src_tables",
":",
"bool",
"=",
"True",
",",
"allow_missing_src_columns",
":",
"bool",
"=",
"True",
",",
"translate_fn",
":",
"Callable",
"[",
"[",
"TranslationContext",
"]",
",",
"None",
"]",
"=",
"None",
",",
"skip_tables",
":",
"List",
"[",
"TableIdentity",
"]",
"=",
"None",
",",
"only_tables",
":",
"List",
"[",
"TableIdentity",
"]",
"=",
"None",
",",
"tables_to_keep_pks_for",
":",
"List",
"[",
"TableIdentity",
"]",
"=",
"None",
",",
"extra_table_dependencies",
":",
"List",
"[",
"TableDependency",
"]",
"=",
"None",
",",
"dummy_run",
":",
"bool",
"=",
"False",
",",
"info_only",
":",
"bool",
"=",
"False",
",",
"report_every",
":",
"int",
"=",
"1000",
",",
"flush_per_table",
":",
"bool",
"=",
"True",
",",
"flush_per_record",
":",
"bool",
"=",
"False",
",",
"commit_with_flush",
":",
"bool",
"=",
"False",
",",
"commit_at_end",
":",
"bool",
"=",
"True",
",",
"prevent_eager_load",
":",
"bool",
"=",
"True",
",",
"trcon_info",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"None",
")",
"->",
"None",
":",
"log",
".",
"info",
"(",
"\"merge_db(): starting\"",
")",
"if",
"dummy_run",
":",
"log",
".",
"warning",
"(",
"\"Dummy run only; destination will not be changed\"",
")",
"# Check parameters before we modify them",
"if",
"only_tables",
"is",
"not",
"None",
"and",
"not",
"only_tables",
":",
"log",
".",
"warning",
"(",
"\"... only_tables == []; nothing to do\"",
")",
"return",
"# Finalize parameters",
"skip_tables",
"=",
"skip_tables",
"or",
"[",
"]",
"# type: List[TableIdentity]",
"only_tables",
"=",
"only_tables",
"or",
"[",
"]",
"# type: List[TableIdentity]",
"tables_to_keep_pks_for",
"=",
"tables_to_keep_pks_for",
"or",
"[",
"]",
"# type: List[TableIdentity] # noqa",
"extra_table_dependencies",
"=",
"extra_table_dependencies",
"or",
"[",
"]",
"# type: List[TableDependency] # noqa",
"trcon_info",
"=",
"trcon_info",
"or",
"{",
"}",
"# type: Dict[str, Any]",
"# We need both Core and ORM for the source.",
"# noinspection PyUnresolvedReferences",
"metadata",
"=",
"base_class",
".",
"metadata",
"# type: MetaData",
"src_session",
"=",
"sessionmaker",
"(",
"bind",
"=",
"src_engine",
")",
"(",
")",
"# type: Session",
"dst_engine",
"=",
"get_engine_from_session",
"(",
"dst_session",
")",
"tablename_to_ormclass",
"=",
"get_orm_classes_by_table_name_from_base",
"(",
"base_class",
")",
"# Tell all TableIdentity objects about their metadata",
"for",
"tilist",
"in",
"[",
"skip_tables",
",",
"only_tables",
",",
"tables_to_keep_pks_for",
"]",
":",
"for",
"ti",
"in",
"tilist",
":",
"ti",
".",
"set_metadata_if_none",
"(",
"metadata",
")",
"for",
"td",
"in",
"extra_table_dependencies",
":",
"td",
".",
"set_metadata_if_none",
"(",
"metadata",
")",
"# Get all lists of tables as their names",
"skip_table_names",
"=",
"[",
"ti",
".",
"tablename",
"for",
"ti",
"in",
"skip_tables",
"]",
"only_table_names",
"=",
"[",
"ti",
".",
"tablename",
"for",
"ti",
"in",
"only_tables",
"]",
"tables_to_keep_pks_for",
"=",
"[",
"ti",
".",
"tablename",
"for",
"ti",
"in",
"tables_to_keep_pks_for",
"]",
"# type: List[str] # noqa",
"# ... now all are of type List[str]",
"# Safety check: this is an imperfect check for source == destination, but",
"# it is fairly easy to pass in the wrong URL, so let's try our best:",
"_src_url",
"=",
"get_safe_url_from_engine",
"(",
"src_engine",
")",
"_dst_url",
"=",
"get_safe_url_from_session",
"(",
"dst_session",
")",
"assert",
"_src_url",
"!=",
"_dst_url",
"or",
"_src_url",
"==",
"SQLITE_MEMORY_URL",
",",
"(",
"\"Source and destination databases are the same!\"",
")",
"# Check the right tables are present.",
"src_tables",
"=",
"sorted",
"(",
"get_table_names",
"(",
"src_engine",
")",
")",
"dst_tables",
"=",
"sorted",
"(",
"list",
"(",
"tablename_to_ormclass",
".",
"keys",
"(",
")",
")",
")",
"log",
".",
"debug",
"(",
"\"Source tables: {!r}\"",
",",
"src_tables",
")",
"log",
".",
"debug",
"(",
"\"Destination tables: {!r}\"",
",",
"dst_tables",
")",
"if",
"not",
"allow_missing_src_tables",
":",
"missing_tables",
"=",
"sorted",
"(",
"d",
"for",
"d",
"in",
"dst_tables",
"if",
"d",
"not",
"in",
"src_tables",
"and",
"d",
"not",
"in",
"skip_table_names",
")",
"if",
"missing_tables",
":",
"raise",
"RuntimeError",
"(",
"\"The following tables are missing from the \"",
"\"source database: \"",
"+",
"repr",
"(",
"missing_tables",
")",
")",
"table_num",
"=",
"0",
"overall_record_num",
"=",
"0",
"tables",
"=",
"list",
"(",
"metadata",
".",
"tables",
".",
"values",
"(",
")",
")",
"# type: List[Table]",
"# Very helpfully, MetaData.sorted_tables produces tables in order of",
"# relationship dependency (\"each table is preceded by all tables which",
"# it references\");",
"# http://docs.sqlalchemy.org/en/latest/core/metadata.html",
"# HOWEVER, it only works if you specify ForeignKey relationships",
"# explicitly.",
"# We can also add in user-specified dependencies, and therefore can do the",
"# sorting in one step with sqlalchemy.schema.sort_tables:",
"ordered_tables",
"=",
"sort_tables",
"(",
"tables",
",",
"extra_dependencies",
"=",
"[",
"td",
".",
"sqla_tuple",
"(",
")",
"for",
"td",
"in",
"extra_table_dependencies",
"]",
")",
"# Note that the ordering is NOT NECESSARILY CONSISTENT, though (in that",
"# the order of stuff it doesn't care about varies across runs).",
"all_dependencies",
"=",
"get_all_dependencies",
"(",
"metadata",
",",
"extra_table_dependencies",
")",
"dep_classifications",
"=",
"classify_tables_by_dependency_type",
"(",
"metadata",
",",
"extra_table_dependencies",
")",
"circular",
"=",
"[",
"tdc",
"for",
"tdc",
"in",
"dep_classifications",
"if",
"tdc",
".",
"circular",
"]",
"assert",
"not",
"circular",
",",
"\"Circular dependencies! {!r}\"",
".",
"format",
"(",
"circular",
")",
"log",
".",
"debug",
"(",
"\"All table dependencies: {}\"",
",",
"\"; \"",
".",
"join",
"(",
"str",
"(",
"td",
")",
"for",
"td",
"in",
"all_dependencies",
")",
")",
"log",
".",
"debug",
"(",
"\"Table dependency classifications: {}\"",
",",
"\"; \"",
".",
"join",
"(",
"str",
"(",
"c",
")",
"for",
"c",
"in",
"dep_classifications",
")",
")",
"log",
".",
"info",
"(",
"\"Processing tables in the order: {!r}\"",
",",
"[",
"table",
".",
"name",
"for",
"table",
"in",
"ordered_tables",
"]",
")",
"objmap",
"=",
"{",
"}",
"def",
"flush",
"(",
")",
"->",
"None",
":",
"if",
"not",
"dummy_run",
":",
"log",
".",
"debug",
"(",
"\"Flushing session\"",
")",
"dst_session",
".",
"flush",
"(",
")",
"if",
"commit_with_flush",
":",
"log",
".",
"debug",
"(",
"\"Committing...\"",
")",
"dst_session",
".",
"commit",
"(",
")",
"def",
"translate",
"(",
"oldobj_",
":",
"object",
",",
"newobj_",
":",
"object",
")",
"->",
"object",
":",
"if",
"translate_fn",
"is",
"None",
":",
"return",
"newobj_",
"tc",
"=",
"TranslationContext",
"(",
"oldobj",
"=",
"oldobj_",
",",
"newobj",
"=",
"newobj_",
",",
"objmap",
"=",
"objmap",
",",
"table",
"=",
"table",
",",
"tablename",
"=",
"tablename",
",",
"src_session",
"=",
"src_session",
",",
"dst_session",
"=",
"dst_session",
",",
"src_engine",
"=",
"src_engine",
",",
"dst_engine",
"=",
"dst_engine",
",",
"missing_src_columns",
"=",
"missing_columns",
",",
"src_table_names",
"=",
"src_tables",
",",
"info",
"=",
"trcon_info",
")",
"translate_fn",
"(",
"tc",
")",
"if",
"tc",
".",
"newobj",
"is",
"None",
":",
"log",
".",
"debug",
"(",
"\"Instance skipped by user-supplied translate_fn\"",
")",
"return",
"tc",
".",
"newobj",
"# -------------------------------------------------------------------------",
"# Now, per table/ORM class...",
"# -------------------------------------------------------------------------",
"for",
"table",
"in",
"ordered_tables",
":",
"tablename",
"=",
"table",
".",
"name",
"if",
"tablename",
"in",
"skip_table_names",
":",
"log",
".",
"info",
"(",
"\"... skipping table {!r} (as per skip_tables)\"",
",",
"tablename",
")",
"continue",
"if",
"only_table_names",
"and",
"tablename",
"not",
"in",
"only_table_names",
":",
"log",
".",
"info",
"(",
"\"... ignoring table {!r} (as per only_tables)\"",
",",
"tablename",
")",
"continue",
"if",
"allow_missing_src_tables",
"and",
"tablename",
"not",
"in",
"src_tables",
":",
"log",
".",
"info",
"(",
"\"... ignoring table {!r} (not in source database)\"",
",",
"tablename",
")",
"continue",
"table_num",
"+=",
"1",
"table_record_num",
"=",
"0",
"src_columns",
"=",
"sorted",
"(",
"get_column_names",
"(",
"src_engine",
",",
"tablename",
")",
")",
"dst_columns",
"=",
"sorted",
"(",
"[",
"column",
".",
"name",
"for",
"column",
"in",
"table",
".",
"columns",
"]",
")",
"missing_columns",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"dst_columns",
")",
"-",
"set",
"(",
"src_columns",
")",
")",
")",
"if",
"not",
"allow_missing_src_columns",
":",
"if",
"missing_columns",
":",
"raise",
"RuntimeError",
"(",
"\"The following columns are missing from source table \"",
"\"{!r}: {!r}\"",
".",
"format",
"(",
"tablename",
",",
"missing_columns",
")",
")",
"orm_class",
"=",
"tablename_to_ormclass",
"[",
"tablename",
"]",
"pk_attrs",
"=",
"get_pk_attrnames",
"(",
"orm_class",
")",
"c2a",
"=",
"colname_to_attrname_dict",
"(",
"orm_class",
")",
"missing_attrs",
"=",
"map_keys_to_values",
"(",
"missing_columns",
",",
"c2a",
")",
"tdc",
"=",
"[",
"tdc",
"for",
"tdc",
"in",
"dep_classifications",
"if",
"tdc",
".",
"table",
"==",
"table",
"]",
"[",
"0",
"]",
"log",
".",
"info",
"(",
"\"Processing table {!r} via ORM class {!r}\"",
",",
"tablename",
",",
"orm_class",
")",
"log",
".",
"debug",
"(",
"\"PK attributes: {!r}\"",
",",
"pk_attrs",
")",
"log",
".",
"debug",
"(",
"\"Table: {!r}\"",
",",
"table",
")",
"log",
".",
"debug",
"(",
"\"Dependencies: parents = {!r}; children = {!r}\"",
",",
"tdc",
".",
"parent_names",
",",
"tdc",
".",
"child_names",
")",
"if",
"info_only",
":",
"log",
".",
"debug",
"(",
"\"info_only; skipping table contents\"",
")",
"continue",
"def",
"wipe_primary_key",
"(",
"inst",
":",
"object",
")",
"->",
"None",
":",
"for",
"attrname",
"in",
"pk_attrs",
":",
"setattr",
"(",
"inst",
",",
"attrname",
",",
"None",
")",
"query",
"=",
"src_session",
".",
"query",
"(",
"orm_class",
")",
"if",
"allow_missing_src_columns",
"and",
"missing_columns",
":",
"src_attrs",
"=",
"map_keys_to_values",
"(",
"src_columns",
",",
"c2a",
")",
"log",
".",
"info",
"(",
"\"Table {} is missing columns {} in the source\"",
",",
"tablename",
",",
"missing_columns",
")",
"log",
".",
"debug",
"(",
"\"... using only columns {} via attributes {}\"",
",",
"src_columns",
",",
"src_attrs",
")",
"query",
"=",
"query",
".",
"options",
"(",
"load_only",
"(",
"*",
"src_attrs",
")",
")",
"# PROBLEM: it will not ignore the PK.",
"if",
"prevent_eager_load",
":",
"query",
"=",
"query",
".",
"options",
"(",
"lazyload",
"(",
"\"*\"",
")",
")",
"wipe_pk",
"=",
"tablename",
"not",
"in",
"tables_to_keep_pks_for",
"# How best to deal with relationships?",
"#",
"# This doesn't work:",
"# - process tables in order of dependencies, eager-loading",
"# relationships with",
"# for relationship in insp.mapper.relationships: # type: RelationshipProperty # noqa",
"# related_col = getattr(orm_class, relationship.key)",
"# query = query.options(joinedload(related_col))",
"# - expunge from old session / make_transient / wipe_primary_key/ add",
"# to new session",
"# ... get errors like",
"# sqlalchemy.exc.InvalidRequestError: Object '<Parent at",
"# 0x7f99492440b8>' is already attached to session '7' (this is",
"# '6')",
"#",
"# ... at the point of dst_session.add(instance)",
"# ... when adding the object on the child side of the relationship",
"# ... I suspect that we move the Parent from session S to session D,",
"# but when we eager-load the Parent from the Child, that makes",
"# another in session S, so when we add the Child to session D, its",
"# parent is in session S, which is wrong.",
"#",
"# We must, therefore, take a more interventional approach, in which we",
"# maintain a copy of the old object, make a copy using",
"# copy_sqla_object, and re-assign relationships accordingly.",
"for",
"instance",
"in",
"query",
".",
"all",
"(",
")",
":",
"# log.debug(\"Source instance: {!r}\", instance)",
"table_record_num",
"+=",
"1",
"overall_record_num",
"+=",
"1",
"if",
"table_record_num",
"%",
"report_every",
"==",
"0",
":",
"log",
".",
"info",
"(",
"\"... progress{}: on table {} ({}); record {} this \"",
"\"table; overall record {}\"",
",",
"\" (DUMMY RUN)\"",
"if",
"dummy_run",
"else",
"\"\"",
",",
"table_num",
",",
"tablename",
",",
"table_record_num",
",",
"overall_record_num",
")",
"if",
"tdc",
".",
"standalone",
":",
"# Our table has neither parents nor children. We can therefore",
"# simply move the instance from one session to the other,",
"# blanking primary keys.",
"# https://stackoverflow.com/questions/14636192/sqlalchemy-modification-of-detached-object # noqa",
"src_session",
".",
"expunge",
"(",
"instance",
")",
"make_transient",
"(",
"instance",
")",
"if",
"wipe_pk",
":",
"wipe_primary_key",
"(",
"instance",
")",
"instance",
"=",
"translate",
"(",
"instance",
",",
"instance",
")",
"if",
"not",
"instance",
":",
"continue",
"# translate_fn elected to skip it",
"if",
"not",
"dummy_run",
":",
"dst_session",
".",
"add",
"(",
"instance",
")",
"# new PK will be created when session is flushed",
"else",
":",
"# Our table has either parents or children. We therefore make",
"# a copy and place the COPY in the destination session. If",
"# this object may be a parent, we maintain a log (in objmap)",
"# of the old-to-new mapping. If this object is a child, we",
"# re-assign its relationships based on the old-to-new mapping",
"# (since we will have processed the parent table first, having",
"# carefully ordered them in advance).",
"oldobj",
"=",
"instance",
"# rename for clarity",
"newobj",
"=",
"copy_sqla_object",
"(",
"oldobj",
",",
"omit_pk",
"=",
"wipe_pk",
",",
"omit_fk",
"=",
"True",
",",
"omit_attrs",
"=",
"missing_attrs",
",",
"debug",
"=",
"False",
")",
"rewrite_relationships",
"(",
"oldobj",
",",
"newobj",
",",
"objmap",
",",
"debug",
"=",
"False",
",",
"skip_table_names",
"=",
"skip_table_names",
")",
"newobj",
"=",
"translate",
"(",
"oldobj",
",",
"newobj",
")",
"if",
"not",
"newobj",
":",
"continue",
"# translate_fn elected to skip it",
"if",
"not",
"dummy_run",
":",
"dst_session",
".",
"add",
"(",
"newobj",
")",
"# new PK will be created when session is flushed",
"if",
"tdc",
".",
"is_parent",
":",
"objmap",
"[",
"oldobj",
"]",
"=",
"newobj",
"# for its children's benefit",
"if",
"flush_per_record",
":",
"flush",
"(",
")",
"if",
"flush_per_table",
":",
"flush",
"(",
")",
"flush",
"(",
")",
"if",
"commit_at_end",
":",
"log",
".",
"debug",
"(",
"\"Committing...\"",
")",
"dst_session",
".",
"commit",
"(",
")",
"log",
".",
"info",
"(",
"\"merge_db(): finished\"",
")"
] | Copies an entire database as far as it is described by ``metadata`` and
``base_class``, from SQLAlchemy ORM session ``src_session`` to
``dst_session``, and in the process:
- creates new primary keys at the destination, or raises an error if it
doesn't know how (typically something like: ``Field 'name' doesn't have a
default value``)
- maintains relationships, or raises an error if it doesn't know how
Basic method:
- Examines the metadata for the SQLAlchemy ORM base class you provide.
- Assumes that the tables exist (in the destination).
- For each table/ORM class found in the metadata:
- Queries (via the ORM) from the source.
- For each ORM instance retrieved:
- Writes information to the destination SQLAlchemy session.
- If that ORM object has relationships, process them too.
If a table is missing in the source, then that's OK if and only if
``allow_missing_src_tables`` is set. (Similarly with columns and
``allow_missing_src_columns``; we ask the ORM to perform a partial load,
of a subset of attributes only.)
Args:
base_class:
your ORM base class, e.g. from ``Base = declarative_base()``
src_engine:
SQLAlchemy :class:`Engine` for the source database
dst_session:
SQLAlchemy :class:`Session` for the destination database
allow_missing_src_tables:
proceed if tables are missing from the source (allowing you to
import from older, incomplete databases)
allow_missing_src_columns:
proceed if columns are missing from the source (allowing you to
import from older, incomplete databases)
translate_fn:
optional function called with each instance, so you can modify
instances in the pipeline. Signature:
.. code-block:: python
def my_translate_fn(trcon: TranslationContext) -> None:
# We can modify trcon.newobj, or replace it (including
# setting trcon.newobj = None to omit this object).
pass
skip_tables:
tables to skip (specified as a list of :class:`TableIdentity`)
only_tables:
tables to restrict the processor to (specified as a list of
:class:`TableIdentity`)
tables_to_keep_pks_for:
tables for which PKs are guaranteed to be safe to insert into the
destination database, without modification (specified as a list of
:class:`TableIdentity`)
extra_table_dependencies:
optional list of :class:`TableDependency` objects (q.v.)
dummy_run:
don't alter the destination database
info_only:
show info, then stop
report_every:
provide a progress report every *n* records
flush_per_table:
flush the session after every table (reasonable)
flush_per_record:
flush the session after every instance (AVOID this if tables may
refer to themselves)
commit_with_flush:
``COMMIT`` with each flush?
commit_at_end:
``COMMIT`` when finished?
prevent_eager_load:
disable any eager loading (use lazy loading instead)
trcon_info:
additional dictionary passed to ``TranslationContext.info``
(see :class:`.TranslationContext`) | [
"Copies",
"an",
"entire",
"database",
"as",
"far",
"as",
"it",
"is",
"described",
"by",
"metadata",
"and",
"base_class",
"from",
"SQLAlchemy",
"ORM",
"session",
"src_session",
"to",
"dst_session",
"and",
"in",
"the",
"process",
":"
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/merge_db.py#L541-L945 |
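The docstring above describes the call contract but the excerpt stops short of a usage example. The following is a minimal, hedged sketch of driving ``merge_db``: the import path is inferred from the source URL, ``Base`` is a placeholder declarative base, the connection URLs are invented, and ``src_session`` follows the summary text rather than the (incomplete) Args list.

.. code-block:: python

    # Hedged sketch, not the library's documented example.
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    from cardinal_pythonlib.sqlalchemy.merge_db import merge_db  # path per source URL

    src_engine = create_engine("sqlite:///source.db")        # invented URL
    dst_engine = create_engine("sqlite:///destination.db")   # invented URL
    src_session = sessionmaker(bind=src_engine)()
    dst_session = sessionmaker(bind=dst_engine)()

    merge_db(
        base_class=Base,                  # your declarative base (placeholder)
        src_engine=src_engine,
        src_session=src_session,          # per the summary text; an assumption
        dst_session=dst_session,
        allow_missing_src_tables=True,
        allow_missing_src_columns=True,
        report_every=1000,
        commit_at_end=True,
    )

New primary keys are generated at the destination when the session is flushed, so the source database is never written to.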
RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/merge_db.py | TableDependency.set_metadata | def set_metadata(self, metadata: MetaData) -> None:
"""
Sets the metadata for the parent and child tables.
"""
self._parent.set_metadata(metadata)
self._child.set_metadata(metadata) | python | def set_metadata(self, metadata: MetaData) -> None:
"""
Sets the metadata for the parent and child tables.
"""
self._parent.set_metadata(metadata)
self._child.set_metadata(metadata) | [
"def",
"set_metadata",
"(",
"self",
",",
"metadata",
":",
"MetaData",
")",
"->",
"None",
":",
"self",
".",
"_parent",
".",
"set_metadata",
"(",
"metadata",
")",
"self",
".",
"_child",
".",
"set_metadata",
"(",
"metadata",
")"
] | Sets the metadata for the parent and child tables. | [
"Sets",
"the",
"metadata",
"for",
"the",
"parent",
"and",
"child",
"tables",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/merge_db.py#L156-L161 |
RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/merge_db.py | TableDependency.set_metadata_if_none | def set_metadata_if_none(self, metadata: MetaData) -> None:
"""
Sets the metadata for the parent and child tables, unless they were
set already.
"""
self._parent.set_metadata_if_none(metadata)
self._child.set_metadata_if_none(metadata) | python | def set_metadata_if_none(self, metadata: MetaData) -> None:
"""
Sets the metadata for the parent and child tables, unless they were
set already.
"""
self._parent.set_metadata_if_none(metadata)
self._child.set_metadata_if_none(metadata) | [
"def",
"set_metadata_if_none",
"(",
"self",
",",
"metadata",
":",
"MetaData",
")",
"->",
"None",
":",
"self",
".",
"_parent",
".",
"set_metadata_if_none",
"(",
"metadata",
")",
"self",
".",
"_child",
".",
"set_metadata_if_none",
"(",
"metadata",
")"
] | Sets the metadata for the parent and child tables, unless they were
set already. | [
"Sets",
"the",
"metadata",
"for",
"the",
"parent",
"and",
"child",
"tables",
"unless",
"they",
"were",
"set",
"already",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/merge_db.py#L163-L169 |
RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/merge_db.py | TableDependencyClassification.set_circular | def set_circular(self, circular: bool, chain: List[Table] = None) -> None:
"""
Mark this table as circular (or not).
Args:
circular: is it circular?
chain: if it's circular, this should be the list of tables
participating in the circular chain
"""
self.circular = circular
self.circular_chain = chain or [] | python | def set_circular(self, circular: bool, chain: List[Table] = None) -> None:
"""
Mark this table as circular (or not).
Args:
circular: is it circular?
chain: if it's circular, this should be the list of tables
participating in the circular chain
"""
self.circular = circular
self.circular_chain = chain or [] | [
"def",
"set_circular",
"(",
"self",
",",
"circular",
":",
"bool",
",",
"chain",
":",
"List",
"[",
"Table",
"]",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"circular",
"=",
"circular",
"self",
".",
"circular_chain",
"=",
"chain",
"or",
"[",
"]"
] | Mark this table as circular (or not).
Args:
circular: is it circular?
chain: if it's circular, this should be the list of tables
participating in the circular chain | [
"Mark",
"this",
"table",
"as",
"circular",
"(",
"or",
"not",
")",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/merge_db.py#L325-L335 |
RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/merge_db.py | TableDependencyClassification.description | def description(self) -> str:
"""
Short description.
"""
if self.is_parent and self.is_child:
desc = "parent+child"
elif self.is_parent:
desc = "parent"
elif self.is_child:
desc = "child"
else:
desc = "standalone"
if self.circular:
desc += "+CIRCULAR({})".format(self.circular_description)
return desc | python | def description(self) -> str:
"""
Short description.
"""
if self.is_parent and self.is_child:
desc = "parent+child"
elif self.is_parent:
desc = "parent"
elif self.is_child:
desc = "child"
else:
desc = "standalone"
if self.circular:
desc += "+CIRCULAR({})".format(self.circular_description)
return desc | [
"def",
"description",
"(",
"self",
")",
"->",
"str",
":",
"if",
"self",
".",
"is_parent",
"and",
"self",
".",
"is_child",
":",
"desc",
"=",
"\"parent+child\"",
"elif",
"self",
".",
"is_parent",
":",
"desc",
"=",
"\"parent\"",
"elif",
"self",
".",
"is_child",
":",
"desc",
"=",
"\"child\"",
"else",
":",
"desc",
"=",
"\"standalone\"",
"if",
"self",
".",
"circular",
":",
"desc",
"+=",
"\"+CIRCULAR({})\"",
".",
"format",
"(",
"self",
".",
"circular_description",
")",
"return",
"desc"
] | Short description. | [
"Short",
"description",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/merge_db.py#L345-L359 |
RudolfCardinal/pythonlib | cardinal_pythonlib/excel.py | excel_to_bytes | def excel_to_bytes(wb: Workbook) -> bytes:
"""
Obtain a binary version of an :class:`openpyxl.Workbook` representation of
an Excel file.
"""
memfile = io.BytesIO()
wb.save(memfile)
return memfile.getvalue() | python | def excel_to_bytes(wb: Workbook) -> bytes:
"""
Obtain a binary version of an :class:`openpyxl.Workbook` representation of
an Excel file.
"""
memfile = io.BytesIO()
wb.save(memfile)
return memfile.getvalue() | [
"def",
"excel_to_bytes",
"(",
"wb",
":",
"Workbook",
")",
"->",
"bytes",
":",
"memfile",
"=",
"io",
".",
"BytesIO",
"(",
")",
"wb",
".",
"save",
"(",
"memfile",
")",
"return",
"memfile",
".",
"getvalue",
"(",
")"
] | Obtain a binary version of an :class:`openpyxl.Workbook` representation of
an Excel file. | [
"Obtain",
"a",
"binary",
"version",
"of",
"an",
":",
"class",
":",
"openpyxl",
".",
"Workbook",
"representation",
"of",
"an",
"Excel",
"file",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/excel.py#L35-L42 |
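A quick usage sketch for ``excel_to_bytes`` (the filename is illustrative; ``openpyxl`` itself is the real dependency used by this helper):

.. code-block:: python

    from openpyxl import Workbook

    wb = Workbook()
    wb.active["A1"] = "hello"
    blob = excel_to_bytes(wb)   # bytes, ready for an HTTP response or a file
    with open("example.xlsx", "wb") as f:   # illustrative filename
        f.write(blob)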
davenquinn/Attitude | attitude/coordinates/__init__.py | spherical | def spherical(coordinates):
"""No error is propagated"""
c = coordinates
r = N.linalg.norm(c,axis=0)
theta = N.arccos(c[2]/r)
phi = N.arctan2(c[1],c[0])
return N.column_stack((r,theta,phi)) | python | def spherical(coordinates):
"""No error is propagated"""
c = coordinates
r = N.linalg.norm(c,axis=0)
theta = N.arccos(c[2]/r)
phi = N.arctan2(c[1],c[0])
return N.column_stack((r,theta,phi)) | [
"def",
"spherical",
"(",
"coordinates",
")",
":",
"c",
"=",
"coordinates",
"r",
"=",
"N",
".",
"linalg",
".",
"norm",
"(",
"c",
",",
"axis",
"=",
"0",
")",
"theta",
"=",
"N",
".",
"arccos",
"(",
"c",
"[",
"2",
"]",
"/",
"r",
")",
"phi",
"=",
"N",
".",
"arctan2",
"(",
"c",
"[",
"1",
"]",
",",
"c",
"[",
"0",
"]",
")",
"return",
"N",
".",
"column_stack",
"(",
"(",
"r",
",",
"theta",
",",
"phi",
")",
")"
] | No error is propagated | [
"No",
"error",
"is",
"propagated"
] | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/coordinates/__init__.py#L3-L9 |
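Judging from the ``axis=0`` norm and the ``c[2]``/``c[1]``/``c[0]`` indexing, ``spherical`` expects coordinates stacked as three rows (x, y, z), i.e. shape ``(3, n)``; that reading is an assumption worth checking against the callers. A small sketch under it:

.. code-block:: python

    import numpy as N

    xyz = N.array([[1.0, 0.0],    # x
                   [0.0, 1.0],    # y
                   [0.0, 0.0]])   # z
    rtp = spherical(xyz)          # one row (r, theta, phi) per point
    # Point (1, 0, 0): r = 1, theta = pi/2 (colatitude), phi = 0
    # Point (0, 1, 0): r = 1, theta = pi/2, phi = pi/2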
davenquinn/Attitude | attitude/coordinates/__init__.py | centered | def centered(coordinates):
"""
Centers coordinate distribution with respect to its
mean on all three axes. This is used as the input to
the regression model, so it can be converted easily
into radial coordinates.
"""
coordinates = N.array(coordinates)
means = N.mean(coordinates,axis=0)
return coordinates - means | python | def centered(coordinates):
"""
Centers coordinate distribution with respect to its
mean on all three axes. This is used as the input to
the regression model, so it can be converted easily
into radial coordinates.
"""
coordinates = N.array(coordinates)
means = N.mean(coordinates,axis=0)
return coordinates - means | [
"def",
"centered",
"(",
"coordinates",
")",
":",
"coordinates",
"=",
"N",
".",
"array",
"(",
"coordinates",
")",
"means",
"=",
"N",
".",
"mean",
"(",
"coordinates",
",",
"axis",
"=",
"0",
")",
"return",
"coordinates",
"-",
"means"
] | Centers coordinate distribution with respect to its
mean on all three axes. This is used as the input to
the regression model, so it can be converted easily
into radial coordinates. | [
"Centers",
"coordinate",
"distribution",
"with",
"respect",
"to",
"its",
"mean",
"on",
"all",
"three",
"axes",
".",
"This",
"is",
"used",
"as",
"the",
"input",
"to",
"the",
"regression",
"model",
"so",
"it",
"can",
"be",
"converted",
"easily",
"into",
"radial",
"coordinates",
"."
] | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/coordinates/__init__.py#L19-L28 |
RudolfCardinal/pythonlib | cardinal_pythonlib/django/serve.py | add_http_headers_for_attachment | def add_http_headers_for_attachment(response: HttpResponse,
offered_filename: str = None,
content_type: str = None,
as_attachment: bool = False,
as_inline: bool = False,
content_length: int = None) -> None:
"""
Add HTTP headers to a Django response class object.
Args:
response: ``HttpResponse`` instance
offered_filename: filename that the client browser will suggest
content_type: HTTP content type
as_attachment: if True, browsers will generally save to disk.
If False, they may display it inline.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html
as_inline: attempt to force inline (only if not as_attachment)
content_length: HTTP content length
"""
if offered_filename is None:
offered_filename = ''
if content_type is None:
content_type = 'application/force-download'
response['Content-Type'] = content_type
if as_attachment:
prefix = 'attachment; '
elif as_inline:
prefix = 'inline; '
else:
prefix = ''
fname = 'filename=%s' % smart_str(offered_filename)
response['Content-Disposition'] = prefix + fname
if content_length is not None:
response['Content-Length'] = content_length | python | def add_http_headers_for_attachment(response: HttpResponse,
offered_filename: str = None,
content_type: str = None,
as_attachment: bool = False,
as_inline: bool = False,
content_length: int = None) -> None:
"""
Add HTTP headers to a Django response class object.
Args:
response: ``HttpResponse`` instance
offered_filename: filename that the client browser will suggest
content_type: HTTP content type
as_attachment: if True, browsers will generally save to disk.
If False, they may display it inline.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html
as_inline: attempt to force inline (only if not as_attachment)
content_length: HTTP content length
"""
if offered_filename is None:
offered_filename = ''
if content_type is None:
content_type = 'application/force-download'
response['Content-Type'] = content_type
if as_attachment:
prefix = 'attachment; '
elif as_inline:
prefix = 'inline; '
else:
prefix = ''
fname = 'filename=%s' % smart_str(offered_filename)
response['Content-Disposition'] = prefix + fname
if content_length is not None:
response['Content-Length'] = content_length | [
"def",
"add_http_headers_for_attachment",
"(",
"response",
":",
"HttpResponse",
",",
"offered_filename",
":",
"str",
"=",
"None",
",",
"content_type",
":",
"str",
"=",
"None",
",",
"as_attachment",
":",
"bool",
"=",
"False",
",",
"as_inline",
":",
"bool",
"=",
"False",
",",
"content_length",
":",
"int",
"=",
"None",
")",
"->",
"None",
":",
"if",
"offered_filename",
"is",
"None",
":",
"offered_filename",
"=",
"''",
"if",
"content_type",
"is",
"None",
":",
"content_type",
"=",
"'application/force-download'",
"response",
"[",
"'Content-Type'",
"]",
"=",
"content_type",
"if",
"as_attachment",
":",
"prefix",
"=",
"'attachment; '",
"elif",
"as_inline",
":",
"prefix",
"=",
"'inline; '",
"else",
":",
"prefix",
"=",
"''",
"fname",
"=",
"'filename=%s'",
"%",
"smart_str",
"(",
"offered_filename",
")",
"response",
"[",
"'Content-Disposition'",
"]",
"=",
"prefix",
"+",
"fname",
"if",
"content_length",
"is",
"not",
"None",
":",
"response",
"[",
"'Content-Length'",
"]",
"=",
"content_length"
] | Add HTTP headers to a Django response class object.
Args:
response: ``HttpResponse`` instance
offered_filename: filename that the client browser will suggest
content_type: HTTP content type
as_attachment: if True, browsers will generally save to disk.
If False, they may display it inline.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html
as_inline: attempt to force inline (only if not as_attachment)
content_length: HTTP content length | [
"Add",
"HTTP",
"headers",
"to",
"a",
"Django",
"response",
"class",
"object",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/serve.py#L58-L93 |
RudolfCardinal/pythonlib | cardinal_pythonlib/django/serve.py | serve_file | def serve_file(path_to_file: str,
offered_filename: str = None,
content_type: str = None,
as_attachment: bool = False,
as_inline: bool = False) -> HttpResponseBase:
"""
Serve up a file from disk.
Two methods (chosen by ``settings.XSENDFILE``):
(a) serve directly
(b) serve by asking the web server to do so via the X-SendFile directive.
"""
# http://stackoverflow.com/questions/1156246/having-django-serve-downloadable-files # noqa
# https://docs.djangoproject.com/en/dev/ref/request-response/#telling-the-browser-to-treat-the-response-as-a-file-attachment # noqa
# https://djangosnippets.org/snippets/365/
if offered_filename is None:
offered_filename = os.path.basename(path_to_file) or ''
if settings.XSENDFILE:
response = HttpResponse()
response['X-Sendfile'] = smart_str(path_to_file)
content_length = os.path.getsize(path_to_file)
else:
response = FileResponse(open(path_to_file, mode='rb'))
content_length = None
add_http_headers_for_attachment(response,
offered_filename=offered_filename,
content_type=content_type,
as_attachment=as_attachment,
as_inline=as_inline,
content_length=content_length)
return response | python | def serve_file(path_to_file: str,
offered_filename: str = None,
content_type: str = None,
as_attachment: bool = False,
as_inline: bool = False) -> HttpResponseBase:
"""
Serve up a file from disk.
Two methods (chosen by ``settings.XSENDFILE``):
(a) serve directly
(b) serve by asking the web server to do so via the X-SendFile directive.
"""
# http://stackoverflow.com/questions/1156246/having-django-serve-downloadable-files # noqa
# https://docs.djangoproject.com/en/dev/ref/request-response/#telling-the-browser-to-treat-the-response-as-a-file-attachment # noqa
# https://djangosnippets.org/snippets/365/
if offered_filename is None:
offered_filename = os.path.basename(path_to_file) or ''
if settings.XSENDFILE:
response = HttpResponse()
response['X-Sendfile'] = smart_str(path_to_file)
content_length = os.path.getsize(path_to_file)
else:
response = FileResponse(open(path_to_file, mode='rb'))
content_length = None
add_http_headers_for_attachment(response,
offered_filename=offered_filename,
content_type=content_type,
as_attachment=as_attachment,
as_inline=as_inline,
content_length=content_length)
return response | [
"def",
"serve_file",
"(",
"path_to_file",
":",
"str",
",",
"offered_filename",
":",
"str",
"=",
"None",
",",
"content_type",
":",
"str",
"=",
"None",
",",
"as_attachment",
":",
"bool",
"=",
"False",
",",
"as_inline",
":",
"bool",
"=",
"False",
")",
"->",
"HttpResponseBase",
":",
"# http://stackoverflow.com/questions/1156246/having-django-serve-downloadable-files # noqa",
"# https://docs.djangoproject.com/en/dev/ref/request-response/#telling-the-browser-to-treat-the-response-as-a-file-attachment # noqa",
"# https://djangosnippets.org/snippets/365/",
"if",
"offered_filename",
"is",
"None",
":",
"offered_filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path_to_file",
")",
"or",
"''",
"if",
"settings",
".",
"XSENDFILE",
":",
"response",
"=",
"HttpResponse",
"(",
")",
"response",
"[",
"'X-Sendfile'",
"]",
"=",
"smart_str",
"(",
"path_to_file",
")",
"content_length",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"path_to_file",
")",
"else",
":",
"response",
"=",
"FileResponse",
"(",
"open",
"(",
"path_to_file",
",",
"mode",
"=",
"'rb'",
")",
")",
"content_length",
"=",
"None",
"add_http_headers_for_attachment",
"(",
"response",
",",
"offered_filename",
"=",
"offered_filename",
",",
"content_type",
"=",
"content_type",
",",
"as_attachment",
"=",
"as_attachment",
",",
"as_inline",
"=",
"as_inline",
",",
"content_length",
"=",
"content_length",
")",
"return",
"response"
] | Serve up a file from disk.
Two methods (chosen by ``settings.XSENDFILE``):
(a) serve directly
(b) serve by asking the web server to do so via the X-SendFile directive. | [
"Serve",
"up",
"a",
"file",
"from",
"disk",
".",
"Two",
"methods",
"(",
"chosen",
"by",
"settings",
".",
"XSENDFILE",
")",
":",
"(",
"a",
")",
"serve",
"directly",
"(",
"b",
")",
"serve",
"by",
"asking",
"the",
"web",
"server",
"to",
"do",
"so",
"via",
"the",
"X",
"-",
"SendFile",
"directive",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/serve.py#L96-L125 |
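A hedged sketch of ``serve_file`` inside a Django view (the path and filename are placeholders; ``settings.XSENDFILE`` must be defined in your Django settings for the branch above to be selectable):

.. code-block:: python

    from django.http import HttpRequest, HttpResponseBase

    def download_report(request: HttpRequest) -> HttpResponseBase:
        # Placeholder path; real code would derive this from the request.
        return serve_file(
            "/srv/reports/summary.pdf",
            offered_filename="summary.pdf",
            content_type="application/pdf",
            as_attachment=True,
        )

The X-Sendfile branch hands the actual file I/O to the front-end web server (e.g. Apache with mod_xsendfile), which is usually faster than streaming the bytes through Django.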
RudolfCardinal/pythonlib | cardinal_pythonlib/django/serve.py | serve_buffer | def serve_buffer(data: bytes,
offered_filename: str = None,
content_type: str = None,
as_attachment: bool = True,
as_inline: bool = False) -> HttpResponse:
"""
Serve up binary data from a buffer.
Options as for ``serve_file()``.
"""
response = HttpResponse(data)
add_http_headers_for_attachment(response,
offered_filename=offered_filename,
content_type=content_type,
as_attachment=as_attachment,
as_inline=as_inline,
content_length=len(data))
return response | python | def serve_buffer(data: bytes,
offered_filename: str = None,
content_type: str = None,
as_attachment: bool = True,
as_inline: bool = False) -> HttpResponse:
"""
Serve up binary data from a buffer.
Options as for ``serve_file()``.
"""
response = HttpResponse(data)
add_http_headers_for_attachment(response,
offered_filename=offered_filename,
content_type=content_type,
as_attachment=as_attachment,
as_inline=as_inline,
content_length=len(data))
return response | [
"def",
"serve_buffer",
"(",
"data",
":",
"bytes",
",",
"offered_filename",
":",
"str",
"=",
"None",
",",
"content_type",
":",
"str",
"=",
"None",
",",
"as_attachment",
":",
"bool",
"=",
"True",
",",
"as_inline",
":",
"bool",
"=",
"False",
")",
"->",
"HttpResponse",
":",
"response",
"=",
"HttpResponse",
"(",
"data",
")",
"add_http_headers_for_attachment",
"(",
"response",
",",
"offered_filename",
"=",
"offered_filename",
",",
"content_type",
"=",
"content_type",
",",
"as_attachment",
"=",
"as_attachment",
",",
"as_inline",
"=",
"as_inline",
",",
"content_length",
"=",
"len",
"(",
"data",
")",
")",
"return",
"response"
] | Serve up binary data from a buffer.
Options as for ``serve_file()``. | [
"Serve",
"up",
"binary",
"data",
"from",
"a",
"buffer",
".",
"Options",
"as",
"for",
"serve_file",
"()",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/serve.py#L132-L148 |
RudolfCardinal/pythonlib | cardinal_pythonlib/django/serve.py | add_download_filename | def add_download_filename(response: HttpResponse, filename: str) -> None:
"""
Adds a ``Content-Disposition`` header to the HTTP response to say that
there is an attachment with the specified filename.
"""
# https://docs.djangoproject.com/en/1.9/howto/outputting-csv/
add_http_headers_for_attachment(response)
response['Content-Disposition'] = 'attachment; filename="{}"'.format(
filename) | python | def add_download_filename(response: HttpResponse, filename: str) -> None:
"""
Adds a ``Content-Disposition`` header to the HTTP response to say that
there is an attachment with the specified filename.
"""
# https://docs.djangoproject.com/en/1.9/howto/outputting-csv/
add_http_headers_for_attachment(response)
response['Content-Disposition'] = 'attachment; filename="{}"'.format(
filename) | [
"def",
"add_download_filename",
"(",
"response",
":",
"HttpResponse",
",",
"filename",
":",
"str",
")",
"->",
"None",
":",
"# https://docs.djangoproject.com/en/1.9/howto/outputting-csv/",
"add_http_headers_for_attachment",
"(",
"response",
")",
"response",
"[",
"'Content-Disposition'",
"]",
"=",
"'attachment; filename=\"{}\"'",
".",
"format",
"(",
"filename",
")"
] | Adds a ``Content-Disposition`` header to the HTTP response to say that
there is an attachment with the specified filename. | [
"Adds",
"a",
"Content",
"-",
"Disposition",
"header",
"to",
"the",
"HTTP",
"response",
"to",
"say",
"that",
"there",
"is",
"an",
"attachment",
"with",
"the",
"specified",
"filename",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/serve.py#L155-L163 |
RudolfCardinal/pythonlib | cardinal_pythonlib/django/serve.py | file_response | def file_response(data: Union[bytes, str], # HttpResponse encodes str if req'd
content_type: str,
filename: str) -> HttpResponse:
"""
Returns an ``HttpResponse`` with an attachment containing the specified
data with the specified filename as an attachment.
"""
response = HttpResponse(data, content_type=content_type)
add_download_filename(response, filename)
return response | python | def file_response(data: Union[bytes, str], # HttpResponse encodes str if req'd
content_type: str,
filename: str) -> HttpResponse:
"""
Returns an ``HttpResponse`` with an attachment containing the specified
data with the specified filename as an attachment.
"""
response = HttpResponse(data, content_type=content_type)
add_download_filename(response, filename)
return response | [
"def",
"file_response",
"(",
"data",
":",
"Union",
"[",
"bytes",
",",
"str",
"]",
",",
"# HttpResponse encodes str if req'd",
"content_type",
":",
"str",
",",
"filename",
":",
"str",
")",
"->",
"HttpResponse",
":",
"response",
"=",
"HttpResponse",
"(",
"data",
",",
"content_type",
"=",
"content_type",
")",
"add_download_filename",
"(",
"response",
",",
"filename",
")",
"return",
"response"
] | Returns an ``HttpResponse`` with an attachment containing the specified
data with the specified filename as an attachment. | [
"Returns",
"an",
"HttpResponse",
"with",
"an",
"attachment",
"containing",
"the",
"specified",
"data",
"with",
"the",
"specified",
"filename",
"as",
"an",
"attachment",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/serve.py#L166-L175 |
RudolfCardinal/pythonlib | cardinal_pythonlib/django/serve.py | serve_concatenated_pdf_from_disk | def serve_concatenated_pdf_from_disk(
filenames: Iterable[str],
offered_filename: str = "crate_download.pdf",
**kwargs) -> HttpResponse:
"""
Concatenates PDFs from disk and serves them.
"""
pdf = get_concatenated_pdf_from_disk(filenames, **kwargs)
return serve_buffer(pdf,
offered_filename=offered_filename,
content_type=MimeType.PDF,
as_attachment=False,
as_inline=True) | python | def serve_concatenated_pdf_from_disk(
filenames: Iterable[str],
offered_filename: str = "crate_download.pdf",
**kwargs) -> HttpResponse:
"""
Concatenates PDFs from disk and serves them.
"""
pdf = get_concatenated_pdf_from_disk(filenames, **kwargs)
return serve_buffer(pdf,
offered_filename=offered_filename,
content_type=MimeType.PDF,
as_attachment=False,
as_inline=True) | [
"def",
"serve_concatenated_pdf_from_disk",
"(",
"filenames",
":",
"Iterable",
"[",
"str",
"]",
",",
"offered_filename",
":",
"str",
"=",
"\"crate_download.pdf\"",
",",
"*",
"*",
"kwargs",
")",
"->",
"HttpResponse",
":",
"pdf",
"=",
"get_concatenated_pdf_from_disk",
"(",
"filenames",
",",
"*",
"*",
"kwargs",
")",
"return",
"serve_buffer",
"(",
"pdf",
",",
"offered_filename",
"=",
"offered_filename",
",",
"content_type",
"=",
"MimeType",
".",
"PDF",
",",
"as_attachment",
"=",
"False",
",",
"as_inline",
"=",
"True",
")"
] | Concatenates PDFs from disk and serves them. | [
"Concatenates",
"PDFs",
"from",
"disk",
"and",
"serves",
"them",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/serve.py#L182-L194 |
RudolfCardinal/pythonlib | cardinal_pythonlib/django/serve.py | serve_pdf_from_html | def serve_pdf_from_html(html: str,
offered_filename: str = "test.pdf",
**kwargs) -> HttpResponse:
"""
Same args as ``pdf_from_html()``.
WATCH OUT: may not apply e.g. wkhtmltopdf options as you'd wish.
"""
pdf = get_pdf_from_html(html, **kwargs)
return serve_buffer(pdf,
offered_filename=offered_filename,
content_type=MimeType.PDF,
as_attachment=False,
as_inline=True) | python | def serve_pdf_from_html(html: str,
offered_filename: str = "test.pdf",
**kwargs) -> HttpResponse:
"""
Same args as ``pdf_from_html()``.
WATCH OUT: may not apply e.g. wkhtmltopdf options as you'd wish.
"""
pdf = get_pdf_from_html(html, **kwargs)
return serve_buffer(pdf,
offered_filename=offered_filename,
content_type=MimeType.PDF,
as_attachment=False,
as_inline=True) | [
"def",
"serve_pdf_from_html",
"(",
"html",
":",
"str",
",",
"offered_filename",
":",
"str",
"=",
"\"test.pdf\"",
",",
"*",
"*",
"kwargs",
")",
"->",
"HttpResponse",
":",
"pdf",
"=",
"get_pdf_from_html",
"(",
"html",
",",
"*",
"*",
"kwargs",
")",
"return",
"serve_buffer",
"(",
"pdf",
",",
"offered_filename",
"=",
"offered_filename",
",",
"content_type",
"=",
"MimeType",
".",
"PDF",
",",
"as_attachment",
"=",
"False",
",",
"as_inline",
"=",
"True",
")"
] | Same args as ``pdf_from_html()``.
WATCH OUT: may not apply e.g. wkhtmltopdf options as you'd wish. | [
"Same",
"args",
"as",
"pdf_from_html",
"()",
".",
"WATCH",
"OUT",
":",
"may",
"not",
"apply",
"e",
".",
"g",
".",
"wkhtmltopdf",
"options",
"as",
"you",
"d",
"wish",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/serve.py#L197-L209 |
RudolfCardinal/pythonlib | cardinal_pythonlib/django/serve.py | serve_concatenated_pdf_from_memory | def serve_concatenated_pdf_from_memory(
pdf_plans: Iterable[PdfPlan],
start_recto: bool = True,
offered_filename: str = "crate_download.pdf") -> HttpResponse:
"""
Concatenates PDFs into memory and serves it.
WATCH OUT: may not apply e.g. wkhtmltopdf options as you'd wish.
"""
pdf = get_concatenated_pdf_in_memory(pdf_plans, start_recto=start_recto)
return serve_buffer(pdf,
offered_filename=offered_filename,
content_type=MimeType.PDF,
as_attachment=False,
as_inline=True) | python | def serve_concatenated_pdf_from_memory(
pdf_plans: Iterable[PdfPlan],
start_recto: bool = True,
offered_filename: str = "crate_download.pdf") -> HttpResponse:
"""
Concatenates PDFs into memory and serves it.
WATCH OUT: may not apply e.g. wkhtmltopdf options as you'd wish.
"""
pdf = get_concatenated_pdf_in_memory(pdf_plans, start_recto=start_recto)
return serve_buffer(pdf,
offered_filename=offered_filename,
content_type=MimeType.PDF,
as_attachment=False,
as_inline=True) | [
"def",
"serve_concatenated_pdf_from_memory",
"(",
"pdf_plans",
":",
"Iterable",
"[",
"PdfPlan",
"]",
",",
"start_recto",
":",
"bool",
"=",
"True",
",",
"offered_filename",
":",
"str",
"=",
"\"crate_download.pdf\"",
")",
"->",
"HttpResponse",
":",
"pdf",
"=",
"get_concatenated_pdf_in_memory",
"(",
"pdf_plans",
",",
"start_recto",
"=",
"start_recto",
")",
"return",
"serve_buffer",
"(",
"pdf",
",",
"offered_filename",
"=",
"offered_filename",
",",
"content_type",
"=",
"MimeType",
".",
"PDF",
",",
"as_attachment",
"=",
"False",
",",
"as_inline",
"=",
"True",
")"
] | Concatenates PDFs into memory and serves it.
WATCH OUT: may not apply e.g. wkhtmltopdf options as you'd wish. | [
"Concatenates",
"PDFs",
"into",
"memory",
"and",
"serves",
"it",
".",
"WATCH",
"OUT",
":",
"may",
"not",
"apply",
"e",
".",
"g",
".",
"wkhtmltopdf",
"options",
"as",
"you",
"d",
"wish",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/serve.py#L212-L225 |
RudolfCardinal/pythonlib | cardinal_pythonlib/django/files.py | auto_delete_files_on_instance_delete | def auto_delete_files_on_instance_delete(instance: Any,
fieldnames: Iterable[str]) -> None:
"""
Deletes files from filesystem when object is deleted.
"""
for fieldname in fieldnames:
filefield = getattr(instance, fieldname, None)
if filefield:
if os.path.isfile(filefield.path):
os.remove(filefield.path) | python | def auto_delete_files_on_instance_delete(instance: Any,
fieldnames: Iterable[str]) -> None:
"""
Deletes files from filesystem when object is deleted.
"""
for fieldname in fieldnames:
filefield = getattr(instance, fieldname, None)
if filefield:
if os.path.isfile(filefield.path):
os.remove(filefield.path) | [
"def",
"auto_delete_files_on_instance_delete",
"(",
"instance",
":",
"Any",
",",
"fieldnames",
":",
"Iterable",
"[",
"str",
"]",
")",
"->",
"None",
":",
"for",
"fieldname",
"in",
"fieldnames",
":",
"filefield",
"=",
"getattr",
"(",
"instance",
",",
"fieldname",
",",
"None",
")",
"if",
"filefield",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filefield",
".",
"path",
")",
":",
"os",
".",
"remove",
"(",
"filefield",
".",
"path",
")"
] | Deletes files from filesystem when object is deleted. | [
"Deletes",
"files",
"from",
"filesystem",
"when",
"object",
"is",
"deleted",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/files.py#L43-L52 |
RudolfCardinal/pythonlib | cardinal_pythonlib/django/files.py | auto_delete_files_on_instance_change | def auto_delete_files_on_instance_change(
instance: Any,
fieldnames: Iterable[str],
model_class) -> None:
"""
Deletes files from filesystem when object is changed.
model_class: ``Type[Model]``
... only the type checker in Py3.5 is broken; v.s.
"""
if not instance.pk:
return # instance not yet saved in database
# noinspection PyUnresolvedReferences
try:
# noinspection PyUnresolvedReferences
old_instance = model_class.objects.get(pk=instance.pk)
except model_class.DoesNotExist:
return # old version gone from database entirely
for fieldname in fieldnames:
old_filefield = getattr(old_instance, fieldname, None)
if not old_filefield:
continue
new_filefield = getattr(instance, fieldname, None)
if old_filefield != new_filefield:
if os.path.isfile(old_filefield.path):
os.remove(old_filefield.path) | python | def auto_delete_files_on_instance_change(
instance: Any,
fieldnames: Iterable[str],
model_class) -> None:
"""
Deletes files from filesystem when object is changed.
model_class: ``Type[Model]``
... only the type checker in Py3.5 is broken; v.s.
"""
if not instance.pk:
return # instance not yet saved in database
# noinspection PyUnresolvedReferences
try:
# noinspection PyUnresolvedReferences
old_instance = model_class.objects.get(pk=instance.pk)
except model_class.DoesNotExist:
return # old version gone from database entirely
for fieldname in fieldnames:
old_filefield = getattr(old_instance, fieldname, None)
if not old_filefield:
continue
new_filefield = getattr(instance, fieldname, None)
if old_filefield != new_filefield:
if os.path.isfile(old_filefield.path):
os.remove(old_filefield.path) | [
"def",
"auto_delete_files_on_instance_change",
"(",
"instance",
":",
"Any",
",",
"fieldnames",
":",
"Iterable",
"[",
"str",
"]",
",",
"model_class",
")",
"->",
"None",
":",
"if",
"not",
"instance",
".",
"pk",
":",
"return",
"# instance not yet saved in database",
"# noinspection PyUnresolvedReferences",
"try",
":",
"# noinspection PyUnresolvedReferences",
"old_instance",
"=",
"model_class",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"instance",
".",
"pk",
")",
"except",
"model_class",
".",
"DoesNotExist",
":",
"return",
"# old version gone from database entirely",
"for",
"fieldname",
"in",
"fieldnames",
":",
"old_filefield",
"=",
"getattr",
"(",
"old_instance",
",",
"fieldname",
",",
"None",
")",
"if",
"not",
"old_filefield",
":",
"continue",
"new_filefield",
"=",
"getattr",
"(",
"instance",
",",
"fieldname",
",",
"None",
")",
"if",
"old_filefield",
"!=",
"new_filefield",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"old_filefield",
".",
"path",
")",
":",
"os",
".",
"remove",
"(",
"old_filefield",
".",
"path",
")"
] | Deletes files from filesystem when object is changed.
model_class: ``Type[Model]``
... only the type checker in Py3.5 is broken; v.s. | [
"Deletes",
"files",
"from",
"filesystem",
"when",
"object",
"is",
"changed",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/files.py#L73-L100 |
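These two helpers are evidently intended to be called from Django signal handlers; a hedged wiring sketch follows (``Document`` and its ``pdf`` field are invented names; only the two functions above come from this module):

.. code-block:: python

    from django.db.models.signals import post_delete, pre_save
    from django.dispatch import receiver

    @receiver(post_delete, sender=Document)
    def _files_on_delete(sender, instance, **kwargs):
        # Remove orphaned files once the row is gone.
        auto_delete_files_on_instance_delete(instance, ["pdf"])

    @receiver(pre_save, sender=Document)
    def _files_on_change(sender, instance, **kwargs):
        # Remove the old file when a new one replaces it.
        auto_delete_files_on_instance_change(instance, ["pdf"], Document)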
davenquinn/Attitude | attitude/orientation/pca.py | axis_transform | def axis_transform(pca_axes):
"""
Creates an affine transformation matrix to
rotate data in PCA axes into Cartesian plane
"""
from_ = N.identity(3)
to_ = pca_axes
# Find inverse transform for forward transform
# y = M x -> M = y (x)^(-1)
# We don't need to do least-squares since
# there is a simple transformation
trans_matrix = N.linalg.lstsq(from_,to_)[0]
return trans_matrix | python | def axis_transform(pca_axes):
"""
Creates an affine transformation matrix to
rotate data in PCA axes into Cartesian plane
"""
from_ = N.identity(3)
to_ = pca_axes
# Find inverse transform for forward transform
# y = M x -> M = y (x)^(-1)
# We don't need to do least-squares since
# there is a simple transformation
trans_matrix = N.linalg.lstsq(from_,to_)[0]
return trans_matrix | [
"def",
"axis_transform",
"(",
"pca_axes",
")",
":",
"from_",
"=",
"N",
".",
"identity",
"(",
"3",
")",
"to_",
"=",
"pca_axes",
"# Find inverse transform for forward transform",
"# y = M x -> M = y (x)^(-1)",
"# We don't need to do least-squares since",
"# there is a simple transformation",
"trans_matrix",
"=",
"N",
".",
"linalg",
".",
"lstsq",
"(",
"from_",
",",
"to_",
")",
"[",
"0",
"]",
"return",
"trans_matrix"
] | Creates an affine transformation matrix to
rotate data in PCA axes into Cartesian plane | [
"Creates",
"an",
"affine",
"transformation",
"matrix",
"to",
"rotate",
"data",
"in",
"PCA",
"axes",
"into",
"Cartesian",
"plane"
] | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/orientation/pca.py#L57-L70 |
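Note that because ``from_`` is the identity, ``lstsq(from_, to_)[0]`` solves ``I @ M = to_`` and therefore simply returns ``to_``; the least-squares call stands in for the general change-of-basis solve, as the comment hints. A quick numerical check under that reading:

.. code-block:: python

    import numpy as N

    pca_axes = N.linalg.qr(N.random.randn(3, 3))[0]   # arbitrary orthonormal basis
    M = axis_transform(pca_axes)
    assert N.allclose(M, pca_axes)   # identity input => M equals to_ exactly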
davenquinn/Attitude | attitude/orientation/pca.py | covariance_matrix | def covariance_matrix(self):
"""
Constructs the covariance matrix of
input data from
the singular value decomposition. Note
that this is different than a covariance
matrix of residuals, which is what we want
for calculating fit errors.
Using SVD output to compute covariance matrix
X=UΣV⊤
XX⊤ = (UΣV⊤)(UΣV⊤)⊤ = (UΣV⊤)(VΣU⊤)
V is an orthogonal matrix (V⊤V=I),
covariance matrix of input data: XX⊤ = UΣ²U⊤
Because the axes represent identity in the
PCA coordinate system, the PCA major axes
themselves represent an affine transformation
matrix from PCA to Cartesian space
"""
a = N.dot(self.U,self.sigma)
cv = N.dot(a,a.T)
# This yields the covariance matrix in Cartesian
# coordinates
return cv | python | def covariance_matrix(self):
"""
Constructs the covariance matrix of
input data from
the singular value decomposition. Note
that this is different than a covariance
matrix of residuals, which is what we want
for calculating fit errors.
Using SVD output to compute covariance matrix
X=UΣV⊤
XX⊤ = (UΣV⊤)(UΣV⊤)⊤ = (UΣV⊤)(VΣU⊤)
V is an orthogonal matrix (V⊤V=I),
covariance matrix of input data: XX⊤ = UΣ²U⊤
Because the axes represent identity in the
PCA coordinate system, the PCA major axes
themselves represent an affine transformation
matrix from PCA to Cartesian space
"""
a = N.dot(self.U,self.sigma)
cv = N.dot(a,a.T)
# This yields the covariance matrix in Cartesian
# coordinates
return cv | [
"def",
"covariance_matrix",
"(",
"self",
")",
":",
"a",
"=",
"N",
".",
"dot",
"(",
"self",
".",
"U",
",",
"self",
".",
"sigma",
")",
"cv",
"=",
"N",
".",
"dot",
"(",
"a",
",",
"a",
".",
"T",
")",
"# This yields the covariance matrix in Cartesian",
"# coordinates",
"return",
"cv"
] | Constructs the covariance matrix of
input data from
the singular value decomposition. Note
that this is different than a covariance
matrix of residuals, which is what we want
for calculating fit errors.
Using SVD output to compute covariance matrix
X=UΣV⊤
XX⊤ = (UΣV⊤)(UΣV⊤)⊤ = (UΣV⊤)(VΣU⊤)
V is an orthogonal matrix (V⊤V=I),
covariance matrix of input data: XX⊤ = UΣ²U⊤
Because the axes represent identity in the
PCA coordinate system, the PCA major axes
themselves represent an affine transformation
matrix from PCA to Cartesian space | [
"Constructs",
"the",
"covariance",
"matrix",
"of",
"input",
"data",
"from",
"the",
"singular",
"value",
"decomposition",
".",
"Note",
"that",
"this",
"is",
"different",
"than",
"a",
"covariance",
"matrix",
"of",
"residuals",
"which",
"is",
"what",
"we",
"want",
"for",
"calculating",
"fit",
"errors",
"."
] | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/orientation/pca.py#L83-L108 |
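For reference, the docstring's SVD identity written out as a single clean chain (standard linear algebra, not specific to this library):

.. math::

    X = U \Sigma V^{\top}, \qquad
    X X^{\top} = (U \Sigma V^{\top})(U \Sigma V^{\top})^{\top}
               = U \Sigma V^{\top} V \Sigma U^{\top}
               = U \Sigma^{2} U^{\top},

using :math:`V^{\top}V = I` (orthogonality of :math:`V`) and :math:`\Sigma^{\top} = \Sigma` (diagonal).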
davenquinn/Attitude | attitude/orientation/pca.py | PCAOrientation.U | def U(self):
"""
Property to support lazy evaluation of residuals
"""
if self._U is None:
sinv = N.diag(1/self.singular_values)
self._U = dot(self.arr,self.V.T,sinv)
return self._U | python | def U(self):
"""
Property to support lazy evaluation of residuals
"""
if self._U is None:
sinv = N.diag(1/self.singular_values)
self._U = dot(self.arr,self.V.T,sinv)
return self._U | [
"def",
"U",
"(",
"self",
")",
":",
"if",
"self",
".",
"_U",
"is",
"None",
":",
"sinv",
"=",
"N",
".",
"diag",
"(",
"1",
"/",
"self",
".",
"singular_values",
")",
"self",
".",
"_U",
"=",
"dot",
"(",
"self",
".",
"arr",
",",
"self",
".",
"V",
".",
"T",
",",
"sinv",
")",
"return",
"self",
".",
"_U"
] | Property to support lazy evaluation of residuals | [
"Property",
"to",
"support",
"lazy",
"evaluation",
"of",
"residuals"
] | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/orientation/pca.py#L240-L247 |
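The lazy reconstruction above is the SVD identity rearranged. Assuming ``self.V`` holds the rows-of-right-singular-vectors matrix (numpy's ``vh``, so that ``self.V.T`` is :math:`V`):

.. math::

    X = U \Sigma V^{\top} \;\Longrightarrow\; U = X V \Sigma^{-1},

which is exactly ``dot(self.arr, self.V.T, sinv)`` with ``sinv = N.diag(1/self.singular_values)``.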
davenquinn/Attitude | attitude/orientation/pca.py | PCAOrientation.residuals | def residuals(self):
"""
Returns residuals of fit against all
three data axes (singular values 1, 2,
and 3). This takes the form of data along
singular axis 3 (axes 1 and 2 define the plane)
"""
_ = self.rotated()
_[:,-1] = 0
_ = N.dot(_,self.axes)
return self.arr - _ | python | def residuals(self):
"""
Returns residuals of fit against all
three data axes (singular values 1, 2,
and 3). This takes the form of data along
singular axis 3 (axes 1 and 2 define the plane)
"""
_ = self.rotated()
_[:,-1] = 0
_ = N.dot(_,self.axes)
return self.arr - _ | [
"def",
"residuals",
"(",
"self",
")",
":",
"_",
"=",
"self",
".",
"rotated",
"(",
")",
"_",
"[",
":",
",",
"-",
"1",
"]",
"=",
"0",
"_",
"=",
"N",
".",
"dot",
"(",
"_",
",",
"self",
".",
"axes",
")",
"return",
"self",
".",
"arr",
"-",
"_"
] | Returns residuals of fit against all
three data axes (singular values 1, 2,
and 3). This takes the form of data along
singular axis 3 (axes 1 and 2 define the plane) | [
"Returns",
"residuals",
"of",
"fit",
"against",
"all",
"three",
"data",
"axes",
"(",
"singular",
"values",
"1",
"2",
"and",
"3",
")",
".",
"This",
"takes",
"the",
"form",
"of",
"data",
"along",
"singular",
"axis",
"3",
"(",
"axes",
"1",
"and",
"2",
"define",
"the",
"plane",
")"
] | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/orientation/pca.py#L264-L274 |
davenquinn/Attitude | attitude/orientation/pca.py | PCAOrientation.angular_error | def angular_error(self, axis_length):
"""
The angular error for an in-plane axis of
given length (either a PCA major axis or
an intermediate direction).
"""
hyp_axes = self.method(self)
return N.arctan2(hyp_axes[-1],axis_length) | python | def angular_error(self, axis_length):
"""
The angular error for an in-plane axis of
given length (either a PCA major axis or
an intermediate direction).
"""
hyp_axes = self.method(self)
return N.arctan2(hyp_axes[-1],axis_length) | [
"def",
"angular_error",
"(",
"self",
",",
"axis_length",
")",
":",
"hyp_axes",
"=",
"self",
".",
"method",
"(",
"self",
")",
"return",
"N",
".",
"arctan2",
"(",
"hyp_axes",
"[",
"-",
"1",
"]",
",",
"axis_length",
")"
] | The angular error for an in-plane axis of
given length (either a PCA major axis or
an intermediate direction). | [
"The",
"angular",
"error",
"for",
"an",
"in",
"-",
"plane",
"axis",
"of",
"given",
"length",
"(",
"either",
"a",
"PCA",
"major",
"axis",
"or",
"an",
"intermediate",
"direction",
")",
"."
] | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/orientation/pca.py#L288-L295 |
davenquinn/Attitude | attitude/orientation/pca.py | PCAOrientation.angular_errors | def angular_errors(self, degrees=True):
"""
Minimum and maximum angular errors
corresponding to 1st and 2nd axes
of PCA distribution.
"""
hyp_axes = self.method(self)
v = angular_errors(hyp_axes)
if degrees:
v = N.degrees(v)
return tuple(v) | python | def angular_errors(self, degrees=True):
"""
Minimum and maximum angular errors
corresponding to 1st and 2nd axes
of PCA distribution.
"""
hyp_axes = self.method(self)
v = angular_errors(hyp_axes)
if degrees:
v = N.degrees(v)
return tuple(v) | [
"def",
"angular_errors",
"(",
"self",
",",
"degrees",
"=",
"True",
")",
":",
"hyp_axes",
"=",
"self",
".",
"method",
"(",
"self",
")",
"v",
"=",
"angular_errors",
"(",
"hyp_axes",
")",
"if",
"degrees",
":",
"v",
"=",
"N",
".",
"degrees",
"(",
"v",
")",
"return",
"tuple",
"(",
"v",
")"
] | Minimum and maximum angular errors
corresponding to 1st and 2nd axes
of PCA distribution. | [
"Minimum",
"and",
"maximum",
"angular",
"errors",
"corresponding",
"to",
"1st",
"and",
"2nd",
"axes",
"of",
"PCA",
"distribution",
"."
] | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/orientation/pca.py#L297-L307 |
davenquinn/Attitude | attitude/orientation/pca.py | PCAOrientation._covariance_matrix | def _covariance_matrix(self, type='noise'):
"""
Constructs the covariance matrix from PCA
residuals
"""
if type == 'sampling':
return self.sigma**2/(self.n-1)
elif type == 'noise':
return 4*self.sigma*N.var(self.rotated(), axis=0) | python | def _covariance_matrix(self, type='noise'):
"""
Constructs the covariance matrix from PCA
residuals
"""
if type == 'sampling':
return self.sigma**2/(self.n-1)
elif type == 'noise':
return 4*self.sigma*N.var(self.rotated(), axis=0) | [
"def",
"_covariance_matrix",
"(",
"self",
",",
"type",
"=",
"'noise'",
")",
":",
"if",
"type",
"==",
"'sampling'",
":",
"return",
"self",
".",
"sigma",
"**",
"2",
"/",
"(",
"self",
".",
"n",
"-",
"1",
")",
"elif",
"type",
"==",
"'noise'",
":",
"return",
"4",
"*",
"self",
".",
"sigma",
"*",
"N",
".",
"var",
"(",
"self",
".",
"rotated",
"(",
")",
",",
"axis",
"=",
"0",
")"
] | Constructs the covariance matrix from PCA
residuals | [
"Constructs",
"the",
"covariance",
"matrix",
"from",
"PCA",
"residuals"
] | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/orientation/pca.py#L309-L317 |
davenquinn/Attitude | attitude/orientation/pca.py | PCAOrientation.explained_variance | def explained_variance(self):
"""
Proportion of variance that is explained by the
first two principal components (which together
represent the planar fit). Analogous to R^2 of
linear least squares.
"""
v = N.diagonal(self.covariance_matrix)
return v[0:2].sum()/v.sum() | python | def explained_variance(self):
"""
Proportion of variance that is explained by the
first two principal components (which together
represent the planar fit). Analogous to R^2 of
linear least squares.
"""
v = N.diagonal(self.covariance_matrix)
return v[0:2].sum()/v.sum() | [
"def",
"explained_variance",
"(",
"self",
")",
":",
"v",
"=",
"N",
".",
"diagonal",
"(",
"self",
".",
"covariance_matrix",
")",
"return",
"v",
"[",
"0",
":",
"2",
"]",
".",
"sum",
"(",
")",
"/",
"v",
".",
"sum",
"(",
")"
] | Proportion of variance that is explained by the
first two principal components (which together
represent the planar fit). Analogous to R^2 of
linear least squares. | [
"Proportion",
"of",
"variance",
"that",
"is",
"explained",
"by",
"the",
"first",
"two",
"principal",
"components",
"(",
"which",
"together",
"represent",
"the",
"planar",
"fit",
")",
".",
"Analogous",
"to",
"R^2",
"of",
"linear",
"least",
"squares",
"."
] | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/orientation/pca.py#L329-L337 |
davenquinn/Attitude | attitude/orientation/pca.py | PCAOrientation.strike_dip | def strike_dip(self, degrees=True):
"""
Computes strike and dip from a normal vector.
Results are usually the same as LLSQ
in strike (to a few decimal places) and close in dip.
Sometimes, dips are greater by as much as 45 degrees,
reflecting inclusion of errors in x-y plane.
"""
n = self.axes[2]
r = N.linalg.norm(n)
strike = N.degrees(N.arctan2(n[0],n[1]))-90
dip = N.degrees(N.arccos(n[2]/r))
# Since PCA errors are not pinned to the XYZ plane,
# we need to make sure our results are in the
# right quadrant.
if dip > 90:
dip = 180 - dip
strike += 180
# Proper azimuth notation
if strike < 0:
strike += 360
return strike, dip | python | def strike_dip(self, degrees=True):
"""
Computes strike and dip from a normal vector.
Results are usually the same as LLSQ
in strike (to a few decimal places) and close in dip.
Sometimes, dips are greater by as much as 45 degrees,
reflecting inclusion of errors in x-y plane.
"""
n = self.axes[2]
r = N.linalg.norm(n)
strike = N.degrees(N.arctan2(n[0],n[1]))-90
dip = N.degrees(N.arccos(n[2]/r))
# Since PCA errors are not pinned to the XYZ plane,
# we need to make sure our results are in the
# right quadrant.
if dip > 90:
dip = 180 - dip
strike += 180
# Proper azimuth notation
if strike < 0:
strike += 360
return strike, dip | [
"def",
"strike_dip",
"(",
"self",
",",
"degrees",
"=",
"True",
")",
":",
"n",
"=",
"self",
".",
"axes",
"[",
"2",
"]",
"r",
"=",
"N",
".",
"linalg",
".",
"norm",
"(",
"n",
")",
"strike",
"=",
"N",
".",
"degrees",
"(",
"N",
".",
"arctan2",
"(",
"n",
"[",
"0",
"]",
",",
"n",
"[",
"1",
"]",
")",
")",
"-",
"90",
"dip",
"=",
"N",
".",
"degrees",
"(",
"N",
".",
"arccos",
"(",
"n",
"[",
"2",
"]",
"/",
"r",
")",
")",
"# Since PCA errors are not pinned to the XYZ plane,",
"# we need to make sure our results are in the",
"# right quadrant.",
"if",
"dip",
">",
"90",
":",
"dip",
"=",
"180",
"-",
"dip",
"strike",
"+=",
"180",
"# Proper azimuth notation",
"if",
"strike",
"<",
"0",
":",
"strike",
"+=",
"360",
"return",
"strike",
",",
"dip"
] | Computes strike and dip from a normal vector.
Results are usually the same as LLSQ
in strike (to a few decimal places) and close in dip.
Sometimes, dips are greater by as much as 45 degrees,
reflecting inclusion of errors in x-y plane. | [
"Computes",
"strike",
"and",
"dip",
"from",
"a",
"normal",
"vector",
".",
"Results",
"are",
"usually",
"the",
"same",
"as",
"LLSQ",
"in",
"strike",
"(",
"to",
"a",
"few",
"decimal",
"places",
")",
"and",
"close",
"in",
"dip",
".",
"Sometimes",
"dips",
"are",
"greater",
"by",
"as",
"much",
"as",
"45",
"degrees",
"reflecting",
"inclusion",
"of",
"errors",
"in",
"x",
"-",
"y",
"plane",
"."
] | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/orientation/pca.py#L361-L385 |
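A worked example of the arithmetic above, on an invented unit normal (plain trigonometry, independent of the class):

.. code-block:: python

    import numpy as N

    n = N.array([0.0, -0.5, 0.866])   # ~unit normal of a plane dipping ~30° south
    r = N.linalg.norm(n)
    strike = N.degrees(N.arctan2(n[0], n[1])) - 90    # arctan2(0, -0.5) = 180°
    dip = N.degrees(N.arccos(n[2] / r))               # ≈ 30°
    if dip > 90:
        dip = 180 - dip
        strike += 180
    if strike < 0:
        strike += 360
    # strike ≈ 90 (east-west plane), dip ≈ 30;
    # dip direction = strike + 90 = 180 (south), as expected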
davenquinn/Attitude | attitude/orientation/pca.py | PCAOrientation.as_hyperbola | def as_hyperbola(self, rotated=False):
"""
Hyperbolic error area
"""
idx = N.diag_indices(3)
_ = 1/self.covariance_matrix[idx]
d = list(_)
d[-1] *= -1
arr = N.identity(4)*-1
arr[idx] = d
hyp = conic(arr)
if rotated:
R = augment(self.axes)
hyp = hyp.transform(R)
return hyp | python | def as_hyperbola(self, rotated=False):
"""
Hyperbolic error area
"""
idx = N.diag_indices(3)
_ = 1/self.covariance_matrix[idx]
d = list(_)
d[-1] *= -1
arr = N.identity(4)*-1
arr[idx] = d
hyp = conic(arr)
if rotated:
R = augment(self.axes)
hyp = hyp.transform(R)
return hyp | [
"def",
"as_hyperbola",
"(",
"self",
",",
"rotated",
"=",
"False",
")",
":",
"idx",
"=",
"N",
".",
"diag_indices",
"(",
"3",
")",
"_",
"=",
"1",
"/",
"self",
".",
"covariance_matrix",
"[",
"idx",
"]",
"d",
"=",
"list",
"(",
"_",
")",
"d",
"[",
"-",
"1",
"]",
"*=",
"-",
"1",
"arr",
"=",
"N",
".",
"identity",
"(",
"4",
")",
"*",
"-",
"1",
"arr",
"[",
"idx",
"]",
"=",
"d",
"hyp",
"=",
"conic",
"(",
"arr",
")",
"if",
"rotated",
":",
"R",
"=",
"augment",
"(",
"self",
".",
"axes",
")",
"hyp",
"=",
"hyp",
".",
"transform",
"(",
"R",
")",
"return",
"hyp"
] | Hyperbolic error area | [
"Hyperbolic",
"error",
"area"
] | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/orientation/pca.py#L422-L437 |
meyersj/geotweet | geotweet/mapreduce/utils/words.py | WordExtractor.run | def run(self, line):
"""
Extract words from tweet
1. Remove non-ascii characters
2. Split line into individual words
3. Clean up punctuation characters
"""
words = []
for word in self.clean_unicode(line.lower()).split():
if word.startswith('http'):
continue
cleaned = self.clean_punctuation(word)
if len(cleaned) > 1 and cleaned not in self.stopwords:
words.append(cleaned)
return words | python | def run(self, line):
"""
Extract words from tweet
1. Remove non-ascii characters
2. Split line into individual words
3. Clean up punctuation characters
"""
words = []
for word in self.clean_unicode(line.lower()).split():
if word.startswith('http'):
continue
cleaned = self.clean_punctuation(word)
if len(cleaned) > 1 and cleaned not in self.stopwords:
words.append(cleaned)
return words | [
"def",
"run",
"(",
"self",
",",
"line",
")",
":",
"words",
"=",
"[",
"]",
"for",
"word",
"in",
"self",
".",
"clean_unicode",
"(",
"line",
".",
"lower",
"(",
")",
")",
".",
"split",
"(",
")",
":",
"if",
"word",
".",
"startswith",
"(",
"'http'",
")",
":",
"continue",
"cleaned",
"=",
"self",
".",
"clean_punctuation",
"(",
"word",
")",
"if",
"len",
"(",
"cleaned",
")",
">",
"1",
"and",
"cleaned",
"not",
"in",
"self",
".",
"stopwords",
":",
"words",
".",
"append",
"(",
"cleaned",
")",
"return",
"words"
] | Extract words from tweet
1. Remove non-ascii characters
2. Split line into individual words
3. Clean up punctuation characters | [
"Extract",
"words",
"from",
"tweet"
] | train | https://github.com/meyersj/geotweet/blob/1a6b55f98adf34d1b91f172d9187d599616412d9/geotweet/mapreduce/utils/words.py#L47-L63 |
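A hedged usage sketch: the constructor is not shown in this excerpt, so a no-argument construction is assumed, and the exact output depends on the stopword list.

.. code-block:: python

    extractor = WordExtractor()   # constructor args (if any) not shown above
    words = extractor.run("Check out http://t.co/abc -- GREAT coffee, great food!!")
    # Per the steps above: lowercased, URL dropped, punctuation stripped,
    # stopwords and single characters removed; e.g. something like
    # ['check', 'great', 'coffee', 'great', 'food']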
RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/orm_query.py | get_rows_fieldnames_from_query | def get_rows_fieldnames_from_query(
session: Union[Session, Engine, Connection],
query: Query) -> Tuple[Sequence[Sequence[Any]], Sequence[str]]:
"""
Returns results and column names from a query.
Args:
session: SQLAlchemy :class:`Session`, :class:`Engine`, or
:class:`Connection` object
query: SQLAlchemy :class:`Query`
Returns:
``(rows, fieldnames)`` where ``rows`` is the usual set of results and
``fieldnames`` are the name of the result columns/fields.
"""
# https://stackoverflow.com/questions/6455560/how-to-get-column-names-from-sqlalchemy-result-declarative-syntax # noqa
# No! Returns e.g. "User" for session.Query(User)...
# fieldnames = [cd['name'] for cd in query.column_descriptions]
result = session.execute(query) # type: ResultProxy
fieldnames = result.keys()
# ... yes! Comes out as "_table_field", which is how SQLAlchemy SELECTs
# things.
rows = result.fetchall()
return rows, fieldnames | python | def get_rows_fieldnames_from_query(
session: Union[Session, Engine, Connection],
query: Query) -> Tuple[Sequence[Sequence[Any]], Sequence[str]]:
"""
Returns results and column names from a query.
Args:
session: SQLAlchemy :class:`Session`, :class:`Engine`, or
:class:`Connection` object
query: SQLAlchemy :class:`Query`
Returns:
``(rows, fieldnames)`` where ``rows`` is the usual set of results and
``fieldnames`` are the name of the result columns/fields.
"""
# https://stackoverflow.com/questions/6455560/how-to-get-column-names-from-sqlalchemy-result-declarative-syntax # noqa
# No! Returns e.g. "User" for session.Query(User)...
# fieldnames = [cd['name'] for cd in query.column_descriptions]
result = session.execute(query) # type: ResultProxy
fieldnames = result.keys()
# ... yes! Comes out as "_table_field", which is how SQLAlchemy SELECTs
# things.
rows = result.fetchall()
return rows, fieldnames | [
"def",
"get_rows_fieldnames_from_query",
"(",
"session",
":",
"Union",
"[",
"Session",
",",
"Engine",
",",
"Connection",
"]",
",",
"query",
":",
"Query",
")",
"->",
"Tuple",
"[",
"Sequence",
"[",
"Sequence",
"[",
"Any",
"]",
"]",
",",
"Sequence",
"[",
"str",
"]",
"]",
":",
"# https://stackoverflow.com/questions/6455560/how-to-get-column-names-from-sqlalchemy-result-declarative-syntax # noqa",
"# No! Returns e.g. \"User\" for session.Query(User)...",
"# fieldnames = [cd['name'] for cd in query.column_descriptions]",
"result",
"=",
"session",
".",
"execute",
"(",
"query",
")",
"# type: ResultProxy",
"fieldnames",
"=",
"result",
".",
"keys",
"(",
")",
"# ... yes! Comes out as \"_table_field\", which is how SQLAlchemy SELECTs",
"# things.",
"rows",
"=",
"result",
".",
"fetchall",
"(",
")",
"return",
"rows",
",",
"fieldnames"
] | Returns results and column names from a query.
Args:
session: SQLAlchemy :class:`Session`, :class:`Engine`, or
:class:`Connection` object
query: SQLAlchemy :class:`Query`
Returns:
``(rows, fieldnames)`` where ``rows`` is the usual set of results and
``fieldnames`` are the name of the result columns/fields. | [
"Returns",
"results",
"and",
"column",
"names",
"from",
"a",
"query",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/orm_query.py#L50-L74 |
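A hedged usage sketch for the function above against an in-memory SQLite database, written in SQLAlchemy 1.x style to match the `ResultProxy` comment in the record; the `User` model is illustrative, not part of the library.

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(User(name="alice"))
session.commit()

query = session.query(User.id, User.name)
rows, fieldnames = get_rows_fieldnames_from_query(session, query)
print(fieldnames)   # table-qualified names, e.g. ['users_id', 'users_name']
print(rows)         # [(1, 'alice')]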
RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/orm_query.py | bool_from_exists_clause | def bool_from_exists_clause(session: Session,
exists_clause: Exists) -> bool:
"""
Database dialects are not consistent in how ``EXISTS`` clauses can be
converted to a boolean answer. This function manages the inconsistencies.
See:
- https://bitbucket.org/zzzeek/sqlalchemy/issues/3212/misleading-documentation-for-queryexists
- http://docs.sqlalchemy.org/en/latest/orm/query.html#sqlalchemy.orm.query.Query.exists
Specifically, we want this:
*SQL Server*
.. code-block:: sql
SELECT 1 WHERE EXISTS (SELECT 1 FROM table WHERE ...)
-- ... giving 1 or None (no rows)
-- ... fine for SQL Server, but invalid for MySQL (no FROM clause)
*Others, including MySQL*
.. code-block:: sql
SELECT EXISTS (SELECT 1 FROM table WHERE ...)
-- ... giving 1 or 0
-- ... fine for MySQL, but invalid syntax for SQL Server
""" # noqa
if session.get_bind().dialect.name == SqlaDialectName.MSSQL:
# SQL Server
result = session.query(literal(True)).filter(exists_clause).scalar()
else:
# MySQL, etc.
result = session.query(exists_clause).scalar()
return bool(result) | python | def bool_from_exists_clause(session: Session,
exists_clause: Exists) -> bool:
"""
Database dialects are not consistent in how ``EXISTS`` clauses can be
converted to a boolean answer. This function manages the inconsistencies.
See:
- https://bitbucket.org/zzzeek/sqlalchemy/issues/3212/misleading-documentation-for-queryexists
- http://docs.sqlalchemy.org/en/latest/orm/query.html#sqlalchemy.orm.query.Query.exists
Specifically, we want this:
*SQL Server*
.. code-block:: sql
SELECT 1 WHERE EXISTS (SELECT 1 FROM table WHERE ...)
-- ... giving 1 or None (no rows)
-- ... fine for SQL Server, but invalid for MySQL (no FROM clause)
*Others, including MySQL*
.. code-block:: sql
SELECT EXISTS (SELECT 1 FROM table WHERE ...)
-- ... giving 1 or 0
-- ... fine for MySQL, but invalid syntax for SQL Server
""" # noqa
if session.get_bind().dialect.name == SqlaDialectName.MSSQL:
# SQL Server
result = session.query(literal(True)).filter(exists_clause).scalar()
else:
# MySQL, etc.
result = session.query(exists_clause).scalar()
return bool(result) | [
"def",
"bool_from_exists_clause",
"(",
"session",
":",
"Session",
",",
"exists_clause",
":",
"Exists",
")",
"->",
"bool",
":",
"# noqa",
"if",
"session",
".",
"get_bind",
"(",
")",
".",
"dialect",
".",
"name",
"==",
"SqlaDialectName",
".",
"MSSQL",
":",
"# SQL Server",
"result",
"=",
"session",
".",
"query",
"(",
"literal",
"(",
"True",
")",
")",
".",
"filter",
"(",
"exists_clause",
")",
".",
"scalar",
"(",
")",
"else",
":",
"# MySQL, etc.",
"result",
"=",
"session",
".",
"query",
"(",
"exists_clause",
")",
".",
"scalar",
"(",
")",
"return",
"bool",
"(",
"result",
")"
] | Database dialects are not consistent in how ``EXISTS`` clauses can be
converted to a boolean answer. This function manages the inconsistencies.
See:
- https://bitbucket.org/zzzeek/sqlalchemy/issues/3212/misleading-documentation-for-queryexists
- http://docs.sqlalchemy.org/en/latest/orm/query.html#sqlalchemy.orm.query.Query.exists
Specifically, we want this:
*SQL Server*
.. code-block:: sql
SELECT 1 WHERE EXISTS (SELECT 1 FROM table WHERE ...)
-- ... giving 1 or None (no rows)
-- ... fine for SQL Server, but invalid for MySQL (no FROM clause)
*Others, including MySQL*
.. code-block:: sql
SELECT EXISTS (SELECT 1 FROM table WHERE ...)
-- ... giving 1 or 0
-- ... fine for MySQL, but invalid syntax for SQL Server | [
"Database",
"dialects",
"are",
"not",
"consistent",
"in",
"how",
"EXISTS",
"clauses",
"can",
"be",
"converted",
"to",
"a",
"boolean",
"answer",
".",
"This",
"function",
"manages",
"the",
"inconsistencies",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/orm_query.py#L81-L117 |
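A short usage sketch, continuing the illustrative in-memory `User` model from the sketch above; the dialect branch is chosen automatically from the session's bind.

exists_clause = session.query(User).filter(User.name == "alice").exists()
print(bool_from_exists_clause(session, exists_clause))   # True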
RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/orm_query.py | exists_orm | def exists_orm(session: Session,
ormclass: DeclarativeMeta,
*criteria: Any) -> bool:
"""
Detects whether a database record exists for the specified ``ormclass``
and ``criteria``.
Example usage:
.. code-block:: python
bool_exists = exists_orm(session, MyClass, MyClass.myfield == value)
"""
# http://docs.sqlalchemy.org/en/latest/orm/query.html
q = session.query(ormclass)
for criterion in criteria:
q = q.filter(criterion)
exists_clause = q.exists()
return bool_from_exists_clause(session=session,
exists_clause=exists_clause) | python | def exists_orm(session: Session,
ormclass: DeclarativeMeta,
*criteria: Any) -> bool:
"""
Detects whether a database record exists for the specified ``ormclass``
and ``criteria``.
Example usage:
.. code-block:: python
bool_exists = exists_orm(session, MyClass, MyClass.myfield == value)
"""
# http://docs.sqlalchemy.org/en/latest/orm/query.html
q = session.query(ormclass)
for criterion in criteria:
q = q.filter(criterion)
exists_clause = q.exists()
return bool_from_exists_clause(session=session,
exists_clause=exists_clause) | [
"def",
"exists_orm",
"(",
"session",
":",
"Session",
",",
"ormclass",
":",
"DeclarativeMeta",
",",
"*",
"criteria",
":",
"Any",
")",
"->",
"bool",
":",
"# http://docs.sqlalchemy.org/en/latest/orm/query.html",
"q",
"=",
"session",
".",
"query",
"(",
"ormclass",
")",
"for",
"criterion",
"in",
"criteria",
":",
"q",
"=",
"q",
".",
"filter",
"(",
"criterion",
")",
"exists_clause",
"=",
"q",
".",
"exists",
"(",
")",
"return",
"bool_from_exists_clause",
"(",
"session",
"=",
"session",
",",
"exists_clause",
"=",
"exists_clause",
")"
] | Detects whether a database record exists for the specified ``ormclass``
and ``criteria``.
Example usage:
.. code-block:: python
bool_exists = exists_orm(session, MyClass, MyClass.myfield == value) | [
"Detects",
"whether",
"a",
"database",
"record",
"exists",
"for",
"the",
"specified",
"ormclass",
"and",
"criteria",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/orm_query.py#L120-L139 |
RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/orm_query.py | get_or_create | def get_or_create(session: Session,
model: DeclarativeMeta,
defaults: Dict[str, Any] = None,
**kwargs: Any) -> Tuple[Any, bool]:
"""
Fetches an ORM object from the database, or creates one if none existed.
Args:
session: an SQLAlchemy :class:`Session`
model: an SQLAlchemy ORM class
defaults: default initialization arguments (in addition to relevant
filter criteria) if we have to create a new instance
kwargs: optional filter criteria
Returns:
a tuple ``(instance, newly_created)``
See http://stackoverflow.com/questions/2546207 (this function is a
composite of several suggestions).
"""
instance = session.query(model).filter_by(**kwargs).first()
if instance:
return instance, False
else:
params = dict((k, v) for k, v in kwargs.items()
if not isinstance(v, ClauseElement))
params.update(defaults or {})
instance = model(**params)
session.add(instance)
return instance, True | python | def get_or_create(session: Session,
model: DeclarativeMeta,
defaults: Dict[str, Any] = None,
**kwargs: Any) -> Tuple[Any, bool]:
"""
Fetches an ORM object from the database, or creates one if none existed.
Args:
session: an SQLAlchemy :class:`Session`
model: an SQLAlchemy ORM class
defaults: default initialization arguments (in addition to relevant
filter criteria) if we have to create a new instance
kwargs: optional filter criteria
Returns:
a tuple ``(instance, newly_created)``
See http://stackoverflow.com/questions/2546207 (this function is a
composite of several suggestions).
"""
instance = session.query(model).filter_by(**kwargs).first()
if instance:
return instance, False
else:
params = dict((k, v) for k, v in kwargs.items()
if not isinstance(v, ClauseElement))
params.update(defaults or {})
instance = model(**params)
session.add(instance)
return instance, True | [
"def",
"get_or_create",
"(",
"session",
":",
"Session",
",",
"model",
":",
"DeclarativeMeta",
",",
"defaults",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"None",
",",
"*",
"*",
"kwargs",
":",
"Any",
")",
"->",
"Tuple",
"[",
"Any",
",",
"bool",
"]",
":",
"instance",
"=",
"session",
".",
"query",
"(",
"model",
")",
".",
"filter_by",
"(",
"*",
"*",
"kwargs",
")",
".",
"first",
"(",
")",
"if",
"instance",
":",
"return",
"instance",
",",
"False",
"else",
":",
"params",
"=",
"dict",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"not",
"isinstance",
"(",
"v",
",",
"ClauseElement",
")",
")",
"params",
".",
"update",
"(",
"defaults",
"or",
"{",
"}",
")",
"instance",
"=",
"model",
"(",
"*",
"*",
"params",
")",
"session",
".",
"add",
"(",
"instance",
")",
"return",
"instance",
",",
"True"
] | Fetches an ORM object from the database, or creates one if none existed.
Args:
session: an SQLAlchemy :class:`Session`
model: an SQLAlchemy ORM class
defaults: default initialization arguments (in addition to relevant
filter criteria) if we have to create a new instance
kwargs: optional filter criteria
Returns:
a tuple ``(instance, newly_created)``
See http://stackoverflow.com/questions/2546207 (this function is a
composite of several suggestions). | [
"Fetches",
"an",
"ORM",
"object",
"from",
"the",
"database",
"or",
"creates",
"one",
"if",
"none",
"existed",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/orm_query.py#L146-L175 |
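A usage sketch, again with the illustrative `User` model. Note that the function `add()`s but does not commit, so the second lookup finds the new row via the session's autoflush.

user, created = get_or_create(session, User, name="bob")
print(created)                  # True: a new, uncommitted instance
user2, created2 = get_or_create(session, User, name="bob")
print(user2 is user, created2)  # True False: found again via autoflush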
RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/orm_query.py | CountStarSpecializedQuery.count_star | def count_star(self) -> int:
"""
Implements the ``COUNT(*)`` specialization.
"""
count_query = (self.statement.with_only_columns([func.count()])
.order_by(None))
return self.session.execute(count_query).scalar() | python | def count_star(self) -> int:
"""
Implements the ``COUNT(*)`` specialization.
"""
count_query = (self.statement.with_only_columns([func.count()])
.order_by(None))
return self.session.execute(count_query).scalar() | [
"def",
"count_star",
"(",
"self",
")",
"->",
"int",
":",
"count_query",
"=",
"(",
"self",
".",
"statement",
".",
"with_only_columns",
"(",
"[",
"func",
".",
"count",
"(",
")",
"]",
")",
".",
"order_by",
"(",
"None",
")",
")",
"return",
"self",
".",
"session",
".",
"execute",
"(",
"count_query",
")",
".",
"scalar",
"(",
")"
] | Implements the ``COUNT(*)`` specialization. | [
"Implements",
"the",
"COUNT",
"(",
"*",
")",
"specialization",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/orm_query.py#L202-L208 |
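The Core statement that `count_star()` builds, sketched directly in 1.x `select([...])` list style to match the `with_only_columns([...])` call above; `User.__table__` is the illustrative mapped table from the earlier sketch.

from sqlalchemy import func, select

count_query = select([func.count()]).select_from(User.__table__)
print(session.execute(count_query).scalar())   # SELECT count(*) FROM users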
meyersj/geotweet | geotweet/mapreduce/utils/reader.py | FileReader.read | def read(self, src):
""" Download GeoJSON file of US counties from url (S3 bucket) """
geojson = None
if not self.is_valid_src(src):
error = "File < {0} > does not exists or does start with 'http'."
raise ValueError(error.format(src))
if not self.is_url(src):
return open(src, 'r').read().decode('latin-1').encode('utf-8')
tmp = self.get_location(src)
# if src points to a url that was already downloaded
# read from local file instead
if os.path.isfile(tmp):
with open(tmp, 'r') as f:
return f.read()
# download file and write to local filesystem before returning
response = urllib2.urlopen(src)
data = response.read().decode('latin-1').encode('utf-8')
with open(tmp, 'w') as f:
f.write(data)
return data | python | def read(self, src):
""" Download GeoJSON file of US counties from url (S3 bucket) """
geojson = None
if not self.is_valid_src(src):
error = "File < {0} > does not exists or does start with 'http'."
raise ValueError(error.format(src))
if not self.is_url(src):
return open(src, 'r').read().decode('latin-1').encode('utf-8')
tmp = self.get_location(src)
# if src points to a url that was already downloaded
# read from local file instead
if os.path.isfile(tmp):
with open(tmp, 'r') as f:
return f.read()
# download file and write to local filesystem before returning
response = urllib2.urlopen(src)
data = response.read().decode('latin-1').encode('utf-8')
with open(tmp, 'w') as f:
f.write(data)
return data | [
"def",
"read",
"(",
"self",
",",
"src",
")",
":",
"geojson",
"=",
"None",
"if",
"not",
"self",
".",
"is_valid_src",
"(",
"src",
")",
":",
"error",
"=",
"\"File < {0} > does not exists or does start with 'http'.\"",
"raise",
"ValueError",
"(",
"error",
".",
"format",
"(",
"src",
")",
")",
"if",
"not",
"self",
".",
"is_url",
"(",
"src",
")",
":",
"return",
"open",
"(",
"src",
",",
"'r'",
")",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'latin-1'",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"tmp",
"=",
"self",
".",
"get_location",
"(",
"src",
")",
"# if src poits to url that was already downloaded",
"# read from local file instead",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"tmp",
")",
":",
"with",
"open",
"(",
"tmp",
",",
"'r'",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")",
"# download file and write to local filesystem before returning",
"response",
"=",
"urllib2",
".",
"urlopen",
"(",
"src",
")",
"data",
"=",
"response",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'latin-1'",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"with",
"open",
"(",
"tmp",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"data",
")",
"return",
"data"
] | Download GeoJSON file of US counties from url (S3 bucket) | [
"Download",
"GeoJSON",
"file",
"of",
"US",
"counties",
"from",
"url",
"(",
"S3",
"bucket",
")"
] | train | https://github.com/meyersj/geotweet/blob/1a6b55f98adf34d1b91f172d9187d599616412d9/geotweet/mapreduce/utils/reader.py#L32-L51 |
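A Python 3 sketch of the same download-once-then-cache pattern (the record targets Python 2's `urllib2`); `src`, `tmp` and the encodings mirror the original, with bytes returned consistently from both branches.

import os
import urllib.request

def read_cached(src, tmp):
    if os.path.isfile(tmp):                      # cached copy: reuse it
        with open(tmp, "rb") as f:
            return f.read()
    data = (urllib.request.urlopen(src).read()
            .decode("latin-1").encode("utf-8"))  # normalise to UTF-8 bytes
    with open(tmp, "wb") as f:                   # cache for the next call
        f.write(data)
    return data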
RudolfCardinal/pythonlib | cardinal_pythonlib/randomness.py | create_base64encoded_randomness | def create_base64encoded_randomness(num_bytes: int) -> str:
"""
Create and return ``num_bytes`` of random data.
The result is encoded in a string with URL-safe ``base64`` encoding.
Used (for example) to generate session tokens.
Which generator to use? See
https://cryptography.io/en/latest/random-numbers/.
Do NOT use these methods:
.. code-block:: python
randbytes = M2Crypto.m2.rand_bytes(num_bytes) # NO!
randbytes = Crypto.Random.get_random_bytes(num_bytes) # NO!
Instead, do this:
.. code-block:: python
randbytes = os.urandom(num_bytes) # YES
"""
randbytes = os.urandom(num_bytes) # YES
return base64.urlsafe_b64encode(randbytes).decode('ascii') | python | def create_base64encoded_randomness(num_bytes: int) -> str:
"""
Create and return ``num_bytes`` of random data.
The result is encoded in a string with URL-safe ``base64`` encoding.
Used (for example) to generate session tokens.
Which generator to use? See
https://cryptography.io/en/latest/random-numbers/.
Do NOT use these methods:
.. code-block:: python
randbytes = M2Crypto.m2.rand_bytes(num_bytes) # NO!
randbytes = Crypto.Random.get_random_bytes(num_bytes) # NO!
Instead, do this:
.. code-block:: python
randbytes = os.urandom(num_bytes) # YES
"""
randbytes = os.urandom(num_bytes) # YES
return base64.urlsafe_b64encode(randbytes).decode('ascii') | [
"def",
"create_base64encoded_randomness",
"(",
"num_bytes",
":",
"int",
")",
"->",
"str",
":",
"randbytes",
"=",
"os",
".",
"urandom",
"(",
"num_bytes",
")",
"# YES",
"return",
"base64",
".",
"urlsafe_b64encode",
"(",
"randbytes",
")",
".",
"decode",
"(",
"'ascii'",
")"
] | Create and return ``num_bytes`` of random data.
The result is encoded in a string with URL-safe ``base64`` encoding.
Used (for example) to generate session tokens.
Which generator to use? See
https://cryptography.io/en/latest/random-numbers/.
Do NOT use these methods:
.. code-block:: python
randbytes = M2Crypto.m2.rand_bytes(num_bytes) # NO!
randbytes = Crypto.Random.get_random_bytes(num_bytes) # NO!
Instead, do this:
.. code-block:: python
randbytes = os.urandom(num_bytes) # YES | [
"Create",
"and",
"return",
"num_bytes",
"of",
"random",
"data",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/randomness.py#L33-L58 |
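Usage, plus the modern standard-library near-equivalent, `secrets.token_urlsafe` (Python 3.6+):

import secrets

token_a = create_base64encoded_randomness(16)  # 24 chars, '=' padded
token_b = secrets.token_urlsafe(16)            # same idea, unpadded
print(token_a, token_b)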
RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/table_identity.py | TableIdentity.table | def table(self) -> Table:
"""
Returns a SQLAlchemy :class:`Table` object. This is either the
:class:`Table` object that was used for initialization, or one that
was constructed from the ``tablename`` plus the ``metadata``.
"""
if self._table is not None:
return self._table
assert self._metadata, (
"Must specify metadata (in constructor or via set_metadata()/"
"set_metadata_if_none() before you can get a Table from a "
"tablename"
)
for table in self._metadata.tables.values(): # type: Table
if table.name == self._tablename:
return table
raise ValueError("No table named {!r} is present in the "
"metadata".format(self._tablename)) | python | def table(self) -> Table:
"""
Returns a SQLAlchemy :class:`Table` object. This is either the
:class:`Table` object that was used for initialization, or one that
was constructed from the ``tablename`` plus the ``metadata``.
"""
if self._table is not None:
return self._table
assert self._metadata, (
"Must specify metadata (in constructor or via set_metadata()/"
"set_metadata_if_none() before you can get a Table from a "
"tablename"
)
for table in self._metadata.tables.values(): # type: Table
if table.name == self._tablename:
return table
raise ValueError("No table named {!r} is present in the "
"metadata".format(self._tablename)) | [
"def",
"table",
"(",
"self",
")",
"->",
"Table",
":",
"if",
"self",
".",
"_table",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_table",
"assert",
"self",
".",
"_metadata",
",",
"(",
"\"Must specify metadata (in constructor or via set_metadata()/\"",
"\"set_metadata_if_none() before you can get a Table from a \"",
"\"tablename\"",
")",
"for",
"table",
"in",
"self",
".",
"_metadata",
".",
"tables",
".",
"values",
"(",
")",
":",
"# type: Table",
"if",
"table",
".",
"name",
"==",
"self",
".",
"_tablename",
":",
"return",
"table",
"raise",
"ValueError",
"(",
"\"No table named {!r} is present in the \"",
"\"metadata\"",
".",
"format",
"(",
"self",
".",
"_tablename",
")",
")"
] | Returns a SQLAlchemy :class:`Table` object. This is either the
:class:`Table` object that was used for initialization, or one that
was constructed from the ``tablename`` plus the ``metadata``. | [
"Returns",
"a",
"SQLAlchemy",
":",
"class",
":",
"Table",
"object",
".",
"This",
"is",
"either",
"the",
":",
"class",
":",
"Table",
"object",
"that",
"was",
"used",
"for",
"initialization",
"or",
"one",
"that",
"was",
"constructed",
"from",
"the",
"tablename",
"plus",
"the",
"metadata",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/table_identity.py#L76-L93 |
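A usage sketch; the constructor keywords `tablename=` and `metadata=` are assumptions inferred from the attributes the property reads, not confirmed by this record.

from sqlalchemy import Column, Integer, MetaData, Table

metadata = MetaData()
Table("patient", metadata, Column("id", Integer, primary_key=True))

ti = TableIdentity(tablename="patient", metadata=metadata)  # assumed kwargs
print(ti.table.name)    # 'patient'
print(ti.tablename)     # 'patient'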
RudolfCardinal/pythonlib | cardinal_pythonlib/sqlalchemy/table_identity.py | TableIdentity.tablename | def tablename(self) -> str:
"""
Returns the string name of the table.
"""
if self._tablename:
return self._tablename
return self.table.name | python | def tablename(self) -> str:
"""
Returns the string name of the table.
"""
if self._tablename:
return self._tablename
return self.table.name | [
"def",
"tablename",
"(",
"self",
")",
"->",
"str",
":",
"if",
"self",
".",
"_tablename",
":",
"return",
"self",
".",
"_tablename",
"return",
"self",
".",
"table",
".",
"name"
] | Returns the string name of the table. | [
"Returns",
"the",
"string",
"name",
"of",
"the",
"table",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/table_identity.py#L96-L102 |
RudolfCardinal/pythonlib | cardinal_pythonlib/openxml/pause_process_by_disk_space.py | is_running | def is_running(process_id: int) -> bool:
"""
Uses the Unix ``ps`` program to see if a process is running.
"""
pstr = str(process_id)
encoding = sys.getdefaultencoding()
s = subprocess.Popen(["ps", "-p", pstr], stdout=subprocess.PIPE)
for line in s.stdout:
strline = line.decode(encoding)
if pstr in strline:
return True
return False | python | def is_running(process_id: int) -> bool:
"""
Uses the Unix ``ps`` program to see if a process is running.
"""
pstr = str(process_id)
encoding = sys.getdefaultencoding()
s = subprocess.Popen(["ps", "-p", pstr], stdout=subprocess.PIPE)
for line in s.stdout:
strline = line.decode(encoding)
if pstr in strline:
return True
return False | [
"def",
"is_running",
"(",
"process_id",
":",
"int",
")",
"->",
"bool",
":",
"pstr",
"=",
"str",
"(",
"process_id",
")",
"encoding",
"=",
"sys",
".",
"getdefaultencoding",
"(",
")",
"s",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"\"ps\"",
",",
"\"-p\"",
",",
"pstr",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"for",
"line",
"in",
"s",
".",
"stdout",
":",
"strline",
"=",
"line",
".",
"decode",
"(",
"encoding",
")",
"if",
"pstr",
"in",
"strline",
":",
"return",
"True",
"return",
"False"
] | Uses the Unix ``ps`` program to see if a process is running. | [
"Uses",
"the",
"Unix",
"ps",
"program",
"to",
"see",
"if",
"a",
"process",
"is",
"running",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/openxml/pause_process_by_disk_space.py#L45-L56 |
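A Unix alternative that avoids spawning `ps`: sending signal 0 performs an existence/permission check only. This is a sketch, not a drop-in replacement from the library.

import os

def is_running_kill0(process_id):
    try:
        os.kill(process_id, 0)   # signal 0: check only, nothing delivered
    except ProcessLookupError:
        return False             # no such process
    except PermissionError:
        return True              # exists, but owned by another user
    return True

print(is_running_kill0(os.getpid()))   # True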
RudolfCardinal/pythonlib | cardinal_pythonlib/openxml/pause_process_by_disk_space.py | main | def main() -> None:
"""
Command-line handler for the ``pause_process_by_disk_space`` tool.
Use the ``--help`` option for help.
"""
parser = ArgumentParser(
description="Pauses and resumes a process by disk space; LINUX ONLY."
)
parser.add_argument(
"process_id", type=int,
help="Process ID."
)
parser.add_argument(
"--path", required=True,
help="Path to check free space for (e.g. '/')"
)
parser.add_argument(
"--pause_when_free_below", type=str, required=True,
help="Pause process when free disk space below this value (in bytes "
"or as e.g. '50G')"
)
parser.add_argument(
"--resume_when_free_above", type=str, required=True,
help="Resume process when free disk space above this value (in bytes "
"or as e.g. '70G')"
)
parser.add_argument(
"--check_every", type=int, required=True,
help="Check every n seconds (where this is n)"
)
parser.add_argument(
"--verbose", action="store_true",
help="Verbose output"
)
args = parser.parse_args()
main_only_quicksetup_rootlogger(
level=logging.DEBUG if args.verbose else logging.INFO)
minimum = human2bytes(args.pause_when_free_below)
maximum = human2bytes(args.resume_when_free_above)
path = args.path
process_id = args.process_id
period = args.check_every
pause_args = ["kill", "-STOP", str(process_id)]
resume_args = ["kill", "-CONT", str(process_id)]
assert minimum < maximum, "Minimum must be less than maximum"
log.info(
"Starting: controlling process {proc}; "
"checking disk space every {period} s; "
"will pause when free space on {path} is less than {minimum} and "
"resume when free space is at least {maximum}; "
"pause command will be {pause}; "
"resume command will be {resume}.".format(
proc=process_id,
period=period,
path=path,
minimum=sizeof_fmt(minimum),
maximum=sizeof_fmt(maximum),
pause=pause_args,
resume=resume_args,
))
log.debug("Presuming that the process is RUNNING to begin with.")
paused = False
while True:
if not is_running(process_id):
log.info("Process {} is no longer running", process_id)
sys.exit(0)
space = shutil.disk_usage(path).free
log.debug("Disk space on {} is {}", path, sizeof_fmt(space))
if space < minimum and not paused:
log.info("Disk space down to {}: pausing process {}",
sizeof_fmt(space), process_id)
subprocess.check_call(pause_args)
paused = True
elif space >= maximum and paused:
log.info("Disk space up to {}: resuming process {}",
sizeof_fmt(space), process_id)
subprocess.check_call(resume_args)
paused = False
log.debug("Sleeping for {} seconds...", period)
sleep(period) | python | def main() -> None:
"""
Command-line handler for the ``pause_process_by_disk_space`` tool.
Use the ``--help`` option for help.
"""
parser = ArgumentParser(
description="Pauses and resumes a process by disk space; LINUX ONLY."
)
parser.add_argument(
"process_id", type=int,
help="Process ID."
)
parser.add_argument(
"--path", required=True,
help="Path to check free space for (e.g. '/')"
)
parser.add_argument(
"--pause_when_free_below", type=str, required=True,
help="Pause process when free disk space below this value (in bytes "
"or as e.g. '50G')"
)
parser.add_argument(
"--resume_when_free_above", type=str, required=True,
help="Resume process when free disk space above this value (in bytes "
"or as e.g. '70G')"
)
parser.add_argument(
"--check_every", type=int, required=True,
help="Check every n seconds (where this is n)"
)
parser.add_argument(
"--verbose", action="store_true",
help="Verbose output"
)
args = parser.parse_args()
main_only_quicksetup_rootlogger(
level=logging.DEBUG if args.verbose else logging.INFO)
minimum = human2bytes(args.pause_when_free_below)
maximum = human2bytes(args.resume_when_free_above)
path = args.path
process_id = args.process_id
period = args.check_every
pause_args = ["kill", "-STOP", str(process_id)]
resume_args = ["kill", "-CONT", str(process_id)]
assert minimum < maximum, "Minimum must be less than maximum"
log.info(
"Starting: controlling process {proc}; "
"checking disk space every {period} s; "
"will pause when free space on {path} is less than {minimum} and "
"resume when free space is at least {maximum}; "
"pause command will be {pause}; "
"resume command will be {resume}.".format(
proc=process_id,
period=period,
path=path,
minimum=sizeof_fmt(minimum),
maximum=sizeof_fmt(maximum),
pause=pause_args,
resume=resume_args,
))
log.debug("Presuming that the process is RUNNING to begin with.")
paused = False
while True:
if not is_running(process_id):
log.info("Process {} is no longer running", process_id)
sys.exit(0)
space = shutil.disk_usage(path).free
log.debug("Disk space on {} is {}", path, sizeof_fmt(space))
if space < minimum and not paused:
log.info("Disk space down to {}: pausing process {}",
sizeof_fmt(space), process_id)
subprocess.check_call(pause_args)
paused = True
elif space >= maximum and paused:
log.info("Disk space up to {}: resuming process {}",
sizeof_fmt(space), process_id)
subprocess.check_call(resume_args)
paused = False
log.debug("Sleeping for {} seconds...", period)
sleep(period) | [
"def",
"main",
"(",
")",
"->",
"None",
":",
"parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"\"Pauses and resumes a process by disk space; LINUX ONLY.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"process_id\"",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Process ID.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--path\"",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Path to check free space for (e.g. '/')\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--pause_when_free_below\"",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Pause process when free disk space below this value (in bytes \"",
"\"or as e.g. '50G')\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--resume_when_free_above\"",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Resume process when free disk space above this value (in bytes \"",
"\"or as e.g. '70G')\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--check_every\"",
",",
"type",
"=",
"int",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Check every n seconds (where this is n)\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--verbose\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Verbose output\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"main_only_quicksetup_rootlogger",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
"if",
"args",
".",
"verbose",
"else",
"logging",
".",
"INFO",
")",
"minimum",
"=",
"human2bytes",
"(",
"args",
".",
"pause_when_free_below",
")",
"maximum",
"=",
"human2bytes",
"(",
"args",
".",
"resume_when_free_above",
")",
"path",
"=",
"args",
".",
"path",
"process_id",
"=",
"args",
".",
"process_id",
"period",
"=",
"args",
".",
"check_every",
"pause_args",
"=",
"[",
"\"kill\"",
",",
"\"-STOP\"",
",",
"str",
"(",
"process_id",
")",
"]",
"resume_args",
"=",
"[",
"\"kill\"",
",",
"\"-CONT\"",
",",
"str",
"(",
"process_id",
")",
"]",
"assert",
"minimum",
"<",
"maximum",
",",
"\"Minimum must be less than maximum\"",
"log",
".",
"info",
"(",
"\"Starting: controlling process {proc}; \"",
"\"checking disk space every {period} s; \"",
"\"will pause when free space on {path} is less than {minimum} and \"",
"\"resume when free space is at least {maximum}; \"",
"\"pause command will be {pause}; \"",
"\"resume command will be {resume}.\"",
".",
"format",
"(",
"proc",
"=",
"process_id",
",",
"period",
"=",
"period",
",",
"path",
"=",
"path",
",",
"minimum",
"=",
"sizeof_fmt",
"(",
"minimum",
")",
",",
"maximum",
"=",
"sizeof_fmt",
"(",
"maximum",
")",
",",
"pause",
"=",
"pause_args",
",",
"resume",
"=",
"resume_args",
",",
")",
")",
"log",
".",
"debug",
"(",
"\"Presuming that the process is RUNNING to begin with.\"",
")",
"paused",
"=",
"False",
"while",
"True",
":",
"if",
"not",
"is_running",
"(",
"process_id",
")",
":",
"log",
".",
"info",
"(",
"\"Process {} is no longer running\"",
",",
"process_id",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"space",
"=",
"shutil",
".",
"disk_usage",
"(",
"path",
")",
".",
"free",
"log",
".",
"debug",
"(",
"\"Disk space on {} is {}\"",
",",
"path",
",",
"sizeof_fmt",
"(",
"space",
")",
")",
"if",
"space",
"<",
"minimum",
"and",
"not",
"paused",
":",
"log",
".",
"info",
"(",
"\"Disk space down to {}: pausing process {}\"",
",",
"sizeof_fmt",
"(",
"space",
")",
",",
"process_id",
")",
"subprocess",
".",
"check_call",
"(",
"pause_args",
")",
"paused",
"=",
"True",
"elif",
"space",
">=",
"maximum",
"and",
"paused",
":",
"log",
".",
"info",
"(",
"\"Disk space up to {}: resuming process {}\"",
",",
"sizeof_fmt",
"(",
"space",
")",
",",
"process_id",
")",
"subprocess",
".",
"check_call",
"(",
"resume_args",
")",
"paused",
"=",
"False",
"log",
".",
"debug",
"(",
"\"Sleeping for {} seconds...\"",
",",
"period",
")",
"sleep",
"(",
"period",
")"
] | Command-line handler for the ``pause_process_by_disk_space`` tool.
Use the ``--help`` option for help. | [
"Command",
"-",
"line",
"handler",
"for",
"the",
"pause_process_by_disk_space",
"tool",
".",
"Use",
"the",
"--",
"help",
"option",
"for",
"help",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/openxml/pause_process_by_disk_space.py#L59-L142 |
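The pause/resume core of `main()` in isolation, sketched with `os.kill` and the `signal` module instead of shelling out to `kill`; the thresholds are illustrative defaults, not values from the record.

import os
import shutil
import signal
import time

def watch(pid, path="/", minimum=50 * 2**30, maximum=70 * 2**30, period=60):
    paused = False
    while True:
        free = shutil.disk_usage(path).free
        if free < minimum and not paused:
            os.kill(pid, signal.SIGSTOP)    # equivalent of kill -STOP
            paused = True
        elif free >= maximum and paused:
            os.kill(pid, signal.SIGCONT)    # equivalent of kill -CONT
            paused = False
        time.sleep(period)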
davenquinn/Attitude | attitude/display/plot/cov_types/regressions.py | bootstrap_noise | def bootstrap_noise(data, func, n=10000, std=1, symmetric=True):
"""
Bootstrap by adding noise
"""
boot_dist = []
arr = N.zeros(data.shape)
for i in range(n):
if symmetric:
# Noise on all three axes
arr = N.random.randn(*data.shape)*std
else:
# Only z-coordinate noise
arr[:,-1] = N.random.randn(data.shape[0])*std
boot_dist.append(func(data+arr))
return N.array(boot_dist) | python | def bootstrap_noise(data, func, n=10000, std=1, symmetric=True):
"""
Bootstrap by adding noise
"""
boot_dist = []
arr = N.zeros(data.shape)
for i in range(n):
if symmetric:
# Noise on all three axes
arr = N.random.randn(*data.shape)*std
else:
# Only z-coordinate noise
arr[:,-1] = N.random.randn(data.shape[0])*std
boot_dist.append(func(data+arr))
return N.array(boot_dist) | [
"def",
"bootstrap_noise",
"(",
"data",
",",
"func",
",",
"n",
"=",
"10000",
",",
"std",
"=",
"1",
",",
"symmetric",
"=",
"True",
")",
":",
"boot_dist",
"=",
"[",
"]",
"arr",
"=",
"N",
".",
"zeros",
"(",
"data",
".",
"shape",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"if",
"symmetric",
":",
"# Noise on all three axes",
"arr",
"=",
"N",
".",
"random",
".",
"randn",
"(",
"*",
"data",
".",
"shape",
")",
"*",
"std",
"else",
":",
"# Only z-coordinate noise",
"arr",
"[",
":",
",",
"-",
"1",
"]",
"=",
"N",
".",
"random",
".",
"randn",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
")",
"*",
"std",
"boot_dist",
".",
"append",
"(",
"func",
"(",
"data",
"+",
"arr",
")",
")",
"return",
"N",
".",
"array",
"(",
"boot_dist",
")"
] | Bootstrap by adding noise | [
"Bootstrap",
"by",
"adding",
"noise"
] | train | https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/display/plot/cov_types/regressions.py#L43-L57 |
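Usage sketch: bootstrap the mean of fifty noisy 3-D points. `func` can be any callable that reduces a (rows, 3) array; the lambda here is an illustrative choice.

import numpy as N

data = N.random.randn(50, 3)
dist = bootstrap_noise(data, lambda d: d.mean(axis=0), n=1000, std=0.5)
print(dist.shape)        # (1000, 3): one statistic per resample
print(dist.std(axis=0))  # spread of the bootstrapped means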
RudolfCardinal/pythonlib | cardinal_pythonlib/django/reprfunc.py | modelrepr | def modelrepr(instance) -> str:
"""
Default ``repr`` version of a Django model object, for debugging.
"""
elements = []
# noinspection PyProtectedMember
for f in instance._meta.get_fields():
# https://docs.djangoproject.com/en/2.0/ref/models/meta/
if f.auto_created:
continue
if f.is_relation and f.related_model is None:
continue
fieldname = f.name
try:
value = repr(getattr(instance, fieldname))
except ObjectDoesNotExist:
value = "<RelatedObjectDoesNotExist>"
elements.append("{}={}".format(fieldname, value))
return "<{} <{}>>".format(type(instance).__name__,
", ".join(elements)) | python | def modelrepr(instance) -> str:
"""
Default ``repr`` version of a Django model object, for debugging.
"""
elements = []
# noinspection PyProtectedMember
for f in instance._meta.get_fields():
# https://docs.djangoproject.com/en/2.0/ref/models/meta/
if f.auto_created:
continue
if f.is_relation and f.related_model is None:
continue
fieldname = f.name
try:
value = repr(getattr(instance, fieldname))
except ObjectDoesNotExist:
value = "<RelatedObjectDoesNotExist>"
elements.append("{}={}".format(fieldname, value))
return "<{} <{}>>".format(type(instance).__name__,
", ".join(elements)) | [
"def",
"modelrepr",
"(",
"instance",
")",
"->",
"str",
":",
"elements",
"=",
"[",
"]",
"# noinspection PyProtectedMember",
"for",
"f",
"in",
"instance",
".",
"_meta",
".",
"get_fields",
"(",
")",
":",
"# https://docs.djangoproject.com/en/2.0/ref/models/meta/",
"if",
"f",
".",
"auto_created",
":",
"continue",
"if",
"f",
".",
"is_relation",
"and",
"f",
".",
"related_model",
"is",
"None",
":",
"continue",
"fieldname",
"=",
"f",
".",
"name",
"try",
":",
"value",
"=",
"repr",
"(",
"getattr",
"(",
"instance",
",",
"fieldname",
")",
")",
"except",
"ObjectDoesNotExist",
":",
"value",
"=",
"\"<RelatedObjectDoesNotExist>\"",
"elements",
".",
"append",
"(",
"\"{}={}\"",
".",
"format",
"(",
"fieldname",
",",
"value",
")",
")",
"return",
"\"<{} <{}>>\"",
".",
"format",
"(",
"type",
"(",
"instance",
")",
".",
"__name__",
",",
"\", \"",
".",
"join",
"(",
"elements",
")",
")"
] | Default ``repr`` version of a Django model object, for debugging. | [
"Default",
"repr",
"version",
"of",
"a",
"Django",
"model",
"object",
"for",
"debugging",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/reprfunc.py#L32-L51 |
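A hypothetical usage inside a configured Django project; `Patient` is an illustrative model, not part of the library, and the output shape follows the `"<{} <{}>>"` format string above.

from django.db import models

class Patient(models.Model):
    name = models.CharField(max_length=50)

    def __repr__(self):
        return modelrepr(self)   # e.g. "<Patient <id=3, name='Smith'>>"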
RudolfCardinal/pythonlib | cardinal_pythonlib/pdf.py | assert_processor_available | def assert_processor_available(processor: str) -> None:
"""
Assert that a specific PDF processor is available.
Args:
processor: a PDF processor type from :class:`Processors`
Raises:
AssertionError: if bad ``processor``
RuntimeError: if requested processor is unavailable
"""
if processor not in [Processors.XHTML2PDF,
Processors.WEASYPRINT,
Processors.PDFKIT]:
raise AssertionError("rnc_pdf.set_pdf_processor: invalid PDF processor"
" specified")
if processor == Processors.WEASYPRINT and not weasyprint:
raise RuntimeError("rnc_pdf: Weasyprint requested, but not available")
if processor == Processors.XHTML2PDF and not xhtml2pdf:
raise RuntimeError("rnc_pdf: xhtml2pdf requested, but not available")
if processor == Processors.PDFKIT and not pdfkit:
raise RuntimeError("rnc_pdf: pdfkit requested, but not available") | python | def assert_processor_available(processor: str) -> None:
"""
Assert that a specific PDF processor is available.
Args:
processor: a PDF processor type from :class:`Processors`
Raises:
AssertionError: if bad ``processor``
RuntimeError: if requested processor is unavailable
"""
if processor not in [Processors.XHTML2PDF,
Processors.WEASYPRINT,
Processors.PDFKIT]:
raise AssertionError("rnc_pdf.set_pdf_processor: invalid PDF processor"
" specified")
if processor == Processors.WEASYPRINT and not weasyprint:
raise RuntimeError("rnc_pdf: Weasyprint requested, but not available")
if processor == Processors.XHTML2PDF and not xhtml2pdf:
raise RuntimeError("rnc_pdf: xhtml2pdf requested, but not available")
if processor == Processors.PDFKIT and not pdfkit:
raise RuntimeError("rnc_pdf: pdfkit requested, but not available") | [
"def",
"assert_processor_available",
"(",
"processor",
":",
"str",
")",
"->",
"None",
":",
"if",
"processor",
"not",
"in",
"[",
"Processors",
".",
"XHTML2PDF",
",",
"Processors",
".",
"WEASYPRINT",
",",
"Processors",
".",
"PDFKIT",
"]",
":",
"raise",
"AssertionError",
"(",
"\"rnc_pdf.set_pdf_processor: invalid PDF processor\"",
"\" specified\"",
")",
"if",
"processor",
"==",
"Processors",
".",
"WEASYPRINT",
"and",
"not",
"weasyprint",
":",
"raise",
"RuntimeError",
"(",
"\"rnc_pdf: Weasyprint requested, but not available\"",
")",
"if",
"processor",
"==",
"Processors",
".",
"XHTML2PDF",
"and",
"not",
"xhtml2pdf",
":",
"raise",
"RuntimeError",
"(",
"\"rnc_pdf: xhtml2pdf requested, but not available\"",
")",
"if",
"processor",
"==",
"Processors",
".",
"PDFKIT",
"and",
"not",
"pdfkit",
":",
"raise",
"RuntimeError",
"(",
"\"rnc_pdf: pdfkit requested, but not available\"",
")"
] | Assert that a specific PDF processor is available.
Args:
processor: a PDF processor type from :class:`Processors`
Raises:
AssertionError: if bad ``processor``
RuntimeError: if requested processor is unavailable | [
"Assert",
"that",
"a",
"specific",
"PDF",
"processor",
"is",
"available",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/pdf.py#L193-L214 |
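Usage sketch: fail fast before rendering, falling back if the preferred engine is missing; the fallback choice is illustrative.

try:
    assert_processor_available(Processors.WEASYPRINT)
    chosen = Processors.WEASYPRINT
except RuntimeError:
    chosen = Processors.XHTML2PDF   # illustrative fallback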
RudolfCardinal/pythonlib | cardinal_pythonlib/pdf.py | make_pdf_from_html | def make_pdf_from_html(
# Mandatory parameters:
on_disk: bool,
html: str,
# Disk options:
output_path: str = None,
# Shared options:
header_html: str = None,
footer_html: str = None,
wkhtmltopdf_filename: str = _WKHTMLTOPDF_FILENAME,
wkhtmltopdf_options: Dict[str, Any] = None,
file_encoding: str = "utf-8",
debug_options: bool = False,
debug_content: bool = False,
debug_wkhtmltopdf_args: bool = True,
fix_pdfkit_encoding_bug: bool = None,
processor: str = _DEFAULT_PROCESSOR) -> Union[bytes, bool]:
"""
Takes HTML and either returns a PDF in memory or makes one on disk.
For preference, uses ``wkhtmltopdf`` (with ``pdfkit``):
- faster than ``xhtml2pdf``
- tables not buggy like ``Weasyprint``
- however, doesn't support CSS Paged Media, so we have the
``header_html`` and ``footer_html`` options to allow you to pass
appropriate HTML content to serve as the header/footer (rather than
passing it within the main HTML).
Args:
on_disk: make file on disk (rather than returning it in memory)?
html: main HTML
output_path: if ``on_disk``, the output filename
header_html: optional page header, as HTML
footer_html: optional page footer, as HTML
wkhtmltopdf_filename: filename of the ``wkhtmltopdf`` executable
wkhtmltopdf_options: options for ``wkhtmltopdf``
file_encoding: encoding to use when writing the header/footer to disk
debug_options: log ``wkhtmltopdf`` config/options passed to ``pdfkit``?
debug_content: log the main/header/footer HTML?
debug_wkhtmltopdf_args: log the final command-line arguments
that will be used by ``pdfkit`` when it calls ``wkhtmltopdf``?
fix_pdfkit_encoding_bug: attempt to work around bug in e.g.
``pdfkit==0.5.0`` by encoding ``wkhtmltopdf_filename`` to UTF-8
before passing it to ``pdfkit``? If you pass ``None`` here, then
a default value is used, from
:func:`get_default_fix_pdfkit_encoding_bug`.
processor: a PDF processor type from :class:`Processors`
Returns:
the PDF binary as a ``bytes`` object
Raises:
AssertionError: if bad ``processor``
RuntimeError: if requested processor is unavailable
"""
wkhtmltopdf_options = wkhtmltopdf_options or {} # type: Dict[str, Any]
assert_processor_available(processor)
if debug_content:
log.debug("html: {}", html)
log.debug("header_html: {}", header_html)
log.debug("footer_html: {}", footer_html)
if fix_pdfkit_encoding_bug is None:
fix_pdfkit_encoding_bug = get_default_fix_pdfkit_encoding_bug()
if processor == Processors.XHTML2PDF:
if on_disk:
with open(output_path, mode='wb') as outfile:
# noinspection PyUnresolvedReferences
xhtml2pdf.document.pisaDocument(html, outfile)
return True
else:
memfile = io.BytesIO()
# noinspection PyUnresolvedReferences
xhtml2pdf.document.pisaDocument(html, memfile)
# ... returns a document, but we don't use it, so we don't store it
# to stop pychecker complaining
# http://xhtml2pdf.appspot.com/static/pisa-en.html
memfile.seek(0)
return memfile.read()
# http://stackoverflow.com/questions/3310584
elif processor == Processors.WEASYPRINT:
if on_disk:
return weasyprint.HTML(string=html).write_pdf(output_path)
else:
# http://ampad.de/blog/generating-pdfs-django/
return weasyprint.HTML(string=html).write_pdf()
elif processor == Processors.PDFKIT:
# Config:
if not wkhtmltopdf_filename:
config = None
else:
if fix_pdfkit_encoding_bug: # needs to be True for pdfkit==0.5.0
log.debug("Attempting to fix bug in pdfkit (e.g. version 0.5.0)"
" by encoding wkhtmltopdf_filename to UTF-8")
config = pdfkit.configuration(
wkhtmltopdf=wkhtmltopdf_filename.encode('utf-8'))
# the bug is that pdfkit.pdfkit.PDFKit.__init__ will attempt to
# decode the string in its configuration object;
# https://github.com/JazzCore/python-pdfkit/issues/32
else:
config = pdfkit.configuration(wkhtmltopdf=wkhtmltopdf_filename)
# Temporary files that a subprocess can read:
# http://stackoverflow.com/questions/15169101
# wkhtmltopdf requires its HTML files to have ".html" extensions:
# http://stackoverflow.com/questions/5776125
h_filename = None
f_filename = None
try:
if header_html:
h_fd, h_filename = tempfile.mkstemp(suffix='.html')
os.write(h_fd, header_html.encode(file_encoding))
os.close(h_fd)
wkhtmltopdf_options["header-html"] = h_filename
if footer_html:
f_fd, f_filename = tempfile.mkstemp(suffix='.html')
os.write(f_fd, footer_html.encode(file_encoding))
os.close(f_fd)
wkhtmltopdf_options["footer-html"] = f_filename
if debug_options:
log.debug("wkhtmltopdf config: {!r}", config)
log.debug("wkhtmltopdf_options: {}",
pformat(wkhtmltopdf_options))
kit = pdfkit.pdfkit.PDFKit(html, 'string', configuration=config,
options=wkhtmltopdf_options)
if on_disk:
path = output_path
else:
path = None
# With "path=None", the to_pdf() function directly returns
# stdout from a subprocess.Popen().communicate() call (see
# pdfkit.py). Since universal_newlines is not set, stdout will
# be bytes in Python 3.
if debug_wkhtmltopdf_args:
log.debug("Probable current user: {!r}", getpass.getuser())
log.debug("wkhtmltopdf arguments will be: {!r}",
kit.command(path=path))
return kit.to_pdf(path=path)
finally:
if h_filename:
os.remove(h_filename)
if f_filename:
os.remove(f_filename)
else:
raise AssertionError("Unknown PDF engine") | python | def make_pdf_from_html(
# Mandatory parameters:
on_disk: bool,
html: str,
# Disk options:
output_path: str = None,
# Shared options:
header_html: str = None,
footer_html: str = None,
wkhtmltopdf_filename: str = _WKHTMLTOPDF_FILENAME,
wkhtmltopdf_options: Dict[str, Any] = None,
file_encoding: str = "utf-8",
debug_options: bool = False,
debug_content: bool = False,
debug_wkhtmltopdf_args: bool = True,
fix_pdfkit_encoding_bug: bool = None,
processor: str = _DEFAULT_PROCESSOR) -> Union[bytes, bool]:
"""
Takes HTML and either returns a PDF in memory or makes one on disk.
For preference, uses ``wkhtmltopdf`` (with ``pdfkit``):
- faster than ``xhtml2pdf``
- tables are not buggy, unlike ``Weasyprint``
- however, doesn't support CSS Paged Media, so we have the
``header_html`` and ``footer_html`` options to allow you to pass
appropriate HTML content to serve as the header/footer (rather than
passing it within the main HTML).
Args:
on_disk: make file on disk (rather than returning it in memory)?
html: main HTML
output_path: if ``on_disk``, the output filename
header_html: optional page header, as HTML
footer_html: optional page footer, as HTML
wkhtmltopdf_filename: filename of the ``wkhtmltopdf`` executable
wkhtmltopdf_options: options for ``wkhtmltopdf``
file_encoding: encoding to use when writing the header/footer to disk
debug_options: log ``wkhtmltopdf`` config/options passed to ``pdfkit``?
debug_content: log the main/header/footer HTML?
debug_wkhtmltopdf_args: log the final command-line arguments
that will be used by ``pdfkit`` when it calls ``wkhtmltopdf``?
fix_pdfkit_encoding_bug: attempt to work around bug in e.g.
``pdfkit==0.5.0`` by encoding ``wkhtmltopdf_filename`` to UTF-8
before passing it to ``pdfkit``? If you pass ``None`` here, then
a default value is used, from
:func:`get_default_fix_pdfkit_encoding_bug`.
processor: a PDF processor type from :class:`Processors`
Returns:
the PDF binary as a ``bytes`` object
Raises:
AssertionError: if bad ``processor``
RuntimeError: if requested processor is unavailable
"""
wkhtmltopdf_options = wkhtmltopdf_options or {} # type: Dict[str, Any]
assert_processor_available(processor)
if debug_content:
log.debug("html: {}", html)
log.debug("header_html: {}", header_html)
log.debug("footer_html: {}", footer_html)
if fix_pdfkit_encoding_bug is None:
fix_pdfkit_encoding_bug = get_default_fix_pdfkit_encoding_bug()
if processor == Processors.XHTML2PDF:
if on_disk:
with open(output_path, mode='wb') as outfile:
# noinspection PyUnresolvedReferences
xhtml2pdf.document.pisaDocument(html, outfile)
return True
else:
memfile = io.BytesIO()
# noinspection PyUnresolvedReferences
xhtml2pdf.document.pisaDocument(html, memfile)
# ... returns a document, but we don't use it, so we don't store it
# to stop pychecker complaining
# http://xhtml2pdf.appspot.com/static/pisa-en.html
memfile.seek(0)
return memfile.read()
# http://stackoverflow.com/questions/3310584
elif processor == Processors.WEASYPRINT:
if on_disk:
return weasyprint.HTML(string=html).write_pdf(output_path)
else:
# http://ampad.de/blog/generating-pdfs-django/
return weasyprint.HTML(string=html).write_pdf()
elif processor == Processors.PDFKIT:
# Config:
if not wkhtmltopdf_filename:
config = None
else:
if fix_pdfkit_encoding_bug: # needs to be True for pdfkit==0.5.0
log.debug("Attempting to fix bug in pdfkit (e.g. version 0.5.0)"
" by encoding wkhtmltopdf_filename to UTF-8")
config = pdfkit.configuration(
wkhtmltopdf=wkhtmltopdf_filename.encode('utf-8'))
# the bug is that pdfkit.pdfkit.PDFKit.__init__ will attempt to
# decode the string in its configuration object;
# https://github.com/JazzCore/python-pdfkit/issues/32
else:
config = pdfkit.configuration(wkhtmltopdf=wkhtmltopdf_filename)
# Temporary files that a subprocess can read:
# http://stackoverflow.com/questions/15169101
# wkhtmltopdf requires its HTML files to have ".html" extensions:
# http://stackoverflow.com/questions/5776125
h_filename = None
f_filename = None
try:
if header_html:
h_fd, h_filename = tempfile.mkstemp(suffix='.html')
os.write(h_fd, header_html.encode(file_encoding))
os.close(h_fd)
wkhtmltopdf_options["header-html"] = h_filename
if footer_html:
f_fd, f_filename = tempfile.mkstemp(suffix='.html')
os.write(f_fd, footer_html.encode(file_encoding))
os.close(f_fd)
wkhtmltopdf_options["footer-html"] = f_filename
if debug_options:
log.debug("wkhtmltopdf config: {!r}", config)
log.debug("wkhtmltopdf_options: {}",
pformat(wkhtmltopdf_options))
kit = pdfkit.pdfkit.PDFKit(html, 'string', configuration=config,
options=wkhtmltopdf_options)
if on_disk:
path = output_path
else:
path = None
# With "path=None", the to_pdf() function directly returns
# stdout from a subprocess.Popen().communicate() call (see
# pdfkit.py). Since universal_newlines is not set, stdout will
# be bytes in Python 3.
if debug_wkhtmltopdf_args:
log.debug("Probable current user: {!r}", getpass.getuser())
log.debug("wkhtmltopdf arguments will be: {!r}",
kit.command(path=path))
return kit.to_pdf(path=path)
finally:
if h_filename:
os.remove(h_filename)
if f_filename:
os.remove(f_filename)
else:
raise AssertionError("Unknown PDF engine") | [
"def",
"make_pdf_from_html",
"(",
"# Mandatory parameters:",
"on_disk",
":",
"bool",
",",
"html",
":",
"str",
",",
"# Disk options:",
"output_path",
":",
"str",
"=",
"None",
",",
"# Shared options:",
"header_html",
":",
"str",
"=",
"None",
",",
"footer_html",
":",
"str",
"=",
"None",
",",
"wkhtmltopdf_filename",
":",
"str",
"=",
"_WKHTMLTOPDF_FILENAME",
",",
"wkhtmltopdf_options",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"None",
",",
"file_encoding",
":",
"str",
"=",
"\"utf-8\"",
",",
"debug_options",
":",
"bool",
"=",
"False",
",",
"debug_content",
":",
"bool",
"=",
"False",
",",
"debug_wkhtmltopdf_args",
":",
"bool",
"=",
"True",
",",
"fix_pdfkit_encoding_bug",
":",
"bool",
"=",
"None",
",",
"processor",
":",
"str",
"=",
"_DEFAULT_PROCESSOR",
")",
"->",
"Union",
"[",
"bytes",
",",
"bool",
"]",
":",
"wkhtmltopdf_options",
"=",
"wkhtmltopdf_options",
"or",
"{",
"}",
"# type: Dict[str, Any]",
"assert_processor_available",
"(",
"processor",
")",
"if",
"debug_content",
":",
"log",
".",
"debug",
"(",
"\"html: {}\"",
",",
"html",
")",
"log",
".",
"debug",
"(",
"\"header_html: {}\"",
",",
"header_html",
")",
"log",
".",
"debug",
"(",
"\"footer_html: {}\"",
",",
"footer_html",
")",
"if",
"fix_pdfkit_encoding_bug",
"is",
"None",
":",
"fix_pdfkit_encoding_bug",
"=",
"get_default_fix_pdfkit_encoding_bug",
"(",
")",
"if",
"processor",
"==",
"Processors",
".",
"XHTML2PDF",
":",
"if",
"on_disk",
":",
"with",
"open",
"(",
"output_path",
",",
"mode",
"=",
"'wb'",
")",
"as",
"outfile",
":",
"# noinspection PyUnresolvedReferences",
"xhtml2pdf",
".",
"document",
".",
"pisaDocument",
"(",
"html",
",",
"outfile",
")",
"return",
"True",
"else",
":",
"memfile",
"=",
"io",
".",
"BytesIO",
"(",
")",
"# noinspection PyUnresolvedReferences",
"xhtml2pdf",
".",
"document",
".",
"pisaDocument",
"(",
"html",
",",
"memfile",
")",
"# ... returns a document, but we don't use it, so we don't store it",
"# to stop pychecker complaining",
"# http://xhtml2pdf.appspot.com/static/pisa-en.html",
"memfile",
".",
"seek",
"(",
"0",
")",
"return",
"memfile",
".",
"read",
"(",
")",
"# http://stackoverflow.com/questions/3310584",
"elif",
"processor",
"==",
"Processors",
".",
"WEASYPRINT",
":",
"if",
"on_disk",
":",
"return",
"weasyprint",
".",
"HTML",
"(",
"string",
"=",
"html",
")",
".",
"write_pdf",
"(",
"output_path",
")",
"else",
":",
"# http://ampad.de/blog/generating-pdfs-django/",
"return",
"weasyprint",
".",
"HTML",
"(",
"string",
"=",
"html",
")",
".",
"write_pdf",
"(",
")",
"elif",
"processor",
"==",
"Processors",
".",
"PDFKIT",
":",
"# Config:",
"if",
"not",
"wkhtmltopdf_filename",
":",
"config",
"=",
"None",
"else",
":",
"if",
"fix_pdfkit_encoding_bug",
":",
"# needs to be True for pdfkit==0.5.0",
"log",
".",
"debug",
"(",
"\"Attempting to fix bug in pdfkit (e.g. version 0.5.0)\"",
"\" by encoding wkhtmltopdf_filename to UTF-8\"",
")",
"config",
"=",
"pdfkit",
".",
"configuration",
"(",
"wkhtmltopdf",
"=",
"wkhtmltopdf_filename",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"# the bug is that pdfkit.pdfkit.PDFKit.__init__ will attempt to",
"# decode the string in its configuration object;",
"# https://github.com/JazzCore/python-pdfkit/issues/32",
"else",
":",
"config",
"=",
"pdfkit",
".",
"configuration",
"(",
"wkhtmltopdf",
"=",
"wkhtmltopdf_filename",
")",
"# Temporary files that a subprocess can read:",
"# http://stackoverflow.com/questions/15169101",
"# wkhtmltopdf requires its HTML files to have \".html\" extensions:",
"# http://stackoverflow.com/questions/5776125",
"h_filename",
"=",
"None",
"f_filename",
"=",
"None",
"try",
":",
"if",
"header_html",
":",
"h_fd",
",",
"h_filename",
"=",
"tempfile",
".",
"mkstemp",
"(",
"suffix",
"=",
"'.html'",
")",
"os",
".",
"write",
"(",
"h_fd",
",",
"header_html",
".",
"encode",
"(",
"file_encoding",
")",
")",
"os",
".",
"close",
"(",
"h_fd",
")",
"wkhtmltopdf_options",
"[",
"\"header-html\"",
"]",
"=",
"h_filename",
"if",
"footer_html",
":",
"f_fd",
",",
"f_filename",
"=",
"tempfile",
".",
"mkstemp",
"(",
"suffix",
"=",
"'.html'",
")",
"os",
".",
"write",
"(",
"f_fd",
",",
"footer_html",
".",
"encode",
"(",
"file_encoding",
")",
")",
"os",
".",
"close",
"(",
"f_fd",
")",
"wkhtmltopdf_options",
"[",
"\"footer-html\"",
"]",
"=",
"f_filename",
"if",
"debug_options",
":",
"log",
".",
"debug",
"(",
"\"wkhtmltopdf config: {!r}\"",
",",
"config",
")",
"log",
".",
"debug",
"(",
"\"wkhtmltopdf_options: {}\"",
",",
"pformat",
"(",
"wkhtmltopdf_options",
")",
")",
"kit",
"=",
"pdfkit",
".",
"pdfkit",
".",
"PDFKit",
"(",
"html",
",",
"'string'",
",",
"configuration",
"=",
"config",
",",
"options",
"=",
"wkhtmltopdf_options",
")",
"if",
"on_disk",
":",
"path",
"=",
"output_path",
"else",
":",
"path",
"=",
"None",
"# With \"path=None\", the to_pdf() function directly returns",
"# stdout from a subprocess.Popen().communicate() call (see",
"# pdfkit.py). Since universal_newlines is not set, stdout will",
"# be bytes in Python 3.",
"if",
"debug_wkhtmltopdf_args",
":",
"log",
".",
"debug",
"(",
"\"Probable current user: {!r}\"",
",",
"getpass",
".",
"getuser",
"(",
")",
")",
"log",
".",
"debug",
"(",
"\"wkhtmltopdf arguments will be: {!r}\"",
",",
"kit",
".",
"command",
"(",
"path",
"=",
"path",
")",
")",
"return",
"kit",
".",
"to_pdf",
"(",
"path",
"=",
"path",
")",
"finally",
":",
"if",
"h_filename",
":",
"os",
".",
"remove",
"(",
"h_filename",
")",
"if",
"f_filename",
":",
"os",
".",
"remove",
"(",
"f_filename",
")",
"else",
":",
"raise",
"AssertionError",
"(",
"\"Unknown PDF engine\"",
")"
] | Takes HTML and either returns a PDF in memory or makes one on disk.
For preference, uses ``wkhtmltopdf`` (with ``pdfkit``):
- faster than ``xhtml2pdf``
- tables are not buggy, unlike ``Weasyprint``
- however, doesn't support CSS Paged Media, so we have the
``header_html`` and ``footer_html`` options to allow you to pass
appropriate HTML content to serve as the header/footer (rather than
passing it within the main HTML).
Args:
on_disk: make file on disk (rather than returning it in memory)?
html: main HTML
output_path: if ``on_disk``, the output filename
header_html: optional page header, as HTML
footer_html: optional page footer, as HTML
wkhtmltopdf_filename: filename of the ``wkhtmltopdf`` executable
wkhtmltopdf_options: options for ``wkhtmltopdf``
file_encoding: encoding to use when writing the header/footer to disk
debug_options: log ``wkhtmltopdf`` config/options passed to ``pdfkit``?
debug_content: log the main/header/footer HTML?
debug_wkhtmltopdf_args: log the final command-line arguments that
will be used by ``pdfkit`` when it calls ``wkhtmltopdf``?
fix_pdfkit_encoding_bug: attempt to work around bug in e.g.
``pdfkit==0.5.0`` by encoding ``wkhtmltopdf_filename`` to UTF-8
before passing it to ``pdfkit``? If you pass ``None`` here, then
a default value is used, from
:func:`get_default_fix_pdfkit_encoding_bug`.
processor: a PDF processor type from :class:`Processors`
Returns:
the PDF binary as a ``bytes`` object
Raises:
AssertionError: if bad ``processor``
RuntimeError: if requested processor is unavailable | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/pdf.py#L232-L402
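A minimal usage sketch for ``make_pdf_from_html`` above. This is illustrative, not from the source repository: it assumes ``wkhtmltopdf`` is installed and on the PATH, and the HTML strings are placeholders.

# Hedged usage sketch (assumes wkhtmltopdf is available).
from cardinal_pythonlib.pdf import make_pdf_from_html

pdf_bytes = make_pdf_from_html(
    on_disk=False,  # return the PDF as bytes rather than writing a file
    html="<h1>Report</h1><p>Main body.</p>",
    header_html="<div>Confidential</div>",  # rendered per page by wkhtmltopdf
    footer_html="<div>Page footer</div>",
)
with open("report.pdf", "wb") as f:
    f.write(pdf_bytes)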
RudolfCardinal/pythonlib | cardinal_pythonlib/pdf.py | get_pdf_from_html | def get_pdf_from_html(html: str,
header_html: str = None,
footer_html: str = None,
wkhtmltopdf_filename: str = _WKHTMLTOPDF_FILENAME,
wkhtmltopdf_options: Dict[str, Any] = None,
file_encoding: str = "utf-8",
debug_options: bool = False,
debug_content: bool = False,
debug_wkhtmltopdf_args: bool = True,
fix_pdfkit_encoding_bug: bool = None,
processor: str = _DEFAULT_PROCESSOR) -> bytes:
"""
Takes HTML and returns a PDF.
See the arguments to :func:`make_pdf_from_html` (except ``on_disk``).
Returns:
the PDF binary as a ``bytes`` object
"""
result = make_pdf_from_html(
on_disk=False,
html=html,
header_html=header_html,
footer_html=footer_html,
wkhtmltopdf_filename=wkhtmltopdf_filename,
wkhtmltopdf_options=wkhtmltopdf_options,
file_encoding=file_encoding,
debug_options=debug_options,
debug_content=debug_content,
debug_wkhtmltopdf_args=debug_wkhtmltopdf_args,
fix_pdfkit_encoding_bug=fix_pdfkit_encoding_bug,
processor=processor,
) # type: bytes
return result | python | Takes HTML and returns a PDF.
See the arguments to :func:`make_pdf_from_html` (except ``on_disk``).
Returns:
the PDF binary as a ``bytes`` object | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/pdf.py#L405-L438
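A short sketch of the wrapper above. The "quiet" key is a standard ``wkhtmltopdf`` flag, used here purely as an illustration of the pass-through options dictionary.

from cardinal_pythonlib.pdf import get_pdf_from_html

pdf = get_pdf_from_html(
    html="<p>Hello, PDF</p>",
    wkhtmltopdf_options={"quiet": ""},  # forwarded to wkhtmltopdf via pdfkit
)
assert isinstance(pdf, bytes)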
RudolfCardinal/pythonlib | cardinal_pythonlib/pdf.py | pdf_from_writer | def pdf_from_writer(writer: Union[PdfFileWriter, PdfFileMerger]) -> bytes:
"""
Extracts a PDF (as binary data) from a PyPDF2 writer or merger object.
"""
memfile = io.BytesIO()
writer.write(memfile)
memfile.seek(0)
return memfile.read() | python | Extracts a PDF (as binary data) from a PyPDF2 writer or merger object. | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/pdf.py#L505-L512
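A sketch of the extraction helper above, assuming the legacy PyPDF2 (pre-2.0) API that this module uses throughout:

from PyPDF2 import PdfFileWriter
from cardinal_pythonlib.pdf import pdf_from_writer

writer = PdfFileWriter()
writer.addBlankPage(width=595, height=842)  # roughly A4, in points
pdf_bytes = pdf_from_writer(writer)  # serialized via an in-memory BytesIO
assert pdf_bytes.startswith(b"%PDF")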
RudolfCardinal/pythonlib | cardinal_pythonlib/pdf.py | append_memory_pdf_to_writer | def append_memory_pdf_to_writer(input_pdf: bytes,
writer: PdfFileWriter,
start_recto: bool = True) -> None:
"""
Appends a PDF (as bytes in memory) to a PyPDF2 writer.
Args:
input_pdf: the PDF, as ``bytes``
writer: the writer
start_recto: start a new right-hand page?
"""
if not input_pdf:
return
if start_recto and writer.getNumPages() % 2 != 0:
writer.addBlankPage()
# ... suitable for double-sided printing
infile = io.BytesIO(input_pdf)
reader = PdfFileReader(infile)
for page_num in range(reader.numPages):
writer.addPage(reader.getPage(page_num)) | python | Appends a PDF (as bytes in memory) to a PyPDF2 writer.
Args:
input_pdf: the PDF, as ``bytes``
writer: the writer
start_recto: start a new right-hand page? | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/pdf.py#L541-L560
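An illustrative round trip for the appender above; ``blank_pdf`` is a hypothetical helper defined here only to manufacture test input.

from PyPDF2 import PdfFileWriter
from cardinal_pythonlib.pdf import append_memory_pdf_to_writer, pdf_from_writer

def blank_pdf() -> bytes:  # hypothetical helper, not part of the library
    w = PdfFileWriter()
    w.addBlankPage(width=595, height=842)
    return pdf_from_writer(w)

writer = PdfFileWriter()
append_memory_pdf_to_writer(blank_pdf(), writer)
# start_recto=True (the default) inserts a blank page first whenever the
# writer holds an odd number of pages, so each appended document starts on
# a right-hand page when printed double-sided.
append_memory_pdf_to_writer(blank_pdf(), writer, start_recto=True)
combined = pdf_from_writer(writer)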
RudolfCardinal/pythonlib | cardinal_pythonlib/pdf.py | append_pdf | def append_pdf(input_pdf: bytes, output_writer: PdfFileWriter):
"""
Appends a PDF to a pyPDF writer. Legacy interface.
"""
append_memory_pdf_to_writer(input_pdf=input_pdf,
writer=output_writer) | python | Appends a PDF to a pyPDF writer. Legacy interface. | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/pdf.py#L563-L568
RudolfCardinal/pythonlib | cardinal_pythonlib/pdf.py | get_concatenated_pdf_from_disk | def get_concatenated_pdf_from_disk(filenames: Iterable[str],
start_recto: bool = True) -> bytes:
"""
Concatenates PDFs from disk and returns them as an in-memory binary PDF.
Args:
filenames: iterable of filenames of PDFs to concatenate
start_recto: start a new right-hand page for each new PDF?
Returns:
concatenated PDF, as ``bytes``
"""
# http://stackoverflow.com/questions/17104926/pypdf-merging-multiple-pdf-files-into-one-pdf # noqa
# https://en.wikipedia.org/wiki/Recto_and_verso
if start_recto:
writer = PdfFileWriter()
for filename in filenames:
if filename:
if writer.getNumPages() % 2 != 0:
writer.addBlankPage()
writer.appendPagesFromReader(
PdfFileReader(open(filename, 'rb')))
return pdf_from_writer(writer)
else:
merger = PdfFileMerger()
for filename in filenames:
if filename:
merger.append(open(filename, 'rb'))
return pdf_from_writer(merger) | python | Concatenates PDFs from disk and returns them as an in-memory binary PDF.
Args:
filenames: iterable of filenames of PDFs to concatenate
start_recto: start a new right-hand page for each new PDF?
Returns:
concatenated PDF, as ``bytes`` | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/pdf.py#L597-L626
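Usage sketch for the disk-based concatenator (filenames are invented). A design point worth noting: the function opens each file with ``open(filename, 'rb')`` and never explicitly closes it; PyPDF2 reads pages lazily, so those handles stay open until garbage collection.

from cardinal_pythonlib.pdf import get_concatenated_pdf_from_disk

combined = get_concatenated_pdf_from_disk(
    ["chapter1.pdf", "chapter2.pdf"],  # hypothetical input files
    start_recto=True,  # each source PDF begins on a right-hand page
)
with open("book.pdf", "wb") as f:
    f.write(combined)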
RudolfCardinal/pythonlib | cardinal_pythonlib/pdf.py | get_concatenated_pdf_in_memory | def get_concatenated_pdf_in_memory(
pdf_plans: Iterable[PdfPlan],
start_recto: bool = True) -> bytes:
"""
Concatenates PDFs and returns them as an in-memory binary PDF.
Args:
pdf_plans: iterable of :class:`PdfPlan` objects
start_recto: start a new right-hand page for each new PDF?
Returns:
concatenated PDF, as ``bytes``
"""
writer = PdfFileWriter()
for pdfplan in pdf_plans:
pdfplan.add_to_writer(writer, start_recto=start_recto)
return pdf_from_writer(writer) | python | Concatenates PDFs and returns them as an in-memory binary PDF.
Args:
pdf_plans: iterable of :class:`PdfPlan` objects
start_recto: start a new right-hand page for each new PDF?
Returns:
concatenated PDF, as ``bytes`` | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/pdf.py#L629-L646
RudolfCardinal/pythonlib | cardinal_pythonlib/pdf.py | PdfPlan.add_to_writer | def add_to_writer(self,
writer: PdfFileWriter,
start_recto: bool = True) -> None:
"""
Add the PDF described by this class to a PDF writer.
Args:
writer: a :class:`PyPDF2.PdfFileWriter`
start_recto: start a new right-hand page?
"""
if self.is_html:
pdf = get_pdf_from_html(
html=self.html,
header_html=self.header_html,
footer_html=self.footer_html,
wkhtmltopdf_filename=self.wkhtmltopdf_filename,
wkhtmltopdf_options=self.wkhtmltopdf_options)
append_memory_pdf_to_writer(pdf, writer, start_recto=start_recto)
elif self.is_filename:
if start_recto and writer.getNumPages() % 2 != 0:
writer.addBlankPage()
writer.appendPagesFromReader(PdfFileReader(
open(self.filename, 'rb')))
else:
raise AssertionError("PdfPlan: shouldn't get here!") | python | def add_to_writer(self,
writer: PdfFileWriter,
start_recto: bool = True) -> None:
"""
Add the PDF described by this class to a PDF writer.
Args:
writer: a :class:`PyPDF2.PdfFileWriter`
start_recto: start a new right-hand page?
"""
if self.is_html:
pdf = get_pdf_from_html(
html=self.html,
header_html=self.header_html,
footer_html=self.footer_html,
wkhtmltopdf_filename=self.wkhtmltopdf_filename,
wkhtmltopdf_options=self.wkhtmltopdf_options)
append_memory_pdf_to_writer(pdf, writer, start_recto=start_recto)
elif self.is_filename:
if start_recto and writer.getNumPages() % 2 != 0:
writer.addBlankPage()
writer.appendPagesFromReader(PdfFileReader(
open(self.filename, 'rb')))
else:
raise AssertionError("PdfPlan: shouldn't get here!") | [
"def",
"add_to_writer",
"(",
"self",
",",
"writer",
":",
"PdfFileWriter",
",",
"start_recto",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"if",
"self",
".",
"is_html",
":",
"pdf",
"=",
"get_pdf_from_html",
"(",
"html",
"=",
"self",
".",
"html",
",",
"header_html",
"=",
"self",
".",
"header_html",
",",
"footer_html",
"=",
"self",
".",
"footer_html",
",",
"wkhtmltopdf_filename",
"=",
"self",
".",
"wkhtmltopdf_filename",
",",
"wkhtmltopdf_options",
"=",
"self",
".",
"wkhtmltopdf_options",
")",
"append_memory_pdf_to_writer",
"(",
"pdf",
",",
"writer",
",",
"start_recto",
"=",
"start_recto",
")",
"elif",
"self",
".",
"is_filename",
":",
"if",
"start_recto",
"and",
"writer",
".",
"getNumPages",
"(",
")",
"%",
"2",
"!=",
"0",
":",
"writer",
".",
"addBlankPage",
"(",
")",
"writer",
".",
"appendPagesFromReader",
"(",
"PdfFileReader",
"(",
"open",
"(",
"self",
".",
"filename",
",",
"'rb'",
")",
")",
")",
"else",
":",
"raise",
"AssertionError",
"(",
"\"PdfPlan: shouldn't get here!\"",
")"
] | Add the PDF described by this class to a PDF writer.
Args:
writer: a :class:`PyPDF2.PdfFileWriter`
start_recto: start a new right-hand page? | [
"Add",
"the",
"PDF",
"described",
"by",
"this",
"class",
"to",
"a",
"PDF",
"writer",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/pdf.py#L161-L186 |
RudolfCardinal/pythonlib | cardinal_pythonlib/formatting.py | trunc_if_integer | def trunc_if_integer(n: Any) -> Any:
"""
Truncates floats that are integers to their integer representation.
That is, converts ``1.0`` to ``1``, etc.
Otherwise, returns the starting value.
Will raise an exception if the input cannot be converted to ``int``.
"""
if n == int(n):
return int(n)
return n | python | Truncates floats that are integers to their integer representation.
That is, converts ``1.0`` to ``1``, etc.
Otherwise, returns the starting value.
Will raise an exception if the input cannot be converted to ``int``. | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/formatting.py#L36-L45
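The behaviour of ``trunc_if_integer``, in brief:

from cardinal_pythonlib.formatting import trunc_if_integer

assert trunc_if_integer(1.0) == 1    # float with an integral value -> int
assert trunc_if_integer(1.5) == 1.5  # non-integral value passed through
try:
    trunc_if_integer("x")            # int("x") raises ValueError
except ValueError:
    pass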
RudolfCardinal/pythonlib | cardinal_pythonlib/reprfunc.py | repr_result | def repr_result(obj: Any, elements: List[str],
with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:
"""
Internal function to make a :func:`repr`-style representation of an object.
Args:
obj: object to display
elements: list of object ``attribute=value`` strings
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
"""
if with_addr:
return "<{qualname}({elements}) at {addr}>".format(
qualname=obj.__class__.__qualname__,
elements=joiner.join(elements),
addr=hex(id(obj)))
else:
return "{qualname}({elements})".format(
qualname=obj.__class__.__qualname__,
elements=joiner.join(elements)) | python | Internal function to make a :func:`repr`-style representation of an object.
Args:
obj: object to display
elements: list of object ``attribute=value`` strings
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/reprfunc.py#L42-L65
RudolfCardinal/pythonlib | cardinal_pythonlib/reprfunc.py | auto_repr | def auto_repr(obj: Any, with_addr: bool = False,
sort_attrs: bool = True, joiner: str = COMMA_SPACE) -> str:
"""
Convenience function for :func:`__repr__`.
Works its way through the object's ``__dict__`` and reports accordingly.
Args:
obj: object to display
with_addr: include the memory address of ``obj``
sort_attrs: sort the attributes into alphabetical order?
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
"""
if sort_attrs:
keys = sorted(obj.__dict__.keys())
else:
keys = obj.__dict__.keys()
elements = ["{}={}".format(k, repr(getattr(obj, k))) for k in keys]
return repr_result(obj, elements, with_addr=with_addr, joiner=joiner) | python | Convenience function for :func:`__repr__`.
Works its way through the object's ``__dict__`` and reports accordingly.
Args:
obj: object to display
with_addr: include the memory address of ``obj``
sort_attrs: sort the attributes into alphabetical order?
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/reprfunc.py#L68-L88
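A small sketch of how ``auto_repr`` is typically wired into a class (``simple_repr``, below, is used the same way but with an explicit attribute list):

from cardinal_pythonlib.reprfunc import auto_repr

class Point:
    def __init__(self, x: int, y: int) -> None:
        self.x = x
        self.y = y

    def __repr__(self) -> str:
        return auto_repr(self)

print(repr(Point(1, 2)))  # Point(x=1, y=2)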
RudolfCardinal/pythonlib | cardinal_pythonlib/reprfunc.py | simple_repr | def simple_repr(obj: Any, attrnames: List[str],
with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:
"""
Convenience function for :func:`__repr__`.
Works its way through a list of attribute names, and creates a ``repr()``
representation assuming that parameters to the constructor have the same
names.
Args:
obj: object to display
attrnames: names of attributes to include
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
"""
elements = ["{}={}".format(name, repr(getattr(obj, name)))
for name in attrnames]
return repr_result(obj, elements, with_addr=with_addr, joiner=joiner) | python | Convenience function for :func:`__repr__`.
Works its way through a list of attribute names, and creates a ``repr()``
representation assuming that parameters to the constructor have the same
names.
Args:
obj: object to display
attrnames: names of attributes to include
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/reprfunc.py#L91-L111
RudolfCardinal/pythonlib | cardinal_pythonlib/reprfunc.py | mapped_repr | def mapped_repr(obj: Any, attributes: List[Tuple[str, str]],
with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:
"""
Convenience function for :func:`__repr__`.
Takes attribute names and corresponding initialization parameter names
(parameters to :func:`__init__`).
Args:
obj: object to display
attributes: list of tuples, each ``(attr_name, init_param_name)``.
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
"""
elements = ["{}={}".format(init_param_name, repr(getattr(obj, attr_name)))
for attr_name, init_param_name in attributes]
return repr_result(obj, elements, with_addr=with_addr, joiner=joiner) | python | Convenience function for :func:`__repr__`.
Takes attribute names and corresponding initialization parameter names
(parameters to :func:`__init__`).
Args:
obj: object to display
attributes: list of tuples, each ``(attr_name, init_param_name)``.
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/reprfunc.py#L114-L133
RudolfCardinal/pythonlib | cardinal_pythonlib/reprfunc.py | mapped_repr_stripping_underscores | def mapped_repr_stripping_underscores(
obj: Any, attrnames: List[str],
with_addr: bool = False, joiner: str = COMMA_SPACE) -> str:
"""
Convenience function for :func:`__repr__`.
Here, you pass a list of internal attributes, and it assumes that the
:func:`__init__` parameter names have the leading underscore dropped.
Args:
obj: object to display
attrnames: list of attribute names
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
"""
attributes = []
for attr_name in attrnames:
if attr_name.startswith('_'):
init_param_name = attr_name[1:]
else:
init_param_name = attr_name
attributes.append((attr_name, init_param_name))
return mapped_repr(obj, attributes, with_addr=with_addr, joiner=joiner) | python | Convenience function for :func:`__repr__`.
Here, you pass a list of internal attributes, and it assumes that the
:func:`__init__` parameter names have the leading underscore dropped.
Args:
obj: object to display
attrnames: list of attribute names
with_addr: include the memory address of ``obj``
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/reprfunc.py#L136-L161
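For the underscore-stripping variant above, a sketch with a class whose constructor parameters are stored as private attributes:

from cardinal_pythonlib.reprfunc import mapped_repr_stripping_underscores

class Account:
    def __init__(self, owner: str, balance: float) -> None:
        self._owner = owner
        self._balance = balance

    def __repr__(self) -> str:
        # "_owner" is reported as "owner=...", matching __init__'s parameters
        return mapped_repr_stripping_underscores(
            self, ["_owner", "_balance"])

print(repr(Account("alice", 10.0)))  # Account(owner='alice', balance=10.0)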
RudolfCardinal/pythonlib | cardinal_pythonlib/reprfunc.py | ordered_repr | def ordered_repr(obj: object, attrlist: Iterable[str],
joiner: str = COMMA_SPACE) -> str:
"""
Shortcut to make :func:`repr` functions ordered.
Define your :func:`__repr__` like this:
.. code-block:: python
def __repr__(self):
return ordered_repr(self, ["field1", "field2", "field3"])
Args:
obj: object to display
attrlist: iterable of attribute names
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation
"""
return "<{classname}({kvp})>".format(
classname=type(obj).__name__,
kvp=joiner.join("{}={}".format(a, repr(getattr(obj, a)))
for a in attrlist)
) | python | Shortcut to make :func:`repr` functions ordered.
Define your :func:`__repr__` like this:
.. code-block:: python
    def __repr__(self):
        return ordered_repr(self, ["field1", "field2", "field3"])
Args:
obj: object to display
attrlist: iterable of attribute names
joiner: string with which to join the elements
Returns:
string: :func:`repr`-style representation | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/reprfunc.py#L164-L187
RudolfCardinal/pythonlib | cardinal_pythonlib/reprfunc.py | auto_str | def auto_str(obj: Any, indent: int = 4, width: int = 80, depth: int = None,
compact: bool = False) -> str:
"""
Make a pretty :func:`str()` representation using :func:`pprint.pformat`
and the object's ``__dict__`` attribute.
Args:
obj: object to display
indent: see
https://docs.python.org/3/library/pprint.html#pprint.PrettyPrinter
width: as above
depth: as above
compact: as above
Returns:
string: :func:`str`-style representation
"""
return pprint.pformat(obj.__dict__, indent=indent, width=width,
depth=depth, compact=compact) | python | Make a pretty :func:`str()` representation using :func:`pprint.pformat`
and the object's ``__dict__`` attribute.
Args:
obj: object to display
indent: see
https://docs.python.org/3/library/pprint.html#pprint.PrettyPrinter
width: as above
depth: as above
compact: as above
Returns:
string: :func:`str`-style representation | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/reprfunc.py#L190-L209
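And a one-liner sketch for ``auto_str``:

from cardinal_pythonlib.reprfunc import auto_str

class Config:
    def __init__(self) -> None:
        self.host = "localhost"  # illustrative attributes
        self.port = 8080

print(auto_str(Config()))  # pretty-printed __dict__ via pprint.pformat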
RudolfCardinal/pythonlib | cardinal_pythonlib/psychiatry/treatment_resistant_depression.py | timedelta_days | def timedelta_days(days: int) -> timedelta64:
"""
Convert a duration in days to a NumPy ``timedelta64`` object.
"""
int_days = int(days)
if int_days != days:
raise ValueError("Fractional days passed to timedelta_days: "
"{!r}".format(days))
try:
# Do not pass e.g. 27.0; that will raise a ValueError.
# Must be an actual int:
return timedelta64(int_days, 'D')
except ValueError as e:
raise ValueError("Failure in timedelta_days; value was {!r}; original "
"error was: {}".format(days, e)) | python | def timedelta_days(days: int) -> timedelta64:
"""
Convert a duration in days to a NumPy ``timedelta64`` object.
"""
int_days = int(days)
if int_days != days:
raise ValueError("Fractional days passed to timedelta_days: "
"{!r}".format(days))
try:
# Do not pass e.g. 27.0; that will raise a ValueError.
# Must be an actual int:
return timedelta64(int_days, 'D')
except ValueError as e:
raise ValueError("Failure in timedelta_days; value was {!r}; original "
"error was: {}".format(days, e)) | [
"def",
"timedelta_days",
"(",
"days",
":",
"int",
")",
"->",
"timedelta64",
":",
"int_days",
"=",
"int",
"(",
"days",
")",
"if",
"int_days",
"!=",
"days",
":",
"raise",
"ValueError",
"(",
"\"Fractional days passed to timedelta_days: \"",
"\"{!r}\"",
".",
"format",
"(",
"days",
")",
")",
"try",
":",
"# Do not pass e.g. 27.0; that will raise a ValueError.",
"# Must be an actual int:",
"return",
"timedelta64",
"(",
"int_days",
",",
"'D'",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"\"Failure in timedelta_days; value was {!r}; original \"",
"\"error was: {}\"",
".",
"format",
"(",
"days",
",",
"e",
")",
")"
] | Convert a duration in days to a NumPy ``timedelta64`` object. | [
"Convert",
"a",
"duration",
"in",
"days",
"to",
"a",
"NumPy",
"timedelta64",
"object",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/psychiatry/treatment_resistant_depression.py#L121-L135 |
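A behaviour sketch for the converter above (NumPy required):

from numpy import timedelta64
from cardinal_pythonlib.psychiatry.treatment_resistant_depression import (
    timedelta_days,
)

assert timedelta_days(27) == timedelta64(27, 'D')
assert timedelta_days(27.0) == timedelta64(27, 'D')  # integral float accepted
try:
    timedelta_days(27.5)  # fractional days are rejected
except ValueError:
    pass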
RudolfCardinal/pythonlib | cardinal_pythonlib/psychiatry/treatment_resistant_depression.py | _get_generic_two_antidep_episodes_result | def _get_generic_two_antidep_episodes_result(
rowdata: Tuple[Any, ...] = None) -> DataFrame:
"""
Create a results row for this application.
"""
# Valid data types... see:
# - pandas.core.dtypes.common.pandas_dtype
# - https://pandas.pydata.org/pandas-docs/stable/timeseries.html
# - https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.datetime.html
data = [rowdata] if rowdata else []
return DataFrame(array(
data, # data
dtype=[ # column definitions:
(RCN_PATIENT_ID, DTYPE_STRING),
(RCN_DRUG_A_NAME, DTYPE_STRING),
(RCN_DRUG_A_FIRST_MENTION, DTYPE_DATE),
(RCN_DRUG_A_SECOND_MENTION, DTYPE_DATE),
(RCN_DRUG_B_NAME, DTYPE_STRING),
(RCN_DRUG_B_FIRST_MENTION, DTYPE_DATE),
(RCN_DRUG_B_SECOND_MENTION, DTYPE_DATE),
(RCN_EXPECT_RESPONSE_BY_DATE, DTYPE_DATE),
(RCN_END_OF_SYMPTOM_PERIOD, DTYPE_DATE),
]
)) | python | Create a one-row results DataFrame for this application (or an empty, typed DataFrame if ``rowdata`` is not supplied). | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/psychiatry/treatment_resistant_depression.py#L138-L161
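A brief sketch of the private helper above: called without ``rowdata``, it yields an empty frame that still carries the typed result columns (the ``RCN_*`` column-name constants are defined elsewhere in the module, outside this excerpt).

df = _get_generic_two_antidep_episodes_result()  # no rowdata supplied
assert len(df) == 0
print(list(df.columns))  # the RCN_* result column names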
RudolfCardinal/pythonlib | cardinal_pythonlib/psychiatry/treatment_resistant_depression.py | two_antidepressant_episodes_single_patient | def two_antidepressant_episodes_single_patient(
patient_id: str,
patient_drug_date_df: DataFrame,
patient_colname: str = DEFAULT_SOURCE_PATIENT_COLNAME,
drug_colname: str = DEFAULT_SOURCE_DRUG_COLNAME,
date_colname: str = DEFAULT_SOURCE_DATE_COLNAME,
course_length_days: int = DEFAULT_ANTIDEPRESSANT_COURSE_LENGTH_DAYS,
expect_response_by_days: int = DEFAULT_EXPECT_RESPONSE_BY_DAYS,
symptom_assessment_time_days: int = DEFAULT_SYMPTOM_ASSESSMENT_TIME_DAYS, # noqa
first_episode_only: bool = True) -> Optional[DataFrame]:
"""
Processes a single patient for ``two_antidepressant_episodes()`` (q.v.).
Implements the key algorithm.
"""
log.debug("Running two_antidepressant_episodes_single_patient() for "
"patient {!r}".format(patient_id))
all_episodes_for_patient = _get_blank_two_antidep_episodes_result()
flush_stdout_stderr()
# Get column details from source data
sourcecolnum_drug = patient_drug_date_df.columns.get_loc(drug_colname)
sourcecolnum_date = patient_drug_date_df.columns.get_loc(date_colname)
# -------------------------------------------------------------------------
# Get data for this patient
# -------------------------------------------------------------------------
# ... this is pretty quick (e.g. 4 ms for 1150 rows)
patient_mask = patient_drug_date_df[patient_colname].values == patient_id
tp = patient_drug_date_df[patient_mask] # type: DataFrame
# -------------------------------------------------------------------------
# Sort by date, then drug.
# ... arbitrary drug name order to make the output stable
# -------------------------------------------------------------------------
# ... this is about 2ms for small lists; probably not limiting
# ... seems slower if "inplace=True" is used.
tp = tp.sort_values(by=[date_colname, drug_colname], ascending=True)
# log.critical("{!r}", tp)
nrows_all = len(tp) # https://stackoverflow.com/questions/15943769/
if nrows_all < 4: # need A, A, B, B; so minimum #rows is 4
return None
end_date = tp.iat[nrows_all - 1, sourcecolnum_date] # date of last row
# -------------------------------------------------------------------------
# Get antidepressants, in the order they appear
# -------------------------------------------------------------------------
for first_b_rownum in range(1, nrows_all):
# ... skip row 0, because a drug can't be the second (B) drug
# unless there are two mentions of A on or before its first mention.
# ... modified 2019-04-01 from range(2, nrows_all) to
# range(1, nrows_all) because we now support A ending on the same
# day that B starts (so first possible "first B" row is the second
# row, row 1)
# ---------------------------------------------------------------------
# Check candidate B drug
# ---------------------------------------------------------------------
antidepressant_b_name = tp.iat[first_b_rownum, sourcecolnum_drug]
antidepressant_b_first_mention = tp.iat[first_b_rownum,
sourcecolnum_date]
earliest_possible_b_second_mention = (
antidepressant_b_first_mention +
timedelta_days(course_length_days - 1)
)
if earliest_possible_b_second_mention > end_date:
# Impossible for this to be a B course.
# Logically unnecessary test, but improves efficiency by skipping
# the slice operation that follows.
continue # try another B
b_second_mentions = tp[
(tp[drug_colname] == antidepressant_b_name) & # same drug
(tp[date_colname] >= earliest_possible_b_second_mention)
]
if len(b_second_mentions) == 0:
# No second mention of antidepressant_b_name
continue # try another B
# We only care about the earliest qualifying (completion-of-course)
# B second mention.
antidepressant_b_second_mention = b_second_mentions.iat[
0, sourcecolnum_date]
# ... this statement could be moved to after the A loop, but that
# would sacrifice clarity.
# ---------------------------------------------------------------------
# Now find preceding A drug
# ---------------------------------------------------------------------
preceding_other_antidepressants = tp[
(tp[drug_colname] != antidepressant_b_name) &
# ... A is a different drug to B
# Changed 2019-04-01: WAS THIS:
# (tp[date_colname] < antidepressant_b_first_mention)
# # ... A is mentioned before B starts
# CHANGED TO THIS:
(tp[date_colname] <= antidepressant_b_first_mention)
# ... A is mentioned before B starts or on the same day
]
nrows_a = len(preceding_other_antidepressants)
if nrows_a < 2: # need at least two mentions of A
# No candidates for A
continue # try another B
# preceding_other_antidepressants remains date-sorted (ascending)
found_valid_a = False
antidepressant_a_name = NaN
antidepressant_a_first_mention = NaN
antidepressant_a_second_mention = NaN
for first_a_rownum in range(nrows_a - 1):
# skip the last row, as that's impossible (the first mention of
# A cannot be the last row since there must be two mentions of A)
antidepressant_a_name = preceding_other_antidepressants.iat[
first_a_rownum, sourcecolnum_drug]
antidepressant_a_first_mention = preceding_other_antidepressants.iat[
first_a_rownum, sourcecolnum_date]
earliest_possible_a_second_mention = (
antidepressant_a_first_mention +
timedelta_days(course_length_days - 1)
)
# 2019-04-01: CHANGED FROM:
# if (earliest_possible_a_second_mention >=
# antidepressant_b_first_mention):
# TO:
if (earliest_possible_a_second_mention >
antidepressant_b_first_mention):
# Impossible to squeeze in the second A mention before B.
# Logically unnecessary test, but improves efficiency by
# skipping the slice operation that follows.
continue # try another A
a_second_mentions = tp[
(tp[drug_colname] == antidepressant_a_name) &
# ... same drug
(tp[date_colname] >= earliest_possible_a_second_mention)
# ... mentioned late enough after its first mention
]
if len(a_second_mentions) == 0:
# No second mention of antidepressant_a_name
continue # try another A
# We pick the first possible completion-of-course A second
# mention:
antidepressant_a_second_mention = a_second_mentions.iat[
0, sourcecolnum_date]
# Make sure B is not mentioned within the A range
mentions_of_b_within_a_range = tp[
(tp[drug_colname] == antidepressant_b_name) &
(tp[date_colname] >= antidepressant_a_first_mention) &
# 2019-04-01: CHANGED FROM:
# (tp[date_colname] <= antidepressant_a_second_mention)
# TO:
(tp[date_colname] < antidepressant_a_second_mention)
]
if len(mentions_of_b_within_a_range) > 0:
# Nope, chuck out this combination.
continue # try another A
found_valid_a = True
break
if not found_valid_a:
continue # try another B
# ---------------------------------------------------------------------
# OK; here we have found a combination that we like.
# Add it to the results.
# ---------------------------------------------------------------------
# https://stackoverflow.com/questions/19365513/how-to-add-an-extra-row-to-a-pandas-dataframe/19368360 # noqa
# http://pandas.pydata.org/pandas-docs/stable/indexing.html#setting-with-enlargement # noqa
expect_response_by_date = (
antidepressant_b_first_mention + timedelta_days(
expect_response_by_days)
)
end_of_symptom_period = (
antidepressant_b_first_mention + timedelta_days(
expect_response_by_days + symptom_assessment_time_days - 1)
)
result = _get_generic_two_antidep_episodes_result((
patient_id,
antidepressant_a_name,
antidepressant_a_first_mention,
antidepressant_a_second_mention,
antidepressant_b_name,
antidepressant_b_first_mention,
antidepressant_b_second_mention,
expect_response_by_date,
end_of_symptom_period
))
# We only care about the first episode per patient that matches, so:
if first_episode_only:
return result
else:
all_episodes_for_patient = all_episodes_for_patient.append(result)
if len(all_episodes_for_patient) == 0:
return None # nothing found
return all_episodes_for_patient | python
"patient_id",
"tp",
"=",
"patient_drug_date_df",
"[",
"patient_mask",
"]",
"# type: DataFrame",
"# -------------------------------------------------------------------------",
"# Sort by date, then drug.",
"# ... arbitrary drug name order to make the output stable",
"# -------------------------------------------------------------------------",
"# ... this is about 2ms for small lists; probably not limiting",
"# ... seems slower if \"inplace=True\" is used.",
"tp",
"=",
"tp",
".",
"sort_values",
"(",
"by",
"=",
"[",
"date_colname",
",",
"drug_colname",
"]",
",",
"ascending",
"=",
"True",
")",
"# log.critical(\"{!r}\", tp)",
"nrows_all",
"=",
"len",
"(",
"tp",
")",
"# https://stackoverflow.com/questions/15943769/",
"if",
"nrows_all",
"<",
"4",
":",
"# need A, A, B, B; so minimum #rows is 4",
"return",
"None",
"end_date",
"=",
"tp",
".",
"iat",
"[",
"nrows_all",
"-",
"1",
",",
"sourcecolnum_date",
"]",
"# date of last row",
"# -------------------------------------------------------------------------",
"# Get antidepressants, in the order they appear",
"# -------------------------------------------------------------------------",
"for",
"first_b_rownum",
"in",
"range",
"(",
"1",
",",
"nrows_all",
")",
":",
"# ... skip rows 0 and 1, because a drug can't be the second (B) drug",
"# unless there are two mentions of A beforehand.",
"# ... modified 2019-04-01 from range(2, nrows_all) to",
"# range(1, nrows_all) because we now support A ending on the same",
"# day that B starts (so first possible \"first B\" row is the second",
"# row, row 1)",
"# ---------------------------------------------------------------------",
"# Check candidate B drug",
"# ---------------------------------------------------------------------",
"antidepressant_b_name",
"=",
"tp",
".",
"iat",
"[",
"first_b_rownum",
",",
"sourcecolnum_drug",
"]",
"antidepressant_b_first_mention",
"=",
"tp",
".",
"iat",
"[",
"first_b_rownum",
",",
"sourcecolnum_date",
"]",
"earliest_possible_b_second_mention",
"=",
"(",
"antidepressant_b_first_mention",
"+",
"timedelta_days",
"(",
"course_length_days",
"-",
"1",
")",
")",
"if",
"earliest_possible_b_second_mention",
">",
"end_date",
":",
"# Impossible for this to be a B course.",
"# Logically unnecessary test, but improves efficiency by skipping",
"# the slice operation that follows.",
"continue",
"# try another B",
"b_second_mentions",
"=",
"tp",
"[",
"(",
"tp",
"[",
"drug_colname",
"]",
"==",
"antidepressant_b_name",
")",
"&",
"# same drug",
"(",
"tp",
"[",
"date_colname",
"]",
">=",
"earliest_possible_b_second_mention",
")",
"]",
"if",
"len",
"(",
"b_second_mentions",
")",
"==",
"0",
":",
"# No second mention of antidepressant_b_name",
"continue",
"# try another B",
"# We only care about the earliest qualifying (completion-of-course)",
"# B second mention.",
"antidepressant_b_second_mention",
"=",
"b_second_mentions",
".",
"iat",
"[",
"0",
",",
"sourcecolnum_date",
"]",
"# ... this statement could be moved to after the A loop, but that",
"# would sacrifice clarity.",
"# ---------------------------------------------------------------------",
"# Now find preceding A drug",
"# ---------------------------------------------------------------------",
"preceding_other_antidepressants",
"=",
"tp",
"[",
"(",
"tp",
"[",
"drug_colname",
"]",
"!=",
"antidepressant_b_name",
")",
"&",
"# ... A is a different drug to B",
"# Changed 2019-04-01: WAS THIS:",
"# (tp[date_colname] < antidepressant_b_first_mention)",
"# # ... A is mentioned before B starts",
"# CHANGED TO THIS:",
"(",
"tp",
"[",
"date_colname",
"]",
"<=",
"antidepressant_b_first_mention",
")",
"# ... A is mentioned before B starts or on the same day",
"]",
"nrows_a",
"=",
"len",
"(",
"preceding_other_antidepressants",
")",
"if",
"nrows_a",
"<",
"2",
":",
"# need at least two mentions of A",
"# No candidates for A",
"continue",
"# try another B",
"# preceding_other_antidepressants remains date-sorted (ascending)",
"found_valid_a",
"=",
"False",
"antidepressant_a_name",
"=",
"NaN",
"antidepressant_a_first_mention",
"=",
"NaN",
"antidepressant_a_second_mention",
"=",
"NaN",
"for",
"first_a_rownum",
"in",
"range",
"(",
"nrows_a",
"-",
"1",
")",
":",
"# skip the last row, as that's impossible (the first mention of",
"# A cannot be the last row since there must be two mentions of A)",
"antidepressant_a_name",
"=",
"tp",
".",
"iat",
"[",
"first_a_rownum",
",",
"sourcecolnum_drug",
"]",
"antidepressant_a_first_mention",
"=",
"tp",
".",
"iat",
"[",
"first_a_rownum",
",",
"sourcecolnum_date",
"]",
"earliest_possible_a_second_mention",
"=",
"(",
"antidepressant_a_first_mention",
"+",
"timedelta_days",
"(",
"course_length_days",
"-",
"1",
")",
")",
"# 2019-04-01: CHANGED FROM:",
"# if (earliest_possible_a_second_mention >=",
"# antidepressant_b_first_mention):",
"# TO:",
"if",
"(",
"earliest_possible_a_second_mention",
">",
"antidepressant_b_first_mention",
")",
":",
"# Impossible to squeeze in the second A mention before B.",
"# Logically unnecessary test, but improves efficiency by",
"# skipping the slice operation that follows.",
"continue",
"# try another A",
"a_second_mentions",
"=",
"tp",
"[",
"(",
"tp",
"[",
"drug_colname",
"]",
"==",
"antidepressant_a_name",
")",
"&",
"# ... same drug",
"(",
"tp",
"[",
"date_colname",
"]",
">=",
"earliest_possible_a_second_mention",
")",
"# ... mentioned late enough after its first mention",
"]",
"if",
"len",
"(",
"a_second_mentions",
")",
"==",
"0",
":",
"# No second mention of antidepressant_a_name",
"continue",
"# try another A",
"# We pick the first possible completion-of-course A second",
"# mention:",
"antidepressant_a_second_mention",
"=",
"a_second_mentions",
".",
"iat",
"[",
"0",
",",
"sourcecolnum_date",
"]",
"# Make sure B is not mentioned within the A range",
"mentions_of_b_within_a_range",
"=",
"tp",
"[",
"(",
"tp",
"[",
"drug_colname",
"]",
"==",
"antidepressant_b_name",
")",
"&",
"(",
"tp",
"[",
"date_colname",
"]",
">=",
"antidepressant_a_first_mention",
")",
"&",
"# 2019-04-01: CHANGED FROM:",
"# (tp[date_colname] <= antidepressant_a_second_mention)",
"# TO:",
"(",
"tp",
"[",
"date_colname",
"]",
"<",
"antidepressant_a_second_mention",
")",
"]",
"if",
"len",
"(",
"mentions_of_b_within_a_range",
")",
">",
"0",
":",
"# Nope, chuck out this combination.",
"continue",
"# try another A",
"found_valid_a",
"=",
"True",
"break",
"if",
"not",
"found_valid_a",
":",
"continue",
"# try another B",
"# ---------------------------------------------------------------------",
"# OK; here we have found a combination that we like.",
"# Add it to the results.",
"# ---------------------------------------------------------------------",
"# https://stackoverflow.com/questions/19365513/how-to-add-an-extra-row-to-a-pandas-dataframe/19368360 # noqa",
"# http://pandas.pydata.org/pandas-docs/stable/indexing.html#setting-with-enlargement # noqa",
"expect_response_by_date",
"=",
"(",
"antidepressant_b_first_mention",
"+",
"timedelta_days",
"(",
"expect_response_by_days",
")",
")",
"end_of_symptom_period",
"=",
"(",
"antidepressant_b_first_mention",
"+",
"timedelta_days",
"(",
"expect_response_by_days",
"+",
"symptom_assessment_time_days",
"-",
"1",
")",
")",
"result",
"=",
"_get_generic_two_antidep_episodes_result",
"(",
"(",
"patient_id",
",",
"antidepressant_a_name",
",",
"antidepressant_a_first_mention",
",",
"antidepressant_a_second_mention",
",",
"antidepressant_b_name",
",",
"antidepressant_b_first_mention",
",",
"antidepressant_b_second_mention",
",",
"expect_response_by_date",
",",
"end_of_symptom_period",
")",
")",
"# We only care about the first episode per patient that matches, so:",
"if",
"first_episode_only",
":",
"return",
"result",
"else",
":",
"all_episodes_for_patient",
"=",
"all_episodes_for_patient",
".",
"append",
"(",
"result",
")",
"if",
"len",
"(",
"all_episodes_for_patient",
")",
"==",
"0",
":",
"return",
"None",
"# nothing found",
"return",
"all_episodes_for_patient"
] | Processes a single patient for ``two_antidepressant_episodes()`` (q.v.).
Implements the key algorithm. | [
"Processes",
"a",
"single",
"patient",
"for",
"two_antidepressant_episodes",
"()",
"(",
"q",
".",
"v",
".",
")",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/psychiatry/treatment_resistant_depression.py#L171-L364 |
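A minimal usage sketch for the function above (not from the source repository; the import path is inferred from the row's URL, and the column names, dates, and thresholds are illustrative — in the real module the keyword arguments default to its DEFAULT_* constants):

```python
import pandas as pd
from cardinal_pythonlib.psychiatry.treatment_resistant_depression import (
    two_antidepressant_episodes_single_patient,
)

# Two completed courses: citalopram (A), then mirtazapine (B) starting the
# same day that citalopram's course ends -- permitted since the 2019-04-01
# change documented in the code above.
df = pd.DataFrame({
    "patient_id": ["p1"] * 4,
    "drug": ["citalopram", "citalopram", "mirtazapine", "mirtazapine"],
    "generic_date": pd.to_datetime(
        ["2018-01-01", "2018-03-01", "2018-03-01", "2018-05-01"]),
})
episodes = two_antidepressant_episodes_single_patient(
    patient_id="p1",
    patient_drug_date_df=df,
    patient_colname="patient_id",
    drug_colname="drug",
    date_colname="generic_date",
    course_length_days=28,
    expect_response_by_days=56,
    symptom_assessment_time_days=180,
    first_episode_only=True,
)
print(episodes)  # one-row DataFrame, or None if nothing qualified
```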
RudolfCardinal/pythonlib | cardinal_pythonlib/psychiatry/treatment_resistant_depression.py | two_antidepressant_episodes | def two_antidepressant_episodes(
patient_drug_date_df: DataFrame,
patient_colname: str = DEFAULT_SOURCE_PATIENT_COLNAME,
drug_colname: str = DEFAULT_SOURCE_DRUG_COLNAME,
date_colname: str = DEFAULT_SOURCE_DATE_COLNAME,
course_length_days: int = DEFAULT_ANTIDEPRESSANT_COURSE_LENGTH_DAYS,
expect_response_by_days: int = DEFAULT_EXPECT_RESPONSE_BY_DAYS,
symptom_assessment_time_days: int =
DEFAULT_SYMPTOM_ASSESSMENT_TIME_DAYS,
n_threads: int = DEFAULT_N_THREADS,
first_episode_only: bool = True) -> DataFrame:
"""
Takes a *pandas* ``DataFrame``, ``patient_drug_date_df`` (or, via
``reticulate``, an R ``data.frame`` or ``data.table``). This should contain
dated present-tense references to antidepressant drugs (only).
Returns a set of result rows as a ``DataFrame``.
"""
# Say hello
log.info("Running two_antidepressant_episodes...")
start = Pendulum.now()
# Work through each patient
patient_ids = sorted(list(set(patient_drug_date_df[patient_colname])))
n_patients = len(patient_ids)
log.info("Found {} patients", n_patients)
flush_stdout_stderr()
def _get_patient_result(_patient_id: str) -> Optional[DataFrame]:
return two_antidepressant_episodes_single_patient(
patient_id=_patient_id,
patient_drug_date_df=patient_drug_date_df,
patient_colname=patient_colname,
drug_colname=drug_colname,
date_colname=date_colname,
course_length_days=course_length_days,
expect_response_by_days=expect_response_by_days,
symptom_assessment_time_days=symptom_assessment_time_days,
first_episode_only=first_episode_only,
)
combined_result = _get_blank_two_antidep_episodes_result()
if n_threads > 1:
# Farm off the work to lots of threads:
log.info("Parallel processing method; {} threads", n_threads)
with ThreadPoolExecutor(max_workers=n_threads) as executor:
list_of_results_frames = executor.map(_get_patient_result,
patient_ids)
log.debug("Recombining results from parallel processing...")
for patient_result in list_of_results_frames:
if patient_result is not None:
combined_result = combined_result.append(patient_result)
log.debug("... recombined")
else:
log.info("Single-thread method")
for ptnum, patient_id in enumerate(patient_ids, start=1):
log.debug("Processing patient {} out of {}", ptnum, n_patients)
patient_result = _get_patient_result(patient_id)
if patient_result is not None:
combined_result = combined_result.append(patient_result)
# For consistent results order, even with parallel processing:
combined_result = combined_result.sort_values(
by=[RCN_PATIENT_ID], ascending=True)
# So that the DataFrame indices aren't all zero (largely cosmetic):
combined_result.reset_index(inplace=True, drop=True)
end = Pendulum.now()
duration = end - start
log.info("Took {} seconds for {} patients",
duration.total_seconds(), n_patients)
flush_stdout_stderr()
return combined_result | python | def two_antidepressant_episodes(
patient_drug_date_df: DataFrame,
patient_colname: str = DEFAULT_SOURCE_PATIENT_COLNAME,
drug_colname: str = DEFAULT_SOURCE_DRUG_COLNAME,
date_colname: str = DEFAULT_SOURCE_DATE_COLNAME,
course_length_days: int = DEFAULT_ANTIDEPRESSANT_COURSE_LENGTH_DAYS,
expect_response_by_days: int = DEFAULT_EXPECT_RESPONSE_BY_DAYS,
symptom_assessment_time_days: int =
DEFAULT_SYMPTOM_ASSESSMENT_TIME_DAYS,
n_threads: int = DEFAULT_N_THREADS,
first_episode_only: bool = True) -> DataFrame:
"""
Takes a *pandas* ``DataFrame``, ``patient_drug_date_df`` (or, via
``reticulate``, an R ``data.frame`` or ``data.table``). This should contain
dated present-tense references to antidepressant drugs (only).
Returns a set of result rows as a ``DataFrame``.
"""
# Say hello
log.info("Running two_antidepressant_episodes...")
start = Pendulum.now()
# Work through each patient
patient_ids = sorted(list(set(patient_drug_date_df[patient_colname])))
n_patients = len(patient_ids)
log.info("Found {} patients", n_patients)
flush_stdout_stderr()
def _get_patient_result(_patient_id: str) -> Optional[DataFrame]:
return two_antidepressant_episodes_single_patient(
patient_id=_patient_id,
patient_drug_date_df=patient_drug_date_df,
patient_colname=patient_colname,
drug_colname=drug_colname,
date_colname=date_colname,
course_length_days=course_length_days,
expect_response_by_days=expect_response_by_days,
symptom_assessment_time_days=symptom_assessment_time_days,
first_episode_only=first_episode_only,
)
combined_result = _get_blank_two_antidep_episodes_result()
if n_threads > 1:
# Farm off the work to lots of threads:
log.info("Parallel processing method; {} threads", n_threads)
with ThreadPoolExecutor(max_workers=n_threads) as executor:
list_of_results_frames = executor.map(_get_patient_result,
patient_ids)
log.debug("Recombining results from parallel processing...")
for patient_result in list_of_results_frames:
if patient_result is not None:
combined_result = combined_result.append(patient_result)
log.debug("... recombined")
else:
log.info("Single-thread method")
for ptnum, patient_id in enumerate(patient_ids, start=1):
log.debug("Processing patient {} out of {}", ptnum, n_patients)
patient_result = _get_patient_result(patient_id)
if patient_result is not None:
combined_result = combined_result.append(patient_result)
# For consistent results order, even with parallel processing:
combined_result = combined_result.sort_values(
by=[RCN_PATIENT_ID], ascending=True)
# So that the DataFrame indices aren't all zero (largely cosmetic):
combined_result.reset_index(inplace=True, drop=True)
end = Pendulum.now()
duration = end - start
log.info("Took {} seconds for {} patients",
duration.total_seconds(), n_patients)
flush_stdout_stderr()
return combined_result | [
"def",
"two_antidepressant_episodes",
"(",
"patient_drug_date_df",
":",
"DataFrame",
",",
"patient_colname",
":",
"str",
"=",
"DEFAULT_SOURCE_PATIENT_COLNAME",
",",
"drug_colname",
":",
"str",
"=",
"DEFAULT_SOURCE_DRUG_COLNAME",
",",
"date_colname",
":",
"str",
"=",
"DEFAULT_SOURCE_DATE_COLNAME",
",",
"course_length_days",
":",
"int",
"=",
"DEFAULT_ANTIDEPRESSANT_COURSE_LENGTH_DAYS",
",",
"expect_response_by_days",
":",
"int",
"=",
"DEFAULT_EXPECT_RESPONSE_BY_DAYS",
",",
"symptom_assessment_time_days",
":",
"int",
"=",
"DEFAULT_SYMPTOM_ASSESSMENT_TIME_DAYS",
",",
"n_threads",
":",
"int",
"=",
"DEFAULT_N_THREADS",
",",
"first_episode_only",
":",
"bool",
"=",
"True",
")",
"->",
"DataFrame",
":",
"# Say hello",
"log",
".",
"info",
"(",
"\"Running two_antidepressant_episodes...\"",
")",
"start",
"=",
"Pendulum",
".",
"now",
"(",
")",
"# Work through each patient",
"patient_ids",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"patient_drug_date_df",
"[",
"patient_colname",
"]",
")",
")",
")",
"n_patients",
"=",
"len",
"(",
"patient_ids",
")",
"log",
".",
"info",
"(",
"\"Found {} patients\"",
",",
"n_patients",
")",
"flush_stdout_stderr",
"(",
")",
"def",
"_get_patient_result",
"(",
"_patient_id",
":",
"str",
")",
"->",
"Optional",
"[",
"DataFrame",
"]",
":",
"return",
"two_antidepressant_episodes_single_patient",
"(",
"patient_id",
"=",
"_patient_id",
",",
"patient_drug_date_df",
"=",
"patient_drug_date_df",
",",
"patient_colname",
"=",
"patient_colname",
",",
"drug_colname",
"=",
"drug_colname",
",",
"date_colname",
"=",
"date_colname",
",",
"course_length_days",
"=",
"course_length_days",
",",
"expect_response_by_days",
"=",
"expect_response_by_days",
",",
"symptom_assessment_time_days",
"=",
"symptom_assessment_time_days",
",",
"first_episode_only",
"=",
"first_episode_only",
",",
")",
"combined_result",
"=",
"_get_blank_two_antidep_episodes_result",
"(",
")",
"if",
"n_threads",
">",
"1",
":",
"# Farm off the work to lots of threads:",
"log",
".",
"info",
"(",
"\"Parallel processing method; {} threads\"",
",",
"n_threads",
")",
"with",
"ThreadPoolExecutor",
"(",
"max_workers",
"=",
"n_threads",
")",
"as",
"executor",
":",
"list_of_results_frames",
"=",
"executor",
".",
"map",
"(",
"_get_patient_result",
",",
"patient_ids",
")",
"log",
".",
"debug",
"(",
"\"Recombining results from parallel processing...\"",
")",
"for",
"patient_result",
"in",
"list_of_results_frames",
":",
"if",
"patient_result",
"is",
"not",
"None",
":",
"combined_result",
"=",
"combined_result",
".",
"append",
"(",
"patient_result",
")",
"log",
".",
"debug",
"(",
"\"... recombined\"",
")",
"else",
":",
"log",
".",
"info",
"(",
"\"Single-thread method\"",
")",
"for",
"ptnum",
",",
"patient_id",
"in",
"enumerate",
"(",
"patient_ids",
",",
"start",
"=",
"1",
")",
":",
"log",
".",
"debug",
"(",
"\"Processing patient {} out of {}\"",
",",
"ptnum",
",",
"n_patients",
")",
"patient_result",
"=",
"_get_patient_result",
"(",
"patient_id",
")",
"if",
"patient_result",
"is",
"not",
"None",
":",
"combined_result",
"=",
"combined_result",
".",
"append",
"(",
"patient_result",
")",
"# For consistent results order, even with parallel processing:",
"combined_result",
"=",
"combined_result",
".",
"sort_values",
"(",
"by",
"=",
"[",
"RCN_PATIENT_ID",
"]",
",",
"ascending",
"=",
"True",
")",
"# So that the DataFrame indices aren't all zero (largely cosmetic):",
"combined_result",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
",",
"drop",
"=",
"True",
")",
"end",
"=",
"Pendulum",
".",
"now",
"(",
")",
"duration",
"=",
"end",
"-",
"start",
"log",
".",
"info",
"(",
"\"Took {} seconds for {} patients\"",
",",
"duration",
".",
"total_seconds",
"(",
")",
",",
"n_patients",
")",
"flush_stdout_stderr",
"(",
")",
"return",
"combined_result"
] | Takes a *pandas* ``DataFrame``, ``patient_drug_date_df`` (or, via
``reticulate``, an R ``data.frame`` or ``data.table``). This should contain
dated present-tense references to antidepressant drugs (only).
Returns a set of result rows as a ``DataFrame``. | [
"Takes",
"a",
"*",
"pandas",
"*",
"DataFrame",
"patient_drug_date_df",
"(",
"or",
"via",
"reticulate",
"an",
"R",
"data",
".",
"frame",
"or",
"data",
".",
"table",
")",
".",
"This",
"should",
"contain",
"dated",
"present",
"-",
"tense",
"references",
"to",
"antidepressant",
"drugs",
"(",
"only",
")",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/psychiatry/treatment_resistant_depression.py#L367-L441 |
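The driver's essential pattern — fan patients out over a thread pool, then recombine the per-patient frames — in a standalone sketch with a toy worker. Note that ``DataFrame.append()`` (used above) was removed in pandas 2.0; the sketch recombines with ``pd.concat()`` instead.

```python
from concurrent.futures import ThreadPoolExecutor
from typing import Optional

import pandas as pd


def toy_worker(patient_id: str) -> Optional[pd.DataFrame]:
    # Stand-in for two_antidepressant_episodes_single_patient(): returns
    # None for patients with no qualifying episodes.
    if patient_id == "p0":
        return None
    return pd.DataFrame({"patient_id": [patient_id], "n_episodes": [1]})


patient_ids = ["p%d" % i for i in range(10)]
frames = []
with ThreadPoolExecutor(max_workers=4) as executor:
    # executor.map preserves input order, so results stay deterministic.
    for result in executor.map(toy_worker, patient_ids):
        if result is not None:
            frames.append(result)
combined = pd.concat(frames, ignore_index=True)
combined = combined.sort_values(by="patient_id").reset_index(drop=True)
print(combined)
```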
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | does_unrtf_support_quiet | def does_unrtf_support_quiet() -> bool:
"""
The unrtf tool supports the '--quiet' argument from a version that I'm not
quite sure of, where ``0.19.3 < version <= 0.21.9``. We check against
0.21.9 here.
"""
required_unrtf_version = Version("0.21.9")
# ... probably: http://hg.savannah.gnu.org/hgweb/unrtf/
# ... 0.21.9 definitely supports --quiet
# ... 0.19.3 definitely doesn't support it
unrtf_filename = shutil.which('unrtf')
if not unrtf_filename:
return False
p = subprocess.Popen(["unrtf", "--version"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, err_bytes = p.communicate()
text = err_bytes.decode(sys.getdefaultencoding())
lines = text.split()
if len(lines) < 1:
return False
version_str = lines[0]
unrtf_version = Version(version_str)
return unrtf_version >= required_unrtf_version | python | def does_unrtf_support_quiet() -> bool:
"""
The unrtf tool supports the '--quiet' argument from a version that I'm not
quite sure of, where ``0.19.3 < version <= 0.21.9``. We check against
0.21.9 here.
"""
required_unrtf_version = Version("0.21.9")
# ... probably: http://hg.savannah.gnu.org/hgweb/unrtf/
# ... 0.21.9 definitely supports --quiet
# ... 0.19.3 definitely doesn't support it
unrtf_filename = shutil.which('unrtf')
if not unrtf_filename:
return False
p = subprocess.Popen(["unrtf", "--version"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, err_bytes = p.communicate()
text = err_bytes.decode(sys.getdefaultencoding())
lines = text.split()
if len(lines) < 1:
return False
version_str = lines[0]
unrtf_version = Version(version_str)
return unrtf_version >= required_unrtf_version | [
"def",
"does_unrtf_support_quiet",
"(",
")",
"->",
"bool",
":",
"required_unrtf_version",
"=",
"Version",
"(",
"\"0.21.9\"",
")",
"# ... probably: http://hg.savannah.gnu.org/hgweb/unrtf/",
"# ... 0.21.9 definitely supports --quiet",
"# ... 0.19.3 definitely doesn't support it",
"unrtf_filename",
"=",
"shutil",
".",
"which",
"(",
"'unrtf'",
")",
"if",
"not",
"unrtf_filename",
":",
"return",
"False",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"\"unrtf\"",
",",
"\"--version\"",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"_",
",",
"err_bytes",
"=",
"p",
".",
"communicate",
"(",
")",
"text",
"=",
"err_bytes",
".",
"decode",
"(",
"sys",
".",
"getdefaultencoding",
"(",
")",
")",
"lines",
"=",
"text",
".",
"split",
"(",
")",
"if",
"len",
"(",
"lines",
")",
"<",
"1",
":",
"return",
"False",
"version_str",
"=",
"lines",
"[",
"0",
"]",
"unrtf_version",
"=",
"Version",
"(",
"version_str",
")",
"return",
"unrtf_version",
">=",
"required_unrtf_version"
] | The unrtf tool supports the '--quiet' argument from a version that I'm not
quite sure of, where ``0.19.3 < version <= 0.21.9``. We check against
0.21.9 here. | [
"The",
"unrtf",
"tool",
"supports",
"the",
"--",
"quiet",
"argument",
"from",
"a",
"version",
"that",
"I",
"m",
"not",
"quite",
"sure",
"of",
"where",
"0",
".",
"19",
".",
"3",
"<",
"version",
"<",
"=",
"0",
".",
"21",
".",
"9",
".",
"We",
"check",
"against",
"0",
".",
"21",
".",
"9",
"here",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L186-L209 |
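A generic sketch of the same "probe a CLI tool's version" pattern. The source's ``Version`` class is imported elsewhere in the module; which library provides it is not visible here, so the sketch uses ``packaging.version`` as an assumption.

```python
import shutil
import subprocess

from packaging.version import InvalidVersion, Version


def tool_version_at_least(tool: str, minimum: str) -> bool:
    if shutil.which(tool) is None:
        return False  # tool not installed
    proc = subprocess.run([tool, "--version"],
                          capture_output=True, text=True)
    # Many tools (unrtf included, per the code above) print the version
    # to stderr rather than stdout; take the first whitespace token.
    output = (proc.stderr or proc.stdout).split()
    if not output:
        return False
    try:
        return Version(output[0]) >= Version(minimum)
    except InvalidVersion:
        return False


if __name__ == "__main__":
    print(tool_version_at_least("unrtf", "0.21.9"))
```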
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | get_filelikeobject | def get_filelikeobject(filename: str = None,
blob: bytes = None) -> BinaryIO:
"""
Open a file-like object.
Guard the use of this function with ``with``.
Args:
filename: for specifying via a filename
blob: for specifying via an in-memory ``bytes`` object
Returns:
a :class:`BinaryIO` object
"""
if not filename and not blob:
raise ValueError("no filename and no blob")
if filename and blob:
raise ValueError("specify either filename or blob")
if filename:
return open(filename, 'rb')
else:
return io.BytesIO(blob) | python | def get_filelikeobject(filename: str = None,
blob: bytes = None) -> BinaryIO:
"""
Open a file-like object.
Guard the use of this function with ``with``.
Args:
filename: for specifying via a filename
blob: for specifying via an in-memory ``bytes`` object
Returns:
a :class:`BinaryIO` object
"""
if not filename and not blob:
raise ValueError("no filename and no blob")
if filename and blob:
raise ValueError("specify either filename or blob")
if filename:
return open(filename, 'rb')
else:
return io.BytesIO(blob) | [
"def",
"get_filelikeobject",
"(",
"filename",
":",
"str",
"=",
"None",
",",
"blob",
":",
"bytes",
"=",
"None",
")",
"->",
"BinaryIO",
":",
"if",
"not",
"filename",
"and",
"not",
"blob",
":",
"raise",
"ValueError",
"(",
"\"no filename and no blob\"",
")",
"if",
"filename",
"and",
"blob",
":",
"raise",
"ValueError",
"(",
"\"specify either filename or blob\"",
")",
"if",
"filename",
":",
"return",
"open",
"(",
"filename",
",",
"'rb'",
")",
"else",
":",
"return",
"io",
".",
"BytesIO",
"(",
"blob",
")"
] | Open a file-like object.
Guard the use of this function with ``with``.
Args:
filename: for specifying via a filename
blob: for specifying via an in-memory ``bytes`` object
Returns:
a :class:`BinaryIO` object | [
"Open",
"a",
"file",
"-",
"like",
"object",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L272-L293 |
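A usage sketch for the two calling conventions (the disk path is illustrative):

```python
from cardinal_pythonlib.extract_text import get_filelikeobject

# From an in-memory BLOB:
with get_filelikeobject(blob=b"hello world") as fp:
    print(fp.read())  # b'hello world'

# From a file on disk:
# with get_filelikeobject(filename="/tmp/example.bin") as fp:
#     data = fp.read()
```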
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | get_file_contents | def get_file_contents(filename: str = None, blob: bytes = None) -> bytes:
"""
Returns the binary contents of a file, or of a BLOB.
"""
if not filename and not blob:
raise ValueError("no filename and no blob")
if filename and blob:
raise ValueError("specify either filename or blob")
if blob:
return blob
with open(filename, 'rb') as f:
return f.read() | python | def get_file_contents(filename: str = None, blob: bytes = None) -> bytes:
"""
Returns the binary contents of a file, or of a BLOB.
"""
if not filename and not blob:
raise ValueError("no filename and no blob")
if filename and blob:
raise ValueError("specify either filename or blob")
if blob:
return blob
with open(filename, 'rb') as f:
return f.read() | [
"def",
"get_file_contents",
"(",
"filename",
":",
"str",
"=",
"None",
",",
"blob",
":",
"bytes",
"=",
"None",
")",
"->",
"bytes",
":",
"if",
"not",
"filename",
"and",
"not",
"blob",
":",
"raise",
"ValueError",
"(",
"\"no filename and no blob\"",
")",
"if",
"filename",
"and",
"blob",
":",
"raise",
"ValueError",
"(",
"\"specify either filename or blob\"",
")",
"if",
"blob",
":",
"return",
"blob",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")"
] | Returns the binary contents of a file, or of a BLOB. | [
"Returns",
"the",
"binary",
"contents",
"of",
"a",
"file",
"or",
"of",
"a",
"BLOB",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L297-L308 |
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | get_chardet_encoding | def get_chardet_encoding(binary_contents: bytes) -> Optional[str]:
"""
Guess the character set encoding of the specified ``binary_contents``.
"""
if not binary_contents:
return None
if chardet is None or UniversalDetector is None:
log.warning("chardet not installed; limits detection of encodings")
return None
# METHOD 1
# http://chardet.readthedocs.io/en/latest/
#
# guess = chardet.detect(binary_contents)
#
# METHOD 2: faster with large files
# http://chardet.readthedocs.io/en/latest/
# http://stackoverflow.com/questions/13857856/split-byte-string-into-lines
# noinspection PyCallingNonCallable
detector = UniversalDetector()
for byte_line in binary_contents.split(b"\n"):
detector.feed(byte_line)
if detector.done:
break
guess = detector.result
# Handle result
if 'encoding' not in guess:
log.warning("Something went wrong within chardet; no encoding")
return None
return guess['encoding'] | python | def get_chardet_encoding(binary_contents: bytes) -> Optional[str]:
"""
Guess the character set encoding of the specified ``binary_contents``.
"""
if not binary_contents:
return None
if chardet is None or UniversalDetector is None:
log.warning("chardet not installed; limits detection of encodings")
return None
# METHOD 1
# http://chardet.readthedocs.io/en/latest/
#
# guess = chardet.detect(binary_contents)
#
# METHOD 2: faster with large files
# http://chardet.readthedocs.io/en/latest/
# http://stackoverflow.com/questions/13857856/split-byte-string-into-lines
# noinspection PyCallingNonCallable
detector = UniversalDetector()
for byte_line in binary_contents.split(b"\n"):
detector.feed(byte_line)
if detector.done:
break
guess = detector.result
# Handle result
if 'encoding' not in guess:
log.warning("Something went wrong within chardet; no encoding")
return None
return guess['encoding'] | [
"def",
"get_chardet_encoding",
"(",
"binary_contents",
":",
"bytes",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"if",
"not",
"binary_contents",
":",
"return",
"None",
"if",
"chardet",
"is",
"None",
"or",
"UniversalDetector",
"is",
"None",
":",
"log",
".",
"warning",
"(",
"\"chardet not installed; limits detection of encodings\"",
")",
"return",
"None",
"# METHOD 1",
"# http://chardet.readthedocs.io/en/latest/",
"#",
"# guess = chardet.detect(binary_contents)",
"#",
"# METHOD 2: faster with large files",
"# http://chardet.readthedocs.io/en/latest/",
"# http://stackoverflow.com/questions/13857856/split-byte-string-into-lines",
"# noinspection PyCallingNonCallable",
"detector",
"=",
"UniversalDetector",
"(",
")",
"for",
"byte_line",
"in",
"binary_contents",
".",
"split",
"(",
"b\"\\n\"",
")",
":",
"detector",
".",
"feed",
"(",
"byte_line",
")",
"if",
"detector",
".",
"done",
":",
"break",
"guess",
"=",
"detector",
".",
"result",
"# Handle result",
"if",
"'encoding'",
"not",
"in",
"guess",
":",
"log",
".",
"warning",
"(",
"\"Something went wrong within chardet; no encoding\"",
")",
"return",
"None",
"return",
"guess",
"[",
"'encoding'",
"]"
] | Guess the character set encoding of the specified ``binary_contents``. | [
"Guess",
"the",
"character",
"set",
"encoding",
"of",
"the",
"specified",
"binary_contents",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L311-L340 |
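A runnable sketch of the incremental loop (method 2 above). One difference worth flagging: chardet's documented usage calls ``detector.close()`` before reading ``.result``, which the function above omits; the sketch includes it.

```python
from typing import Optional

from chardet.universaldetector import UniversalDetector


def guess_encoding(binary_contents: bytes) -> Optional[str]:
    if not binary_contents:
        return None
    detector = UniversalDetector()
    for byte_line in binary_contents.split(b"\n"):
        detector.feed(byte_line)
        if detector.done:  # detector is confident; stop feeding early
            break
    detector.close()  # finalizes the guess, per chardet's documented API
    return detector.result.get("encoding")


print(guess_encoding("Le café est prêt à être servi.".encode("latin-1")))
```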
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | get_file_contents_text | def get_file_contents_text(
filename: str = None, blob: bytes = None,
config: TextProcessingConfig = _DEFAULT_CONFIG) -> str:
"""
Returns the string contents of a file, or of a BLOB.
"""
binary_contents = get_file_contents(filename=filename, blob=blob)
# 1. Try the encoding the user specified
if config.encoding:
try:
return binary_contents.decode(config.encoding)
except ValueError: # of which UnicodeDecodeError is more specific
# ... https://docs.python.org/3/library/codecs.html
pass
# 2. Try the system encoding
sysdef = sys.getdefaultencoding()
if sysdef != config.encoding:
try:
return binary_contents.decode(sysdef)
except ValueError:
pass
# 3. Try the best guess from chardet
# http://chardet.readthedocs.io/en/latest/usage.html
if chardet:
guess = chardet.detect(binary_contents)
if guess['encoding']:
return binary_contents.decode(guess['encoding'])
raise ValueError("Unknown encoding ({})".format(
"filename={}".format(repr(filename)) if filename else "blob")) | python | def get_file_contents_text(
filename: str = None, blob: bytes = None,
config: TextProcessingConfig = _DEFAULT_CONFIG) -> str:
"""
Returns the string contents of a file, or of a BLOB.
"""
binary_contents = get_file_contents(filename=filename, blob=blob)
# 1. Try the encoding the user specified
if config.encoding:
try:
return binary_contents.decode(config.encoding)
except ValueError: # of which UnicodeDecodeError is more specific
# ... https://docs.python.org/3/library/codecs.html
pass
# 2. Try the system encoding
sysdef = sys.getdefaultencoding()
if sysdef != config.encoding:
try:
return binary_contents.decode(sysdef)
except ValueError:
pass
# 3. Try the best guess from chardet
# http://chardet.readthedocs.io/en/latest/usage.html
if chardet:
guess = chardet.detect(binary_contents)
if guess['encoding']:
return binary_contents.decode(guess['encoding'])
raise ValueError("Unknown encoding ({})".format(
"filename={}".format(repr(filename)) if filename else "blob")) | [
"def",
"get_file_contents_text",
"(",
"filename",
":",
"str",
"=",
"None",
",",
"blob",
":",
"bytes",
"=",
"None",
",",
"config",
":",
"TextProcessingConfig",
"=",
"_DEFAULT_CONFIG",
")",
"->",
"str",
":",
"binary_contents",
"=",
"get_file_contents",
"(",
"filename",
"=",
"filename",
",",
"blob",
"=",
"blob",
")",
"# 1. Try the encoding the user specified",
"if",
"config",
".",
"encoding",
":",
"try",
":",
"return",
"binary_contents",
".",
"decode",
"(",
"config",
".",
"encoding",
")",
"except",
"ValueError",
":",
"# of which UnicodeDecodeError is more specific",
"# ... https://docs.python.org/3/library/codecs.html",
"pass",
"# 2. Try the system encoding",
"sysdef",
"=",
"sys",
".",
"getdefaultencoding",
"(",
")",
"if",
"sysdef",
"!=",
"config",
".",
"encoding",
":",
"try",
":",
"return",
"binary_contents",
".",
"decode",
"(",
"sysdef",
")",
"except",
"ValueError",
":",
"pass",
"# 3. Try the best guess from chardet",
"# http://chardet.readthedocs.io/en/latest/usage.html",
"if",
"chardet",
":",
"guess",
"=",
"chardet",
".",
"detect",
"(",
"binary_contents",
")",
"if",
"guess",
"[",
"'encoding'",
"]",
":",
"return",
"binary_contents",
".",
"decode",
"(",
"guess",
"[",
"'encoding'",
"]",
")",
"raise",
"ValueError",
"(",
"\"Unknown encoding ({})\"",
".",
"format",
"(",
"\"filename={}\"",
".",
"format",
"(",
"repr",
"(",
"filename",
")",
")",
"if",
"filename",
"else",
"\"blob\"",
")",
")"
] | Returns the string contents of a file, or of a BLOB. | [
"Returns",
"the",
"string",
"contents",
"of",
"a",
"file",
"or",
"of",
"a",
"BLOB",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L343-L371 |
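The three-step decode cascade above, extracted as a standalone helper (a sketch, not the module's API): try a preferred encoding, then the system default, then chardet's best guess.

```python
import sys
from typing import Optional

try:
    import chardet
except ImportError:
    chardet = None


def decode_with_fallback(data: bytes,
                         preferred: Optional[str] = None) -> str:
    candidates = []
    if preferred:
        candidates.append(preferred)
    sysdef = sys.getdefaultencoding()
    if sysdef not in candidates:
        candidates.append(sysdef)
    for enc in candidates:
        try:
            return data.decode(enc)
        except ValueError:  # UnicodeDecodeError is a subclass
            pass
    if chardet:
        guess = chardet.detect(data)
        if guess["encoding"]:
            return data.decode(guess["encoding"])
    raise ValueError("Unknown encoding")


# ASCII fails on the accented byte; the UTF-8 system default succeeds:
print(decode_with_fallback("naïve".encode("utf-8"), preferred="ascii"))
```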
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | get_cmd_output | def get_cmd_output(*args, encoding: str = SYS_ENCODING) -> str:
"""
Returns text output of a command.
"""
log.debug("get_cmd_output(): args = {!r}", args)
p = subprocess.Popen(args, stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
return stdout.decode(encoding, errors='ignore') | python | def get_cmd_output(*args, encoding: str = SYS_ENCODING) -> str:
"""
Returns text output of a command.
"""
log.debug("get_cmd_output(): args = {!r}", args)
p = subprocess.Popen(args, stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
return stdout.decode(encoding, errors='ignore') | [
"def",
"get_cmd_output",
"(",
"*",
"args",
",",
"encoding",
":",
"str",
"=",
"SYS_ENCODING",
")",
"->",
"str",
":",
"log",
".",
"debug",
"(",
"\"get_cmd_output(): args = {!r}\"",
",",
"args",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"stdout",
",",
"stderr",
"=",
"p",
".",
"communicate",
"(",
")",
"return",
"stdout",
".",
"decode",
"(",
"encoding",
",",
"errors",
"=",
"'ignore'",
")"
] | Returns text output of a command. | [
"Returns",
"text",
"output",
"of",
"a",
"command",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L374-L381 |
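A usage sketch (POSIX ``echo`` assumed). Only stdout is piped above, so anything the command writes to stderr goes straight to the console.

```python
from cardinal_pythonlib.extract_text import get_cmd_output

print(get_cmd_output("echo", "hello"), end="")  # "hello" plus echo's newline
```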
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | get_cmd_output_from_stdin | def get_cmd_output_from_stdin(stdint_content_binary: bytes,
*args, encoding: str = SYS_ENCODING) -> str:
"""
Returns text output of a command, passing binary data in via stdin.
"""
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = p.communicate(input=stdint_content_binary)
return stdout.decode(encoding, errors='ignore') | python | def get_cmd_output_from_stdin(stdint_content_binary: bytes,
*args, encoding: str = SYS_ENCODING) -> str:
"""
Returns text output of a command, passing binary data in via stdin.
"""
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = p.communicate(input=stdint_content_binary)
return stdout.decode(encoding, errors='ignore') | [
"def",
"get_cmd_output_from_stdin",
"(",
"stdint_content_binary",
":",
"bytes",
",",
"*",
"args",
",",
"encoding",
":",
"str",
"=",
"SYS_ENCODING",
")",
"->",
"str",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"stdout",
",",
"stderr",
"=",
"p",
".",
"communicate",
"(",
"input",
"=",
"stdint_content_binary",
")",
"return",
"stdout",
".",
"decode",
"(",
"encoding",
",",
"errors",
"=",
"'ignore'",
")"
] | Returns text output of a command, passing binary data in via stdin. | [
"Returns",
"text",
"output",
"of",
"a",
"command",
"passing",
"binary",
"data",
"in",
"via",
"stdin",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L384-L391 |
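A usage sketch (POSIX ``tr`` assumed), piping bytes through a filter:

```python
from cardinal_pythonlib.extract_text import get_cmd_output_from_stdin

print(get_cmd_output_from_stdin(b"hello\n", "tr", "a-z", "A-Z"))  # "HELLO"
```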
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | convert_pdf_to_txt | def convert_pdf_to_txt(filename: str = None, blob: bytes = None,
config: TextProcessingConfig = _DEFAULT_CONFIG) -> str:
"""
Converts a PDF file to text.
Pass either a filename or a binary object.
"""
pdftotext = tools['pdftotext']
if pdftotext: # External command method
if filename:
return get_cmd_output(pdftotext, filename, '-')
else:
return get_cmd_output_from_stdin(blob, pdftotext, '-', '-')
elif pdfminer: # Memory-hogging method
with get_filelikeobject(filename, blob) as fp:
rsrcmgr = pdfminer.pdfinterp.PDFResourceManager()
retstr = StringIO()
codec = ENCODING
laparams = pdfminer.layout.LAParams()
device = pdfminer.converter.TextConverter(
rsrcmgr, retstr, codec=codec, laparams=laparams)
interpreter = pdfminer.pdfinterp.PDFPageInterpreter(rsrcmgr,
device)
password = ""
maxpages = 0
caching = True
pagenos = set()
for page in pdfminer.pdfpage.PDFPage.get_pages(
fp, pagenos, maxpages=maxpages, password=password,
caching=caching, check_extractable=True):
interpreter.process_page(page)
text = retstr.getvalue().decode(ENCODING)
return text
else:
raise AssertionError("No PDF-reading tool available") | python | def convert_pdf_to_txt(filename: str = None, blob: bytes = None,
config: TextProcessingConfig = _DEFAULT_CONFIG) -> str:
"""
Converts a PDF file to text.
Pass either a filename or a binary object.
"""
pdftotext = tools['pdftotext']
if pdftotext: # External command method
if filename:
return get_cmd_output(pdftotext, filename, '-')
else:
return get_cmd_output_from_stdin(blob, pdftotext, '-', '-')
elif pdfminer: # Memory-hogging method
with get_filelikeobject(filename, blob) as fp:
rsrcmgr = pdfminer.pdfinterp.PDFResourceManager()
retstr = StringIO()
codec = ENCODING
laparams = pdfminer.layout.LAParams()
device = pdfminer.converter.TextConverter(
rsrcmgr, retstr, codec=codec, laparams=laparams)
interpreter = pdfminer.pdfinterp.PDFPageInterpreter(rsrcmgr,
device)
password = ""
maxpages = 0
caching = True
pagenos = set()
for page in pdfminer.pdfpage.PDFPage.get_pages(
fp, pagenos, maxpages=maxpages, password=password,
caching=caching, check_extractable=True):
interpreter.process_page(page)
text = retstr.getvalue().decode(ENCODING)
return text
else:
raise AssertionError("No PDF-reading tool available") | [
"def",
"convert_pdf_to_txt",
"(",
"filename",
":",
"str",
"=",
"None",
",",
"blob",
":",
"bytes",
"=",
"None",
",",
"config",
":",
"TextProcessingConfig",
"=",
"_DEFAULT_CONFIG",
")",
"->",
"str",
":",
"pdftotext",
"=",
"tools",
"[",
"'pdftotext'",
"]",
"if",
"pdftotext",
":",
"# External command method",
"if",
"filename",
":",
"return",
"get_cmd_output",
"(",
"pdftotext",
",",
"filename",
",",
"'-'",
")",
"else",
":",
"return",
"get_cmd_output_from_stdin",
"(",
"blob",
",",
"pdftotext",
",",
"'-'",
",",
"'-'",
")",
"elif",
"pdfminer",
":",
"# Memory-hogging method",
"with",
"get_filelikeobject",
"(",
"filename",
",",
"blob",
")",
"as",
"fp",
":",
"rsrcmgr",
"=",
"pdfminer",
".",
"pdfinterp",
".",
"PDFResourceManager",
"(",
")",
"retstr",
"=",
"StringIO",
"(",
")",
"codec",
"=",
"ENCODING",
"laparams",
"=",
"pdfminer",
".",
"layout",
".",
"LAParams",
"(",
")",
"device",
"=",
"pdfminer",
".",
"converter",
".",
"TextConverter",
"(",
"rsrcmgr",
",",
"retstr",
",",
"codec",
"=",
"codec",
",",
"laparams",
"=",
"laparams",
")",
"interpreter",
"=",
"pdfminer",
".",
"pdfinterp",
".",
"PDFPageInterpreter",
"(",
"rsrcmgr",
",",
"device",
")",
"password",
"=",
"\"\"",
"maxpages",
"=",
"0",
"caching",
"=",
"True",
"pagenos",
"=",
"set",
"(",
")",
"for",
"page",
"in",
"pdfminer",
".",
"pdfpage",
".",
"PDFPage",
".",
"get_pages",
"(",
"fp",
",",
"pagenos",
",",
"maxpages",
"=",
"maxpages",
",",
"password",
"=",
"password",
",",
"caching",
"=",
"caching",
",",
"check_extractable",
"=",
"True",
")",
":",
"interpreter",
".",
"process_page",
"(",
"page",
")",
"text",
"=",
"retstr",
".",
"getvalue",
"(",
")",
".",
"decode",
"(",
"ENCODING",
")",
"return",
"text",
"else",
":",
"raise",
"AssertionError",
"(",
"\"No PDF-reading tool available\"",
")"
] | Converts a PDF file to text.
Pass either a filename or a binary object. | [
"Converts",
"a",
"PDF",
"file",
"to",
"text",
".",
"Pass",
"either",
"a",
"filename",
"or",
"a",
"binary",
"object",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L399-L432 |
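Two backends appear above: the external ``pdftotext`` (preferred) and an in-process ``pdfminer`` fallback. The fallback's ``retstr.getvalue().decode(ENCODING)`` reflects the Python-2-era pdfminer API; with ``pdfminer.six``, ``TextConverter`` writes ``str``, so an ``io.StringIO`` output buffer needs no decode. A standalone sketch of the ``pdftotext`` route (poppler-utils must be installed; the path is illustrative):

```python
import shutil
import subprocess


def pdf_to_text(path: str) -> str:
    pdftotext = shutil.which("pdftotext")
    if not pdftotext:
        raise RuntimeError("pdftotext (poppler-utils) not found on PATH")
    # "-" as the output filename sends the extracted text to stdout.
    proc = subprocess.run([pdftotext, path, "-"], capture_output=True)
    return proc.stdout.decode("utf8", errors="ignore")


# print(pdf_to_text("/tmp/example.pdf"))
```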
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | availability_pdf | def availability_pdf() -> bool:
"""
Is a PDF-to-text tool available?
"""
pdftotext = tools['pdftotext']
if pdftotext:
return True
elif pdfminer:
log.warning("PDF conversion: pdftotext missing; "
"using pdfminer (less efficient)")
return True
else:
return False | python | def availability_pdf() -> bool:
"""
Is a PDF-to-text tool available?
"""
pdftotext = tools['pdftotext']
if pdftotext:
return True
elif pdfminer:
log.warning("PDF conversion: pdftotext missing; "
"using pdfminer (less efficient)")
return True
else:
return False | [
"def",
"availability_pdf",
"(",
")",
"->",
"bool",
":",
"pdftotext",
"=",
"tools",
"[",
"'pdftotext'",
"]",
"if",
"pdftotext",
":",
"return",
"True",
"elif",
"pdfminer",
":",
"log",
".",
"warning",
"(",
"\"PDF conversion: pdftotext missing; \"",
"\"using pdfminer (less efficient)\"",
")",
"return",
"True",
"else",
":",
"return",
"False"
] | Is a PDF-to-text tool available? | [
"Is",
"a",
"PDF",
"-",
"to",
"-",
"text",
"tool",
"available?"
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L435-L447 |
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | gen_xml_files_from_docx | def gen_xml_files_from_docx(fp: BinaryIO) -> Iterator[str]:
"""
Generate XML files (as strings) from a DOCX file.
Args:
fp: :class:`BinaryIO` object for reading the ``.DOCX`` file
Yields:
the string contents of each individual XML file within the ``.DOCX``
file
Raises:
zipfile.BadZipFile: if the zip is unreadable (encrypted?)
"""
try:
z = zipfile.ZipFile(fp)
filelist = z.namelist()
for filename in filelist:
if DOCX_HEADER_FILE_REGEX.match(filename):
yield z.read(filename).decode("utf8")
yield z.read(DOCX_DOC_FILE)
for filename in filelist:
if DOCX_FOOTER_FILE_REGEX.match(filename):
yield z.read(filename).decode("utf8")
except zipfile.BadZipFile:
# Clarify the error:
raise zipfile.BadZipFile("File is not a zip file - encrypted DOCX?") | python | def gen_xml_files_from_docx(fp: BinaryIO) -> Iterator[str]:
"""
Generate XML files (as strings) from a DOCX file.
Args:
fp: :class:`BinaryIO` object for reading the ``.DOCX`` file
Yields:
the string contents of each individual XML file within the ``.DOCX``
file
Raises:
zipfile.BadZipFile: if the zip is unreadable (encrypted?)
"""
try:
z = zipfile.ZipFile(fp)
filelist = z.namelist()
for filename in filelist:
if DOCX_HEADER_FILE_REGEX.match(filename):
yield z.read(filename).decode("utf8")
yield z.read(DOCX_DOC_FILE)
for filename in filelist:
if DOCX_FOOTER_FILE_REGEX.match(filename):
yield z.read(filename).decode("utf8")
except zipfile.BadZipFile:
# Clarify the error:
raise zipfile.BadZipFile("File is not a zip file - encrypted DOCX?") | [
"def",
"gen_xml_files_from_docx",
"(",
"fp",
":",
"BinaryIO",
")",
"->",
"Iterator",
"[",
"str",
"]",
":",
"try",
":",
"z",
"=",
"zipfile",
".",
"ZipFile",
"(",
"fp",
")",
"filelist",
"=",
"z",
".",
"namelist",
"(",
")",
"for",
"filename",
"in",
"filelist",
":",
"if",
"DOCX_HEADER_FILE_REGEX",
".",
"match",
"(",
"filename",
")",
":",
"yield",
"z",
".",
"read",
"(",
"filename",
")",
".",
"decode",
"(",
"\"utf8\"",
")",
"yield",
"z",
".",
"read",
"(",
"DOCX_DOC_FILE",
")",
"for",
"filename",
"in",
"filelist",
":",
"if",
"DOCX_FOOTER_FILE_REGEX",
".",
"match",
"(",
"filename",
")",
":",
"yield",
"z",
".",
"read",
"(",
"filename",
")",
".",
"decode",
"(",
"\"utf8\"",
")",
"except",
"zipfile",
".",
"BadZipFile",
":",
"# Clarify the error:",
"raise",
"zipfile",
".",
"BadZipFile",
"(",
"\"File is not a zip file - encrypted DOCX?\"",
")"
] | Generate XML files (as strings) from a DOCX file.
Args:
fp: :class:`BinaryIO` object for reading the ``.DOCX`` file
Yields:
the string contents of each individual XML file within the ``.DOCX``
file
Raises:
zipfile.BadZipFile: if the zip is unreadable (encrypted?) | [
"Generate",
"XML",
"files",
"(",
"as",
"strings",
")",
"from",
"a",
"DOCX",
"file",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L478-L505 |
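A usage sketch iterating the text-bearing XML parts in yield order (headers, the main document, footers). Note one quirk visible above: headers and footers are decoded to ``str``, but the main document part is yielded as raw ``bytes`` (``ElementTree.fromstring`` accepts either).

```python
from cardinal_pythonlib.extract_text import gen_xml_files_from_docx

with open("example.docx", "rb") as fp:  # path is illustrative
    for xml in gen_xml_files_from_docx(fp):
        print(type(xml).__name__, str(xml)[:60])
```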
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | docx_text_from_xml | def docx_text_from_xml(xml: str, config: TextProcessingConfig) -> str:
"""
Converts an XML tree of a DOCX file to string contents.
Args:
xml: raw XML text
config: :class:`TextProcessingConfig` control object
Returns:
contents as a string
"""
root = ElementTree.fromstring(xml)
return docx_text_from_xml_node(root, 0, config) | python | def docx_text_from_xml(xml: str, config: TextProcessingConfig) -> str:
"""
Converts an XML tree of a DOCX file to string contents.
Args:
xml: raw XML text
config: :class:`TextProcessingConfig` control object
Returns:
contents as a string
"""
root = ElementTree.fromstring(xml)
return docx_text_from_xml_node(root, 0, config) | [
"def",
"docx_text_from_xml",
"(",
"xml",
":",
"str",
",",
"config",
":",
"TextProcessingConfig",
")",
"->",
"str",
":",
"root",
"=",
"ElementTree",
".",
"fromstring",
"(",
"xml",
")",
"return",
"docx_text_from_xml_node",
"(",
"root",
",",
"0",
",",
"config",
")"
] | Converts an XML tree of a DOCX file to string contents.
Args:
xml: raw XML text
config: :class:`TextProcessingConfig` control object
Returns:
contents as a string | [
"Converts",
"an",
"XML",
"tree",
"of",
"a",
"DOCX",
"file",
"to",
"string",
"contents",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L508-L520 |
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | docx_text_from_xml_node | def docx_text_from_xml_node(node: ElementTree.Element,
level: int,
config: TextProcessingConfig) -> str:
"""
Returns text from an XML node within a DOCX file.
Args:
node: an XML node
level: current level in XML hierarchy (used for recursion; start level
is 0)
config: :class:`TextProcessingConfig` control object
Returns:
contents as a string
"""
text = ''
# log.debug("Level {}, tag {}", level, node.tag)
if node.tag == DOCX_TEXT:
text += node.text or ''
elif node.tag == DOCX_TAB:
text += '\t'
elif node.tag in DOCX_NEWLINES:
text += '\n'
elif node.tag == DOCX_NEWPARA:
text += '\n\n'
if node.tag == DOCX_TABLE:
text += '\n\n' + docx_table_from_xml_node(node, level, config)
else:
for child in node:
text += docx_text_from_xml_node(child, level + 1, config)
return text | python | def docx_text_from_xml_node(node: ElementTree.Element,
level: int,
config: TextProcessingConfig) -> str:
"""
Returns text from an XML node within a DOCX file.
Args:
node: an XML node
level: current level in XML hierarchy (used for recursion; start level
is 0)
config: :class:`TextProcessingConfig` control object
Returns:
contents as a string
"""
text = ''
# log.debug("Level {}, tag {}", level, node.tag)
if node.tag == DOCX_TEXT:
text += node.text or ''
elif node.tag == DOCX_TAB:
text += '\t'
elif node.tag in DOCX_NEWLINES:
text += '\n'
elif node.tag == DOCX_NEWPARA:
text += '\n\n'
if node.tag == DOCX_TABLE:
text += '\n\n' + docx_table_from_xml_node(node, level, config)
else:
for child in node:
text += docx_text_from_xml_node(child, level + 1, config)
return text | [
"def",
"docx_text_from_xml_node",
"(",
"node",
":",
"ElementTree",
".",
"Element",
",",
"level",
":",
"int",
",",
"config",
":",
"TextProcessingConfig",
")",
"->",
"str",
":",
"text",
"=",
"''",
"# log.debug(\"Level {}, tag {}\", level, node.tag)",
"if",
"node",
".",
"tag",
"==",
"DOCX_TEXT",
":",
"text",
"+=",
"node",
".",
"text",
"or",
"''",
"elif",
"node",
".",
"tag",
"==",
"DOCX_TAB",
":",
"text",
"+=",
"'\\t'",
"elif",
"node",
".",
"tag",
"in",
"DOCX_NEWLINES",
":",
"text",
"+=",
"'\\n'",
"elif",
"node",
".",
"tag",
"==",
"DOCX_NEWPARA",
":",
"text",
"+=",
"'\\n\\n'",
"if",
"node",
".",
"tag",
"==",
"DOCX_TABLE",
":",
"text",
"+=",
"'\\n\\n'",
"+",
"docx_table_from_xml_node",
"(",
"node",
",",
"level",
",",
"config",
")",
"else",
":",
"for",
"child",
"in",
"node",
":",
"text",
"+=",
"docx_text_from_xml_node",
"(",
"child",
",",
"level",
"+",
"1",
",",
"config",
")",
"return",
"text"
] | Returns text from an XML node within a DOCX file.
Args:
node: an XML node
level: current level in XML hierarchy (used for recursion; start level
is 0)
config: :class:`TextProcessingConfig` control object
Returns:
contents as a string | [
"Returns",
"text",
"from",
"an",
"XML",
"node",
"within",
"a",
"DOCX",
"file",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L523-L555 |
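The DOCX_* constants above are namespace-qualified WordprocessingML tag names. A minimal standalone version of the same recursion, with the standard namespace written out (this sketch covers only text, tab, break, and paragraph nodes; the table branch is omitted):

```python
from xml.etree import ElementTree

W = "{http://schemas.openxmlformats.org/wordprocessingml/2006/main}"


def text_from_node(node: ElementTree.Element) -> str:
    text = ""
    if node.tag == W + "t":        # literal text run
        text += node.text or ""
    elif node.tag == W + "tab":    # tab character
        text += "\t"
    elif node.tag == W + "br":     # line break
        text += "\n"
    elif node.tag == W + "p":      # paragraph: break, then its content
        text += "\n\n"
    for child in node:             # depth-first walk, as above
        text += text_from_node(child)
    return text


xml = (
    '<w:document xmlns:w="http://schemas.openxmlformats.org/'
    'wordprocessingml/2006/main"><w:body><w:p><w:r>'
    "<w:t>Hello</w:t><w:tab/><w:t>world</w:t>"
    "</w:r></w:p></w:body></w:document>"
)
print(repr(text_from_node(ElementTree.fromstring(xml))))
# '\n\nHello\tworld'
```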
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | docx_table_from_xml_node | def docx_table_from_xml_node(table_node: ElementTree.Element,
level: int,
config: TextProcessingConfig) -> str:
"""
Converts an XML node representing a DOCX table into a textual
representation.
Args:
table_node: XML node
level: current level in XML hierarchy (used for recursion; start level
is 0)
config: :class:`TextProcessingConfig` control object
Returns:
string representation
"""
table = CustomDocxTable()
for row_node in table_node:
if row_node.tag != DOCX_TABLE_ROW:
continue
table.new_row()
for cell_node in row_node:
if cell_node.tag != DOCX_TABLE_CELL:
continue
table.new_cell()
for para_node in cell_node:
text = docx_text_from_xml_node(para_node, level, config)
if text:
table.add_paragraph(text)
return docx_process_table(table, config) | python | def docx_table_from_xml_node(table_node: ElementTree.Element,
level: int,
config: TextProcessingConfig) -> str:
"""
Converts an XML node representing a DOCX table into a textual
representation.
Args:
table_node: XML node
level: current level in XML hierarchy (used for recursion; start level
is 0)
config: :class:`TextProcessingConfig` control object
Returns:
string representation
"""
table = CustomDocxTable()
for row_node in table_node:
if row_node.tag != DOCX_TABLE_ROW:
continue
table.new_row()
for cell_node in row_node:
if cell_node.tag != DOCX_TABLE_CELL:
continue
table.new_cell()
for para_node in cell_node:
text = docx_text_from_xml_node(para_node, level, config)
if text:
table.add_paragraph(text)
return docx_process_table(table, config) | [
"def",
"docx_table_from_xml_node",
"(",
"table_node",
":",
"ElementTree",
".",
"Element",
",",
"level",
":",
"int",
",",
"config",
":",
"TextProcessingConfig",
")",
"->",
"str",
":",
"table",
"=",
"CustomDocxTable",
"(",
")",
"for",
"row_node",
"in",
"table_node",
":",
"if",
"row_node",
".",
"tag",
"!=",
"DOCX_TABLE_ROW",
":",
"continue",
"table",
".",
"new_row",
"(",
")",
"for",
"cell_node",
"in",
"row_node",
":",
"if",
"cell_node",
".",
"tag",
"!=",
"DOCX_TABLE_CELL",
":",
"continue",
"table",
".",
"new_cell",
"(",
")",
"for",
"para_node",
"in",
"cell_node",
":",
"text",
"=",
"docx_text_from_xml_node",
"(",
"para_node",
",",
"level",
",",
"config",
")",
"if",
"text",
":",
"table",
".",
"add_paragraph",
"(",
"text",
")",
"return",
"docx_process_table",
"(",
"table",
",",
"config",
")"
] | Converts an XML node representing a DOCX table into a textual
representation.
Args:
table_node: XML node
level: current level in XML hierarchy (used for recursion; start level
is 0)
config: :class:`TextProcessingConfig` control object
Returns:
string representation | [
"Converts",
"an",
"XML",
"node",
"representing",
"a",
"DOCX",
"table",
"into",
"a",
"textual",
"representation",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L630-L660 |
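The table structure built here is rendered by ``docx_process_table()`` (next row). A standalone sketch of its plain-mode rendering with ``prettytable`` — no vertical rules, each cell offset into its own column on its own table row — mirroring the options the source passes:

```python
import prettytable

rows = [["AAA", "BBB"], ["CCC", "DDD"]]
ncols = max(len(r) for r in rows)
pt = prettytable.PrettyTable(
    field_names=[str(i) for i in range(ncols)],
    header=False,
    border=True,
    hrules=prettytable.ALL,
    vrules=prettytable.NONE,  # plain mode: no vertical lines
)
pt.align = "l"
pt.valign = "t"
for row in rows:
    for i, cell in enumerate(row):
        # Offset each cell into its own column, padding the rest.
        pt.add_row([""] * i + [cell] + [""] * (ncols - i - 1))
print(pt.get_string())
```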
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | docx_process_simple_text | def docx_process_simple_text(text: str, width: int) -> str:
"""
Word-wraps text.
Args:
text: text to process
width: width to word-wrap to (or 0 to skip word wrapping)
Returns:
wrapped text
"""
if width:
return '\n'.join(textwrap.wrap(text, width=width))
else:
return text | python | def docx_process_simple_text(text: str, width: int) -> str:
"""
Word-wraps text.
Args:
text: text to process
width: width to word-wrap to (or 0 to skip word wrapping)
Returns:
wrapped text
"""
if width:
return '\n'.join(textwrap.wrap(text, width=width))
else:
return text | [
"def",
"docx_process_simple_text",
"(",
"text",
":",
"str",
",",
"width",
":",
"int",
")",
"->",
"str",
":",
"if",
"width",
":",
"return",
"'\\n'",
".",
"join",
"(",
"textwrap",
".",
"wrap",
"(",
"text",
",",
"width",
"=",
"width",
")",
")",
"else",
":",
"return",
"text"
] | Word-wraps text.
Args:
text: text to process
width: width to word-wrap to (or 0 to skip word wrapping)
Returns:
wrapped text | [
"Word",
"-",
"wraps",
"text",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L667-L681 |
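For completeness, the word-wrap helper in action:

```python
import textwrap

text = "The quick brown fox jumps over the lazy dog."
print("\n".join(textwrap.wrap(text, width=16)))
# The quick brown
# fox jumps over
# the lazy dog.
```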
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | docx_process_table | def docx_process_table(table: DOCX_TABLE_TYPE,
config: TextProcessingConfig) -> str:
"""
Converts a DOCX table to text.
Structure representing a DOCX table:
.. code-block:: none
table
.rows[]
.cells[]
.paragraphs[]
.text
That's the structure of a :class:`docx.table.Table` object, but also of our
homebrew creation, :class:`CustomDocxTable`.
The ``plain`` option optimizes for natural language processing, by:
- removing vertical lines:
.. code-block:: none
+-------------+-------------+
| AAA AAA | BBB BBB |
| AAA AAA | BBB BBB |
+-------------+-------------+
becomes
.. code-block:: none
-----------------------------
AAA AAA BBB BBB
AAA AAA BBB BBB
-----------------------------
- and offsetting cells:
.. code-block:: none
AAA AAA BBB BBB CCC CCC
AAA AAA BBB BBB CCC CCC
becomes
.. code-block:: none
AAA AAA
AAA AAA
BBB BBB
BBB BBB
CCC CCC
CCC CCC
- Note also that the grids in DOCX files can have a varying number of cells
per row, e.g.
.. code-block:: none
+---+---+---+
| 1 | 2 | 3 |
+---+---+---+
| 1 | 2 |
+---+---+
"""
def get_cell_text(cell_) -> str:
cellparagraphs = [paragraph.text.strip()
for paragraph in cell_.paragraphs]
cellparagraphs = [x for x in cellparagraphs if x]
return '\n\n'.join(cellparagraphs)
ncols = 1
# noinspection PyTypeChecker
for row in table.rows:
ncols = max(ncols, len(row.cells))
pt = prettytable.PrettyTable(
field_names=list(range(ncols)),
encoding=ENCODING,
header=False,
border=True,
hrules=prettytable.ALL,
vrules=prettytable.NONE if config.plain else prettytable.ALL,
)
pt.align = 'l'
pt.valign = 't'
pt.max_width = max(config.width // ncols, config.min_col_width)
if config.plain:
# noinspection PyTypeChecker
for row in table.rows:
for i, cell in enumerate(row.cells):
n_before = i
n_after = ncols - i - 1
# ... use ncols, not len(row.cells), since "cells per row" is
# not constant, but prettytable wants a fixed number.
# (changed in v0.2.8)
ptrow = (
[''] * n_before +
[get_cell_text(cell)] +
[''] * n_after
)
assert(len(ptrow) == ncols)
pt.add_row(ptrow)
else:
# noinspection PyTypeChecker
for row in table.rows:
ptrow = [] # type: List[str]
# noinspection PyTypeChecker
for cell in row.cells:
ptrow.append(get_cell_text(cell))
ptrow += [''] * (ncols - len(ptrow)) # added in v0.2.8
assert (len(ptrow) == ncols)
pt.add_row(ptrow)
return pt.get_string() | python | def docx_process_table(table: DOCX_TABLE_TYPE,
config: TextProcessingConfig) -> str:
"""
Converts a DOCX table to text.
Structure representing a DOCX table:
.. code-block:: none
table
.rows[]
.cells[]
.paragraphs[]
.text
That's the structure of a :class:`docx.table.Table` object, but also of our
homebrew creation, :class:`CustomDocxTable`.
The ``plain`` option optimizes for natural language processing, by:
- removing vertical lines:
.. code-block:: none
+-------------+-------------+
| AAA AAA | BBB BBB |
| AAA AAA | BBB BBB |
+-------------+-------------+
becomes
.. code-block:: none
-----------------------------
AAA AAA BBB BBB
AAA AAA BBB BBB
-----------------------------
- and offsetting cells:
.. code-block:: none
AAA AAA BBB BBB CCC CCC
AAA AAA BBB BBB CCC CCC
becomes
.. code-block:: none
AAA AAA
AAA AAA
BBB BBB
BBB BBB
CCC CCC
CCC CCC
- Note also that the grids in DOCX files can have a varying number of cells
per row, e.g.
.. code-block:: none
+---+---+---+
| 1 | 2 | 3 |
+---+---+---+
| 1 | 2 |
+---+---+
"""
def get_cell_text(cell_) -> str:
cellparagraphs = [paragraph.text.strip()
for paragraph in cell_.paragraphs]
cellparagraphs = [x for x in cellparagraphs if x]
return '\n\n'.join(cellparagraphs)
ncols = 1
# noinspection PyTypeChecker
for row in table.rows:
ncols = max(ncols, len(row.cells))
pt = prettytable.PrettyTable(
field_names=list(range(ncols)),
encoding=ENCODING,
header=False,
border=True,
hrules=prettytable.ALL,
vrules=prettytable.NONE if config.plain else prettytable.ALL,
)
pt.align = 'l'
pt.valign = 't'
pt.max_width = max(config.width // ncols, config.min_col_width)
if config.plain:
# noinspection PyTypeChecker
for row in table.rows:
for i, cell in enumerate(row.cells):
n_before = i
n_after = ncols - i - 1
# ... use ncols, not len(row.cells), since "cells per row" is
# not constant, but prettytable wants a fixed number.
# (changed in v0.2.8)
ptrow = (
[''] * n_before +
[get_cell_text(cell)] +
[''] * n_after
)
assert(len(ptrow) == ncols)
pt.add_row(ptrow)
else:
# noinspection PyTypeChecker
for row in table.rows:
ptrow = [] # type: List[str]
# noinspection PyTypeChecker
for cell in row.cells:
ptrow.append(get_cell_text(cell))
ptrow += [''] * (ncols - len(ptrow)) # added in v0.2.8
assert (len(ptrow) == ncols)
pt.add_row(ptrow)
return pt.get_string() | [
"def",
"docx_process_table",
"(",
"table",
":",
"DOCX_TABLE_TYPE",
",",
"config",
":",
"TextProcessingConfig",
")",
"->",
"str",
":",
"def",
"get_cell_text",
"(",
"cell_",
")",
"->",
"str",
":",
"cellparagraphs",
"=",
"[",
"paragraph",
".",
"text",
".",
"strip",
"(",
")",
"for",
"paragraph",
"in",
"cell_",
".",
"paragraphs",
"]",
"cellparagraphs",
"=",
"[",
"x",
"for",
"x",
"in",
"cellparagraphs",
"if",
"x",
"]",
"return",
"'\\n\\n'",
".",
"join",
"(",
"cellparagraphs",
")",
"ncols",
"=",
"1",
"# noinspection PyTypeChecker",
"for",
"row",
"in",
"table",
".",
"rows",
":",
"ncols",
"=",
"max",
"(",
"ncols",
",",
"len",
"(",
"row",
".",
"cells",
")",
")",
"pt",
"=",
"prettytable",
".",
"PrettyTable",
"(",
"field_names",
"=",
"list",
"(",
"range",
"(",
"ncols",
")",
")",
",",
"encoding",
"=",
"ENCODING",
",",
"header",
"=",
"False",
",",
"border",
"=",
"True",
",",
"hrules",
"=",
"prettytable",
".",
"ALL",
",",
"vrules",
"=",
"prettytable",
".",
"NONE",
"if",
"config",
".",
"plain",
"else",
"prettytable",
".",
"ALL",
",",
")",
"pt",
".",
"align",
"=",
"'l'",
"pt",
".",
"valign",
"=",
"'t'",
"pt",
".",
"max_width",
"=",
"max",
"(",
"config",
".",
"width",
"//",
"ncols",
",",
"config",
".",
"min_col_width",
")",
"if",
"config",
".",
"plain",
":",
"# noinspection PyTypeChecker",
"for",
"row",
"in",
"table",
".",
"rows",
":",
"for",
"i",
",",
"cell",
"in",
"enumerate",
"(",
"row",
".",
"cells",
")",
":",
"n_before",
"=",
"i",
"n_after",
"=",
"ncols",
"-",
"i",
"-",
"1",
"# ... use ncols, not len(row.cells), since \"cells per row\" is",
"# not constant, but prettytable wants a fixed number.",
"# (changed in v0.2.8)",
"ptrow",
"=",
"(",
"[",
"''",
"]",
"*",
"n_before",
"+",
"[",
"get_cell_text",
"(",
"cell",
")",
"]",
"+",
"[",
"''",
"]",
"*",
"n_after",
")",
"assert",
"(",
"len",
"(",
"ptrow",
")",
"==",
"ncols",
")",
"pt",
".",
"add_row",
"(",
"ptrow",
")",
"else",
":",
"# noinspection PyTypeChecker",
"for",
"row",
"in",
"table",
".",
"rows",
":",
"ptrow",
"=",
"[",
"]",
"# type: List[str]",
"# noinspection PyTypeChecker",
"for",
"cell",
"in",
"row",
".",
"cells",
":",
"ptrow",
".",
"append",
"(",
"get_cell_text",
"(",
"cell",
")",
")",
"ptrow",
"+=",
"[",
"''",
"]",
"*",
"(",
"ncols",
"-",
"len",
"(",
"ptrow",
")",
")",
"# added in v0.2.8",
"assert",
"(",
"len",
"(",
"ptrow",
")",
"==",
"ncols",
")",
"pt",
".",
"add_row",
"(",
"ptrow",
")",
"return",
"pt",
".",
"get_string",
"(",
")"
] | Converts a DOCX table to text.
Structure representing a DOCX table:
.. code-block:: none
table
.rows[]
.cells[]
.paragraphs[]
.text
That's the structure of a :class:`docx.table.Table` object, but also of our
homebrew creation, :class:`CustomDocxTable`.
The ``plain`` option optimizes for natural language processing, by:
- removing vertical lines:
.. code-block:: none
+-------------+-------------+
| AAA AAA | BBB BBB |
| AAA AAA | BBB BBB |
+-------------+-------------+
becomes
.. code-block:: none
-----------------------------
AAA AAA BBB BBB
AAA AAA BBB BBB
-----------------------------
- and offsetting cells:
.. code-block:: none
AAA AAA BBB BBB CCC CCC
AAA AAA BBB BBB CCC CCC
becomes
.. code-block:: none
AAA AAA
AAA AAA
BBB BBB
BBB BBB
CCC CCC
CCC CCC
- Note also that the grids in DOCX files can have a varying number of cells
per row, e.g.
.. code-block:: none
+---+---+---+
| 1 | 2 | 3 |
+---+---+---+
| 1 | 2 |
+---+---+ | [
"Converts",
"a",
"DOCX",
"table",
"to",
"text",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L684-L800 |
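The layout produced by the ``plain`` branch can be demonstrated with ``prettytable`` directly. This sketch assumes a reasonably recent ``prettytable`` release (hence string field names and no ``encoding`` argument, unlike the code above); the ragged input imitates a DOCX grid with differing cells per row:

.. code-block:: python

    import prettytable

    rows = [["AAA AAA", "BBB BBB"], ["CCC CCC"]]  # ragged, as DOCX allows
    ncols = max(len(r) for r in rows)

    pt = prettytable.PrettyTable(
        field_names=[str(i) for i in range(ncols)],
        header=False,
        hrules=prettytable.ALL,
        vrules=prettytable.NONE,  # "plain": no vertical lines
    )
    pt.align = 'l'
    for row in rows:
        for i, cell in enumerate(row):
            # One prettytable row per cell, offset into its own column band.
            pt.add_row([''] * i + [cell] + [''] * (ncols - i - 1))
    print(pt.get_string())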
RudolfCardinal/pythonlib | cardinal_pythonlib/extract_text.py | docx_docx_iter_block_items | def docx_docx_iter_block_items(parent: DOCX_CONTAINER_TYPE) \
-> Iterator[DOCX_BLOCK_ITEM_TYPE]:
# only called if docx loaded
"""
Iterate through items of a DOCX file.
See https://github.com/python-openxml/python-docx/issues/40.
Yield each paragraph and table child within ``parent``, in document order.
Each returned value is an instance of either :class:`Table` or
:class:`Paragraph`. ``parent`` would most commonly be a reference to a main
:class:`Document` object, but also works for a :class:`_Cell` object, which
itself can contain paragraphs and tables.
NOTE: uses internals of the ``python-docx`` (``docx``) library; subject to
change; this version works with ``docx==0.8.5``.
"""
if isinstance(parent, docx.document.Document):
parent_elm = parent.element.body
elif isinstance(parent, docx.table._Cell):
parent_elm = parent._tc
else:
raise ValueError("something's not right")
for child in parent_elm.iterchildren():
if isinstance(child, docx.oxml.text.paragraph.CT_P):
yield docx.text.paragraph.Paragraph(child, parent)
elif isinstance(child, docx.oxml.table.CT_Tbl):
yield docx.table.Table(child, parent) | python | def docx_docx_iter_block_items(parent: DOCX_CONTAINER_TYPE) \
-> Iterator[DOCX_BLOCK_ITEM_TYPE]:
# only called if docx loaded
"""
Iterate through items of a DOCX file.
See https://github.com/python-openxml/python-docx/issues/40.
Yield each paragraph and table child within ``parent``, in document order.
Each returned value is an instance of either :class:`Table` or
:class:`Paragraph`. ``parent`` would most commonly be a reference to a main
:class:`Document` object, but also works for a :class:`_Cell` object, which
itself can contain paragraphs and tables.
NOTE: uses internals of the ``python-docx`` (``docx``) library; subject to
change; this version works with ``docx==0.8.5``.
"""
if isinstance(parent, docx.document.Document):
parent_elm = parent.element.body
elif isinstance(parent, docx.table._Cell):
parent_elm = parent._tc
else:
raise ValueError("something's not right")
for child in parent_elm.iterchildren():
if isinstance(child, docx.oxml.text.paragraph.CT_P):
yield docx.text.paragraph.Paragraph(child, parent)
elif isinstance(child, docx.oxml.table.CT_Tbl):
yield docx.table.Table(child, parent) | [
"def",
"docx_docx_iter_block_items",
"(",
"parent",
":",
"DOCX_CONTAINER_TYPE",
")",
"->",
"Iterator",
"[",
"DOCX_BLOCK_ITEM_TYPE",
"]",
":",
"# only called if docx loaded",
"if",
"isinstance",
"(",
"parent",
",",
"docx",
".",
"document",
".",
"Document",
")",
":",
"parent_elm",
"=",
"parent",
".",
"element",
".",
"body",
"elif",
"isinstance",
"(",
"parent",
",",
"docx",
".",
"table",
".",
"_Cell",
")",
":",
"parent_elm",
"=",
"parent",
".",
"_tc",
"else",
":",
"raise",
"ValueError",
"(",
"\"something's not right\"",
")",
"for",
"child",
"in",
"parent_elm",
".",
"iterchildren",
"(",
")",
":",
"if",
"isinstance",
"(",
"child",
",",
"docx",
".",
"oxml",
".",
"text",
".",
"paragraph",
".",
"CT_P",
")",
":",
"yield",
"docx",
".",
"text",
".",
"paragraph",
".",
"Paragraph",
"(",
"child",
",",
"parent",
")",
"elif",
"isinstance",
"(",
"child",
",",
"docx",
".",
"oxml",
".",
"table",
".",
"CT_Tbl",
")",
":",
"yield",
"docx",
".",
"table",
".",
"Table",
"(",
"child",
",",
"parent",
")"
] | Iterate through items of a DOCX file.
See https://github.com/python-openxml/python-docx/issues/40.
Yield each paragraph and table child within ``parent``, in document order.
Each returned value is an instance of either :class:`Table` or
:class:`Paragraph`. ``parent`` would most commonly be a reference to a main
:class:`Document` object, but also works for a :class:`_Cell` object, which
itself can contain paragraphs and tables.
NOTE: uses internals of the ``python-docx`` (``docx``) library; subject to
change; this version works with ``docx==0.8.5``. | [
"Iterate",
"through",
"items",
"of",
"a",
"DOCX",
"file",
"."
] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L808-L836 |
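A usage sketch for the iterator, assuming ``python-docx`` is installed (the docstring pins the tested version at ``docx==0.8.5``) and that the function is importable from the module shown in the URL; the filename is a placeholder:

.. code-block:: python

    import docx
    from docx.table import Table
    from docx.text.paragraph import Paragraph

    from cardinal_pythonlib.extract_text import docx_docx_iter_block_items

    document = docx.Document("example.docx")  # placeholder path
    for item in docx_docx_iter_block_items(document):
        if isinstance(item, Paragraph):
            print("PARAGRAPH:", item.text)
        elif isinstance(item, Table):
            print("TABLE:", len(item.rows), "row(s)")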