#!/usr/bin/env python
#*****************************************************************************
# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*****************************************************************************
# This script embeds binary multi-scatter data sets into C++ arrays in a generated source file
import sys
import os
import struct
copyright_str = """/******************************************************************************
* Copyright 2023 NVIDIA Corporation. All rights reserved.
*****************************************************************************/
"""
def process_data_set(table_data_filename, fout):
f = open(table_data_filename, "rb")
name_we = os.path.basename(table_data_filename)
name = os.path.splitext(name_we)[0]
print("- processing multi-scatter data set: %s" % name)
fout.write("\n// Automatically generated from %s\n" % name_we)
# check header
res_roughness, = struct.unpack('i', f.read(4))
res_theta, = struct.unpack('i', f.read(4))
res_ior, = struct.unpack('i', f.read(4))
if (res_roughness != 64) or (res_theta != 64) or ((res_ior != 16) and (res_ior != 0)):
print("unexpected resolutions in dataset %s" % name)
print("- res_roughness: %d" % res_roughness)
print("- res_theta: %d" % res_theta)
print("- res_ior: %d" % res_ior)
return -1
expected_buffer_size = (res_ior * 2 + 1) * res_roughness * (res_theta + 1) * 4
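# As implied by this size check: the payload consists of (res_ior * 2 + 1)
# sections of res_roughness * (res_theta + 1) values, 4 bytes per value
# (presumably 32-bit floats).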
bytes = f.read()
buffer_size = len(bytes)
if expected_buffer_size != buffer_size:
print("unexpected size of dataset %s" % name)
print("- expected_buffer_size: %d" % expected_buffer_size)
print("- buffer_size: %d" % buffer_size)
return -1
fout.write("unsigned const libbsdf_multiscatter_res_theta_%s = %d;\n" % (name, res_theta))
fout.write("unsigned const libbsdf_multiscatter_res_roughness_%s = %d;\n" % (name, res_roughness))
fout.write("unsigned const libbsdf_multiscatter_res_ior_%s = %d;\n" % (name, res_ior))
# process the actual data after the header
fout.write("unsigned char const libbsdf_multiscatter_data_%s[] = {\n" % name)
i = 0
fout.write(" ")
for byte in bytes:
if isinstance(byte, str):
byte = ord(byte)
fout.write("0x%02x, " % byte)
if i == 7:
fout.write("\n ")
i = 0
else:
i += 1
fout.write("};\n")
return 0
def usage():
print("Usage: %s <output directory> <inputfile1> <inputfile2> ..." % sys.argv[0])
return 1
def main(args):
if len(args) < 3:
return usage()
with open(args[1], "w") as f:
f.write(copyright_str)
f.write("\n")
f.write("#include <mdl/jit/generator_jit/generator_jit_libbsdf_data.h>\n")
f.write("\n")
f.write("namespace mi {\n")
f.write("namespace mdl {\n")
f.write("namespace libbsdf_data {\n")
# process all files
for x in range(2, len(args)):
res = process_data_set(args[x], f)
if (res < 0):
print("res: %s" % res)
return -1
f.write("}\n")
f.write("}\n")
f.write("}\n")
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
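# Illustrative output for a hypothetical input file "beckmann_smith.bin":
#
#   unsigned const libbsdf_multiscatter_res_theta_beckmann_smith = 64;
#   unsigned const libbsdf_multiscatter_res_roughness_beckmann_smith = 64;
#   unsigned const libbsdf_multiscatter_res_ior_beckmann_smith = 16;
#   unsigned char const libbsdf_multiscatter_data_beckmann_smith[] = {
#       0x00, 0x00, 0x80, 0x3f, ...
#   };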
# source: MDL-SDK-master/src/mdl/jit/generator_jit/gen_libbsdf_multiscatter_tables.py
#!/usr/bin/env python
#*****************************************************************************
# Copyright (c) 2017-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*****************************************************************************
# This script generates the MDL runtime header file for libbsdf.
#
# Call it like this:
# python gen_libbsdf_runtime_header.py ../../compiler/stdmodule ../libbsdf/libbsdf_runtime.h
#
# python 2.6 or higher is needed
#
import sys
import re
import os
from gen_intrinsic_func import SignatureParser, error
reference_parameter_types = {
"bool2",
"bool3",
"bool4",
"color",
"double2",
"double3",
"double4",
"float2",
"float3",
"float4",
"int2",
"int3",
"int4"
}
def eat_until(token_set, tokens):
"""eat tokens until token_kind is found and return them, handle parenthesis"""
r = 0
e = 0
g = 0
a = 0
l = len(tokens)
eaten_tokens = []
while l > 0:
tok = tokens[0]
if r == 0 and e == 0 and g == 0 and a == 0 and tok in token_set:
return eaten_tokens, tokens
if tok == '(':
r += 1
elif tok == ')':
r -= 1
elif tok == '[':
e += 1
elif tok == ']':
e -= 1
elif tok == '{':
g += 1
elif tok == '}':
g -= 1
elif tok == '[[':
a += 1
elif tok == ']]':
a -= 1
eaten_tokens.append(tokens[0])
tokens = tokens[1:]
l -= 1
# do not return empty tokens, the parser does not like that
return eaten_tokens, [None]
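# Example (illustrative): with tokens ["f", "(", "a", ",", "b", ")", ",", "x"],
# eat_until({','}, tokens) skips the comma nested inside the parentheses and
# returns (["f", "(", "a", ",", "b", ")"], [",", "x"]).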
def format_param(param):
typename, name, defparam = param
if typename in reference_parameter_types:
res = "%s const &%s" % (typename, name)
else:
res = "%s %s" % (typename, name)
if defparam:
res += " = %s" % defparam
return res
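# Examples (illustrative): format_param(("float3", "a", "")) yields
# "float3 const &a" because float3 is a by-reference parameter type, while
# format_param(("float", "x", "0.5")) yields "float x = 0.5".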
def parse_prototype(sigparser, decl, prototypes):
"""Get the C++ prototype for a given function declaration."""
# poor man's scanner :-)
tokens = re.sub(r'[,()]', lambda m: ' ' + m.group(0) + ' ', decl).split()
tokens, ret_type = sigparser.get_type(tokens)
name = tokens[0]
if tokens[1] != '(':
error("unknown token '" + tokens[1] + "' while processing '" + decl + "': '(' expected")
sys.exit(1)
tokens = tokens[2:]
params = []
if tokens[0] != ')':
while True:
tokens, t = sigparser.get_type(tokens)
paramname = tokens[0]
tokens = tokens[1:]
if tokens[0] == '=':
# default argument
defarg, tokens = eat_until({',':None, ')':None}, tokens[1:])
else:
defarg = []
params.append((t, paramname, ''.join(defarg)))
if tokens[0] == ')':
break
if tokens[0] != ',':
error("unknown token '" + tokens[1] + "' while processing '"
+ decl + "': ',' expected")
sys.exit(1)
# skip the comma
tokens = tokens[1:]
# For array returns, add one pointer parameter per array element
if "[" in ret_type:
match = re.match("([^[]+)\[(\d+)\]", ret_type)
if match:
elem_type = match.group(1)
num_elems = int(match.group(2))
for i in range(num_elems):
params.append((elem_type + "*", "res_%d" % i, []))
ret_type = "void"
prototype = "%s %s(%s);" % (ret_type, name, ", ".join(map(format_param, params)))
if "[" in ret_type or name == "transpose" or "[<N>]" in prototype:
prototype = "// %s (not supported yet)" % prototype
prototypes.append(prototype)
def print_wrapped(parser, fileobj, line, wrap_pos = 99):
"""print the given line (provided without newline at end) and wrap it at wrap_pos,
splitting the line at commas. Also handles commented out lines."""
orig_line = line
prefix = ""
next_prefix = "// " if line.startswith("//") else " "
while parser.indent * 4 + len(prefix) + len(line) >= wrap_pos:
splitpos = line.rfind(',', 0, wrap_pos - parser.indent * 4 - len(prefix))
if splitpos == -1:
raise Exception("Unable to split line: %s" % orig_line)
parser.write(fileobj, prefix + line[:splitpos + 1] + "\n")
line = line[splitpos + 1:].lstrip()
prefix = next_prefix
parser.write(fileobj, prefix + line + "\n")
def usage(args):
"""print usage info and exit"""
print "Usage: %s stdlib_directory outputfile" % args[0]
return 1
def main(args):
"""Process one file and generate signatures."""
if len(args) != 3:
return usage(args)
stdlib_dir = args[1]
out_name = args[2]
strict = True
prototypes = []
# monkey patch SignatureParser to generate signature names suitable for C++ header files
SignatureParser.get_signature = (
lambda self, decl: parse_prototype(self, decl, prototypes))
try:
parser = SignatureParser(args[0], stdlib_dir, out_name, strict)
# copy the copyright from first 3 lines of libbsdf.h
libbsdf_h_path = os.path.join(os.path.dirname(out_name), "libbsdf.h")
with open(libbsdf_h_path) as f:
copyright = "".join([next(f) for x in xrange(3)])
with open(out_name, "w") as f:
parser.write(f, copyright)
parser.write(f, "\n#ifndef MDL_LIBBSDF_RUNTIME_H\n"
"#define MDL_LIBBSDF_RUNTIME_H\n")
for module_name in ["math", "debug"]:
# clear list before parsing next module
del prototypes[:]
parser.parse(module_name)
parser.write(f, "\nnamespace %s\n" % module_name)
parser.write(f, "{\n")
parser.indent += 1
for prototype in prototypes:
print_wrapped(parser, f, prototype)
parser.indent -= 1
parser.write(f, "}\n")
parser.write(f, "\n#endif // MDL_LIBBSDF_RUNTIME_H\n")
except Exception as e:
error(str(e))
return 1
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
# source: MDL-SDK-master/src/mdl/jit/generator_jit/gen_libbsdf_runtime_header.py
#!/usr/bin/env python
#*****************************************************************************
# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*****************************************************************************
#
# Generate HLSL intrinsics
#
import sys
###############################################################################
# HLSL-specific information. #
###############################################################################
class db_hlsl_attribute(object):
"An HLSL attribute declaration"
def __init__(self, title_name, scope, args, doc):
self.name = title_name.lower() # lowercase attribute name
self.title_name = title_name # title-case attribute name
self.scope = scope # one of l (loop), c (condition), s (switch), f (function)
self.args = args # list of arguments
self.doc = doc # documentation
class db_hlsl_intrinsic(object):
"An HLSL intrinsic declaration"
def __init__(self, name, idx, opname, params, ns, ns_idx, doc, ro, rn, unsigned_op, overload_idx):
self.name = name # Function name
self.idx = idx # Unique number within namespace
self.opname = opname # D3D-style name
self.params = params # List of parameters
self.ns = ns # Function namespace
self.ns_idx = ns_idx # Namespace index
self.doc = doc # Documentation
id_prefix = "DS_IOP" if ns == "Intrinsics" else "DS_MOP"
self.enum_name = "%s_%s" % (id_prefix, name) # enum name
self.readonly = ro # Only read memory
self.readnone = rn # Not read memory
self.unsigned_op = unsigned_op # Unsigned opcode if exist
if unsigned_op != "":
self.unsigned_op = "%s_%s" % (id_prefix, unsigned_op)
self.overload_param_index = overload_idx # Parameter determines the overload type, -1 means ret type
self.key = ("%3d" % ns_idx) + "!" + name + "!" + ("%2d" % len(params)) + "!" + ("%3d" % idx) # Unique key
self.vulkanSpecific = ns.startswith("Vk") # Vulkan specific intrinsic - SPIRV change
class db_hlsl_namespace(object):
"A grouping of HLSL intrinsics"
def __init__(self, name):
self.name = name
self.intrinsics = []
class db_hlsl_intrinsic_param(object):
"An HLSL parameter declaration for an intrinsic"
def __init__(self, name, param_qual, template_id, template_list, component_id, component_list, rows, cols, type_name, idx, template_id_idx, component_id_idx):
self.name = name # Parameter name
self.param_qual = param_qual # Parameter qualifier expressions
self.template_id = template_id # Template ID (possibly identifier)
self.template_list = template_list # Template list (possibly identifier)
self.component_id = component_id # Component ID (possibly identifier)
self.component_list = component_list # Component list (possibly identifier)
self.rows = rows # Row count for parameter, possibly identifier
self.cols = cols # Column count for parameter, possibly identifier
self.type_name = type_name # Type name
self.idx = idx # Argument index
self.template_id_idx = template_id_idx # Template ID numeric value
self.component_id_idx = component_id_idx # Component ID numeric value
class db_hlsl(object):
"A database of HLSL language data"
def __init__(self, intrinsic_defs):
self.base_types = {
"bool": "LICOMPTYPE_BOOL",
"int": "LICOMPTYPE_INT",
"int16_t": "LICOMPTYPE_INT16",
"uint": "LICOMPTYPE_UINT",
"uint16_t": "LICOMPTYPE_UINT16",
"u64": "LICOMPTYPE_UINT64",
"any_int": "LICOMPTYPE_ANY_INT",
"any_int32": "LICOMPTYPE_ANY_INT32",
"uint_only": "LICOMPTYPE_UINT_ONLY",
"float16_t": "LICOMPTYPE_FLOAT16",
"float": "LICOMPTYPE_FLOAT",
"fldbl": "LICOMPTYPE_FLOAT_DOUBLE",
"any_float": "LICOMPTYPE_ANY_FLOAT",
"float_like": "LICOMPTYPE_FLOAT_LIKE",
"double": "LICOMPTYPE_DOUBLE",
"double_only": "LICOMPTYPE_DOUBLE_ONLY",
"numeric": "LICOMPTYPE_NUMERIC",
"numeric16_only": "LICOMPTYPE_NUMERIC16_ONLY",
"numeric32": "LICOMPTYPE_NUMERIC32",
"numeric32_only": "LICOMPTYPE_NUMERIC32_ONLY",
"any": "LICOMPTYPE_ANY",
"sampler1d": "LICOMPTYPE_SAMPLER1D",
"sampler2d": "LICOMPTYPE_SAMPLER2D",
"sampler3d": "LICOMPTYPE_SAMPLER3D",
"sampler_cube": "LICOMPTYPE_SAMPLERCUBE",
"sampler_cmp": "LICOMPTYPE_SAMPLERCMP",
"sampler": "LICOMPTYPE_SAMPLER",
"ray_desc" : "LICOMPTYPE_RAYDESC",
"acceleration_struct" : "LICOMPTYPE_ACCELERATION_STRUCT",
"udt" : "LICOMPTYPE_USER_DEFINED_TYPE",
"void": "LICOMPTYPE_VOID",
"string": "LICOMPTYPE_STRING",
"wave": "LICOMPTYPE_WAVE"}
self.trans_rowcol = {
"r": "IA_R",
"c": "IA_C",
"r2": "IA_R2",
"c2": "IA_C2"}
self.param_qual = {
"in": "AR_QUAL_IN",
"inout": "AR_QUAL_IN | AR_QUAL_OUT",
"out": "AR_QUAL_OUT",
"col_major": "AR_QUAL_COLMAJOR",
"row_major": "AR_QUAL_ROWMAJOR"}
self.intrinsics = []
self.load_intrinsics(intrinsic_defs)
self.create_namespaces()
self.populate_attributes()
self.opcode_namespace = "Def_function"
def create_namespaces(self):
last_ns = None
self.namespaces = {}
for i in sorted(self.intrinsics, key=lambda x: x.key):
if last_ns is None or last_ns.name != i.ns:
last_ns = db_hlsl_namespace(i.ns)
self.namespaces[i.ns] = last_ns
last_ns.intrinsics.append(i)
def load_intrinsics(self, intrinsic_defs):
import re
blank_re = re.compile(r"^\s*$")
comment_re = re.compile(r"^\s*//")
namespace_beg_re = re.compile(r"^namespace\s+(\w+)\s*{\s*$")
namespace_end_re = re.compile(r"^}\s*namespace\s*$")
intrinsic_re = re.compile(r"^\s*([^(]+)\s+\[\[(\S*)\]\]\s+(\w+)\s*\(\s*([^)]*)\s*\)\s*(:\s*\w+\s*)?;$")
operand_re = re.compile(r"^:\s*(\w+)\s*$")
bracket_cleanup_re = re.compile(r"<\s*(\S+)\s*,\s*(\S+)\s*>") # change <a,b> to <a@b> to help split params and parse
params_split_re = re.compile(r"\s*,\s*")
ws_split_re = re.compile(r"\s+")
typeref_re = re.compile(r"\$type(\d+)$")
type_matrix_re = re.compile(r"(\S+)<(\S+)@(\S+)>$")
type_vector_re = re.compile(r"(\S+)<(\S+)>$")
type_any_re = re.compile(r"(\S+)<>$")
digits_re = re.compile(r"^\d+$")
opt_param_match_re = re.compile(r"^\$match<(\S+)@(\S+)>$")
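# A definition line matched by intrinsic_re looks like this (hypothetical
# entry, not taken from an actual specification file):
#   float<c> [[rn]] saturate(in float<c> v);
# group(1) is the return type "float<c>", group(2) the attribute list "rn",
# group(3) the name "saturate" and group(4) the parameter list.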
ns_idx = 0
num_entries = 0
def add_flag(val, new_val):
if val == "" or val == "0":
return new_val
return val + " | " + new_val
def translate_rowcol(val):
digits_match = digits_re.match(val)
if digits_match:
return val
assert val in self.trans_rowcol, "unknown row/col %s" % val
return self.trans_rowcol[val]
def process_arg(desc, idx, done_args, intrinsic_name):
"Process a single parameter description."
opt_list = []
desc = desc.strip()
if desc == "...":
param_name = "..."
type_name = "..."
else:
opt_list = ws_split_re.split(desc)
assert len(opt_list) > 0, "malformed parameter desc %s" % (desc)
param_name = opt_list.pop() # last token is name
type_name = opt_list.pop() # next-to-last is type specifier
param_qual = "0"
template_id = str(idx)
template_list = "LITEMPLATE_ANY"
component_id = str(idx)
component_list = "LICOMPTYPE_ANY"
rows = "1"
cols = "1"
if type_name == "$unspec":
assert idx == 0, "'$unspec' can only be used as the return type"
# template_id may be -1 in other places other than return type, for example in Stream.Append().
# $unspec is a shorthand for return types only though.
template_id = "-1"
component_id = "0"
type_name = "void"
elif type_name == "...":
assert idx != 0, "'...' can only be used in the parameter list"
template_id = "-2"
component_id = "0"
type_name = "void"
else:
typeref_match = typeref_re.match(type_name)
if typeref_match:
template_id = typeref_match.group(1)
component_id = template_id
assert idx != 1, "Can't use $type on the first argument"
assert template_id != "0", "Can't match an input to the return type"
done_idx = int(template_id) - 1
assert done_idx <= len(args) + 1, "$type must refer to a processed arg"
done_arg = done_args[done_idx]
type_name = done_arg.type_name
# Determine matrix/vector/any/scalar type names.
type_matrix_match = type_matrix_re.match(type_name)
if type_matrix_match:
base_type = type_matrix_match.group(1)
rows = type_matrix_match.group(2)
cols = type_matrix_match.group(3)
template_list = "LITEMPLATE_MATRIX"
else:
type_vector_match = type_vector_re.match(type_name)
if type_vector_match:
base_type = type_vector_match.group(1)
cols = type_vector_match.group(2)
template_list = "LITEMPLATE_VECTOR"
else:
type_any_match = type_any_re.match(type_name)
if type_any_match:
base_type = type_any_match.group(1)
rows = "r"
cols = "c"
template_list = "LITEMPLATE_ANY"
else:
base_type = type_name
if base_type.startswith("sampler") or base_type.startswith("string") or base_type.startswith("wave") or base_type.startswith("acceleration_struct") or base_type.startswith("ray_desc"):
template_list = "LITEMPLATE_OBJECT"
else:
template_list = "LITEMPLATE_SCALAR"
assert base_type in self.base_types, "Unknown base type '%s' in '%s'" % (base_type, desc)
component_list = self.base_types[base_type]
rows = translate_rowcol(rows)
cols = translate_rowcol(cols)
for opt in opt_list:
if opt in self.param_qual:
param_qual = add_flag(param_qual, self.param_qual[opt])
else:
opt_param_match_match = opt_param_match_re.match(opt)
assert opt_param_match_match, "Unknown parameter qualifier '%s'" % (opt)
template_id = opt_param_match_match.group(1)
component_id = opt_param_match_match.group(2)
if component_list == "LICOMPTYPE_VOID":
if type_name == "void":
template_list = "LITEMPLATE_VOID"
rows = "0"
cols = "0"
if template_id == "0":
param_qual = "0"
# Keep these as numeric values.
template_id_idx = int(template_id)
component_id_idx = int(component_id)
# Verify that references don't point to the right (except for the return value).
assert idx == 0 or template_id_idx <= int(idx), "Argument '%s' has a forward reference" % (param_name)
assert idx == 0 or component_id_idx <= int(idx), "Argument '%s' has a forward reference" % (param_name)
if template_id == "-1":
template_id = "INTRIN_TEMPLATE_FROM_TYPE"
elif template_id == "-2":
template_id = "INTRIN_TEMPLATE_VARARGS"
if component_id == "-1":
component_id = "INTRIN_COMPTYPE_FROM_TYPE_ELT0"
return db_hlsl_intrinsic_param(param_name, param_qual, template_id, template_list, component_id, component_list, rows, cols, type_name, idx, template_id_idx, component_id_idx)
def process_attr(attr):
attrs = attr.split(',')
readonly = False # Only read memory
readnone = False # Not read memory
unsigned_op = "" # Unsigned opcode if exist
overload_param_index = -1 # Parameter determines the overload type, -1 means ret type.
for a in attrs:
if (a == ""):
continue
if (a == "ro"):
readonly = True
continue
if (a == "rn"):
readnone = True
continue
assign = a.split('=')
if (len(assign) != 2):
assert False, "invalid attr %s" % (a)
continue
d = assign[0]
v = assign[1]
if (d == "unsigned_op"):
unsigned_op = v
continue
if (d == "overload"):
overload_param_index = int(v)
continue
assert False, "invalid attr %s" % (a)
return readonly, readnone, unsigned_op, overload_param_index
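# Example (illustrative): process_attr("ro,unsigned_op=umax,overload=0")
# yields readonly=True, readnone=False, unsigned_op="umax" and
# overload_param_index=0.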
current_namespace = None
for line in intrinsic_defs:
if blank_re.match(line): continue
if comment_re.match(line): continue
match_obj = namespace_beg_re.match(line)
if match_obj:
assert not current_namespace, "cannot open namespace without closing prior one"
current_namespace = match_obj.group(1)
num_entries = 0
ns_idx += 1
continue
if namespace_end_re.match(line):
assert current_namespace, "cannot close namespace without previously opening it"
current_namespace = None
continue
match_obj = intrinsic_re.match(line)
if match_obj:
assert current_namespace, "instruction missing namespace %s" % (line)
# Get a D3D-style operand name for the instruction.
# Unused for DXIL.
opts = match_obj.group(1)
attr = match_obj.group(2)
name = match_obj.group(3)
params = match_obj.group(4)
op = match_obj.group(5)
if op:
operand_match = operand_re.match(op)
if operand_match:
op = operand_match.group(1)
if not op:
op = name
readonly, readnone, unsigned_op, overload_param_index = process_attr(attr)
# Add an entry for this intrinsic.
if bracket_cleanup_re.search(opts):
opts = bracket_cleanup_re.sub(r"<\1@\2>", opts)
if bracket_cleanup_re.search(params):
params = bracket_cleanup_re.sub(r"<\g<1>@\2>", params)
ret_desc = "out " + opts + " " + name
if len(params) > 0:
in_args = params_split_re.split(params)
else:
in_args = []
arg_idx = 1
args = []
for in_arg in in_args:
args.append(process_arg(in_arg, arg_idx, args, name))
arg_idx += 1
# We have to process the return type description last
# to match the compiler's handling of it and allow
# the return type to match an input type.
# It needs to be the first entry, so prepend it.
args.insert(0, process_arg(ret_desc, 0, args, name))
# TODO: verify a single level of indirection
self.intrinsics.append(db_hlsl_intrinsic(
name, num_entries, op, args, current_namespace, ns_idx, "pending doc for " + name,
readonly, readnone, unsigned_op, overload_param_index))
num_entries += 1
continue
assert False, "cannot parse line %s" % (line)
def populate_attributes(self):
"Populate basic definitions for attributes."
attributes = []
def add_attr(title_name, scope, doc):
attributes.append(db_hlsl_attribute(title_name, scope, [], doc))
def add_attr_arg(title_name, scope, doc, args):
attributes.append(db_hlsl_attribute(title_name, scope, args, doc))
add_attr("Allow_UAV_Condition", "l", "Allows a compute shader loop termination condition to be based off of a UAV read. The loop must not contain synchronization intrinsics")
add_attr("Branch", "c", "Evaluate only one side of the if statement depending on the given condition")
add_attr("Call", "s", "The bodies of the individual cases in the switch will be moved into hardware subroutines and the switch will be a series of subroutine calls")
add_attr("EarlyDepthStencil", "f", "Forces depth-stencil testing before a shader executes")
add_attr("FastOpt", "l", "Reduces the compile time but produces less aggressive optimizations")
add_attr("Flatten", "c", "Evaluate both sides of the if statement and choose between the two resulting values")
add_attr("ForceCase", "s", "Force a switch statement in the hardware")
add_attr("Loop", "l", "Generate code that uses flow control to execute each iteration of the loop")
add_attr_arg("ClipPlanes", "f", "Optional list of clip planes", [{"name":"ClipPlane", "type":"int", "count":6}])
add_attr_arg("Domain", "f", "Defines the patch type used in the HS", [{"name":"DomainType", type:"string"}])
add_attr_arg("Instance", "f", "Use this attribute to instance a geometry shader", [{"name":"Count", "type":"int"}])
add_attr_arg("MaxTessFactor", "f", "Indicates the maximum value that the hull shader would return for any tessellation factor.", [{"name":"Count", "type":"int"}])
add_attr_arg("MaxVertexCount", "f", "maxvertexcount doc", [{"name":"Count", "type":"int"}])
add_attr_arg("NumThreads", "f", "Defines the number of threads to be executed in a single thread group.", [{"name":"x", "type":"int"},{"name":"z", "type":"int"},{"name":"y", "type":"int"}])
add_attr_arg("OutputControlPoints", "f", "Defines the number of output control points per thread that will be created in the hull shader", [{"name":"Count", "type":"int"}])
add_attr_arg("OutputTopology", "f", "Defines the output primitive type for the tessellator", [{"name":"Topology", "type":"string"}])
add_attr_arg("Partitioning", "f", "Defines the tesselation scheme to be used in the hull shader", [{"name":"Scheme", "type":"scheme"}])
add_attr_arg("PatchConstantFunc", "f", "Defines the function for computing patch constant data", [{"name":"FunctionName", "type":"string"}])
add_attr_arg("RootSignature", "f", "RootSignature doc", [{"name":"SignatureName", "type":"string"}])
add_attr_arg("Unroll", "l", "Unroll the loop until it stops executing or a max count", [{"name":"Count", "type":"int"}])
self.attributes = attributes
g_db_hlsl = None
g_templ_name = None
def get_db_hlsl():
global g_db_hlsl
if g_db_hlsl is None:
with open(g_templ_name, "r") as f:
g_db_hlsl = db_hlsl(f)
return g_db_hlsl
def get_hlsl_intrinsics():
db = get_db_hlsl()
result = ""
last_ns = ""
ns_table = ""
is_vk_table = False # SPIRV Change
id_prefix = ""
arg_idx = 0
opcode_namespace = db.opcode_namespace
for i in sorted(db.intrinsics, key=lambda x: x.key):
if last_ns != i.ns:
last_ns = i.ns
id_prefix = "DS_IOP" if last_ns == "Intrinsics" else "DS_MOP"
if (len(ns_table)):
result += ns_table + "};\n"
# SPIRV Change Starts
if is_vk_table:
result += "\n#endif // ENABLE_SPIRV_CODEGEN\n"
is_vk_table = False
# SPIRV Change Ends
result += "\n//\n// Start of %s\n//\n\n" % (last_ns)
# This used to be qualified as __declspec(selectany), but that's no longer necessary.
ns_table = "static HLSL_intrinsic const g_%s[] =\n{\n" % (last_ns)
# SPIRV Change Starts
if (i.vulkanSpecific):
is_vk_table = True
result += "#ifdef ENABLE_SPIRV_CODEGEN\n\n"
# SPIRV Change Ends
arg_idx = 0
ma = "MA_WRITE"
if i.readonly:
ma = "MA_READ_ONLY"
elif i.readnone:
ma = "MA_READ_NONE"
ns_table += " { %s::%s_%s, %s, %d, %d, g_%s_Args%s },\n" % (opcode_namespace, id_prefix, i.name, ma, i.overload_param_index,len(i.params), last_ns, arg_idx)
result += "static HLSL_intrinsic_argument const g_%s_Args%s[] =\n{\n" % (last_ns, arg_idx)
for p in i.params:
result += " {\"%s\", %s, %s, %s, %s, %s, %s, %s},\n" % (
p.name, p.param_qual, p.template_id, p.template_list,
p.component_id, p.component_list, p.rows, p.cols)
result += "};\n\n"
arg_idx += 1
result += ns_table + "};\n"
result += "\n#endif // ENABLE_SPIRV_CODEGEN\n" if is_vk_table else "" # SPIRV Change
return result
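# Illustrative excerpt of the generated tables (the intrinsic name is
# hypothetical):
#
#   static HLSL_intrinsic_argument const g_Intrinsics_Args0[] = { ... };
#   static HLSL_intrinsic const g_Intrinsics[] =
#   {
#       { Def_function::DS_IOP_abs, MA_READ_NONE, -1, 2, g_Intrinsics_Args0 },
#       ...
#   };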
# SPIRV Change Starts
def wrap_with_ifdef_if_vulkan_specific(intrinsic, text):
if intrinsic.vulkanSpecific:
return "#ifdef ENABLE_SPIRV_CODEGEN\n" + text + "#endif // ENABLE_SPIRV_CODEGEN\n"
return text
# SPIRV Change Ends
def enum_hlsl_intrinsics():
db = get_db_hlsl()
result = " DS_HLSL_INTRINSIC_FIRST,\n"
first = " = DS_HLSL_INTRINSIC_FIRST"
enumed = []
last = None
for i in sorted(db.intrinsics, key=lambda x: x.key):
if (i.enum_name not in enumed):
enumerant = " %s%s,\n" % (i.enum_name, first)
first =""
result += wrap_with_ifdef_if_vulkan_specific(i, enumerant) # SPIRV Change
enumed.append(i.enum_name)
last = i.enum_name
# unsigned
result += " // unsigned\n"
for i in sorted(db.intrinsics, key=lambda x: x.key):
if (i.unsigned_op != ""):
if (i.unsigned_op not in enumed):
result += " %s%s,\n" % (i.unsigned_op, first)
first = ""
enumed.append(i.unsigned_op)
last = i.unsigned_op
result += " DS_HLSL_INTRINSIC_LAST = %s,\n" % last
return result
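# Illustrative excerpt of the generated enum body (intrinsic names are
# hypothetical):
#   DS_HLSL_INTRINSIC_FIRST,
#   DS_IOP_abs = DS_HLSL_INTRINSIC_FIRST,
#   DS_IOP_acos,
#   ...
#   DS_HLSL_INTRINSIC_LAST = DS_MOP_Sample,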
def error(msg):
"""Write a message to stderr"""
sys.stderr.write("gen_intrinsic_eval: Error: " + msg + "\n")
def usage(args):
"""print usage info and exit"""
print("Usage: %s outputfile specification" % args[0])
return 1
def main(args):
"""Generate the HLSL intrinsic enum and definition tables."""
global g_templ_name
if len(args) < 4:
return usage(args)
g_templ_name = args[1]
out_enum = args[2]
out_defs = args[3]
try:
if out_enum:
f = open(out_enum, "wt")
else:
f = sys.stderr
f.write("// Generated by gen_hlsl_intrinsics.py.\n")
f.write(enum_hlsl_intrinsics())
f.close()
if out_defs:
f = open(out_defs, "wt")
else:
f = sys.stderr
f.write("// Generated by gen_hlsl_intrinsics.py.\n")
f.write(get_hlsl_intrinsics())
f.close()
except IOError as e:
error(str(e))
return 1
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
# source: MDL-SDK-master/src/mdl/compiler/compiler_hlsl/gen_hlsl_intrinsics.py
#!/usr/bin/env python
#
# Copyright (c) 2012-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This script generates signatures for compiler-known functions.
#
# python 2.3 or higher is needed
#
import sys
import os
import re
import tempfile
def error(msg):
"""Write a message to stderr"""
sys.stderr.write("gen_intrinsic_eval: Error: " + msg + "\n")
def warning(msg):
"""Write a message to stderr"""
sys.stderr.write("gen_intrinsic_eval: Warning: " + msg + "\n")
def make_temp_file():
"""Return a temporary file name"""
fd, name = tempfile.mkstemp()
os.close(fd)
return name
class SignatureParser:
"""main signature parser"""
def __init__(self, script_name, indir, out_name, strict):
"""constructor"""
self.debug = False
self.indir = indir
self.out_name = out_name
self.r_intrinsic = re.compile(r"\[\[\s+intrinsic\(\)[^]]*\]\];")
self.curr_module = ""
self.m_intrinsics = {}
self.m_intrinsic_mods = {}
self.m_signatures = {}
self.indent = 0
self.strict = strict
self.intrinsic_modes = {}
#
# ADD NEW TYPES HERE!
#
self.m_types = {
"bool" : "BB",
"bool2" : "B2",
"bool3" : "B3",
"bool4" : "B4",
"int" : "II",
"int2" : "I2",
"int3" : "I3",
"int4" : "I4",
"float" : "FF",
"float2" : "F2",
"float3" : "F3",
"float4" : "F4",
"double" : "DD",
"double2" : "D2",
"double3" : "D3",
"double4" : "D4",
"color" : "CC",
"float2x2" : "F22",
"float2x3" : "F23",
"float2x4" : "F24",
"float3x2" : "F32",
"float3x3" : "F33",
"float3x4" : "F34",
"float4x2" : "F42",
"float4x3" : "F43",
"float4x4" : "F44",
"double2x2" : "D22",
"double2x3" : "D23",
"double2x4" : "D24",
"double3x2" : "D32",
"double3x3" : "D33",
"double3x4" : "D34",
"double4x2" : "D42",
"double4x3" : "D43",
"double4x4" : "D44",
"float[2]" : "FA2",
"float2[2]" : "F2A2",
"float3[2]" : "F3A2",
"float4[2]" : "F4A2",
"double[2]" : "DA2",
"double2[2]" : "D2A2",
"double3[2]" : "D3A2",
"double4[2]" : "D4A2",
"float[<N>]" : "FAN",
"float[N]" : "FAn",
}
# create inverse mapping
self.m_inv_types = {}
for type, code in self.m_types.items():
old_type = self.m_inv_types.setdefault(code, type)
if type != old_type:
error("type code %s is not unique, used by '%s' and '%s'" % (code, old_type, type))
def split_signature(self, signature):
"""Split a signature into return type and parameter types."""
params = signature.split('_')
ret_type = params[0]
params = params[1:]
return ret_type, params
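# Example (illustrative): split_signature("F3_F3_FF") returns
# ("F3", ["F3", "FF"]).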
def get_atomic_value_kind(self, type_code):
"""If type_code is an atomic value, return its value kind, else None."""
cases = {
"bool": "IValue::VK_BOOL",
"int": "IValue::VK_INT",
"float": "IValue::VK_FLOAT",
"double": "IValue::VK_DOUBLE",
"color": "IValue::VK_RGB_COLOR",
"string": "IValue::VK_STRING"
}
return cases.get(self.m_inv_types[type_code], None)
def get_vector_type_kind(self, type_code):
"""If type_code is an vector value, return its type kind, else None."""
cases = {
"bool2": "IType::TK_BOOL",
"bool3": "IType::TK_BOOL",
"bool4": "IType::TK_BOOL",
"int2": "IType::TK_INT",
"int3": "IType::TK_INT",
"int4": "IType::TK_INT",
"float2": "IType::TK_FLOAT",
"float3": "IType::TK_FLOAT",
"float4": "IType::TK_FLOAT",
"double2": "IType::TK_DOUBLE",
"double3": "IType::TK_DOUBLE",
"double4": "IType::TK_DOUBLE",
}
return cases.get(self.m_inv_types[type_code], None)
def get_vector_type_and_size(self, type_code):
"""If type_code is an vector value, return its (element type, size) pair else None."""
cases = {
"bool2": ("bool", 2),
"bool3": ("bool", 3),
"bool4": ("bool", 4),
"int2": ("int", 2),
"int3": ("int", 3),
"int4": ("int", 4),
"float2": ("float", 2),
"float3": ("float", 3),
"float4": ("float", 4),
"double2": ("double", 2),
"double3": ("double", 3),
"double4": ("double", 4),
"color": ("float", 3)
}
return cases.get(self.m_inv_types[type_code], None)
def get_matrix_type_kind(self, type_code):
"""If type_code is an matrix value, return its type kind, else None."""
cases = {
"float2x2" : "IType::TK_FLOAT",
"float2x3" : "IType::TK_FLOAT",
"float2x4" : "IType::TK_FLOAT",
"float3x2" : "IType::TK_FLOAT",
"float3x3" : "IType::TK_FLOAT",
"float3x4" : "IType::TK_FLOAT",
"float4x2" : "IType::TK_FLOAT",
"float4x3" : "IType::TK_FLOAT",
"float4x4" : "IType::TK_FLOAT",
"double2x2" : "IType::TK_DOUBLE",
"double2x3" : "IType::TK_DOUBLE",
"double2x4" : "IType::TK_DOUBLE",
"double3x2" : "IType::TK_DOUBLE",
"double3x3" : "IType::TK_DOUBLE",
"double3x4" : "IType::TK_DOUBLE",
"double4x2" : "IType::TK_DOUBLE",
"double4x3" : "IType::TK_DOUBLE",
"double4x4" : "IType::TK_DOUBLE",
}
return cases.get(self.m_inv_types[type_code], None)
def write(self, f, s):
"""write string s to file f after doing indent."""
for i in range(self.indent):
f.write(" ")
f.write(s)
def parse(self, mdl_name):
"""Parse a mdl module."""
self.curr_module = mdl_name
fname = self.indir + "/" + mdl_name + ".mdl"
f = open(fname, "r")
o = self.parse_file(f)
f.close()
def as_intrinsic_function(self, decl):
"""Check if the given declaration is an intrinsic function declaration."""
if decl[:5] == "const":
return None
if decl[:4] == "enum":
return None
if decl[:5] == "struct":
return None
if decl[:8] == "material":
return None
m = self.r_intrinsic.search(decl)
if m:
decl = decl[:m.start()]
# kill all other annotations
return re.sub(r'\[\[[^]]*\]\]', "", decl).strip()
return None
def get_type(self, tokens):
"""decode a type"""
start = 0
end = 1
if tokens[0] == "uniform" or tokens[0] == "varying":
# skip uniform and varying modifier
end += 1
start += 1
ret_type = " ".join(tokens[start:end])
return tokens[end:], ret_type
def do_get_type_code(self, s):
"""get the type code"""
try:
return self.m_types[s]
except KeyError as e:
error("Unsupported type '" + s + "' found")
sys.exit(1)
def get_type_code(self, s):
"""get the type code"""
c = self.do_get_type_code(s)
return c
def create_signature(self, ret_type, args):
"""create the signature"""
ret_tp = self.get_type_code(ret_type)
sig = ""
for arg in args:
sig += '_' + self.get_type_code(arg)
return ret_tp + sig
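# Example (illustrative): create_signature("float3", ["float3", "float"])
# returns "F3_F3_FF" using the type codes above.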
def is_float_type(self, type_code):
"""If type_code is an float type, return True, else False."""
cases = {
"float": True,
"double": True,
}
return cases.get(self.m_inv_types[type_code], False)
def is_int_type(self, type_code):
"""If type_code is an int type, return True, else False."""
return self.m_inv_types[type_code] == "int"
def is_bool_type(self, type_code):
"""If type_code is a bool type, return True, else False."""
return self.m_inv_types[type_code] == "bool"
def is_atomic_type(self, type_code):
"""If type_code is a bool, int, or float type, return True, else False."""
return self.is_bool_type(type_code) or self.is_int_type(type_code) or self.is_float_type(type_code)
def is_math_supported(self, name, signature):
"""Checks if the given math intrinsic is supported."""
ret_type, params = self.split_signature(signature)
base = None
dim = 0
vt = self.get_vector_type_and_size(ret_type)
if vt:
base = vt[0]
dim = vt[1]
all_atomic = self.is_atomic_type(ret_type)
all_base_same = base != None
for param in params:
if not self.is_atomic_type(param):
all_atomic = False
if self.m_inv_types[param] != base:
vt = self.get_vector_type_and_size(param)
if not vt or vt[0] != base or vt[1] != dim:
all_base_same = False
if len(params) == 1:
if name == "blackbody" and params[0] == "FF" :
self.intrinsic_modes[name + signature] = "math::blackbody"
return True
elif name == "average":
# support average with one argument
self.intrinsic_modes[name + signature] = "math::average"
return True
elif name == "DX" or name == "DY":
vt = self.get_vector_type_and_size(params[0])
if (params[0] == "FF" or params[0] == "DD" or
(vt and (vt[0] == "float" or vt[0] == "double"))):
# support DX(floatX) and DY(floatX)
self.intrinsic_modes[name + signature] = "math::DX|DY"
return True
return False # derivatives that are not yet supported cannot be handled component-wise
if len(params) == 2:
if name == "emission_color" and params[0] == "FAN" and params[1] == "FAn":
self.intrinsic_modes[name + signature] = "math::emission_color_spectrum"
return True
if all_atomic and self.is_atomic_type(ret_type):
# simple all float/int/bool functions
self.intrinsic_modes[name + signature] = "all_atomic"
return True
if len(params) == 1:
if name == "any" or name == "all":
# support any and all with one argument
self.intrinsic_modes[name + signature] = "math::any|all"
return True
elif name == "isnan" or name == "isfinite":
if self.get_vector_type_and_size(params[0]) or self.is_atomic_type(params[0]):
# support all isnan/isfinite with one argument
self.intrinsic_modes[name + signature] = "math::isnan|isfinite"
return True
elif name == "luminance":
if params[0] == "F3" or params[0] == "CC":
# support luminance(float3) and luminance(color)
self.intrinsic_modes[name + signature] = "math::luminance"
return True
elif name == "max_value" or name == "min_value":
if params[0] == "CC":
# support max_value(color) and min_value(color)
self.intrinsic_modes[name + signature] = "math::max_value|min_value"
return True
else:
vt = self.get_vector_type_and_size(params[0])
if vt and (vt[0] == "float" or vt[0] == "double"):
# support max_value(floatX) and min_value(floatX)
self.intrinsic_modes[name + signature] = "math::max_value|min_value"
return True
elif name == "max_value_wavelength" or name == "min_value_wavelength":
# support max_value_wavelength(color) and min_value_wavelength(color)
self.intrinsic_modes[name + signature] = "math::max_value_wavelength|min_value_wavelength"
return True
elif name == "length" or name == "normalize":
vt = self.get_vector_type_and_size(params[0])
if params[0] != "CC" and vt and (vt[0] == "float" or vt[0] == "double"):
# support length(floatX) and normalize(floatX)
self.intrinsic_modes[name + signature] = "math::length|normalize"
return True
return False # modes that are not yet supported must not fall through to the component-wise handling
elif name == "transpose":
if self.get_matrix_type_kind(params[0]):
# support transpose(floatNxM) and transpose(doubleNxM)
self.intrinsic_modes[name + signature] = "math::transpose"
return True
elif name == "emission_color":
if params[0] == "CC":
self.intrinsic_modes[name + signature] = "math::emission_color_color"
return True
if name == "distance" or name == "dot":
if len(params) == 2:
if params[0] == params[1] and params[0] != "CC":
vt = self.get_vector_type_and_size(params[0])
if vt and (vt[0] == "float" or vt[0] == "double"):
# support distance(floatX) and dot(floatX)
self.intrinsic_modes[name + signature] = "math::distance|dot"
return True
if name == "cross":
if signature == "F3_F3_F3" or signature == "D3_D3_D3":
# the only supported cross variant
self.intrinsic_modes[name + signature] = "math::cross"
return True
else:
return False
if name == "sincos":
if len(params) == 1:
arg_tp = params[0]
if self.is_float_type(arg_tp):
# support sincos for float types
self.intrinsic_modes[name + signature] = "math::sincos"
return True
vt = self.get_vector_type_and_size(arg_tp)
if vt and (vt[0] == "float" or vt[0] == "double"):
# support sincos for float vector types
self.intrinsic_modes[name + signature] = "math::sincos"
return True
return False
if name == "modf":
if len(params) == 1:
arg_tp = params[0]
if self.is_float_type(arg_tp):
# support modf for float types
self.intrinsic_modes[name + signature] = "math::modf"
return True
vt = self.get_vector_type_and_size(arg_tp)
if vt and (vt[0] == "float" or vt[0] == "double"):
# support modf for float vector types
self.intrinsic_modes[name + signature] = "math::modf"
return True
return False
if name == "eval_at_wavelength":
self.intrinsic_modes[name + signature] = "math::eval_at_wavelength"
return True
if all_base_same:
# assume component operation
self.intrinsic_modes[name + signature] = "math::component_wise"
return True
return False
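# Example (illustrative): is_math_supported("cross", "F3_F3_F3") records
# intrinsic_modes["crossF3_F3_F3"] = "math::cross" and returns True, while
# an unsupported combination simply returns False.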
def is_supported(self, modname, name, signature):
"""Checks if the given intrinsic is supported."""
if modname == "math":
return self.is_math_supported(name, signature)
return False
def get_signature(self, decl):
"""Get the signature for a given function declaration."""
# poor man's scanner :-)
tokens = re.sub(r'[,()]', lambda m: ' ' + m.group(0) + ' ', decl).split()
tokens, ret_type = self.get_type(tokens)
name = tokens[0]
self.m_intrinsic_mods[name] = self.curr_module
if tokens[1] != '(':
error("unknown token '" + tokens[1] + "' while processing '" + decl + "': '(' expected")
sys.exit(1)
tokens = tokens[2:]
args = []
if tokens[0] != ')':
while True:
tokens, t = self.get_type(tokens)
args.append(t)
# throw away the name
tokens = tokens[1:]
if tokens[0] == ')':
break
if tokens[0] != ',':
error("unknown token '" + tokens[1] + "' while processing '"
+ decl + "': ',' expected")
sys.exit(1)
# skip the comma
tokens = tokens[1:]
signature = self.create_signature(ret_type, args)
if self.debug:
print("%s %s" % (decl, signature))
if self.is_supported(self.curr_module, name, signature):
# insert the new signature for the given name
sigs = self.m_intrinsics.setdefault(name, {})
sigs[signature] = True
# remember the signature (without return type)
_, params = self.split_signature(signature)
self.m_signatures["_".join(params)] = True
else:
warning("Cannot evaluate %s" % decl)
return ""
def parse_file(self, f):
"""Parse a file and retrieve intrinsic function definitions."""
start = False
curr_line = ""
for line in f.readlines():
l = line.strip()
if not start:
if l[:6] == "export":
start = True
curr_line = l[7:].strip()
else:
curr_line += l
if start:
if l[-1] == ";":
start = False
decl = self.as_intrinsic_function(curr_line)
if not decl:
continue
if self.debug:
print(decl)
sig = self.get_signature(decl)
if self.debug:
print(sig)
def gen_condition(self, f, params, as_assert, pre_if = ""):
"""Generate the condition for the parameter type check."""
if as_assert:
self.write(f, "MDL_ASSERT(check_sig_%s(arguments));\n" % "_".join(params))
else:
self.write(f, "%sif (check_sig_%s(arguments)) {\n" % (pre_if, "_".join(params)))
def create_evaluation(self, f, intrinsic, signature):
"""Create the evaluation call for a given intrinsic, signature pair."""
ret_type, params = self.split_signature(signature)
mode = self.intrinsic_modes.get(intrinsic + signature)
if mode == "all_atomic":
self.write(f, "// atomic\n")
idx = 0
for param in params:
kind = self.m_inv_types[param]
self.write(f, "%s const %s = cast<IValue_%s>(arguments[%d])->get_value();\n" % (
kind, chr(ord('a') + idx), kind, idx))
idx += 1
kind = self.m_inv_types[ret_type]
call = ("return value_factory->create_%s(" + intrinsic + "(") % kind
idx = 0
comma = ""
for param in params:
call += comma
comma = ", "
call += chr(ord('a') + idx)
idx += 1
call += "));\n"
self.write(f, call)
return
elif mode == "math::eval_at_wavelength":
# FIXME: not supported yet
self.write(f, "return value_factory->create_float(0.0f);\n")
return
elif mode == "math::blackbody":
self.write(f, "float sRGB[3];\n")
self.write(f, "spectral::mdl_blackbody(sRGB, cast<IValue_float>(arguments[0])->get_value());\n")
self.write(f, "IValue_float const *r = value_factory->create_float(sRGB[0]);\n")
self.write(f, "IValue_float const *g = value_factory->create_float(sRGB[1]);\n")
self.write(f, "IValue_float const *b = value_factory->create_float(sRGB[2]);\n")
self.write(f, "return value_factory->create_rgb_color(r, g, b);\n")
return
elif mode == "math::emission_color_spectrum":
# FIXME: so far black
self.write(f, "IValue_float const *zero = value_factory->create_float(0.0f);\n")
self.write(f, "return value_factory->create_rgb_color(zero, zero, zero);\n")
return
elif mode == "math::emission_color_color":
# FIXME: so far no-op
self.write(f, "return arguments[0];\n")
return
elif mode == "math::DX|DY":
# always zero IF called on a constant
self.write(f, "IType const *arg_tp = arguments[0]->get_type()->skip_type_alias();\n")
self.write(f, "return value_factory->create_zero(arg_tp);\n")
return
elif mode == "math::cross":
vt = self.get_vector_type_and_size(params[0])
if vt[0] == "float":
self.write(f, "return do_cross<float>(value_factory, arguments);\n")
else:
self.write(f, "return do_cross<double>(value_factory, arguments);\n")
return
elif mode == "math::sincos":
if len(params) == 1:
arg_tp = params[0]
if self.is_float_type(arg_tp):
# support sincos for float types
kind = self.m_inv_types[arg_tp]
self.write(f, "IValue_%s const *a = cast<IValue_%s>(arguments[0]);\n" % (kind, kind))
self.write(f, "%s t_s, t_c;\n" % kind)
self.write(f, "sincos(a->get_value(), t_s, t_c);\n")
self.write(f, "IValue const *res[2] = {\n")
self.indent += 1
self.write(f, "value_factory->create_%s(t_s),\n" % kind)
self.write(f, "value_factory->create_%s(t_c)};\n" % kind)
self.indent -= 1
self.write(f, "IType_factory *type_factory = value_factory->get_type_factory();\n")
self.write(f, "IType const *a_type = type_factory->create_array(a->get_type(), 2);\n")
self.write(f, "return value_factory->create_array(as<IType_array>(a_type), res, 2);\n")
return
vt = self.get_vector_type_and_size(arg_tp)
if vt and (vt[0] == "float" or vt[0] == "double"):
# support sincos for float vector types
kind = vt[0]
self.write(f, "IValue const *r_s[%d];\n" % vt[1])
self.write(f, "IValue const *r_c[%d];\n" % vt[1])
self.write(f, "IValue_vector const *arg = cast<IValue_vector>(arguments[0]);\n")
self.write(f, "for (int j = 0; j < %d; ++j) {\n" % vt[1])
self.indent += 1
self.write(f, "IValue_%s const *a = cast<IValue_%s>(arg->get_value(j));\n" % (kind, kind))
self.write(f, "%s t_s, t_c;\n" % kind)
self.write(f, "sincos(a->get_value(), t_s, t_c);\n")
self.write(f, "r_s[j] = value_factory->create_%s(t_s);\n" % kind)
self.write(f, "r_c[j] = value_factory->create_%s(t_c);\n" % kind)
self.indent -= 1
self.write(f, "}\n")
self.write(f, "IType_vector const *v_type = arg->get_type();\n")
self.write(f, "IValue const *res[2] = {\n")
self.indent += 1
self.write(f, "value_factory->create_vector(v_type, r_s, %d),\n" % vt[1])
self.write(f, "value_factory->create_vector(v_type, r_c, %d)};\n" % vt[1])
self.indent -= 1
self.write(f, "IType_factory *type_factory = value_factory->get_type_factory();\n")
self.write(f, "IType const *a_type = type_factory->create_array(v_type, 2);\n")
self.write(f, "return value_factory->create_array(as<IType_array>(a_type), res, 2);\n")
return
elif mode == "math::modf":
if len(params) == 1:
arg_tp = params[0]
if self.is_float_type(arg_tp):
# support modf for float types
kind = self.m_inv_types[arg_tp]
self.write(f, "IValue_%s const *a = cast<IValue_%s>(arguments[0]);\n" % (kind, kind))
self.write(f, "%s t_fractional, t_integral;\n" % kind)
self.write(f, "t_fractional = modf(a->get_value(), t_integral);\n")
self.write(f, "IValue const *res[2] = {\n")
self.indent += 1
self.write(f, "value_factory->create_%s(t_integral),\n" % kind)
self.write(f, "value_factory->create_%s(t_fractional)};\n" % kind)
self.indent -= 1
self.write(f, "IType_factory *type_factory = value_factory->get_type_factory();\n")
self.write(f, "IType const *a_type = type_factory->create_array(a->get_type(), 2);\n")
self.write(f, "return value_factory->create_array(as<IType_array>(a_type), res, 2);\n")
return
vt = self.get_vector_type_and_size(arg_tp)
if vt and (vt[0] == "float" or vt[0] == "double"):
# support modf for float vector types
kind = vt[0]
self.write(f, "IValue const *r_fractional[%d];\n" % vt[1])
self.write(f, "IValue const *r_integral[%d];\n" % vt[1])
self.write(f, "IValue_vector const *arg = cast<IValue_vector>(arguments[0]);\n")
self.write(f, "for (int j = 0; j < %d; ++j) {\n" % vt[1])
self.indent += 1
self.write(f, "IValue_%s const *a = cast<IValue_%s>(arg->get_value(j));\n" % (kind, kind))
self.write(f, "%s t_fractional, t_integral;\n" % kind)
self.write(f, "t_fractional = modf(a->get_value(), t_integral);\n")
self.write(f, "r_fractional[j] = value_factory->create_%s(t_fractional);\n" % kind)
self.write(f, "r_integral[j] = value_factory->create_%s(t_integral);\n" % kind)
self.indent -= 1
self.write(f, "}\n")
self.write(f, "IType_vector const *v_type = arg->get_type();\n")
self.write(f, "IValue const *res[2] = {\n")
self.indent += 1
self.write(f, "value_factory->create_vector(v_type, r_integral, %d),\n" % vt[1])
self.write(f, "value_factory->create_vector(v_type, r_fractional, %d)};\n" % vt[1])
self.indent -= 1
self.write(f, "IType_factory *type_factory = value_factory->get_type_factory();\n")
self.write(f, "IType const *a_type = type_factory->create_array(v_type, 2);\n")
self.write(f, "return value_factory->create_array(as<IType_array>(a_type), res, 2);\n")
return
elif mode == "math::any|all":
vt = self.get_vector_type_and_size(params[0])
need_or = intrinsic == "any"
if need_or:
self.write(f, "bool res = false;\n")
else:
self.write(f, "bool res = true;\n")
self.write(f, "for (int j = 0; j < %d; ++j) {\n" % vt[1])
self.indent += 1
self.write(f, "IValue const *tmp;\n")
idx = 0
for param in params:
kind = self.m_inv_types[param]
self.write(f, "tmp = cast<IValue_vector>(arguments[%d])->get_value(j);\n" % idx)
self.write(f, "%s const %s = cast<IValue_%s>(tmp)->get_value();\n" % (
vt[0], chr(ord('a') + idx), vt[0]))
idx += 1
call = "res = res"
idx = 0
cases = { "bool" : "false", "float" : "1.0f", "double" : "1.0" }
zero = cases.get(vt[0], "0")
if need_or:
comma = " | "
else:
comma = " & "
for param in params:
call += comma + "("
call += chr(ord('a') + idx)
call += " != %s)" % zero
idx += 1
call += ";\n"
self.write(f, call)
self.indent -= 1
self.write(f, "}\n")
self.write(f, "return value_factory->create_bool(res);\n")
return
elif mode == "math::average":
if params[0] == "CC":
size = 3
type = "float"
vec = "rgb_color"
else:
vt = self.get_vector_type_and_size(params[0])
if not vt:
self.write(f, "return arguments[0];\n");
return
size = vt[1]
type = vt[0]
vec = "vector"
self.write(f, "IValue_%s const *arg = cast<IValue_%s>(arguments[0]);\n" % (vec, vec))
self.write(f, "IValue const *sum = arg->get_value(0);\n")
self.write(f, "for (int j = 1; j < %d; ++j) {\n" % size)
self.indent += 1
self.write(f, "sum = sum->add(value_factory, arg->get_value(j));\n")
self.indent -= 1
self.write(f, "}\n")
self.write(f, "IValue const *c = value_factory->create_%s(%s(%d));\n" % (type, type, size))
self.write(f, "return sum->divide(value_factory, c);\n")
return
elif mode == "math::isnan|isfinite":
if len(params) == 1:
vt = self.get_vector_type_and_size(params[0])
if vt:
self.write(f, "IValue const *res[%d];\n" % (vt[1]))
self.write(f, "IValue_vector const *v = cast<IValue_vector>(arguments[0]);\n")
self.write(f, "for (int j = 0; j < %d; ++j) {\n" % vt[1])
self.indent += 1
self.write(f, "IValue_%s const *a = cast<IValue_%s>(v->get_value(j));\n" % (vt[0], vt[0]))
self.write(f, "res[j] = value_factory->create_bool(" + intrinsic + "(a->get_value()));\n");
self.indent -= 1
self.write(f, "}\n")
self.write(f, "IType_factory *type_factory = value_factory->get_type_factory();\n")
self.write(f, "IType_bool const *b_type = type_factory->create_bool();\n")
self.write(f, "IType_vector const *v_type = type_factory->create_vector(b_type, %d);\n" % vt[1])
self.write(f, "return value_factory->create_vector(v_type, res, %d);\n" % vt[1])
else:
kind = self.m_inv_types[params[0]]
self.write(f, "%s a = cast<IValue_%s>(arguments[0])->get_value();\n" % (kind, kind))
self.write(f, "return value_factory->create_bool(" + intrinsic + "(a));\n")
return
elif mode == "math::luminance":
if len(params) == 1:
vt = self.get_vector_type_and_size(params[0])
if params[0] == "F3":
self.write(f, "return do_luminance_sRGB(value_factory, arguments);\n")
elif params[0] == "CC":
self.write(f, "return do_luminance_color(value_factory, arguments);\n")
return
elif mode == "math::max_value|min_value":
if len(params) == 1:
if params[0] == "CC":
# color argument is handled by a dedicated rgb_color helper
self.write(f, "return do_%s_rgb_color(value_factory, arguments);\n" % intrinsic)
else:
vt = self.get_vector_type_and_size(params[0])
if vt:
if vt[0] == "float":
self.write(f, "return do_%s<float>(value_factory, arguments);\n" % intrinsic)
elif vt[0] == "double":
self.write(f, "return do_%s<double>(value_factory, arguments);\n" % intrinsic)
return
elif mode == "math::max_value_wavelength|min_value_wavelength":
# FIXME: so far black
self.write(f, "return value_factory->create_float(0.0f);\n")
return
elif mode == "math::distance|dot":
if len(params) == 2 and params[0] == params[1]:
vt = self.get_vector_type_and_size(params[0])
if vt:
if vt[0] == "float":
self.write(f, "return do_%s<float>(value_factory, arguments);\n" % intrinsic)
elif vt[0] == "double":
self.write(f, "return do_%s<double>(value_factory, arguments);\n" % intrinsic)
elif mode == "math::length|normalize":
if len(params) == 1:
vt = self.get_vector_type_and_size(params[0])
if vt and params[0] != "CC":
if vt[0] == "float":
self.write(f, "return do_%s<float>(value_factory, arguments);\n" % intrinsic)
elif vt[0] == "double":
self.write(f, "return do_%s<double>(value_factory, arguments);\n" % intrinsic)
return
elif mode == "math::transpose":
if len(params) == 1:
mk = self.get_matrix_type_kind(params[0])
if mk:
if mk == "IType::TK_FLOAT":
self.write(f, "return do_%s<float>(value_factory, arguments);\n" % intrinsic)
elif mk == "IType::TK_DOUBLE":
self.write(f, "return do_%s<double>(value_factory, arguments);\n" % intrinsic)
return
elif mode == "math::component_wise":
vt = self.get_vector_type_and_size(ret_type)
# vector/color all same base arguments
if ret_type == "CC":
self.write(f, "IValue_float const *res[3];\n")
else:
if self.get_vector_type_and_size(params[0]):
self.write(f, "IType_vector const *v_type = cast<IValue_vector>(arguments[0])->get_type();\n")
else:
self.write(f, "IType_vector const *v_type = cast<IValue_vector>(arguments[1])->get_type();\n")
self.write(f, "IValue const *res[%d];\n" % (vt[1]))
idx = 0
for param in params:
if self.is_atomic_type(param):
self.write(f, "IValue_%s const *%s = cast<IValue_%s>(arguments[%d]);\n" %
(vt[0], chr(ord('a') + idx), vt[0], idx))
elif param == "CC":
self.write(f, "IValue_rgb_color const *v_%s = cast<IValue_rgb_color>(arguments[%d]);\n" %
(chr(ord('a') + idx), idx))
else:
self.write(f, "IValue_vector const *v_%s = cast<IValue_vector>(arguments[%d]);\n" %
(chr(ord('a') + idx), idx))
idx += 1
self.write(f, "for (int j = 0; j < %d; ++j) {\n" % vt[1])
self.indent += 1
idx = 0
for param in params:
if not self.is_atomic_type(param):
self.write(f, "IValue_%s const *%s = cast<IValue_%s>(v_%s->get_value(j));\n" %
(vt[0], chr(ord('a') + idx), vt[0], chr(ord('a') + idx)))
idx += 1
call = ("res[j] = value_factory->create_%s(" + intrinsic + "(") % vt[0]
idx = 0
comma = ""
for param in params:
call += comma
comma = ", "
call += chr(ord('a') + idx)
call += "->get_value()"
idx += 1
call += "));\n"
self.write(f, call)
self.indent -= 1
self.write(f, "}\n")
if ret_type == "CC":
self.write(f, "return value_factory->create_rgb_color(res[0], res[1], res[2]);\n")
else:
self.write(f, "return value_factory->create_vector(v_type, res, %d);\n" % vt[1])
return
elif mode is None:
error("Mode not set for intrinsic: %s %s" % (intrinsic, signature))
else:
error("Unsupported mode for intrinsic: %s %s %s" % (intrinsic, signature, mode))
self.write(f, "//Unsupported\n")
def handle_signatures(self, f, intrinsic, signatures):
"""Create code all sigtatures of one intrinsic."""
if len(signatures) == 1:
# no overloads
params = signatures[0].split('_')[1:]
if self.strict:
self.gen_condition(f, params, False)
self.indent += 1
else:
self.gen_condition(f, params, True)
self.create_evaluation(f, intrinsic, signatures[0])
if self.strict:
self.indent -= 1
self.write(f, "}\n")
else:
# have overloads
signatures.sort()
pre_if = ""
for sig in signatures:
params = sig.split('_')[1:]
self.gen_condition(f, params, False, pre_if)
pre_if = "} else "
self.indent += 1
self.create_evaluation(f, intrinsic, sig)
self.indent -= 1
self.write(f, "}\n")
def handle_intrinsic(self, f, intrinsic):
"""Create code for one intrinsic."""
sigs = self.m_intrinsics[intrinsic]
# order all signatures by ascending length
l = {}
for sig in sigs:
sig_token = sig.split('_')
n_params = len(sig_token) - 1
l.setdefault(n_params, []).append(sig)
k = list(l.keys())
if len(k) == 1:
# typical case: all signatures have the same length
n_param = k[0]
if self.strict:
self.write(f, "if (n_arguments == %d) {\n" % n_param)
self.indent += 1
else:
# create just an assertion
self.write(f, "MDL_ASSERT(n_arguments == %d);\n" % n_param)
for n_param in k:
self.handle_signatures(f, intrinsic, l[n_param])
if self.strict:
self.indent -= 1
self.write(f, "}\n")
else:
# overloads with different signature length
self.write(f, "switch (n_arguments) {\n")
n_params = k
n_params.sort()
for n_param in n_params:
self.write(f, "case %d:\n" % n_param)
self.indent += 1
self.write(f, "{\n")
self.indent += 1
self.handle_signatures(f, intrinsic, l[n_param])
self.indent -= 1
self.write(f, "}\n")
self.write(f, "break;\n")
self.indent -= 1
self.write(f, "}\n")
def create_type_sig_tuple(self, params):
"""Create a type signature tuple (a, b) for a signature a_b."""
res = []
comma = ""
for param in params:
res.append(self.m_inv_types[param])
return "(" + ", ".join(res) + ")"
def gen_type_check(self, f, idx, type_code):
"""Create a check for the idx parameter to be of given type."""
atomic_chk = self.get_atomic_value_kind(type_code)
if atomic_chk:
self.write(f, "if (arguments[%s]->get_kind() != %s)\n" % (idx, atomic_chk))
self.indent += 1
self.write(f, "return false;\n")
self.indent -= 1
else:
vector_chk = self.get_vector_type_kind(type_code)
matrix_chk = self.get_matrix_type_kind(type_code)
if vector_chk:
self.write(f, "if (IValue_vector const *v = as<IValue_vector>(arguments[%s])) {\n" % (idx))
self.indent += 1
self.write(f, "if (v->get_component_count() != %s)\n" % type_code[-1])
self.indent += 1
self.write(f, "return false;\n")
self.indent -= 1
self.write(f, "IType_vector const *v_type = v->get_type();\n")
self.write(f, "IType_atomic const *e_type = v_type->get_element_type();\n")
self.write(f, "if (e_type->get_kind() != %s)\n" % vector_chk)
self.indent += 1
self.write(f, "return false;\n")
self.indent -= 1
self.indent -= 1
self.write(f, "} else {\n")
self.indent += 1
self.write(f, "return false;\n")
self.indent -= 1
self.write(f, "}\n")
elif matrix_chk:
self.write(f, "if (IValue_matrix const *v = as<IValue_matrix>(arguments[%s])) {\n" % (idx))
self.indent += 1
self.write(f, "if (v->get_component_count() != %s)\n" % type_code[-1])
self.indent += 1
self.write(f, "return false;\n")
self.indent -= 1
self.write(f, "IType_matrix const *m_type = v->get_type();\n")
self.write(f, "IType_vector const *v_type = m_type->get_element_type();\n")
self.write(f, "if (v_type->get_size() != %s)\n" % type_code[-2])
self.indent += 1
self.write(f, "return false;\n")
self.indent -= 1
self.write(f, "IType_atomic const *e_type = v_type->get_element_type();\n")
self.write(f, "if (e_type->get_kind() != %s)\n" % matrix_chk)
self.indent += 1
self.write(f, "return false;\n")
self.indent -= 1
self.indent -= 1
self.write(f, "} else {\n")
self.indent += 1
self.write(f, "return false;\n")
self.indent -= 1
self.write(f, "}\n")
else:
self.write(f, "// Unsupported\n");
self.write(f, "return false;\n")
def create_signature_checker(self, f):
"""Create all signature checker functions."""
signatures = list(self.m_signatures.keys())
signatures.sort()
for sig in signatures:
params = sig.split('_')
self.write(f, "/// Check that the given arguments have the signature %s.\n" % self.create_type_sig_tuple(params))
self.write(f, "///\n")
self.write(f, "/// \\param arguments the values, must be of length %d\n" % len(params))
self.write(f, "static bool check_sig_%s(IValue const * const arguments[])\n" % sig)
self.write(f, "{\n")
self.indent += 1
all_equal = True
first_p = params[0]
for param in params:
if first_p != param:
all_equal = False
break
if all_equal and len(params) > 1:
self.write(f, "for (size_t i = 0; i < %d; ++i) {\n" % (len(params)))
self.indent += 1
self.gen_type_check(f, 'i', first_p)
self.indent -= 1
self.write(f, "}\n")
else:
for i in range(len(params)):
self.gen_type_check(f, str(i), params[i])
self.write(f, "return true;\n")
self.indent -= 1
self.write(f, "}\n\n")
def finalize(self):
"""Create output."""
f = open(self.out_name, "w")
self.create_signature_checker(f)
self.write(f, "/// Evaluates an intrinsic function called on constant arguments.\n")
self.write(f, "///\n")
self.write(f, "/// \\param value_factory The value factory used to create new values\n")
self.write(f, "/// \\param sema The semantic of the intrinsic function\n")
self.write(f, "/// \\param arguments The values of the function arguments\n")
self.write(f, "/// \\param n_arguments The number of arguments\n")
self.write(f, "///\n")
self.write(f, "/// \\return The function result or IValue_bad if the function could\n")
self.write(f, "/// not be evaluated\n")
self.write(f, "IValue const *evaluate_intrinsic_function(\n")
self.indent += 1
self.write(f, "IValue_factory *value_factory,\n")
self.write(f, "IDefinition::Semantics sema,\n")
self.write(f, "IValue const * const arguments[],\n")
self.write(f, "size_t n_arguments)\n")
self.indent -= 1
self.write(f, "{\n")
self.indent += 1
self.write(f, "switch (sema) {\n")
keys = list(self.m_intrinsics.keys())
keys.sort()
for intrinsic in keys:
mod_name = self.m_intrinsic_mods[intrinsic]
self.write(f, "case IDefinition::DS_INTRINSIC_%s_%s:\n" % (mod_name.upper(), intrinsic.upper()))
self.indent += 1
self.handle_intrinsic(f, intrinsic)
self.write(f, "break;\n");
self.indent -= 1
self.write(f, "default:\n")
self.indent += 1
self.write(f, "break;\n");
self.indent -= 1
self.write(f, "}\n")
self.write(f, "// cannot evaluate\n")
self.write(f, "return value_factory->create_bad();\n")
self.indent -= 1
self.write(f, "}\n")
f.close()
def add_support(self, decl):
"""The given declaration is supported."""
decl = self.as_intrinsic_function(decl)
# NYI
pass
def add_simple_math(self):
pass
def usage(args):
"""print usage info and exit"""
print("Usage: %s stdlib_directory outputfile" % args[0])
return 1
def main(args):
"""Process one file and generate signatures."""
if len(args) != 3:
return usage(args)
stdlib_dir = args[1]
out_name = args[2]
strict = True
try:
parser = SignatureParser(args[0], stdlib_dir, out_name, strict)
parser.parse("math")
parser.finalize()
except IOError as e:
error(str(e))
return 1
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| MDL-SDK-master | src/mdl/compiler/compilercore/gen_intrinsic_eval.py |
#!/usr/bin/env python
#*****************************************************************************
# Copyright (c) 2012-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*****************************************************************************
#
# Convert a mdl file to C++ string literals
#
import glob, os, string, re
import time
import sys
try: # Python 2.2 has no optparse
from optparse import OptionParser
no_optparse = False
except ImportError:
no_optparse = True
class Block:
def __init__(self):
self.name = ""
self.comment = []
self.text = []
def scan_src_file(prefix, filename):
"""
Read a .mdl file and build a Block(name,comment,text).
The name is the filename without extension, with module_
prepended
"""
block = Block()
# remove extension and add prefix
block.name, ext = os.path.splitext(os.path.basename(filename))
block.name = prefix + "_" + block.name
in_file = open(filename)
state = "header"
for line in in_file:
# rstrip removes the '\n' too
line = line.rstrip()
if state == "header":
# read header, lines must start with "//"
res = re.search(r"\s*//(.*)", line)
if res:
block.comment.append(res.group(1))
else:
block.text.append(line)
state = "in_text"
elif state == "in_text":
block.text.append(line)
return block
def check_whitespace(src):
"""Check a given file for whitespace errors."""
f = open(src)
lineno = 1
module = os.path.basename(src)
s_re = re.compile(r"^[ ]*\t+[ ]*.*")
e_re = re.compile(r".*[ \t]+$")
bad = False
for line in f.readlines():
if s_re.match(line):
os.sys.stderr.write("%s(%d): error: has whitespace errors at start\n" % (module, lineno))
bad = True
elif e_re.match(line):
os.sys.stderr.write("%s(%d): error: has whitespace errors at end\n" % (module, lineno))
bad = True
lineno = lineno + 1
if bad:
sys.exit("Module has whitespace errors")
def check_module(checker, src):
"""Run a checker on a given module."""
path, name = os.path.split(src)
module_name, ext = os.path.splitext(name)
if module_name == "distilling_support":
# does not work with this simple logic because it is in nvidia AND requires base import, so ignore
return
retval = os.system(checker + " " + path + " " + module_name)
if retval != 0:
sys.exit("Checking module '" + module_name + "' failed! Aborting.")
def write_cpp_header(module, blocks, target):
# write header
head = time.strftime( """/******************************************************************************
* Copyright %Y NVIDIA Corporation. All rights reserved.
*****************************************************************************/
""")
target.write(head)
def generate_cpp_file(files,
dst,
pch,
prefix,
module,
namespace,
do_escape,
silent,
key,
checker):
"""
Generate C++ files (.cpp/.h) from parsed blocks.
"""
blocks = []
for src in files:
check_whitespace(src)
if checker:
check_module(checker, src)
block = scan_src_file(prefix, src)
blocks.append(block)
header_filename, ext = os.path.splitext(dst)
header_filename += '.h'
# generate .cpp file
target = open(dst, "w+")
write_cpp_header(module, blocks, target)
target.write('#include ' + pch + '\n')
target.write('#include "' + os.path.basename(header_filename) + '"\n\n')
target.write('namespace mi {\n')
target.write('namespace ' + namespace + ' {\n\n')
# write all blocks
for block in blocks:
for str in block.comment:
target.write("// " + str + '\n')
text = "\n".join(block.text) + "\n"
l = len(text)
kl = len(key) if key else 0
target.write("unsigned char const " + block.name + "[%u] = {" % (l))
first = False
if key:
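# Simple XOR cipher: each output byte is the source byte XOR'd with the
# cycled key byte and with the low byte of its own index, so decoding
# just applies the same transform again.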
for i in range(l):
first = False
if i % 8 == 0:
target.write('\n ')
first = True
code = ord(text[i]) ^ ord(key[i % kl]) ^ (i & 0xFF)
start = code
if not first:
target.write(' ')
target.write("0x%02x," % code)
target.write('\n};\n\n')
else:
for i in range(l):
first = False
if i % 8 == 0:
target.write('\n ')
first = True
if not first:
target.write(' ')
target.write("0x%02x," % ord(text[i]))
target.write('\n};\n\n')
# write footer
target.write('} // ' + namespace + '\n} // mi\n')
target.close()
# generate header file
target = open(header_filename, "w+")
write_cpp_header(module, blocks, target)
guard = header_filename[:].replace("\\", "/")
guard = os.path.basename(guard).upper().replace('.', '_')
target.write('#ifndef ' + guard + '\n')
target.write('#define ' + guard + '\n\n')
target.write('namespace mi {\n')
target.write('namespace ' + namespace + ' {\n\n')
# write all blocks
for block in blocks:
text = "\n".join(block.text) + "\n"
l = len(text)
target.write("extern unsigned char const " + block.name + "[%u];\n" % (l))
target.write('\n} // ' + namespace + '\n} // mi\n')
target.write('#endif // ' + guard + '\n')
target.close()
def main():
try:
mi_src = os.environ["MI_SRC"]
except:
mi_src = "../../.."
default_dst = os.path.normpath(mi_src + "/mdl/compiler/compilercore")
if no_optparse:
class Options:
def __init__(self):
self.dst_path = default_dst
self.silent = False
self.namespace = "mdl"
# mirror the optparse defaults so generate_cpp_file() gets them
# even when the corresponding flags are not passed
self.module = "mdl/compiler/compilercore"
self.prefix = "mdl_module"
self.pch = '"pch.h"'
self.do_escape = False
self.key = None
self.checker = None
options = Options()
args = []
#print sys.argv
state = "default"
for arg in sys.argv[1:]:
if state == "args":
args.append(arg)
state = "args" # keep looking for 'em
elif state == "expect_dst":
options.dst_path = arg
state = "default"
elif state == "expect_namespace":
options.namespace = arg
state = "default"
elif state == "expect_module":
options.module = arg
state = "default"
elif state == "expect_prefix":
options.prefix = arg
state = "default"
elif state == "expect_pch":
options.pch = arg
state = "default"
elif state == "expect_key":
options.encode = True
options.key = arg
state = "default"
elif state == "default":
if arg == "-d": state = "expect_dst"
elif arg == "-n": state = "expect_namespace"
elif arg == "-s": options.silent = True
elif arg == "-e": options.do_escape = True
elif arg == "--escape": options.do_escape = True
elif arg == "-m": state = "expect_module"
elif arg == "-p": state = "expect_prefix"
elif arg == "--prefix": state = "expect_prefix"
elif arg == "--pch": state = "expect_pch"
elif arg == "-E": state = "expect_key"
elif arg == "--encode": state = "expect_key"
else:
args.append(arg)
state = "args"
else:
parser = OptionParser()
parser.add_option("-d", "--dst",
help="dst C++ file to generate",
dest="dst_path")
parser.add_option("-s", "--silent",
help="suppress messages",
action="store_true", dest="silent")
parser.add_option("-e", "--escape",
help="escape quotes and backslashes found in files",
action="store_true", dest="do_escape")
parser.add_option("-n", "--namespace",
help="namespace to use for generated cpp files",
dest="namespace",
default="mdl")
parser.add_option("-m", "--module",
help="module name to put into source files",
dest="module",
default="mdl/compiler/compilercore")
parser.add_option("-p", "--prefix",
help="prefix given value to const char * declarations",
dest="prefix",
default="mdl_module")
parser.add_option("--pch",
help="precompiled header to use",
dest="pch",
default='"pch.h"')
parser.add_option("-E", "--encode",
help="encode string using simple XOR chiffre",
dest="key",
default=None)
parser.add_option("-c", "--check",
help="enforce module check",
dest="checker",
default=None)
(options, args) = parser.parse_args()
if len(args) == 0:
print("Must supply at least one mdl file as input")
sys.exit(1)
if not options.silent:
print("Creating '%s' from '%s'" % (options.dst_path, ' '.join(args)))
generate_cpp_file(args,
options.dst_path,
options.pch,
options.prefix,
options.module,
options.namespace,
options.do_escape,
options.silent,
options.key,
options.checker)
if __name__ == "__main__":
main()
| MDL-SDK-master | src/mdl/compiler/compilercore/generate_stdmodule.py |
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import itertools as it
import os
import re
from setuptools import setup
from subprocess import call
import sys
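# match_mxnet_req matches requirement lines such as "mxnet>=1.2";
# extract_major_minor pulls the "major.minor" part out of a version string.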
match_mxnet_req = re.compile(r"mxnet>?=?=\d+\.\d+\d*")
extract_major_minor = re.compile(r"\D*(\d+\.\d+)\D*")
def check_mxnet_version(min_ver):
if not int(os.environ.get('UPDATE_MXNET_FOR_ONNX_EXPORTER', '1')):
print("Env var set to not upgrade MxNet for ONNX exporter. Skipping.")
return False
try:
print("Checking if MxNet is installed.")
import mxnet as mx
except ImportError:
print("MxNet is not installed. Installing version from requirements.txt")
return False
ver = float(re.match(extract_major_minor, mx.__version__).group(1))
min_ver = float(re.match(extract_major_minor, min_ver).group(1))
if ver < min_ver:
print("MxNet is installed, but installed version (%s) is older than expected (%s). Upgrading." % (str(ver).rstrip('0'), str(min_ver).rstrip('0')))
return False
print("Installed MxNet version (%s) meets the requirement of >= (%s). No need to install." % (str(ver).rstrip('0'), str(min_ver).rstrip('0')))
return True
if __name__ == '__main__':
with open('requirements.txt') as f:
required = f.read().splitlines()
mx_match_str = lambda x: re.match(match_mxnet_req, x) is None
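# Sort so the mxnet requirement (key False) comes first, then groupby splits
# the list into the mxnet line and all remaining requirements.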
mx_str, new_reqs = tuple([list(i[1]) for i in it.groupby(sorted(required, key = mx_match_str), key = mx_match_str)])
if not check_mxnet_version(mx_str[0]):
new_reqs += mx_str
setup(
install_requires = new_reqs,
name = 'mx2onnx',
description = 'MxNet to ONNX converter',
author = 'NVIDIA Corporation',
packages = ['mx2onnx_converter'],
classifiers = [
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5'
],
keywords = 'mxnet onnx',
zip_safe = False,
test_suite='nose.collector',
tests_require=['nose'],
version = '0.1'
)
call("rm -rf dist".split())
call("rm -rf build".split())
call("rm -rf mx2onnx.egg-info".split())
| mxnet_to_onnx-master | setup.py |
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os.path
import subprocess
from unittest import TestCase
import mxnet as mx
import numpy as np
# needed by both the exporter and importer
import onnx
# MxNet exporter
from mx2onnx_converter.conversion_helpers import from_mxnet
# MxNet importer
# Needed for ONNX -> NNVM -> MxNet conversion
# to validate the results of the export
#import onnx_mxnet
from mxnet.contrib.onnx import import_model
def check_gpu_id(gpu_id):
try:
result = subprocess.check_output("nvidia-smi --query-gpu=gpu_bus_id --format=csv,noheader", shell=True).strip()
except OSError as e:
return False
if not isinstance(result, str):
result = str(result.decode("ascii"))
gpu_ct = len(result.split("\n"))
# GPU IDs are zero-based, so a valid ID must be strictly less than the count
exists = gpu_id < gpu_ct
print("\nChecked for GPU ID %d. Less than GPU count (%d)? %s\n" % (gpu_id, gpu_ct, exists))
return exists
# MxNet LeNet-5 implementation
def lenet5():
data = mx.sym.var('data')
# first conv layer
conv1 = mx.sym.Convolution(data=data, kernel=(5,5), num_filter=20)
tanh1 = mx.sym.Activation(data=conv1, act_type="tanh")
pool1 = mx.sym.Pooling(data=tanh1, pool_type="max", kernel=(2,2), stride=(2,2))
# second conv layer
conv2 = mx.sym.Convolution(data=pool1, kernel=(5,5), num_filter=50)
tanh2 = mx.sym.Activation(data=conv2, act_type="tanh")
pool2 = mx.sym.Pooling(data=tanh2, pool_type="max", kernel=(2,2), stride=(2,2))
# first fullc layer
flatten = mx.sym.flatten(data=pool2)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)
tanh3 = mx.sym.Activation(data=fc1, act_type="tanh")
# second fullc
fc2 = mx.sym.FullyConnected(data=tanh3, num_hidden=10)
# softmax loss
lenet = mx.sym.SoftmaxOutput(data=fc2, name='softmax')
return lenet
# train LeNet-5 model on MNIST data
def train_lenet5(num_epochs, gpu_id, train_iter, val_iter, test_iter, batch_size):
ctx = mx.gpu(gpu_id) if gpu_id is not None else mx.cpu()
print("\nUsing %s to train" % str(ctx))
lenet_model = lenet5()
lenet_model = mx.mod.Module(lenet_model, context=ctx)
# This is cached so download will only take place if needed
mnist = mx.test_utils.get_mnist()
train_iter = mx.io.NDArrayIter(mnist['train_data'], mnist['train_label'], batch_size, shuffle=True)
val_iter = mx.io.NDArrayIter(mnist['test_data'], mnist['test_label'], batch_size)
lenet_model.fit(train_iter,
eval_data=val_iter,
optimizer='sgd',
optimizer_params={'learning_rate': 0.1, 'momentum': 0.9},
eval_metric='acc',
batch_end_callback = mx.callback.Speedometer(batch_size, 100),
num_epoch=num_epochs)
test_iter = mx.io.NDArrayIter(mnist['test_data'], mnist['test_label'], batch_size)
# predict accuracy for lenet
acc = mx.metric.Accuracy()
lenet_model.score(test_iter, acc)
accuracy = acc.get()[1]
print("Training accuracy: %.2f" % accuracy)
assert accuracy > 0.98, "Accuracy was too low"
return lenet_model
class LeNet5Test(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
# self.tearDown = lambda: subprocess.call("rm -f *.gz *-symbol.json *.params *.onnx", shell=True)
def test_convert_and_compare_prediction(self):
# get data iterators and set basic hyperparams
num_epochs = 10
mnist = mx.test_utils.get_mnist()
batch_size = 1000
train_iter = mx.io.NDArrayIter(mnist['train_data'], mnist['train_label'], batch_size, shuffle=True)
val_iter = mx.io.NDArrayIter(mnist['test_data'], mnist['test_label'], batch_size)
test_iter = mx.io.NDArrayIter(mnist['test_data'], mnist['test_label'], batch_size)
model_name = 'lenet5'
model_file = '%s-symbol.json' % model_name
params_file = '%s-%04d.params' % (model_name, num_epochs)
onnx_file = "%s.onnx" % model_name
test_gpu_id = 0
gpu_id = check_gpu_id(test_gpu_id)
if not gpu_id:
print("\nWARNING: GPU id %d is invalid on this machine" % test_gpu_id)
gpu_id = None
# If trained model exists, re-use cached version. Otherwise, train model.
if not (os.path.exists(model_file) and os.path.exists(params_file)):
print("\n\nTraining LeNet-5 on MNIST data")
trained_lenet = train_lenet5(num_epochs, gpu_id, train_iter, val_iter, test_iter, batch_size)
print("Training finished. Saving model")
trained_lenet.save_checkpoint(model_name, num_epochs)
# delete object so we can verify correct loading of the checkpoint from disk
del trained_lenet
else:
print("\n\nTrained model exists. Skipping training.")
# Load serialized MxNet model (model-symbol.json + model-epoch.params)
trained_lenet = mx.mod.Module.load(model_name, num_epochs)
trained_lenet.bind(data_shapes=test_iter.provide_data, label_shapes=None, for_training=False, force_rebind=True)
# Run inference in MxNet from json/params serialized model
test_iter = mx.io.NDArrayIter(mnist['test_data'], mnist['test_label'], batch_size)
pred_softmax = trained_lenet.predict(test_iter).asnumpy()
pred_classes = np.argmax(pred_softmax, axis=1)
# Create and save ONNX model
print("\nConverting trained MxNet model to ONNX")
model = from_mxnet(model_file, params_file, [1, 1, 28, 28], np.float32, log=True)
with open(onnx_file, "wb") as f:
serialized = model.SerializeToString()
f.write(serialized)
print("\nONNX file %s serialized to disk" % onnx_file)
print("\nLoading ONNX file and comparing results to original MxNet output.")
# ONNX load and inference step
onnx_sym, onnx_arg_params, onnx_aux_params = import_model(onnx_file)
onnx_mod = mx.mod.Module(symbol=onnx_sym, data_names=['data'], context=mx.cpu(), label_names=None)
# Build the iterator with the input explicitly keyed as 'data', the input
# name the imported ONNX symbol is bound with above
test_iter = mx.io.NDArrayIter(data={'data': mnist['test_data']}, label=None, batch_size=batch_size)
onnx_mod.bind(data_shapes=test_iter.provide_data, label_shapes=None, for_training=False, force_rebind=True)
onnx_mod.set_params(arg_params=onnx_arg_params, aux_params=onnx_aux_params, allow_missing=True)
onnx_pred_softmax = onnx_mod.predict(test_iter).asnumpy()
onnx_pred_classes = np.argmax(onnx_pred_softmax, axis=1)
pred_matches = onnx_pred_classes == pred_classes
pred_match_ct = pred_matches.sum()
pred_total_ct = np.size(pred_matches)
pct_match = 100.0 * pred_match_ct / pred_total_ct
print("\nOriginal MxNet predictions and ONNX-based predictions after export and re-import:")
print("Total examples tested: %d" % pred_total_ct)
print("Matches: %d" % pred_match_ct)
print("Percent match: %.2f\n" % pct_match)
assert pred_match_ct == pred_total_ct, "Not all predictions from the ONNX representation match"
| mxnet_to_onnx-master | tests/test_convert_lenet5.py |
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
mx_to_uff_converter_functions.py
Conversion Functions for common layers.
Add new functions here with a decorator.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from onnx import defs, checker, helper, numpy_helper, mapping
from .mx2onnx_converter import MxNetToONNXConverter as mx2onnx
import numpy as np
import re
import sys
def looks_like_weight(name):
"""Internal helper to figure out if node should be hidden with `hide_weights`.
"""
if name.endswith("_weight"):
return True
if name.endswith("_bias"):
return True
if name.endswith("_beta") or name.endswith("_gamma") or name.endswith("_moving_var") or name.endswith("_moving_mean"):
return True
return False
@mx2onnx.register("null")
def convert_weights_and_inputs(node, **kwargs):
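# MXNet represents both graph inputs and learned parameters as "null" ops:
# parameter-like names become ONNX initializers, everything else becomes a
# plain input tensor.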
name = node["name"]
if looks_like_weight(name):
weights = kwargs["weights"]
initializer = kwargs["initializer"]
weights = kwargs["weights"]
np_arr = weights[name]
data_type = mapping.NP_TYPE_TO_TENSOR_TYPE[np_arr.dtype]
dims = np.shape(np_arr)
tensor_node = helper.make_tensor_value_info(name, data_type, dims)
initializer.append(
helper.make_tensor(
name=name,
data_type=data_type,
dims=dims,
vals=np_arr.flatten().tolist(),
raw=False,
)
)
return tensor_node
else:
tval_node = helper.make_tensor_value_info(name, kwargs["in_type"], kwargs["in_shape"])
return tval_node
@mx2onnx.register("Deconvolution")
def convert_deconvolution(node, **kwargs):
name = node["name"]
inputs = node["inputs"]
num_inputs = len(inputs)
proc_nodes = kwargs["proc_nodes"]
input_node = proc_nodes[inputs[0][0]].name
weights_node = proc_nodes[inputs[1][0]].name
if num_inputs > 2:
bias_node = proc_nodes[inputs[2][0]].name
attrs = node.get("attrs")
tuple_re = re.compile(r'\([0-9|,| ]+\)')
def parse_helper(attrs_name, alt_value=None):
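# Parse an MXNet tuple-style attribute string such as "(2, 2)"; return
# alt_value when the attribute is absent or contains no tuple.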
if attrs is None:
return alt_value
attrs_str = attrs.get(attrs_name)
if attrs_str is None:
return alt_value
attrs_match = tuple_re.search(attrs_str)
if attrs_match is not None:
if attrs_match.span() == (0, len(attrs_str)):
dims = eval(attrs_str)
return dims
else:
raise AttributeError("Malformed %s dimensions: %s" % (attrs_name, str(attrs_str)))
return alt_value
num_filter = int(attrs["num_filter"])
kernel_dims = list(parse_helper("kernel"))
stride_dims = list(parse_helper("stride", [1, 1]))
pad_dims = parse_padding(attrs)
num_group = int(attrs.get("num_group", 1))
# Not sure why this is included, it seems to change what the graphs is doing.
# TODO(kellens): Ask Marek if this is requried.
# if len(pad_dims) < 2 * len(kernel_dims):
# pad_dims = [0] * (2 * len(kernel_dims) - len(pad_dims)) + pad_dims
input_nodes = [input_node, weights_node]
if num_inputs > 2:
input_nodes.append(bias_node)
deconv_node = helper.make_node(
"ConvTranspose",
inputs=input_nodes,
outputs=[name],
kernel_shape=kernel_dims,
strides=stride_dims,
pads=pad_dims,
group=num_group,
name=name
)
return deconv_node
@mx2onnx.register("Convolution")
def convert_convolution(node, **kwargs):
name = node["name"]
inputs = node["inputs"]
num_inputs = len(inputs)
proc_nodes = kwargs["proc_nodes"]
input_node = proc_nodes[inputs[0][0]].name
weights_node = proc_nodes[inputs[1][0]].name
if num_inputs > 2:
bias_node = proc_nodes[inputs[2][0]].name
attrs = node.get("attrs")
tuple_re = re.compile(r'\([0-9|,| ]+\)')
def parse_helper(attrs_name, alt_value=None):
if attrs is None:
return alt_value
attrs_str = attrs.get(attrs_name)
if attrs_str is None:
return alt_value
attrs_match = tuple_re.search(attrs_str)
if attrs_match is not None:
if attrs_match.span() == (0, len(attrs_str)):
dims = eval(attrs_str)
return dims
else:
raise AttributeError("Malformed %s dimensions: %s" % (attrs_name, str(attrs_str)))
return alt_value
num_filter = int(attrs["num_filter"])
kernel_dims = list(parse_helper("kernel"))
stride_dims = list(parse_helper("stride", [1, 1]))
pad_dims = parse_padding(attrs)
num_group = int(attrs.get("num_group", 1))
# Not sure why this is included, it seems to change what the graphs is doing.
# TODO(kellens): Ask Marek if this is requried.
# if len(pad_dims) < 2 * len(kernel_dims):
# pad_dims = [0] * (2 * len(kernel_dims) - len(pad_dims)) + pad_dims
input_nodes = [input_node, weights_node]
if num_inputs > 2:
input_nodes.append(bias_node)
conv_node = helper.make_node(
"Conv",
inputs=input_nodes,
outputs=[name],
kernel_shape=kernel_dims,
strides=stride_dims,
pads=pad_dims,
group=num_group,
name=name,
)
return conv_node
@mx2onnx.register("FullyConnected")
def convert_fully_connected(node, **kwargs):
name = node["name"]
inputs = node["inputs"]
input_node_id = inputs[0][0]
weight_node_id = inputs[1][0]
bias_node_id = inputs[2][0]
proc_nodes = kwargs["proc_nodes"]
input_node = proc_nodes[input_node_id]
weights_node = proc_nodes[weight_node_id]
bias_node = proc_nodes[bias_node_id]
input_name = input_node.name
weights_name = weights_node.name
bias_name = bias_node.name
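# MXNet's FullyConnected computes y = x * W^T + b with W stored as
# (num_hidden, input_dim), hence transB=True on the ONNX Gemm below.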
node = helper.make_node(
"Gemm",
[input_name, weights_name, bias_name], # input (A, B, C) - C can be in place
[name], # output
alpha=1.0,
beta=1.0,
transA=False,
transB=True,
name=name
)
return node
@mx2onnx.register("BatchNorm")
def convert_batchnorm(node, **kwargs):
name = node["name"]
proc_nodes = kwargs["proc_nodes"]
inputs = node["inputs"]
attrs = node["attrs"]
# MXNet defaults: momentum 0.9, eps 0.001
momentum = float(attrs.get("momentum", 0.9))
eps = float(attrs.get("eps", 0.001))
data_idx = inputs[0][0]
gamma_idx = inputs[1][0]
beta_idx = inputs[2][0]
moving_mean_idx = inputs[3][0]
moving_var_idx = inputs[4][0]
data_node = proc_nodes[data_idx].name
gamma_node = proc_nodes[gamma_idx].name
beta_node = proc_nodes[beta_idx].name
mov_mean_node = proc_nodes[moving_mean_idx]
mov_mean_node = mov_mean_node.name
mov_var_node = proc_nodes[moving_var_idx].name
bn_node = helper.make_node(
"BatchNormalization",
[data_node,
gamma_node, # scale
beta_node, # bias
mov_mean_node,
mov_var_node
],
[name],
name=name,
epsilon=eps,
momentum=momentum,
is_test=1,
spatial=1,
consumed_inputs=(0, 0, 0, 1, 1)
)
return bn_node
@mx2onnx.register("Activation")
def convert_activation(node, **kwargs):
name = node["name"]
proc_nodes = kwargs["proc_nodes"]
attrs = node["attrs"]
act_type = attrs["act_type"]
inputs = node["inputs"]
input_node_idx = inputs[0][0]
input_node = proc_nodes[input_node_idx].output[0]
# Creating a dictionary here, but if this titlecase pattern
# is consistent for other activations, this can be changed to
# mxnet_name.title()
act_types = {
"tanh": "Tanh",
"relu": "Relu",
"sigmoid": "Sigmoid",
"softrelu": "Softplus",
"softsign": "Softsign"
}
act_name = act_types.get(act_type)
if act_name:
node = helper.make_node(
act_name,
[input_node],
[name],
name=name
)
else:
raise AttributeError(
"Activation %s not implemented or recognized in the converter" % act_type
)
return node
def parse_padding(attrs):
tuple_re = re.compile(r'\([0-9|,| ]+\)')
def parse_helper(attrs_name, alt_value=None):
if attrs is None:
return alt_value
attrs_str = attrs.get(attrs_name)
if attrs_str is None:
return alt_value
attrs_match = tuple_re.search(attrs_str)
if attrs_match is not None:
if attrs_match.span() == (0, len(attrs_str)):
dims = eval(attrs_str)
return dims
else:
raise AttributeError("Malformed %s dimensions: %s" % (attrs_name, str(attrs_str)))
return alt_value
symmetric_pads = list(parse_helper("pad", [0, 0]))
result = []
# Each padding in MXNet is assumed to be symmetric in dim1, dim2 ...
# In ONNX we need to have a start_dim1, start_dim2, ..., end_dim1, end_dim2
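# For example, MXNet pad=(1, 2) becomes ONNX pads=[1, 2, 1, 2].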
for pad in symmetric_pads:
result.append(pad)
for pad in symmetric_pads:
result.append(pad)
return result
@mx2onnx.register("Pooling")
def convert_pooling(node, **kwargs):
proc_nodes = kwargs["proc_nodes"]
attrs = node["attrs"]
kernel = eval(attrs["kernel"])
pool_type = attrs["pool_type"]
# Default stride in MXNet for pooling is (1,1)
stride = eval(attrs["stride"]) if attrs.get("stride") else (1, 1)
# Global pooling is set explicitly with an attr on the op.
global_pool = eval(attrs["global"]) if attrs.get("global") else None
node_inputs = node["inputs"]
input_node_idx = node_inputs[0][0]
input_node = proc_nodes[input_node_idx]
name = node["name"]
pad_dims = parse_padding(attrs)
pool_types = {"max": "MaxPool", "avg": "AveragePool"}
global_pool_types = {"max": "GlobalMaxPool", "avg": "GlobalAveragePool"}
if global_pool:
node = helper.make_node(
global_pool_types[pool_type],
[input_node.output[0]],
[name],
name=name,
pads=pad_dims
)
else:
node = helper.make_node(
pool_types[pool_type],
[input_node.output[0]], # input
[name],
# dilations = [0, 0],
kernel_shape=kernel,
pads=pad_dims,
strides=stride,
name=name
)
return node
@mx2onnx.register("exp")
def convert_exp(node, **kwargs):
raise NotImplementedError
# There's also mx.sym.softmax(), which doesn't do cross-entropy loss,
# just softmax for inference - hence the name convert_softmax_output.
@mx2onnx.register("SoftmaxOutput")
def convert_softmax_output(node, **kwargs):
# print("\nIn convert_softmax_output")
inputs = node["inputs"]
input1_idx = inputs[0][0]
proc_nodes = kwargs["proc_nodes"]
input1 = proc_nodes[input1_idx]
name = node["name"]
softmax_node = helper.make_node(
"Softmax",
[input1.output[0]],
[name],
axis=1,
name=name
)
return softmax_node
@mx2onnx.register("Crop")
def convert_concat(node, **kwargs):
name = node["name"]
inputs = node["inputs"]
proc_nodes = kwargs["proc_nodes"]
input_names = [proc_nodes[i[0]].name for i in inputs]
attrs = node["attrs"]
border = [0, 0, 0, 0]
offset = list(eval(attrs['offset']))
if len(inputs) == 2:
border = inputs[1]
axis = int(node.get("attrs", {}).get("axis", 1))
concat_node = helper.make_node(
"Crop",
input_names,
[name],
border=border,
scale=offset,
name=name
)
return concat_node
@mx2onnx.register("Concat")
def convert_concat(node, **kwargs):
name = node["name"]
inputs = node["inputs"]
proc_nodes = kwargs["proc_nodes"]
input_names = [proc_nodes[i[0]].name for i in inputs]
axis = int(node.get("attrs", {}).get("axis", 1))
concat_node = helper.make_node(
"Concat",
input_names,
[name],
axis = axis,
name = name
)
return concat_node
@mx2onnx.register("Dropout")
def convert_dropout(node, **kwargs):
name = node["name"]
input_id = node["inputs"][0][0]
input_name = kwargs["proc_nodes"][input_id].name
attrs = node["attrs"]
p = float(attrs["p"])
dropout_node = helper.make_node(
"Dropout",
[input_name],
[name],
ratio = p,
is_test = 0,
name = name
)
return dropout_node
@mx2onnx.register("Flatten")
def convert_flatten(node, **kwargs):
name = node["name"]
input_idx = node["inputs"][0][0]
proc_nodes = kwargs["proc_nodes"]
input_node = proc_nodes[input_idx].name #.output[0]
flatten_node = helper.make_node(
"Flatten",
[input_node],
[name],
name = name,
axis = 1
)
return flatten_node
@mx2onnx.register("_mul_scalar")
def convert_mul_scalar(node, **kwargs):
raise NotImplementedError
@mx2onnx.register("elemwise_add")
def convert_elementwise_add(node, **kwargs):
name = node["name"]
proc_nodes = kwargs["proc_nodes"]
inputs = node["inputs"]
weights = kwargs["weights"]
a = inputs[0][0]
b = inputs[1][0]
a_node = proc_nodes[a].name
b_node = proc_nodes[b].name
add_node = helper.make_node(
"Add",
[a_node, b_node],
[name],
name = name,
)
return add_node
@mx2onnx.register("_sub")
def convert_elementwise_sub(node, **kwargs):
raise NotImplementedError
@mx2onnx.register("abs")
def convert_abs(node, **kwargs):
raise NotImplementedError
@mx2onnx.register("_mul")
def convert_mul(node, **kwargs):
raise NotImplementedError
@mx2onnx.register("_div")
def convert_div(node, **kwargs):
raise NotImplementedError
@mx2onnx.register("log")
def convert_log(node, **kwargs):
raise NotImplementedError
@mx2onnx.register("max")
def convert_max(node, **kwargs):
raise NotImplementedError
@mx2onnx.register("_maximum")
def convert_maximum(node, **kwargs):
raise NotImplementedError
@mx2onnx.register("min")
def convert_min(node, **kwargs):
raise NotImplementedError
@mx2onnx.register("_minimum")
def convert_minimum(node, **kwargs):
raise NotImplementedError
@mx2onnx.register("_power")
def convert_power(node, **kwargs):
raise NotImplementedError
@mx2onnx.register("sqrt")
def convert_sqrt(node, **kwargs):
raise NotImplementedError
| mxnet_to_onnx-master | mx2onnx_converter/mx2onnx_converter_functions.py |
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import mx2onnx_converter.conversion_helpers
import mx2onnx_converter.mx2onnx_converter
import mx2onnx_converter.mx2onnx_converter_functions
| mxnet_to_onnx-master | mx2onnx_converter/__init__.py |
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import sys
from onnx import (defs, checker, helper, numpy_helper, mapping, onnx_pb,
ModelProto, GraphProto, NodeProto, AttributeProto, TensorProto)
from onnx.helper import make_tensor, make_tensor_value_info
class MxNetToONNXConverter:
registry_ = {}
input_output_maps_ = {}
def __init__(self):
# topologically sorted nodes
self.nodes = []
self.input_tensors = []
self.output_tensors = []
@staticmethod
def register(op_name):
def wrapper(func):
MxNetToONNXConverter.registry_[op_name] = func
return func
return wrapper
@staticmethod
def convert_layer(node, **kwargs):
op = str(node["op"])
if op not in MxNetToONNXConverter.registry_:
raise AttributeError("No conversion function registered for op type %s yet." % op)
convert_fun = MxNetToONNXConverter.registry_[op]
return convert_fun(node, **kwargs)
# Add transpose?
@staticmethod
def convert_weights_to_numpy(weights_dict):
return dict([(k.replace("arg:", "").replace("aux:", ""), v.asnumpy()) for k, v in weights_dict.items()])
def convert_mx2onnx_graph(self, mx_graph, mx_weights, in_shape, in_type, log=False):
print("\nconverting weights from MxNet NDArrays to NumPy arrays.\n")
weights = MxNetToONNXConverter.convert_weights_to_numpy(mx_weights)
onnx_graph = GraphProto()
initializer = []
all_processed_nodes = []
onnx_processed_nodes = []
onnx_processed_inputs = []
onnx_processed_outputs = []
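# The MXNet JSON graph is topologically sorted with the output node last;
# every earlier node becomes an ONNX input, initializer or intermediate node.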
for idx, node in enumerate(mx_graph):
op = node["op"]
name = node["name"]
if log:
print("Converting idx: %d, op: %s, name: %s" % (idx, op, name))
converted = MxNetToONNXConverter.convert_layer(
node,
mx_graph = mx_graph,
weights = weights,
in_shape = in_shape,
in_type = in_type,
proc_nodes = all_processed_nodes,
initializer = initializer
)
if isinstance(converted, onnx_pb.ValueInfoProto):
if idx < (len(mx_graph) - 1):
onnx_processed_inputs.append(converted)
else:
onnx_processed_outputs.append(converted)
elif isinstance(converted, onnx_pb.NodeProto):
if idx < (len(mx_graph) - 1):
onnx_processed_nodes.append(converted)
else:
onnx_processed_nodes.append(converted)
onnx_processed_outputs.append(
make_tensor_value_info(
name=converted.name,
elem_type=mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')],
shape=(in_shape[0], -1)
)
)
if log:
print("Output node is: %s" % converted.name)
elif isinstance(converted, onnx_pb.TensorProto):
# TensorProtos are emitted via the initializer list and are never
# returned directly by a converter, so this is always an error
raise ValueError("Did not expect TensorProto")
else:
print(converted)
raise ValueError("node is of an unrecognized type: %s" % type(node))
all_processed_nodes.append(converted)
graph = helper.make_graph(
onnx_processed_nodes,
"main",
onnx_processed_inputs,
onnx_processed_outputs
)
graph.initializer.extend(initializer)
checker.check_graph(graph)
return graph
| mxnet_to_onnx-master | mx2onnx_converter/mx2onnx_converter.py |
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from onnx import defs, checker, helper, numpy_helper, mapping
from .mx2onnx_converter import MxNetToONNXConverter
import json
import mxnet as mx
import numpy as np
def from_mxnet(model_file, weight_file, input_shape, input_type, log=False):
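"""Load a serialized MXNet model (symbol JSON plus a params file) and
convert it to an in-memory ONNX ModelProto."""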
mx_weights = mx.ndarray.load(weight_file)
with open(model_file, 'r') as f:
graph = json.loads(f.read())["nodes"]
converter = MxNetToONNXConverter()
onnx_graph = converter.convert_mx2onnx_graph(graph, mx_weights, input_shape, mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(input_type)], log=log)
onnx_model = helper.make_model(onnx_graph)
return onnx_model
| mxnet_to_onnx-master | mx2onnx_converter/conversion_helpers.py |
from conans import ConanFile, CMake, tools
import requests
import re
from os import path
class P2300Recipe(ConanFile):
name = "P2300"
description = "std::execution"
author = "Michał Dominiak, Lewis Baker, Lee Howes, Kirk Shoop, Michael Garland, Eric Niebler, Bryce Adelstein Lelbach"
topics = ("WG21", "concurrency")
homepage = "https://github.com/NVIDIA/stdexec"
url = "https://github.com/NVIDIA/stdexec"
license = "Apache 2.0"
settings = "compiler" # Header only - compiler only used for flags
tool_requires = "catch2/2.13.6"
exports_sources = "include/*"
generators = "cmake_find_package"
def validate(self):
tools.check_min_cppstd(self,"20")
def set_version(self):
# Get the version from the spec file
response = requests.get("https://raw.githubusercontent.com/brycelelbach/wg21_p2300_execution/main/execution.bs")
rev = re.search(r"Revision: (\d+)", response.text).group(1).strip()
self.version = f"0.{rev}.0"
def package(self):
self.copy("*.hpp")
def package_info(self):
# Make sure to add the correct flags for gcc
if self.settings.compiler == "gcc":
self.cpp_info.cxxflags = ["-fcoroutines", "-Wno-non-template-friend"]
| stdexec-main | conanfile.py |
import os
from conans import ConanFile, CMake, tools
class P2300TestConan(ConanFile):
settings = "compiler"
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self):
os.chdir("bin")
self.run(".{}test_p2300".format(os.sep))
| stdexec-main | test_package/conanfile.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import this before torch to avoid GLIBC error
import xarray
import os
import pytest
from earth2mip import config
import torch
def get_gpu_count():
return torch.cuda.device_count()
@pytest.fixture()
def has_registry():
if not config.MODEL_REGISTRY:
pytest.skip("MODEL_REGISTRY not configured.")
@pytest.fixture()
def dist():
from modulus.distributed.manager import DistributedManager
DistributedManager.initialize()
return DistributedManager()
@pytest.fixture()
def ngpu():
return get_gpu_count()
| earth2mip-main | conftest.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# !/usr/bin/env python
from setuptools import setup
setup()
| earth2mip-main | setup.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from concurrent.futures import ThreadPoolExecutor
import random
def test_thread_pool_always_returns_same_order():
pool = ThreadPoolExecutor(4)
def func(x):
# ensure that threads all finish at different times
time.sleep(random.uniform(0, 0.01))
return x
items = list(range(10))
    for _ in range(4):
assert list(pool.map(func, items)) == items
| earth2mip-main | tests/test_thread_pool.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip import registry
def test_list_models(has_registry):
ans = registry.list_models()
assert ans
assert "/" not in ans[0], ans[0]
| earth2mip-main | tests/test_registry.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip import netcdf
import numpy as np
import netCDF4 as nc
from earth2mip import schema, weather_events
import torch
import pytest
import pathlib
@pytest.mark.parametrize("cls", ["raw"])
def test_diagnostic(cls: str, tmp_path: pathlib.Path):
domain = weather_events.Window(
name="Test",
lat_min=-15,
lat_max=15,
diagnostics=[
weather_events.Diagnostic(type=cls, function="", channels=["tcwv"])
],
)
lat = np.array([-20, 0, 20])
lon = np.array([0, 1, 2])
n_ensemble = 2
path = tmp_path / "a.nc"
weather_event = weather_events.read("EastCoast")
with nc.Dataset(path.as_posix(), "w") as ncfile:
total_diagnostics = netcdf.initialize_netcdf(
ncfile,
[domain],
schema.Grid.grid_720x1440,
lat,
lon,
n_ensemble,
torch.device(type="cpu"),
)[0]
for diagnostic in total_diagnostics:
print(ncfile)
print(ncfile["Test"])
nlat = len(ncfile["Test"]["lat"][:])
nlon = len(ncfile["Test"]["lon"][:])
data = torch.randn((n_ensemble, 1, nlat, nlon))
time_index = 0
batch_id = 0
batch_size = n_ensemble
diagnostic.update(data, time_index, batch_id, batch_size)
# TODO Fix input data issues with crps, skill
        if cls not in ["crps", "skill"]:
diagnostic.finalize(
np.array([time_index]), weather_event, schema.ChannelSet.var34
)
if cls == "skill":
assert "tcwv" in ncfile["Test"]["skill"].variables
elif cls == "raw":
assert "tcwv" in ncfile["Test"].variables
else:
assert "tcwv" in ncfile["Test"][cls].variables
| earth2mip-main | tests/test_diagnostics.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip import forecasts
import xarray
import numpy as np
import datetime
import torch
import pytest
class MockTimeLoop:
in_channel_names = ["b", "a"]
out_channel_names = ["b", "a"]
time_step = datetime.timedelta(hours=6)
def __call__(self, time, x):
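        # getarr() below defines channels a=0, b=1, c=2; selecting this loop's
        # in_channel_names ["b", "a"] should therefore produce [1, 0]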
assert torch.all(x == torch.tensor([1, 0], device=x.device))
while True:
yield time, x, None
time += self.time_step
async def getarr():
arr = np.arange(3)
coords = {}
coords["channel"] = ["a", "b", "c"]
return xarray.DataArray(arr, dims=["channel"], coords=coords)
async def test_TimeLoopForecast():
if not torch.cuda.is_available():
pytest.skip("No Cuda")
times = [
datetime.datetime(1, 1, 1) + datetime.timedelta(hours=12) * k for k in range(3)
]
    mock_obs = [getarr() for _ in times]
forecast = forecasts.TimeLoopForecast(MockTimeLoop(), times, mock_obs)
    it = forecast[0]
    k = 0
    async for state in it:
k += 1
if k >= 4:
break
| earth2mip-main | tests/test_forecasts.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xarray as xr
from earth2mip.datasets import zarr_directory, hindcast
import numpy as np
import os
import pytest
def test_zarr_directory():
    # Generate sample input data
data_a = np.ones((3, 4))
data_b = np.arange(4)
# Create xarray dataset
ds = xr.Dataset(
{
"a": (["x", "y"], data_a),
"b": (["y"], data_b),
},
coords={"x": np.arange(3), "y": np.arange(4)},
)
store = {}
ds.to_zarr(store)
nested_store = {}
directories = [str(i) for i in range(10)]
for i in directories:
for key in store:
# TODO remove the hardcode
nested_store[f"{i}/mean.zarr/" + key] = store[key]
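    # NestedDirectoryStore should present the identical per-directory groups as
    # one dataset concatenated along the new "dim" dimension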
obj = zarr_directory.NestedDirectoryStore(
nested_store,
directories=directories,
concat_dim="dim",
group="mean.zarr",
static_coords=("x", "y"),
dim_rename=None,
)
loaded = xr.open_zarr(obj)
for val in directories:
iobj = loaded.sel(dim=val).load()
for v in iobj:
xr.testing.assert_equal(iobj.variables[v], ds.variables[v])
def test_open_forecast():
root = "/lustre/fsw/sw_climate_fno/nbrenowitz/scoring_tools/sfno_coszen/deterministic-medium" # noqa
if not os.path.isdir(root):
pytest.skip()
ds = hindcast.open_forecast(root, group="mean.zarr")
print(ds)
ds.z500
| earth2mip-main | tests/test_datasets.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip.model_registry import Package
def test_package(tmp_path):
string = "hello"
afile = tmp_path / "a.txt"
afile.write_text(string)
path = "file://" + tmp_path.as_posix()
package = Package(path, seperator="/")
path = package.get("a.txt")
with open(path) as f:
ans = f.read()
assert ans == string
| earth2mip-main | tests/test_model_registry.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xarray
import numpy as np
from earth2mip.xarray.utils import to_cupy, concat_dict
try:
import cupy
except ImportError:
cupy = None
import pytest
@pytest.fixture()
def has_gpu():
try:
cupy.cuda.runtime.getDevice()
except Exception:
pytest.skip("No GPU.")
@pytest.mark.parametrize("chunked", [True, False])
def test_to_cupy_weighted(has_gpu, chunked):
nlat, nlon = 10, 20
lat = xarray.Variable(["lat"], np.linspace(90, -90, nlat))
arr = xarray.DataArray(
np.ones([nlat, nlon]), dims=["lat", "lon"], coords={"lat": lat}
)
arr_cp = to_cupy(arr)
if chunked:
arr_cp = arr_cp.chunk({"lat": 2})
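    # accessing .device checks that the computed result is still a cupy array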
arr_cp.load().data.device
def test_concat_dict():
# Create sample input dictionary of xarray objects
data1 = xarray.DataArray([1, 2, 3], dims="dim1")
data2 = xarray.DataArray([4, 5, 6], dims="dim1")
data3 = xarray.DataArray([7, 8, 9], dims="dim1")
input_dict = {
("coord1_val1",): data1,
("coord1_val2",): data2,
("coord1_val3",): data3,
}
# Call the function
result = concat_dict(input_dict, key_names=("coord1",), concat_dim="key")
coord = xarray.Variable(["key"], [k[0] for k in input_dict])
expected_values = xarray.DataArray(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dims=("key", "dim1"),
coords={"coord1": coord},
)
xarray.testing.assert_equal(result, expected_values)
| earth2mip-main | tests/test_xarray_utils.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip.time import convert_to_datetime
import numpy as np
import datetime
def test_convert_to_datetime():
time = np.datetime64("2021-01-01T00:00:00")
expected = datetime.datetime(2021, 1, 1)
assert convert_to_datetime(time) == expected
| earth2mip-main | tests/test_time.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from pathlib import Path
from modulus.utils import filesystem
@pytest.fixture
def pyfile_name():
return "test_filesystem.py"
def test_modulus_filesystem_local(pyfile_name):
# Check if this test file is seen in a Fsspec local file system
file_path = Path(__file__).parent.resolve()
fssystem = filesystem._get_fs("file")
assert pyfile_name in [Path(file).name for file in fssystem.ls(file_path)]
| earth2mip-main | tests/test_modulus_filesystem.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| earth2mip-main | tests/__init__.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import torch
import xarray
import hashlib
import numpy as np
from earth2mip.networks import persistence
from earth2mip import (
schema,
weather_events,
inference_ensemble,
score_ensemble_outputs,
inference_medium_range,
)
import pytest
def checksum_reduce_precision(arr, digits=3):
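    # quantize to roughly `digits` significant figures relative to the largest
    # magnitude before hashing, so the checksum tolerates tiny floating-point
    # differences across platforms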
most_significant = max(arr.max(), -arr.min())
least = most_significant / 10**digits
    arr = (arr / least).astype(np.int32)
checksum = hashlib.md5(arr.data)
return checksum.hexdigest()
class get_data_source:
def __init__(self, inference):
arr = xarray.DataArray(
np.ones([1, len(inference.in_channel_names), 721, 1440]),
dims=["time", "channel", "lat", "lon"],
)
arr["channel"] = inference.in_channel_names
arr["lat"] = np.linspace(90, -90, 721)
arr["lon"] = np.linspace(0, 360, 1440, endpoint=False)
self.arr = arr
self.channel_names = inference.out_channel_names
def __getitem__(self, time):
return self.arr.assign_coords(time=[time])
def test_inference_ensemble(tmp_path):
inference = persistence(package=None)
data_source = get_data_source(inference)
time = datetime.datetime(2018, 1, 1)
config = schema.EnsembleRun(
weather_model="dummy",
simulation_length=40,
output_path=tmp_path.as_posix(),
weather_event=schema.WeatherEvent(
properties=weather_events.WeatherEventProperties(
name="test", start_time=time
),
domains=[
weather_events.Window(
name="globe",
diagnostics=[
weather_events.Diagnostic(
type="raw",
channels=inference.out_channel_names,
)
],
)
],
),
)
inference_ensemble.run_inference(
inference, config, data_source=data_source, progress=True
)
path = tmp_path / "ensemble_out_0.nc"
ds = xarray.open_dataset(path.as_posix(), decode_times=False)
assert ds.time[0].item() == 0
out = tmp_path / "out"
score_ensemble_outputs.main(tmp_path.as_posix(), out.as_posix(), score=False)
def test_checksum_reduce_precision(regtest):
# Test case 1: Basic example
arr1 = np.array([1.23456, 2.34567, 3.45678])
arr2 = np.array([1, 2, 3])
assert checksum_reduce_precision(arr1, digits=1) == checksum_reduce_precision(
arr2, digits=1
)
arr1 = np.array([0.23456, 2.34567, 3.45678])
arr2 = np.array([1, 2, 3])
assert checksum_reduce_precision(arr1, digits=1) != checksum_reduce_precision(
arr2, digits=2
)
print(checksum_reduce_precision(arr1), file=regtest)
def test_inference_medium_range(tmpdir, regtest):
if not torch.cuda.is_available():
pytest.skip("need gpu")
inference = persistence(package=None).cuda()
data_source = get_data_source(inference)
time = datetime.datetime(2018, 1, 1)
dt = datetime.timedelta(hours=6)
times = [time + dt * k for k in range(10)]
mean = np.zeros((len(inference.out_channel_names), 721, 1440))
metrics = inference_medium_range.score_deterministic(
inference, n=5, initial_times=times, time_mean=mean, data_source=data_source
)
metrics["acc"].attrs["checksum"] = checksum_reduce_precision(metrics.acc, digits=3)
metrics["rmse"].attrs["checksum"] = checksum_reduce_precision(
metrics.rmse, digits=3
)
metrics.info(regtest)
| earth2mip-main | tests/test_end_to_end.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip.networks import pangu
import datetime
import torch
class MockPangu(pangu.PanguWeather):
def __init__(self):
pass
def __call__(self, pl, sl):
return pl, sl
def test_pangu():
model_6 = pangu.PanguStacked(MockPangu())
model_24 = pangu.PanguStacked(MockPangu())
inference = pangu.PanguInference(model_6, model_24)
t0 = datetime.datetime(2018, 1, 1)
dt = datetime.timedelta(hours=6)
x = torch.ones((1, 1, len(inference.in_channel_names), 721, 1440))
n = 5
times = []
for k, (time, y, _) in enumerate(inference(t0, x)):
if k > n:
break
assert y.shape == x.shape[1:]
assert torch.all(y == x[0])
times.append(time)
assert times == [t0 + k * dt for k in range(n + 1)]
| earth2mip-main | tests/test_pangu.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip import netcdf
import numpy as np
import netCDF4 as nc
from earth2mip.schema import Grid
from earth2mip.weather_events import Window, Diagnostic
import torch
def test_initialize_netcdf(tmp_path):
domain = Window(
name="TestAverage",
lat_min=-15,
lat_max=15,
diagnostics=[Diagnostic(type="raw", function="", channels=["tcwv"])],
)
lat = np.array([-20, 0, 20])
lon = np.array([0, 1, 2])
n_ensemble = 1
path = tmp_path / "a.nc"
with nc.Dataset(path.as_posix(), "w") as ncfile:
netcdf.initialize_netcdf(
ncfile,
[domain],
Grid("720x1440"),
lat,
lon,
n_ensemble,
torch.device(type="cpu"),
)
| earth2mip-main | tests/test_io.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
from earth2mip import regrid
from earth2mip import schema
def test_get_regridder():
src = schema.Grid.grid_721x1440
dest = schema.Grid.s2s_challenge
try:
f = regrid.get_regridder(src, dest)
except FileNotFoundError as e:
pytest.skip(f"{e}")
x = torch.ones(1, 1, 721, 1440)
y = f(x)
assert y.shape == (1, 1, 121, 240)
assert torch.allclose(y, torch.ones_like(y))
| earth2mip-main | tests/test_regrid.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip import weather_events
import pytest
@pytest.mark.parametrize("event", weather_events.list_())
def test_read(event):
domain = weather_events.read(event)
print(domain)
def test_parse():
obj = {
"properties": {"name": "Globe", "start_time": "2018-01-01 00:00:00"},
"domains": [
{
"name": "global",
"type": "Window",
"diagnostics": [
{
"type": "absolute",
"function": "mean",
"channels": ["tcwv", "t2m", "u10", "v10"],
}
],
}
],
}
weather_events.WeatherEvent.parse_obj(obj)
def test_parse_cwbdomain():
obj = {
"properties": {"name": "Taiwan", "start_time": "2018-10-07 18:00:00"},
"domains": [
{
"name": "Taiwan",
"type": "CWBDomain",
"lat_min": 18,
"lat_max": 30,
"lon_min": 115,
"lon_max": 125,
"diagnostics": [
{
"type": "raw",
"function": "",
"channels": [
"u10",
"v10",
"t2m",
"sp",
"msl",
"t850",
"u1000",
"v1000",
"z1000",
"u850",
"v850",
"z850",
"u500",
"v500",
"z500",
"t500",
"z50",
"r500",
"r850",
"tcwv",
"u100m",
"v100m",
"u250",
"v250",
"z250",
"t250",
],
}
],
}
],
}
weather_events.WeatherEvent.parse_obj(obj)
def test_parse_multipoint():
obj = {
"properties": {"name": "EastCoast", "start_time": "2018-01-01 00:00:00"},
"domains": [
{
"name": "Somewhere",
"type": "MultiPoint",
"lat": [40, 25, 42, 18, 29, 38],
"lon": [286, 280, 289, 294, 265, 283],
"diagnostics": [
{
"type": "raw",
"function": "",
"channels": ["tcwv", "t2m", "u10", "v10"],
}
],
}
],
}
weather_events.WeatherEvent.parse_obj(obj)
| earth2mip-main | tests/test_weather_events.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip import schema
import json
def test_model():
obj = schema.Model(
architecture="some_arch",
n_history=0,
channel_set=schema.ChannelSet.var34,
grid=schema.Grid.grid_720x1440,
in_channels=[0, 1],
out_channels=[0, 1],
)
loaded = json.loads(obj.json())
    assert loaded["channel_set"] == obj.channel_set.value
| earth2mip-main | tests/test_schema.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
import datetime
from earth2mip.ensemble_utils import generate_noise_correlated, generate_bred_vector
from earth2mip import networks
from earth2mip.schema import Grid, ChannelSet
@pytest.mark.slow
def test_generate_noise_correlated():
torch.manual_seed(0)
shape = (2, 34, 32, 64)
noise = generate_noise_correlated(
shape=shape, reddening=2.0, noise_amplitude=0.1, device="cpu"
)
assert tuple(noise.shape) == tuple(shape)
    assert torch.mean(noise) < torch.tensor(1e-09)
class Dummy(torch.nn.Module):
def forward(self, x, time):
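        # logistic-map-like chaotic update: repeated application amplifies
        # small perturbations, which is what bred-vector generation relies on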
return 2.5 * torch.abs(x) * (1 - torch.abs(x))
def test_bred_vector():
device = "cpu"
model = Dummy().to(device)
initial_time = datetime.datetime(2018, 1, 1)
channels = [0, 1]
center = [0, 0]
scale = [1, 1]
# batch, time_levels, channels, y, x
x = torch.rand([4, 1, 2, 5, 6], device=device)
model = networks.Inference(
model,
center=center,
channels=channels,
scale=scale,
grid=Grid.grid_720x1440,
channel_set=ChannelSet.var34,
).to(device)
noise_amplitude = 0.01
noise = generate_bred_vector(
x,
model,
noise_amplitude=noise_amplitude,
time=initial_time,
integration_steps=20,
inflate=False,
)
assert noise.device == x.device
assert noise.shape == x.shape
assert not torch.any(torch.isnan(noise))
noise = generate_bred_vector(
x,
model,
noise_amplitude=noise_amplitude,
time=initial_time,
integration_steps=20,
inflate=True,
)
assert noise.device == x.device
assert noise.shape == x.shape
assert not torch.any(torch.isnan(noise))
| earth2mip-main | tests/test_ensemble_perturb.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from earth2mip import filesystem
def test_glob(tmp_path: Path):
a = tmp_path / "a.txt"
a.touch()
# use file:// protocol to ensure handling is correct
(f,) = filesystem.glob(f"file://{tmp_path.as_posix()}/*.txt")
assert f == f"file://{a.as_posix()}"
def test_glob_no_scheme(tmp_path: Path):
a = tmp_path / "a.txt"
a.touch()
(f,) = filesystem.glob(f"{tmp_path.as_posix()}/*.txt")
assert f == a.as_posix()
def test__to_url():
assert (
filesystem._to_url("s3", "sw_climate_fno/a.txt") == "s3://sw_climate_fno/a.txt"
)
assert filesystem._to_url("", "sw_climate_fno/a.txt") == "sw_climate_fno/a.txt"
| earth2mip-main | tests/test_filesystem.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip.initial_conditions.ifs import _get_filename, get
from earth2mip import schema
import datetime
import pytest
def test__get_filename():
expected = "20230310/00z/0p4-beta/oper/20230310000000-0h-oper-fc.grib2"
time = datetime.datetime(2023, 3, 10, 0)
assert _get_filename(time, "0h") == expected
@pytest.mark.slow
@pytest.mark.xfail
def test_get():
# uses I/O and old ICs are not available forever.
time = datetime.datetime(2023, 3, 10, 0)
ds = get(time, schema.ChannelSet.var34)
print(ds)
| earth2mip-main | tests/test_ifs.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip.lagged_ensembles.core import yield_lagged_ensembles
import torch
import pytest
import logging
c, lat, lon = 1, 10, 13
class Observations:
def __init__(self, device, nt=20):
self.device = device
self.nt = nt
async def __getitem__(self, i):
"""
Returns (channel, lat, lon)
"""
if i >= len(self):
raise KeyError(i)
return torch.tensor([i], device=self.device)
def __len__(self):
return self.nt
class Forecast:
def __init__(self, device):
self.device = device
async def __getitem__(self, i):
"""persistence forecast
Yields (channel, lat, lon)
"""
x = torch.zeros((2,), device=self.device)
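        # encode the initialization index in x[0] and the lead time in x[1] so
        # the test below can verify the lagged-ensemble bookkeeping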
x[0] = i
lead_time = -1
while True:
lead_time += 1
x[1] = lead_time
yield x.clone()
@pytest.fixture(scope="session")
def dist_info():
try:
torch.distributed.init_process_group(init_method="env://", backend="gloo")
except ValueError:
        logging.warning(
            "Could not initialize torch distributed with the gloo backend."
        )
return 0, 1
else:
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
return rank, world_size
@pytest.mark.parametrize("nt", [16, 20])
async def test_yield_lagged_ensembles(dist_info, nt):
rank, world_size = dist_info
device = "cpu"
forecast = Forecast(device)
async for (j, l), ens, o in yield_lagged_ensembles(
observations=Observations(device, nt),
forecast=forecast,
):
i = j - l
# assert this process is responsible for this lagged ensemble
assert i % world_size == rank
assert o == j
for m in ens:
arr = ens[m]
ll = arr[1]
assert ll == l - m
ii = arr[0]
assert ii == i + m
| earth2mip-main | tests/test_lagged_averaged_forecast.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip import networks, schema
import torch
import torch.nn
import numpy as np
class Identity(torch.nn.Module):
def forward(self, x):
return x + 0.01
def test_inference_run_with_restart():
model = Identity()
channels = [0, 1]
center = [0, 0]
scale = [1, 1]
# batch, time_levels, channels, y, x
x = torch.zeros([1, 1, 2, 5, 6])
model = networks.Inference(
model,
center=center,
channels=channels,
scale=scale,
grid=schema.Grid.grid_720x1440,
channel_set=schema.ChannelSet.var34,
)
step1 = []
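    # run_steps_with_restart yields the initial state plus one state per step,
    # hence 4 restart checkpoints for 3 steps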
for _, state, restart in model.run_steps_with_restart(x, 3):
step1.append(restart)
assert len(step1) == 4
# start run from 50% done
for _, final_state, _ in model.run_steps_with_restart(n=2, **step1[1]):
pass
np.testing.assert_array_equal(final_state.numpy(), state.numpy())
| earth2mip-main | tests/test_network.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import earth2mip.initial_conditions.era5 as initial_conditions
from earth2mip.initial_conditions import get_data_source
import datetime
import pytest
import pathlib
from earth2mip import schema, config
import xarray
@pytest.mark.parametrize("year", [2018, 1980, 2017])
def test__get_path(year):
root = pathlib.Path(__file__).parent / "mock_data"
root = root.as_posix()
dt = datetime.datetime(year, 1, 2)
path = initial_conditions._get_path(root, dt)
assert pathlib.Path(path).name == dt.strftime("%Y.h5")
assert pathlib.Path(path).exists()
def test__get_path_key_error():
with pytest.raises(KeyError):
initial_conditions._get_path(".", datetime.datetime(2040, 1, 2))
@pytest.mark.slow
def test_initial_conditions_get():
time = datetime.datetime(2018, 1, 1)
if not config.ERA5_HDF5_73:
pytest.skip("No data location configured.")
data = get_data_source(
n_history=0,
channel_set=schema.ChannelSet.var73,
initial_condition_source=schema.InitialConditionSource.era5,
grid=schema.Grid.grid_721x1440,
)
ic = data[time]
assert isinstance(ic, xarray.DataArray)
| earth2mip-main | tests/test_initial_conditions.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from earth2mip.networks import depends_on_time
if sys.version_info < (3, 10):
from importlib_metadata import EntryPoint
else:
from importlib.metadata import EntryPoint
import pytest
def test_upstream_entrypoint():
ep = EntryPoint(name=None, group=None, value="sys")
assert ep.load() == sys
# refer to an attribute with ":"
ep = EntryPoint(name=None, group=None, value="sys:argv")
assert ep.load() == sys.argv
# if you don't use : it will give an error
with pytest.raises(ModuleNotFoundError):
ep = EntryPoint(name=None, group=None, value="sys.argv")
ep.load()
def test_inspect_for_time():
def f(x, time):
pass
def g(x):
pass
assert depends_on_time(f)
assert not depends_on_time(g)
| earth2mip-main | tests/test_entrypoint.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
xarray is confusing so let's test it to gain understanding
"""
import xarray
import numpy as np
import pytest
def test_xarray_var_reference():
ds = xarray.DataArray(np.ones((10, 10)), dims=("x", "y"))
ds["x"] = np.arange(10)
ds["y"] = np.arange(10)
datasets = ds.to_dataset(name="wind")
datasets["var"] = datasets["wind"]
assert isinstance(datasets["var"], xarray.DataArray)
def test_xarray_loop():
ds = xarray.DataArray(np.ones((10, 10)), dims=("x", "y"))
ds["x"] = np.arange(10)
ds["y"] = np.arange(10)
datasets = ds.to_dataset(name="wind")
assert list(datasets) == ["wind"]
assert set(datasets.variables) == {"x", "y", "wind"}
def test_xarray_var():
ds = xarray.DataArray(np.ones((10, 10)), dims=("x", "y"))
ds["x"] = np.arange(10)
ds["y"] = np.arange(10)
datasets = ds.to_dataset(name="wind")
with pytest.raises(AttributeError):
datasets.variables["wind"].weighted
| earth2mip-main | tests/test_xarray_apis.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import argparse
import numpy as np
import datetime
import torch
from earth2mip import schema, model_registry, networks, _cli_utils
from earth2mip.model_registry import Package
import hashlib
import pytest
def md5_checksum(x, precision):
    x_rounded = np.round(x, precision)
    x_string = x_rounded.tobytes()
md5 = hashlib.md5(x_string)
checksum = md5.hexdigest()
return checksum
def _mock_registry_with_metadata(metadata, model_name, tmp_path):
root = tmp_path / model_name
root.mkdir()
registry = model_registry.ModelRegistry(tmp_path.as_posix())
registry.put_metadata(model_name, metadata)
# save stats
def save_ones(path):
out = np.ones((len(metadata.in_channels),))
np.save(path, out)
save_ones(registry.get_scale_path(model_name))
save_ones(registry.get_center_path(model_name))
return registry
def test_pickle(tmp_path: pathlib.Path):
model_name = "model"
# use a baseline AFNO model as a mock
model = torch.nn.Conv2d(3, 3, 1)
# Save the model to the registry with appropriate metadata
metadata = schema.Model(
architecture="pickle",
n_history=0,
channel_set=schema.ChannelSet.var34,
grid=schema.Grid.grid_720x1440,
in_channels=list(range(3)),
out_channels=list(range(3)),
)
registry = _mock_registry_with_metadata(metadata, model_name, tmp_path)
# save model weights
torch.save(model, registry.get_weight_path(model_name))
# make sure it works
loaded = networks.get_model(model_name, registry)
assert loaded.in_channel_names == [
metadata.channel_set.list_channels()[i] for i in metadata.in_channels
]
    # run a few steps of the loaded time loop; the grid is tiny so this is fast
    n_history = 0
ic = torch.ones(1, n_history + 1, len(metadata.in_channels), 2, 4)
time = datetime.datetime(1, 1, 1)
for k, (_, b, _) in enumerate(loaded(time, ic)):
if k > 10:
break
assert b.shape == ic[:, 0].shape
def MockLoader(package, pretrained):
assert pretrained
return torch.nn.Linear(3, 3)
def test_get_model_architecture_entrypoint(tmp_path):
registry = model_registry.ModelRegistry(tmp_path.as_posix())
metadata = schema.Model(
architecture_entrypoint="tests.test_models:MockLoader",
n_history=0,
channel_set=schema.ChannelSet.var34,
grid=schema.Grid.grid_720x1440,
in_channels=list(range(3)),
out_channels=list(range(3)),
)
model_name = "model"
registry = _mock_registry_with_metadata(metadata, model_name, tmp_path)
model = networks.get_model(model_name, registry)
assert isinstance(model, torch.nn.Module)
def test_get_model_uses_metadata(tmp_path):
registry = model_registry.ModelRegistry(tmp_path.as_posix())
model_name = "model"
model = networks.get_model(model_name, registry, metadata=metadata_with_entrypoint)
assert isinstance(model, MyTestInference)
@pytest.mark.parametrize("required", [True, False])
def test__cli_utils(tmp_path, required):
path = tmp_path / "meta.json"
with path.open("w") as f:
f.write(metadata_with_entrypoint.json())
parser = argparse.ArgumentParser()
_cli_utils.add_model_args(parser, required=required)
model_args = ["model"] if required else ["--model", "unused"]
args = parser.parse_args(model_args + ["--model-metadata", path.as_posix()])
loop = _cli_utils.model_from_args(args, device="cpu")
assert isinstance(loop, MyTestInference)
class MyTestInference:
def __init__(self, package, device, **kwargs):
self.kwargs = kwargs
self.device = device
metadata_with_entrypoint = schema.Model(
entrypoint=schema.InferenceEntrypoint(
name="tests.test_models:MyTestInference", kwargs=dict(param=1)
)
)
def test__load_package_entrypoint():
package = Package("", seperator="/")
obj = networks._load_package(package, metadata_with_entrypoint, device="cpu")
assert isinstance(obj, MyTestInference)
assert obj.kwargs == metadata_with_entrypoint.entrypoint.kwargs
assert obj.device == "cpu"
| earth2mip-main | tests/test_models.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip.initial_conditions import cds
from earth2mip import schema
import datetime
import random
import pytest
@pytest.mark.slow
def test_cds_data_source():
try:
client = cds.Client()
except Exception:
pytest.skip("Could not initialize client")
time = datetime.datetime(2018, 1, 1)
channels = ["q1000", "t2m"]
source = cds.DataSource(channels, client=client)
dataset = source[time]
assert source.channel_names == channels
correct_dims = {"time": 1, "channel": len(channels), "lat": 721, "lon": 1440}
assert dataset.dims == tuple(correct_dims.keys())
assert dataset.shape == tuple(correct_dims.values())
def test_make_request(regtest):
time = datetime.datetime(2018, 1, 1)
channels = ["q1000", "z1000", "u1000", "t2m", "q10"]
codes = [cds.parse_channel(c) for c in channels]
for req in cds._get_cds_requests(codes, time, format="grib"):
print(req, file=regtest)
def test_parse_channel_with_level():
channel_level = random.randint(0, 10000)
channel_string = f"u{channel_level}"
assert cds.parse_channel(channel_string).level == channel_level
@pytest.mark.parametrize("c", schema.ChannelSet.var73.list_channels())
def test_parse_known_channels(c):
assert cds.parse_channel(c)
| earth2mip-main | tests/test_cds.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
class Identity(torch.nn.Module):
def forward(self, x):
return x
def load(package, pretrained=True):
return Identity()
| earth2mip-main | tests/mock_plugin.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip import geometry
from earth2mip.weather_events import Window
import torch
import numpy as np
import pytest
@pytest.mark.xfail
def test_select_space():
domain = Window(name="latitude", lat_min=-15, lat_max=15, diagnostics=[])
lat = np.array([-20, 0, 20])
lon = np.array([0, 1, 2])
data = torch.ones((1, 1, len(lat), len(lon))).float()
lat, lon, output = geometry.select_space(data, lat, lon, domain)
assert tuple(output.shape[2:]) == (len(lat), len(lon))
assert np.all(np.abs(lat) <= 15)
@pytest.mark.xfail
def test_get_bounds_window():
domain = Window(name="latitude", lat_min=-15, lat_max=15, diagnostics=[])
lat = np.array([-20, 0, 20])
lon = np.array([0, 1, 2])
lat_sl, _ = geometry.get_bounds_window(domain, lat, lon)
assert lat[lat_sl].shape == (1,)
assert lat[lat_sl][0] == 0
| earth2mip-main | tests/test_geometry.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import numpy as np
import torch
import subprocess
import xarray
from earth2mip.datasets.hindcast import open_forecast
import pytest
DIR = os.path.dirname(__file__)
os.environ["PYTHONPATH"] = ":".join(filter(None, [DIR, os.getenv("PYTHONPATH")]))
def create_model_package(tmp_path):
package_dir = tmp_path / "mock_package"
package_dir.mkdir()
# Create metadata.json
metadata = {
"architecture_entrypoint": "mock_plugin:load",
"n_history": 0,
"channel_set": "73var",
"grid": "721x1440",
"in_channels": list(range(73)),
"out_channels": list(range(73)),
}
with open(package_dir.joinpath("metadata.json"), "w") as f:
json.dump(metadata, f)
# Create numpy arrays
global_means = np.zeros((1, 73, 1, 1))
global_stds = np.ones((1, 73, 1, 1))
np.save(package_dir.joinpath("global_means.npy"), global_means)
np.save(package_dir.joinpath("global_stds.npy"), global_stds)
return package_dir
@pytest.mark.slow
@pytest.mark.xfail
def test_time_collection(tmp_path):
if not torch.cuda.is_available():
pytest.skip("needs gpu and data")
    torch.distributed.init_process_group()
model_package = create_model_package(tmp_path)
config = os.path.join(DIR, "configs/medium-test.json")
model = f"file://{model_package.as_posix()}"
root = str(tmp_path / "test")
subprocess.check_call(["python3", "-m", "earth2mip.make_job", model, config, root])
subprocess.check_call(
[
"torchrun",
"--nnodes",
"1",
"--nproc_per_node",
str(min([torch.cuda.device_count(), 2])),
"-m",
"earth2mip.time_collection",
root,
]
)
ds = open_forecast(root, group="mean.zarr")
assert isinstance(ds, xarray.Dataset)
| earth2mip-main | tests/test_time_collection.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from earth2mip.datasets.era5 import time
def test_datetime_range():
times = time.datetime_range(2018, datetime.timedelta(hours=6), 2)
assert times == [datetime.datetime(2018, 1, 1, 0), datetime.datetime(2018, 1, 1, 6)]
def test_filename_to_year():
assert 2018 == time.filename_to_year("some/long/path/2018.h5")
| earth2mip-main | tests/datasets/test_time.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import h5py
from earth2mip.datasets import era5
import pytest
@pytest.mark.slow
@pytest.mark.xfail
def test_open_34_vars(tmp_path: pathlib.Path):
path = tmp_path / "1979.h5"
with h5py.File(path, "w") as f:
f.create_dataset("fields", shape=[1, 34, 721, 1440])
ds = era5.open_34_vars(path)
# ensure that data can be grabbed
ds.mean().compute()
assert set(ds.coords) == {"time", "channel", "lat", "lon"}
@pytest.mark.slow
@pytest.mark.xfail
def test_open_all_hdf5(tmp_path):
folder = tmp_path / "train"
folder.mkdir()
path = folder / "1979.h5"
shape = [1, 34, 721, 1440]
with h5py.File(path, "w") as f:
f.create_dataset("fields", shape=shape)
with era5.open_all_hdf5(tmp_path.as_posix()) as array:
assert array.shape == (1, *shape)
| earth2mip-main | tests/datasets/test_era5.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip.initial_conditions.ifs import _get_filename, get
from earth2mip import schema
import datetime
import pytest
def test__get_filename():
expected = "20230310/00z/0p4-beta/oper/20230310000000-0h-oper-fc.grib2"
time = datetime.datetime(2023, 3, 10, 0)
assert _get_filename(time, "0h") == expected
@pytest.mark.slow
@pytest.mark.xfail
def test_get():
# uses I/O and old ICs are not available forever.
time = datetime.datetime(2023, 3, 10, 0)
ds = get(time, schema.ChannelSet.var34)
print(ds)
| earth2mip-main | tests/initial_conditions/test_ifs.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip.initial_conditions import get
from earth2mip import schema
import datetime
import pytest
@pytest.mark.slow
@pytest.mark.xfail
def test_get():
# uses I/O and old ICs are not available forever.
time = datetime.datetime(2023, 3, 10, 0)
dataset = get(
0, time, schema.ChannelSet.var34, source=schema.InitialConditionSource.cds
)
# check dims
correct_dims = {"time": 1, "channel": 34, "lat": 721, "lon": 1440}
assert dataset.dims == tuple(correct_dims.keys())
assert dataset.shape == tuple(correct_dims.values())
| earth2mip-main | tests/initial_conditions/test_cds.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to check that copyright headers exists"""
import itertools
import re
import json
from datetime import datetime
from pathlib import Path
def get_top_comments(_data):
    """
    Collect the comment lines from the top of the file
    """
lines_to_extract = []
for i, line in enumerate(_data):
# If empty line, skip
        if line in ["", "\n", "\r", "\r\n"]:
continue
# If it is a comment line, we should get it
if line.startswith("#"):
lines_to_extract.append(i)
# Assume all copyright headers occur before any import or from statements
# and not enclosed in a comment block
elif "import" in line:
break
elif "from" in line:
break
comments = []
for line in lines_to_extract:
comments.append(_data[line])
return comments
def main():
with open(Path(__file__).parent.resolve() / Path("config.json")) as f:
config = json.loads(f.read())
    print("License check config:")
print(json.dumps(config, sort_keys=True, indent=4))
current_year = int(datetime.today().year)
starting_year = 2023
python_header_path = Path(__file__).parent.resolve() / Path(
config["copyright_file"]
)
working_path = Path(__file__).parent.resolve() / Path(config["dir"])
exts = config["include-ext"]
with open(python_header_path, "r", encoding="utf-8") as original:
pyheader = original.read().split("\n")
pyheader_lines = len(pyheader)
# Build list of files to check
exclude_paths = [
(Path(__file__).parent / Path(path)).resolve().rglob("*")
for path in config["exclude-dir"]
]
all_exclude_paths = itertools.chain.from_iterable(exclude_paths)
exclude_filenames = [p for p in all_exclude_paths if p.suffix in exts]
filenames = [p for p in working_path.resolve().rglob("*") if p.suffix in exts]
filenames = [
filename for filename in filenames if filename not in exclude_filenames
]
problematic_files = []
gpl_files = []
for filename in filenames:
with open(str(filename), "r", encoding="utf-8") as original:
data = original.readlines()
data = get_top_comments(data)
if data and "# ignore_header_test" in data[0]:
continue
if len(data) < pyheader_lines - 1:
            print(f"{filename} has fewer header lines than the copyright template")
problematic_files.append(filename)
continue
found = False
for i, line in enumerate(data):
if re.search(re.compile("Copyright.*NVIDIA.*", re.IGNORECASE), line):
found = True
# Check 1st line manually
year_good = False
for year in range(starting_year, current_year + 1):
year_line = pyheader[0].format(CURRENT_YEAR=year)
if year_line in data[i]:
year_good = True
break
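                    # also accept the "NVIDIA CORPORATION & AFFILIATES." variant
                    # of the copyright line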
year_line_aff = year_line.split(".")
year_line_aff = (
year_line_aff[0] + " & AFFILIATES." + year_line_aff[1]
)
if year_line_aff in data[i]:
year_good = True
break
if not year_good:
problematic_files.append(filename)
print(f"{filename} had an error with the year")
break
# while "opyright" in data[i]:
# i += 1
# for j in range(1, pyheader_lines):
# if pyheader[j] not in data[i + j - 1]:
# problematic_files.append(filename)
# print(f"{filename} missed the line: {pyheader[j]}")
# break
if found:
break
if not found:
print(f"{filename} did not match the regex: `Copyright.*NVIDIA.*`")
problematic_files.append(filename)
# test if GPL license exists
for lines in data:
if "gpl" in lines.lower():
gpl_files.append(filename)
break
if len(problematic_files) > 0:
print("The following files that might not have a copyright header:")
for _file in problematic_files:
print(_file)
if len(gpl_files) > 0:
print("test_header.py found the following files that might have GPL copyright:")
for _file in gpl_files:
print(_file)
assert len(problematic_files) == 0, "header test failed!"
assert len(gpl_files) == 0, "found gpl license, header test failed!"
print(f"Success: File headers of {len(filenames)} files look good!")
if __name__ == "__main__":
main()
| earth2mip-main | tests/_license/header_check.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import numpy as np
def convert_to_datetime(time) -> datetime.datetime:
dt = datetime.datetime.fromisoformat(np.datetime_as_string(time, "s"))
return dt
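# Illustrative example (not part of the original module): the function accepts
# a numpy datetime64, e.g. an element of an xarray time coordinate, and returns
# a naive datetime truncated to second precision:
#   >>> convert_to_datetime(np.datetime64("2018-01-01T06:00:00"))
#   datetime.datetime(2018, 1, 1, 6, 0)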
| earth2mip-main | earth2mip/time.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Protocol, List, Iterator, Tuple, Any, Optional
import datetime
import torch
from earth2mip.schema import Grid
ChannelNameT = str
class TimeLoop(Protocol):
"""Abstract protocol that a custom time loop must follow
This is a callable which yields time and output information. Some attributes
are required to define the input and output data required.
The expectation is that this class and the data passed to it are on the same
device. While torch modules can be moved between devices easily, this is not
true for all frameworks.
Attributes:
in_channel_names:
out_channel_names:
grid:
n_history_levels:
history_time_step:
time_step:
device:
"""
in_channel_names: List[ChannelNameT]
out_channel_names: List[ChannelNameT]
grid: Grid
n_history_levels: int = 1
history_time_step: datetime.timedelta = datetime.timedelta(hours=0)
time_step: datetime.timedelta
device: torch.device
dtype: torch.dtype = torch.float32
def __call__(
self, time: datetime.datetime, x: torch.Tensor, restart: Optional[Any] = None
) -> Iterator[Tuple[datetime.datetime, torch.Tensor, Any]]:
"""
Args:
x: an initial condition. has shape (B, n_history_levels,
len(in_channel_names), Y, X). (Y, X) should be consistent with
``grid``. ``x[:, -i]`` is the data correspond to
``time - (i-1) * self.history_time_step``. Note this means that
``time`` corresponds to ``x[:, -1]``...not ``x[:, 0]``.
time: the datetime to start with
restart: if provided this restart information (typically some torch
Tensor) can be used to restart the time loop
Yields:
(time, output, restart) tuples. ``output`` is a tensor with
shape (B, len(out_channel_names), Y, X) which will be used for
diagnostics. Restart data should encode the state of the time
loop.
"""
pass
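# Illustrative sketch (an assumption, not part of the original module): a
# minimal TimeLoop that persists its input forever. It demonstrates the yield
# convention documented above: the first yielded tuple corresponds to the
# initial ``time`` and the output drops the history dimension.
class _PersistenceTimeLoop:
    in_channel_names = ["t2m"]
    out_channel_names = ["t2m"]
    grid = None  # a real implementation would set an earth2mip.schema.Grid
    n_history_levels = 1
    history_time_step = datetime.timedelta(hours=0)
    time_step = datetime.timedelta(hours=6)
    device = torch.device("cpu")
    dtype = torch.float32

    def __call__(
        self, time: datetime.datetime, x: torch.Tensor, restart: Optional[Any] = None
    ) -> Iterator[Tuple[datetime.datetime, torch.Tensor, Any]]:
        while True:
            # x[:, -1] is the most recent history level, i.e. the state at ``time``
            yield time, x[:, -1], None
            time += self.time_step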
| earth2mip-main | earth2mip/time_loop.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import xarray as xr
import torch
import argparse
import numpy as np
import datetime
import sys
from earth2mip import config
from earth2mip import schema, time_loop
from earth2mip.initial_conditions.era5 import HDF5DataSource
from earth2mip import _cli_utils
from modulus.distributed.manager import DistributedManager
__all__ = ["score_deterministic"]
def get_times():
# the IFS data Jaideep downloaded only has 668 steps (up to end of november 2018)
nsteps = 668
times = [
datetime.datetime(2018, 1, 1) + k * datetime.timedelta(hours=12)
for k in range(nsteps)
]
return times
class RMSE:
def __init__(self, weight=None):
self._xy = {}
self.weight = weight
def _mean(self, x):
if self.weight is not None:
x = self.weight * x
denom = self.weight.mean(-1).mean(-1)
else:
denom = 1
num = x.mean(0).mean(-1).mean(-1)
return num / denom
def call(self, truth, pred):
xy = self._mean((truth - pred) ** 2)
return xy.cpu()
def gather(self, seq):
return torch.sqrt(sum(seq) / len(seq))
class ACC:
def __init__(self, mean, weight=None):
self.mean = mean
self._xy = {}
self._xx = {}
self._yy = {}
self.weight = weight
def _mean(self, x):
if self.weight is not None:
x = self.weight * x
denom = self.weight.mean(-1).mean(-1)
else:
denom = 1
num = x.mean(0).mean(-1).mean(-1)
return num / denom
def call(self, truth, pred):
xx = self._mean((truth - self.mean) ** 2).cpu()
yy = self._mean((pred - self.mean) ** 2).cpu()
xy = self._mean((pred - self.mean) * (truth - self.mean)).cpu()
return xx, yy, xy
def gather(self, seq):
"""seq is an iterable of (xx, yy, xy) tuples"""
# transpose seq
xx, yy, xy = zip(*seq)
xx = sum(xx)
xy = sum(xy)
yy = sum(yy)
return xy / torch.sqrt(xx) / torch.sqrt(yy)
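# Illustrative example (not part of the original module): each metric is used
# by calling ``call`` once per forecast and reducing with ``gather``. Here
# ``pairs`` is a hypothetical iterable of (truth, prediction) tensors, and
# ``mean``/``weight`` are assumed to be prepared as in ``run_forecast`` below:
#   acc = ACC(mean, weight=weight)
#   stats = [acc.call(truth, pred) for truth, pred in pairs]
#   score = acc.gather(stats)  # tensor of shape (channel,)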
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("inference")
def flat_map(func, seq, *args):
for x in seq:
yield from func(x, *args)
def run_forecast(
model: time_loop.TimeLoop, n, initial_times, device, data_source, mean
):
mean = mean.squeeze()
assert mean.ndim == 3
nlat = {schema.Grid.grid_720x1440: 720, schema.Grid.grid_721x1440: 721}[model.grid]
channels = [
data_source.channel_names.index(name) for name in model.out_channel_names
]
mean = mean[channels, :nlat]
mean = torch.from_numpy(mean).to(device)
ds = data_source[initial_times[0]]
lat = np.deg2rad(ds.lat).values
assert lat.ndim == 1
weight = np.cos(lat)[:, np.newaxis]
weight_torch = torch.from_numpy(weight).to(device)
if model.grid == schema.Grid.grid_720x1440:
weight_torch = weight_torch[:720, :]
acc = ACC(mean, weight=weight_torch)
metrics = {"acc": acc, "rmse": RMSE(weight=weight_torch)}
def process(initial_time):
logger.info(f"Running {initial_time}")
initial_condition = data_source[initial_time]
logger.debug("Initial Condition Loaded.")
x = torch.from_numpy(initial_condition.values[None, :, channels]).to(device)
i = -1
for valid_time, data, _ in model(x=x, time=initial_time):
assert data.shape[1] == len(model.out_channel_names)
i += 1
if i > n:
break
lead_time = valid_time - initial_time
logger.debug(f"{valid_time}")
# TODO may need to fix n_history here
v = data_source[valid_time]
verification = v.values[:, channels, :nlat, :]
verification_torch = torch.from_numpy(verification).to(device)
output = {}
for name, metric in metrics.items():
output[name] = metric.call(verification_torch, data)
yield (initial_time, lead_time), output
# collect outputs for lead_times
my_channels = np.array(model.out_channel_names)
return metrics, my_channels, list(flat_map(process, initial_times))
def gather(seq, metrics, model_name, channels):
outputs_by_lead_time = {}
initial_times = set()
for (initial_time, lead_time), metric_values in seq:
forecasts_at_lead_time = outputs_by_lead_time.setdefault(lead_time, [])
forecasts_at_lead_time.append(metric_values)
initial_times.add(initial_time)
def to_dataset(metric, name):
outputs = {
k: [v[name] for v in snapshots]
for k, snapshots in outputs_by_lead_time.items()
}
times, accs = zip(*outputs.items())
times = list(times)
acc_arr = [metric.gather(acc) for acc in accs]
stacked = torch.stack(acc_arr, 0)
stacked = stacked.cpu().numpy()
return xr.DataArray(
stacked,
dims=["lead_time", "channel"],
coords={"lead_time": times, "channel": channels},
).to_dataset(name=name)
ds = xr.merge(to_dataset(metric, name) for name, metric in metrics.items())
ds = ds.assign(
initial_times=xr.DataArray(list(initial_times), dims=["initial_time"])
)
return ds
def score_deterministic(
model: time_loop.TimeLoop, n: int, initial_times, data_source, time_mean
):
"""Compute deterministic accs and rmses
Args:
model: the inference class
n: the number of lead times
initial_times: the initial_times to compute over
data_source: a mapping from time to dataset, used for the initial
condition and the scoring
time_mean: a (channel, lat, lon) numpy array containing the time_mean.
Used for ACC.
Returns:
metrics
"""
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
device = f"cuda:{rank % world_size}"
else:
rank = 0
world_size = 1
device = "cuda:0"
local_initial_times = initial_times[rank::world_size]
metrics, channels, seq = run_forecast(
model,
n=n,
device=device,
initial_times=local_initial_times,
data_source=data_source,
mean=time_mean,
)
if world_size > 1:
output_list = [None] * world_size
torch.distributed.all_gather_object(output_list, seq)
else:
output_list = [seq]
if rank == 0:
seq = []
for item in output_list:
seq.extend(item)
return gather(
seq,
metrics=metrics,
model_name=model,
channels=channels,
)
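# Illustrative usage (not part of the original module); ``model``,
# ``data_source`` and the time mean are assumed to be prepared as in ``main``
# below, and the channel name is hypothetical:
#   times = get_times()[:8]
#   ds = score_deterministic(
#       model, n=4, initial_times=times, data_source=data_source,
#       time_mean=data_source.time_means,
#   )
#   ds["acc"].sel(channel="z500")  # ACC over lead_time for one channel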
def main():
parser = argparse.ArgumentParser()
_cli_utils.add_model_args(parser, required=True)
parser.add_argument("output")
parser.add_argument("-n", type=int, default=4)
parser.add_argument("--test", action="store_true")
# TODO refactor this to a shared place
parser.add_argument(
"--data", type=str, help="path to hdf5 root directory containing data.json"
)
args = parser.parse_args()
DistributedManager.initialize()
dist = DistributedManager()
initial_times = get_times()
if args.test:
initial_times = initial_times[-dist.world_size :]
model = _cli_utils.model_from_args(args, dist.device)
data_source = HDF5DataSource.from_path(args.data or config.ERA5_HDF5_73)
# time mean
ds = score_deterministic(
model, args.n, initial_times, data_source, time_mean=data_source.time_means
)
if dist.rank == 0:
ds.attrs["model"] = args.model
ds.attrs["history"] = " ".join(sys.argv)
output = os.path.abspath(args.output)
dirname = os.path.dirname(args.output)
if dirname:
os.makedirs(dirname, exist_ok=True)
ds.to_netcdf(output)
if __name__ == "__main__":
main()
| earth2mip-main | earth2mip/inference_medium_range.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np # noqa
import h5py # noqa
import os # noqa
from einops import rearrange # noqa
from earth2mip import schema # noqa
import torch_harmonics as th # noqa
from earth2mip.networks import Inference # noqa
from datetime import datetime
from timeit import default_timer # noqa
from typing import Union
def generate_noise_correlated(shape, *, reddening, device, noise_amplitude):
return noise_amplitude * brown_noise(shape, reddening).to(device)
def brown_noise(shape, reddening=2):
noise = torch.normal(torch.zeros(shape), torch.ones(shape))
x_white = torch.fft.fft2(noise)
S = (
torch.abs(torch.fft.fftfreq(noise.shape[-2]).reshape(-1, 1)) ** reddening
+ torch.abs(torch.fft.fftfreq(noise.shape[-1])) ** reddening
)
S = torch.where(S == 0, 0, 1 / S)
S = S / torch.sqrt(torch.mean(S**2))
x_shaped = x_white * S
noise_shaped = torch.fft.ifft2(x_shaped).real
return noise_shaped
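# Illustrative example (not part of the original module): ``brown_noise``
# multiplies white noise by a 1/f**reddening spectrum over the last two axes,
# so larger ``reddening`` produces smoother, more spatially correlated fields:
#   field = brown_noise((1, 73, 721, 1440), reddening=2)
#   noise = generate_noise_correlated(
#       (1, 73, 721, 1440), reddening=2, device="cpu", noise_amplitude=0.05
#   )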
def generate_bred_vector(
x: torch.Tensor,
model: Inference,
noise_amplitude: float = 0.15,
time: Union[datetime, None] = None,
integration_steps: int = 40,
inflate=False,
):
# Assume x has shape [ENSEMBLE, TIME, CHANNEL, LAT, LON]
x0 = x[:1]
# Get control forecast
for data in model.run_steps(x0, n=1, normalize=False, time=time):
xd = data
# Unsqueeze if time has been collapsed.
if xd.ndim != x0.ndim:
xd = xd.unsqueeze(1)
dx = noise_amplitude * torch.randn(x.shape, device=x.device, dtype=x.dtype)
for _ in range(integration_steps):
x1 = x + dx
for data in model.run_steps(x1, n=1, normalize=False, time=time):
x2 = data
# Unsqueeze if time has been collapsed.
if x2.ndim != x1.ndim:
x2 = x2.unsqueeze(1)
dx = x2 - xd
if inflate:
dx += noise_amplitude * (dx - dx.mean(dim=0))
gamma = torch.norm(x) / torch.norm(x + dx)
return noise_amplitude * dx * gamma
| earth2mip-main | earth2mip/ensemble_utils.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import logging
import shutil
import os
import typer
from modulus.distributed.manager import DistributedManager
import torch.distributed
from distributed import Client
from earth2mip import inference_ensemble, networks, score_ensemble_outputs
from earth2mip.schema import EnsembleRun
logging.basicConfig(
format="%(asctime)s:%(levelname)-s:%(name)s:%(message)s",
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(__file__)
WD = os.getcwd()
def get_distributed_client(rank):
scheduler_file = "scheduler.json"
if rank == 0:
client = Client(n_workers=32, threads_per_worker=1)
client.write_scheduler_file(scheduler_file)
if torch.distributed.is_initialized():
torch.distributed.barrier()
if rank != 0:
client = Client(scheduler_file=scheduler_file)
return client
def main(
root: str,
shard: int = 0,
n_shards: int = 1,
):
"""
Args:
root: the root directory of the output
shard: index of the shard
n_shards: split the input times into this many shards
"""
assert shard < n_shards
config_path = os.path.join(root, "config.json")
with open(config_path, "r") as f:
config = json.load(f)
DistributedManager.initialize()
model = networks.get_model(config["model"])
dist = DistributedManager()
protocol = config["protocol"]
lines = protocol["times"][shard::n_shards]
logger.info(
f"Working on shard {shard+1}/{n_shards}. {len(lines)} initial times to run."
)
run = EnsembleRun.parse_obj(protocol["inference_template"])
n_ensemble_batches = run.ensemble_members // run.ensemble_batch_size
ranks_per_time = min(n_ensemble_batches, dist.world_size)
ranks_per_time = ranks_per_time - dist.world_size % ranks_per_time
time_rank = int(dist.rank // ranks_per_time)
n_time_groups = int(dist.world_size // ranks_per_time)
group_ranks = list(
range(time_rank * ranks_per_time, (time_rank + 1) * ranks_per_time)
)
logger.info(
"distributed info: " + str((dist.rank, time_rank, group_ranks, ranks_per_time))
)
if torch.distributed.is_initialized():
group = torch.distributed.new_group(group_ranks)
group_rank = torch.distributed.get_group_rank(group, dist.rank)
lines = lines[time_rank::n_time_groups]
# setup dask client for post processing
client = get_distributed_client(dist.rank)
post_process_task = None
count = 0
for line in lines:
count += 1
initial_time = datetime.datetime.fromisoformat(line.strip())
        start = datetime.datetime.now()
output = f"{root}/{initial_time.isoformat()}"
if os.path.exists(output):
continue
d = output + ".tmp"
if torch.distributed.is_initialized():
torch.distributed.barrier(group)
perturb = inference_ensemble.get_initializer(
model,
run,
)
run.weather_event.properties.start_time = initial_time
run.output_path = d
inference_ensemble.run_inference(
model, run, group=group, progress=False, perturb=perturb
)
if group_rank == 0:
def post_process(d):
output_path = f"{d}/output/"
score_ensemble_outputs.main(
input_path=d,
output_path=output_path,
time_averaging_window=protocol.get("time_averaging_window", ""),
score=protocol.get("score", False),
save_ensemble=protocol.get("save_ensemble", False),
)
shutil.move(output_path, output)
shutil.rmtree(d, ignore_errors=True)
# do not work on more than one post processing job at once
if post_process_task is not None:
post_process_task.result()
post_process_task = client.submit(post_process, d)
        stop = datetime.datetime.now()
elapsed = stop - start
remaining = elapsed * (len(lines) - count)
logger.info(
f"{count}/{len(lines)}: {initial_time} done. Elapsed: {elapsed.total_seconds()}s. Remaining: {remaining}s" # noqa
)
# finish up final task
if group_rank == 0 and post_process_task is not None:
post_process_task.result()
# keep barrier at end so
# dask distributed client is not cleaned up
if torch.distributed.is_initialized():
torch.distributed.barrier()
logging.info(f"rank {dist.rank} Finished.")
if __name__ == "__main__":
typer.run(main)
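# Illustrative invocation (an assumption based on the typer CLI above): the
# ``root`` directory must contain a config.json such as the one written by
# earth2mip.make_job, and shards split the initial times across jobs:
#   python -m earth2mip.time_collection /path/to/job --shard 0 --n-shards 4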
| earth2mip-main | earth2mip/time_collection.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip import networks, schema
import argparse
def add_model_args(parser: argparse.ArgumentParser, required=False):
if required:
parser.add_argument("model", type=str)
else:
parser.add_argument("--model", type=str)
parser.add_argument(
"--model-metadata",
type=str,
help="metadata.json file. Defaults to the metadata.json in the model package.",
default="",
)
def model_from_args(args, device):
if args.model_metadata:
with open(args.model_metadata) as f:
metadata = schema.Model.parse_raw(f.read())
else:
metadata = None
return networks.get_model(args.model, device=device, metadata=metadata)
| earth2mip-main | earth2mip/_cli_utils.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import os
import argparse
import xarray
import pathlib
import earth2mip.time
from earth2mip import weather_events, schema
from earth2mip.initial_conditions.era5 import open_era5_xarray
import xskillscore
import logging
logger = logging.getLogger(__file__)
def save_dataset(out, path):
out.to_zarr(path + ".zarr", mode="w")
def _open(f, domain, chunks={"time": 1}):
root = xarray.open_dataset(f, decode_times=True)
ds = xarray.open_dataset(f, chunks=chunks, group=domain)
ds.attrs.update(root.attrs)
return ds.assign_coords(root.coords)
def open_ensemble(path, group):
path = pathlib.Path(path)
ensemble_files = sorted(list(path.glob("ensemble_out_*.nc")))
return xarray.concat([_open(f, group) for f in ensemble_files], dim="ensemble")
def open_verification(time, channel_set):
v = open_era5_xarray(time, channel_set)
v = v.to_dataset("channel")
v = v.chunk({"time": 1})
return v
def read_weather_event(dir):
ncfile = os.path.join(dir, "ensemble_out_0.nc")
ds = xarray.open_dataset(ncfile)
weather_event = weather_events.WeatherEvent.parse_raw(ds.weather_event)
return weather_event
def main(
input_path: str,
output_path: Optional[str] = None,
time_averaging_window: str = "",
score: bool = True,
save_ensemble: bool = False,
) -> None:
if output_path is None:
        output_path = input_path
# use single-threaded scheduler to avoid deadlocks when writing to netCDF.
# processes doesn't work because locks can't be shared and threaded
# deadlocks, dask distributed works but isn't any faster, probably because
# these are I/O bound computations. It is probably better to use zarr as an
# output.
pathlib.Path(output_path).mkdir(exist_ok=True)
weather_event = read_weather_event(input_path)
for domain in weather_event.domains:
if domain.type != "Window":
continue
ds = open_ensemble(input_path, domain.name)
channel_set = schema.ChannelSet.var73
ds.attrs["time_averaging_window"] = time_averaging_window
if time_averaging_window:
ds = ds.resample(time=time_averaging_window).mean(
dim="time", keep_attrs=True, skipna=False, keepdims=True
)
logger.info("Computing mean")
ensemble_mean = ds.mean(dim="ensemble", keep_attrs=True)
        save_dataset(ensemble_mean, os.path.join(output_path, "mean"))
if ds.sizes["ensemble"] > 1:
logger.info("Computing variance")
variance = ds.var(dim="ensemble", keep_attrs=True)
            save_dataset(variance, os.path.join(output_path, "variance"))
if score:
logger.info("Scoring")
date_obj = earth2mip.time.convert_to_datetime(ds.time[0])
v = open_verification(date_obj, channel_set=channel_set)
shared = set(v) & set(ds)
verification = v[list(shared)]
ds = ds[list(shared)]
ds, verification, ensemble_mean = xarray.align(
ds, verification, ensemble_mean
)
if time_averaging_window:
verification = verification.resample(time=time_averaging_window).mean(
dim="time", keep_attrs=True, skipna=False, keepdims=True
)
ensemble_mse = (verification - ensemble_mean) ** 2.0
save_dataset(ensemble_mse, os.path.join(output_path, "ensemble_mse"))
deterministic_mse = (verification - ds.isel(ensemble=0)) ** 2.0
deterministic_mse.attrs.update(verification.attrs)
save_dataset(
deterministic_mse, os.path.join(output_path, "deterministic_mse")
)
crps = xskillscore.crps_ensemble(
verification,
ds.chunk(dict(ensemble=-1)),
issorted=False,
member_dim="ensemble",
dim=(),
keep_attrs=True,
)
save_dataset(crps, os.path.join(output_path, "crps"))
if save_ensemble:
logger.info("Saving ensemble")
save_dataset(ds, os.path.join(output_path, "ensemble"))
return
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
from distributed import Client
Client()
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_path",
type=str,
help="full path to the ensemble simulation directory",
)
parser.add_argument(
"--output_path",
type=str,
help="full path to the ensemble score output directory",
)
parser.add_argument(
"--time_averaging_window",
type=str,
help="a string arg for the time averaging as np.datetime64 format, i.e. 2W",
default="",
)
parser.add_argument(
"--no-score",
action="store_false",
dest="score",
default=True,
help="Turn off scoring if provided",
)
parser.add_argument(
"--save-ensemble", action="store_true", help="Save out all ensemble members"
)
args = parser.parse_args()
main(
args.input_path,
args.output_path,
args.time_averaging_window,
args.score,
args.save_ensemble,
)
| earth2mip-main | earth2mip/score_ensemble_outputs.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from earth2mip.weather_events import CWBDomain, Window, MultiPoint
from earth2mip import weather_events
from earth2mip.schema import Grid
from netCDF4._netCDF4 import Group
from typing import Union
class Diagnostics:
def __init__(
self,
group: Group,
domain: Union[CWBDomain, Window, MultiPoint],
grid: Grid,
diagnostic: weather_events.Diagnostic,
lat: np.ndarray,
lon: np.ndarray,
device: torch.device,
):
self.group, self.domain, self.grid, self.lat, self.lon = (
group,
domain,
grid,
lat,
lon,
)
self.diagnostic = diagnostic
self.device = device
self._init_subgroup()
self._init_dimensions()
self._init_variables()
def _init_subgroup(
self,
):
if self.diagnostic.type == "raw":
self.subgroup = self.group
else:
self.subgroup = self.group.createGroup(self.diagnostic.type)
def _init_dimensions(
self,
):
if self.domain.type == "MultiPoint":
self.domain_dims = ("npoints",)
else:
self.domain_dims = ("lat", "lon")
def _init_variables(
self,
):
dims = self.get_dimensions()
dtypes = self.get_dtype()
for channel in self.diagnostic.channels:
if self.diagnostic.type == "histogram":
pass
else:
self.subgroup.createVariable(
channel, dtypes[self.diagnostic.type], dims[self.diagnostic.type]
)
def get_dimensions(
self,
):
raise NotImplementedError
def get_dtype(
self,
):
raise NotImplementedError
def get_variables(
self,
):
raise NotImplementedError
def update(
self,
):
raise NotImplementedError
def finalize(
self,
):
raise NotImplementedError
class Raw(Diagnostics):
def __init__(
self,
group: Group,
domain: Union[CWBDomain, Window, MultiPoint],
grid: Grid,
diagnostic: weather_events.Diagnostic,
lat: np.ndarray,
lon: np.ndarray,
device: torch.device,
):
super().__init__(group, domain, grid, diagnostic, lat, lon, device)
def get_dimensions(self):
return {"raw": ("ensemble", "time") + self.domain_dims}
def get_dtype(self):
return {"raw": float}
def update(
self, output: torch.Tensor, time_index: int, batch_id: int, batch_size: int
):
for c, channel in enumerate(self.diagnostic.channels):
self.subgroup[channel][batch_id : batch_id + batch_size, time_index] = (
output[:, c].cpu().numpy()
)
def finalize(self, *args):
pass
DiagnosticTypes = {
"raw": Raw,
}
| earth2mip-main | earth2mip/diagnostics.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import fsspec
import fsspec.implementations.cached
import s3fs
import builtins
import urllib.request
import os
import hashlib
import logging
logger = logging.getLogger(__name__)
LOCAL_CACHE = os.environ["HOME"] + "/.cache/fcn-mip"
def _cache_fs(fs):
return fsspec.implementations.cached.CachingFileSystem(
fs=fs, cache_storage=LOCAL_CACHE
)
def _get_fs(path):
if path.startswith("s3://"):
return s3fs.S3FileSystem(client_kwargs=dict(endpoint_url="https://pbss.s8k.io"))
else:
return fsspec.filesystem("file")
def open(path, mode="r"):
if path.startswith("s3://"):
fs = _get_fs(path)
cached_fs = _cache_fs(fs)
return cached_fs.open(path, mode)
else:
return builtins.open(path, mode)
def download_cached(path: str, recursive: bool = False) -> str:
sha = hashlib.sha256(path.encode())
filename = sha.hexdigest()
os.makedirs(LOCAL_CACHE, exist_ok=True)
cache_path = os.path.join(LOCAL_CACHE, filename)
url = urllib.parse.urlparse(path)
# TODO watch for race condition here
if not os.path.exists(cache_path):
logger.debug("Downloading %s to cache: %s", path, cache_path)
if path.startswith("s3://"):
fs = _get_fs(path)
fs.get(path, cache_path)
elif url.scheme == "http" or url.scheme == "https":
# TODO: Check if this supports directory fetches
urllib.request.urlretrieve(path, cache_path)
elif url.scheme == "file":
path = os.path.join(url.netloc, url.path)
return path
else:
return path
else:
logger.debug("Opening from cache: %s", cache_path)
return cache_path
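# Illustrative example (not part of the original module): remote objects are
# cached under LOCAL_CACHE keyed by the sha256 of the URL, while plain local
# paths are returned unchanged.
#   local = download_cached("https://example.com/weights.tar")  # hypothetical URL
#   same = download_cached("/already/local/weights.tar")  # returned as-is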
def pipe(dest, value):
"""Save string to dest"""
fs = _get_fs(dest)
fs.pipe(dest, value)
def _to_url(scheme, path):
if scheme:
return scheme + "://" + path
else:
return path
def glob(pattern: str) -> List[str]:
fs = _get_fs(pattern)
url = urllib.parse.urlparse(pattern)
return [_to_url(url.scheme, path) for path in fs.glob(pattern)]
def ls(path):
fs = _get_fs(path)
return fs.ls(path)
| earth2mip-main | earth2mip/filesystem.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Protocol
class LoaderProtocol(Protocol):
def __call__(self, package, pretrained=True) -> None:
return
def pickle(package, pretrained=True):
"""
load a checkpoint into a model
"""
assert pretrained
p = package.get("weights.tar")
return torch.load(p)
def torchscript(package, pretrained=True):
"""
load a checkpoint into a model
"""
p = package.get("scripted_model.pt")
import json
config = package.get("config.json")
with open(config) as f:
config = json.load(f)
model = torch.jit.load(p)
if config["add_zenith"]:
from earth2mip.networks import CosZenWrapper
import numpy as np
lat = 90 - np.arange(721) * 0.25
lon = np.arange(1440) * 0.25
model = CosZenWrapper(model, lon, lat)
return model
| earth2mip-main | earth2mip/loaders.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip._config import Settings
from earth2mip.model_registry import ModelRegistry
__version__ = "23.8.15"
config = Settings()
registry = ModelRegistry(config.MODEL_REGISTRY)
| earth2mip-main | earth2mip/__init__.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Routines to save domains to a netCDF file
"""
import numpy as np
import torch
from typing import List, Iterable
import xarray as xr
import cftime
from earth2mip import geometry
from earth2mip.weather_events import Domain
from earth2mip.diagnostics import DiagnosticTypes, Diagnostics
__all__ = ["initialize_netcdf", "update_netcdf", "finalize_netcdf"]
def _assign_lat_attributes(nc_variable):
nc_variable.units = "degrees_north"
nc_variable.standard_name = "latitude"
nc_variable.long_name = "latitude"
def _assign_lon_attributes(nc_variable):
nc_variable.units = "degrees_east"
nc_variable.standard_name = "longitude"
nc_variable.long_name = "longitude"
def init_dimensions(domain: Domain, group, lat, lon):
if domain.type == "CWBDomain":
cwb_path = "/lustre/fsw/sw_climate_fno/nbrenowitz/2023-01-24-cwb-4years.zarr"
lat = xr.open_zarr(cwb_path)["XLAT"][:, 0]
lon = xr.open_zarr(cwb_path)["XLONG"][0, :]
nlat = lat.size
nlon = lon.size
group.createDimension("lat", nlat)
group.createDimension("lon", nlon)
v = group.createVariable("lat", np.float32, ("lat"))
_assign_lat_attributes(v)
v = group.createVariable("lon", np.float32, ("lon"))
_assign_lon_attributes(v)
group["lat"][:] = lat
group["lon"][:] = lon
elif domain.type == "Window":
lat_sl, lon_sl = geometry.get_bounds_window(domain, lat, lon)
group.createVariable("imin", int, ())
group.createVariable("imax", int, ())
group.createVariable("jmin", int, ())
group.createVariable("jmax", int, ())
group["imin"][:] = lat_sl.start
group["imax"][:] = lat_sl.stop
group["jmin"][:] = lon_sl.start
group["jmax"][:] = lon_sl.stop
nlat = np.r_[lat_sl].size
nlon = np.r_[lon_sl].size
group.createDimension("lat", nlat)
group.createDimension("lon", nlon)
v = group.createVariable("lat", np.float32, ("lat"))
_assign_lat_attributes(v)
v = group.createVariable("lon", np.float32, ("lon"))
_assign_lon_attributes(v)
group["lat"][:] = lat[lat_sl]
group["lon"][:] = lon[lon_sl]
elif domain.type == "MultiPoint":
assert len(domain.lat) == len(
domain.lon
), "Lat and Lon arrays must be of same size!"
group.createDimension("npoints", len(domain.lon))
v = group.createVariable("lat_point", np.float32, ("npoints"))
_assign_lat_attributes(v)
v = group.createVariable("lon_point", np.float32, ("npoints"))
_assign_lon_attributes(v)
for diagnostic in domain.diagnostics:
group.createDimension("n_channel", len(diagnostic.channels))
group["lat_point"][:] = domain.lat
group["lon_point"][:] = domain.lon
else:
raise NotImplementedError(f"domain type {domain.type} not supported")
return
def initialize_netcdf(
nc, domains: Iterable[Domain], grid, lat, lon, n_ensemble, device
) -> List[List[Diagnostics]]:
nc.createVLType(str, "vls")
nc.createDimension("time", None)
nc.createDimension("ensemble", n_ensemble)
nc.createVariable("time", np.float32, ("time"))
total_diagnostics = []
for domain in domains:
group = nc.createGroup(domain.name)
init_dimensions(domain, group, lat, lon)
diagnostics = []
for d in domain.diagnostics:
diagnostic = DiagnosticTypes[d.type](
group, domain, grid, d, lat, lon, device
)
diagnostics.append(diagnostic)
total_diagnostics.append(diagnostics)
return total_diagnostics
def update_netcdf(
data: torch.Tensor,
total_diagnostics: List[List[Diagnostics]],
domains: List[Domain],
batch_id,
time_count,
model,
lat,
lon,
channel,
):
assert len(total_diagnostics) == len(domains), (total_diagnostics, domains)
batch_size = geometry.get_batch_size(data)
for d_index, domain in enumerate(domains):
lat, lon, regional_data = geometry.select_space(data, lat, lon, domain)
domain_diagnostics = total_diagnostics[d_index]
for diagnostic in domain_diagnostics:
output = geometry.sel_channel(
model, channel, regional_data, diagnostic.diagnostic.channels
)
diagnostic.update(output, time_count, batch_id, batch_size)
return
def finalize_netcdf(total_diagnostics, nc, domains, weather_event, channel_set):
times = cftime.num2date(nc["time"][:], nc["time"].units)
for d_index, domain in enumerate(domains):
domain_diagnostics = total_diagnostics[d_index]
for diagnostic in domain_diagnostics:
diagnostic.finalize(times, weather_event, channel_set)
for diagnostic in domain_diagnostics:
if hasattr(diagnostic, "tmpdir"):
diagnostic.tmpdir.cleanup()
return
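# Illustrative call sequence (not part of the original module): the three
# public routines are used together over one inference run. ``nc`` is an open
# netCDF4 Dataset; the other arguments are assumed to come from the driver:
#   diagnostics = initialize_netcdf(nc, domains, grid, lat, lon, n_ensemble, device)
#   for time_count, data in enumerate(outputs):  # hypothetical output stream
#       update_netcdf(data, diagnostics, domains, batch_id, time_count,
#                     model, lat, lon, channel)
#   finalize_netcdf(diagnostics, nc, domains, weather_event, channel_set)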
| earth2mip-main | earth2mip/netcdf.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forecast abstractions
A forecast is a discrete array of ``(n_initial_times, n_lead_times)``. However
because a forecast evolves forward in time, and we do not store the whole
forecast necessarily, algorithms in fcn-mip should access ``n_lead_times`` in
sequential order. This is the purpose of the abstractions here.
"""
from typing import Sequence, Any, Protocol, Iterator, List
import datetime
import torch
import xarray
import logging
import numpy
from earth2mip import time_loop
import asyncio
logger = logging.getLogger(__name__)
class Forecast(Protocol):
channel_names: List[str]
def __getitem__(self, i: int) -> Iterator[torch.Tensor]:
pass
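# Illustrative example (not part of the original module): a Forecast is indexed
# by initial-time index and then iterated over lead times in order. The
# concrete classes below yield asynchronously, so a consumer looks like:
#   async def head(forecast, i, n):
#       out = []
#       async for data in forecast[i]:
#           out.append(data)
#           if len(out) == n:
#               return out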
class Persistence:
"""persistence forecast. This forecast always returns the initial condition.
Yields (channel, lat, lon)
"""
def __init__(self, observations: Any):
self.observations = observations
@property
def channel_names(self):
        x = asyncio.run(self.observations[0])
return x.channel.tolist()
async def __getitem__(self, i: int):
x = await self.observations[i]
while True:
yield x
class TimeLoopForecast:
"""Wrap an fcn-mip TimeLoop object as a forecast"""
def __init__(
self,
time_loop: time_loop.TimeLoop,
times: Sequence[datetime.datetime],
observations: Any,
):
assert len(times) == len(observations)
self.observations = observations
self.time_loop = time_loop
self._times = times
@property
def channel_names(self):
return self.time_loop.out_channel_names
async def __getitem__(self, i):
# TODO clean-up this interface. pick a consistent type for ``x``.
x = await self.observations[i]
x = x.sel(channel=self.time_loop.in_channel_names)
x = torch.from_numpy(x.values).cuda()
x = x[None]
count = 0
dt = self._times[1] - self._times[0]
yield_every = int(dt // self.time_loop.time_step)
assert yield_every * self.time_loop.time_step == dt
for time, data, _ in self.time_loop(x=x, time=self._times[i]):
if count % yield_every == 0:
logger.info("forecast %s", time)
yield data
count += 1
class XarrayForecast:
"""Turn an xarray into a forecast-like dataset"""
def __init__(
self, ds: xarray.Dataset, fields, times: Sequence[datetime.datetime], xp=numpy
):
self._dataset = ds
self._fields = fields
self._times = times
self.xp = xp
@property
def channel_names(self):
return self._dataset.channel.values.tolist()
async def __getitem__(self, i):
initial_time = self._times[i]
j = i - 1
all_data = (
self._dataset.sel(initial_time=initial_time)[self._fields]
.to_array(dim="channel")
.sel(channel=self._fields)
.load()
)
while True:
j += 1
time = self._times[j]
data = all_data.sel(time=time - initial_time)
data.data = self.xp.asarray(data.data)
yield j, data
| earth2mip-main | earth2mip/forecasts.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import datetime
import typer
import json
def get_times_2018():
nsteps = 728
times = [
datetime.datetime(2018, 1, 1) + k * datetime.timedelta(hours=12)
for k in range(nsteps)
]
return times
def get_times_s2s_test():
time = datetime.datetime(2020, 1, 2)
dt = datetime.timedelta(days=7)
while time.year < 2021:
yield time
time += dt
def get_time_s2s_calibration():
times_file = pathlib.Path(__file__).parent / "times" / "calibration.txt"
with times_file.open() as f:
for line in f:
line = line.strip()
time = datetime.datetime.fromisoformat(line)
yield time
get_times = {
"2018": get_times_2018,
"s2s_test": get_times_s2s_test,
"s2s_calibration": get_time_s2s_calibration,
}
def get_time(times):
if isinstance(times, list):
times = [datetime.datetime.fromisoformat(s) for s in times]
else:
times = get_times[times]()
# convert generator to list
times = list(times)
return times
def main(
model: str,
config: str,
output: str,
):
os.makedirs(output, exist_ok=True)
with open(config) as f:
protocol_config = json.load(f)
config = {"protocol": protocol_config, "model": model}
times = get_time(protocol_config["times"])
config["protocol"]["times"] = [time.isoformat() for time in times]
config_path = os.path.join(output, "config.json")
with open(config_path, "w") as f:
json.dump(config, f)
if __name__ == "__main__":
typer.run(main)
| earth2mip-main | earth2mip/make_job.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Routines for working with geometry"""
import numpy as np
import torch
import xarray as xr
LAT_AVERAGE = "LatitudeAverage"
def sel_channel(model, channel_info, data, channels):
channels = np.asarray(channels)
    # TODO: What's the point of model.channels here? It needs a clearer name.
if model.channels is not None:
torch_indices = list(model.channels)
channels_in_data = np.asarray(channel_info)[torch_indices].tolist()
else:
channels_in_data = np.asarray(channel_info).tolist()
index_to_select = [channels_in_data.index(ch) for ch in channels]
return data[:, index_to_select]
def get_batch_size(data):
return data.shape[0]
def get_bounds_window(geom, lat, lon):
i_min = np.where(lat <= geom.lat_max)[0][0]
i_max = np.where(lat >= geom.lat_min)[0][-1]
j_min = np.where(lon >= geom.lon_min)[0][0]
j_max = np.where(lon <= geom.lon_max)[0][-1]
return slice(i_min, i_max + 1), slice(j_min, j_max + 1)
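# Illustrative example (not part of the original module): on the 0.25-degree
# grid with lat descending from 90 to -90 and lon in [0, 360), a window with
# lat_min=0, lat_max=30, lon_min=10, lon_max=20 gives
# (slice(240, 361), slice(40, 81)); both bounds are inclusive in index space.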
def select_space(data, lat, lon, domain):
lat = np.asarray(lat)
lon = np.asarray(lon)
assert data.ndim == 4, data.ndim
assert data.shape[2] == lat.size, lat.size
assert data.shape[3] == lon.size, lon.size
domain_type = domain.type
if domain_type == "Window" or domain_type == LAT_AVERAGE or domain_type == "global":
lat_sl, lon_sl = get_bounds_window(domain, lat, lon)
domain_lat = lat[lat_sl]
domain_lon = lon[lon_sl]
return domain_lat, domain_lon, data[:, :, lat_sl, lon_sl]
elif domain_type == "MultiPoint":
# Convert lat-long points to array index (just got to closest 0.25 degree)
i = lat.size - np.searchsorted(lat[::-1], domain.lat, side="right")
j = np.searchsorted(lon, domain.lon, side="left")
# TODO refactor this assertion to a test
np.testing.assert_array_equal(domain.lat, lat[i])
np.testing.assert_array_equal(domain.lon, lon[j])
return lat[i], lon[j], data[:, :, i, j]
elif domain_type == "CWBDomain":
cwb_path = "/lustre/fsw/sw_climate_fno/nbrenowitz/2023-01-24-cwb-4years.zarr"
xlat = xr.open_zarr(cwb_path)["XLAT"]
xlong = xr.open_zarr(cwb_path)["XLONG"]
array = data.cpu().numpy()
diagnostic = domain["diagnostics"][0]
darray = xr.DataArray(
array,
dims=["batch", "channel", "lat", "lon"],
coords={"lat": lat, "lon": lon, "channel": diagnostic.channels},
)
interpolated = darray.interp(lat=xlat, lon=xlong)
return xlat, xlong, torch.from_numpy(interpolated.values)
else:
raise ValueError(
f"domain {domain_type} is not supported. Check the weather_events.json"
)
def bilinear(data: torch.Tensor, dims, source_coords, target_coords):
    # placeholder: bilinear regridding is not implemented yet
    return
| earth2mip-main | earth2mip/geometry.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from enum import Enum
from pydantic import BaseModel
from typing import Literal, List, Union, Optional
import datetime
class InitialConditionSource(Enum):
ifs: str = "ifs"
era5: str = "era5"
cds: str = "cds"
gfs: str = "gfs"
hrmip: str = "hrmip"
# https://docs.pydantic.dev/usage/types/#discriminated-unions-aka-tagged-unions
class WeatherEventProperties(BaseModel):
"""
Attributes:
netcdf: load the initial conditions from this path if given
"""
name: str
start_time: Optional[datetime.datetime]
initial_condition_source: InitialConditionSource = InitialConditionSource.era5
netcdf: str = ""
# TODO do not require IC other than restart (currently loads data w/ normal mechanisms regardless) # noqa
restart: str = ""
class Diagnostic(BaseModel):
type: str
function: str = ""
channels: List[str]
nbins: int = 10
class Window(BaseModel):
type: Literal["Window"] = "Window"
name: str
lat_min: float = -90
lat_max: float = 90
lon_min: float = 0
lon_max: float = 360
diagnostics: List[Diagnostic]
class CWBDomain(BaseModel):
type: Literal["CWBDomain"]
name: str
path: str = "/lustre/fsw/sw_climate_fno/nbrenowitz/2023-01-24-cwb-4years.zarr"
diagnostics: List[Diagnostic]
class MultiPoint(BaseModel):
type: Literal["MultiPoint"]
name: str
lat: List[float]
lon: List[float]
diagnostics: List[Diagnostic]
Domain = Union[Window, CWBDomain, MultiPoint]
class WeatherEvent(BaseModel):
properties: WeatherEventProperties
domains: List[Domain]
def _read():
with open("weather_events.json") as f:
return json.load(f)
def list_():
events = _read()
return list(events)
def read(forecast_name: str) -> WeatherEvent:
weather_events = _read()
weather_event = weather_events[forecast_name]
for domain in weather_event["domains"]:
if domain["name"] == "global":
domain["type"] = "Window"
domain["lat_min"] = -90
domain["lat_max"] = 90
domain["lon_min"] = 0
domain["lon_max"] = 360
elif domain["name"] == "northern_hemisphere":
domain["lat_min"] = 0
domain["lat_max"] = 90
domain["lon_min"] = 0
domain["lon_max"] = 360
elif domain["name"] == "southern_hemisphere":
domain["lat_min"] = -90
domain["lat_max"] = 0
domain["lon_min"] = 0
domain["lon_max"] = 360
elif domain["name"] == "CWBDomain":
if len(domain["diagnostics"]) > 1:
print("CWBDomain only supports one diagnostic")
domain["diagnostics"] = domain["diagnostics"][0]
event = WeatherEvent.parse_obj(weather_event)
return event
| earth2mip-main | earth2mip/weather_events.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from pydantic import BaseSettings
from earth2mip import schema
class Settings(BaseSettings):
# not needed anymore
# AFNO_26_WEIGHTS: Optional[str] = None
# AFNO_26_MEAN: str
# AFNO_26_SCALE: str
# only used in earth2mip.diagnostics
# TODO add defaults (maybe scope in that module)
MEAN: str = ""
SCALE: str = ""
# Key configurations
ERA5_HDF5_34: str = ""
ERA5_HDF5_73: str = ""
MODEL_REGISTRY: str = ""
LOCAL_CACHE: str = ""
# used for scoring (score-ifs.py, inference-medium-range)
TIME_MEAN: str = ""
TIME_MEAN_73: str = ""
# used in score-ifs.py
# TODO refactor to a command line argument of that script
    IFS_ROOT: Optional[str] = None
# only used in test suite
# TODO add a default option.
TEST_DIAGNOSTICS: List[str] = ()
# where to store regridding files
MAP_FILES: str = ""
class Config:
env_file = ".env"
def get_data_root(self, channel_set: schema.ChannelSet) -> str:
if channel_set == schema.ChannelSet.var34:
val = self.ERA5_HDF5_34
if not val:
raise ValueError(
"Please configure ERA5_HDF5_34 to point to the 34 channel data." # noqa
)
return val
        elif channel_set == schema.ChannelSet.var73:
            val = self.ERA5_HDF5_73
            if not val:
                raise ValueError("Please configure ERA5_HDF5_73.")
            return val
        else:
            raise NotImplementedError(channel_set)
def get_time_mean(self, channel_set: schema.ChannelSet) -> str:
return {
schema.ChannelSet.var34: self.TIME_MEAN,
schema.ChannelSet.var73: self.TIME_MEAN_73,
}[channel_set]
| earth2mip-main | earth2mip/_config.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import sys
import xarray
import cftime
import json
import numpy as np
import torch
import tqdm
from typing import Optional, Any
from datetime import datetime
from modulus.distributed.manager import DistributedManager
from netCDF4 import Dataset as DS
__all__ = ["run_inference"]
# need to import initial conditions first to avoid unfortunate
# GLIBC version conflict when importing xarray. There are some unfortunate
# issues with the environment.
from earth2mip import initial_conditions, time_loop
from earth2mip.ensemble_utils import (
generate_noise_correlated,
generate_bred_vector,
)
from earth2mip.netcdf import finalize_netcdf, initialize_netcdf, update_netcdf
from earth2mip.networks import get_model, Inference
from earth2mip.schema import EnsembleRun, Grid, PerturbationStrategy
from earth2mip.time import convert_to_datetime
from earth2mip import regrid
logger = logging.getLogger("inference")
def get_checkpoint_path(rank, batch_id, path):
directory = os.path.join(path, f"{rank}")
filename = f"{batch_id}.pth"
return os.path.join(directory, filename)
def save_restart(restart, rank, batch_id, path):
path = get_checkpoint_path(rank, batch_id, path)
os.makedirs(os.path.dirname(path), exist_ok=True)
logger.info(f"Saving restart file to {path}.")
torch.save(restart, path)
def run_ensembles(
*,
n_steps: int,
weather_event,
model,
perturb,
nc,
domains,
ds,
n_ensemble: int,
batch_size: int,
device: str,
rank: int,
output_frequency: int,
output_grid: Optional[Grid],
date_obj: datetime,
restart_frequency: Optional[int],
output_path: str,
restart_initial_directory: str = "",
progress: bool = True,
):
if not output_grid:
output_grid = model.grid
regridder = regrid.get_regridder(model.grid, output_grid).to(device)
# TODO infer this from the model
ds = ds.astype(np.float32)
assert not np.any(np.isnan(ds))
if output_grid == model.grid:
lat = ds.lat.values
lon = ds.lon.values
else:
lat, lon = regridder.lat, regridder.lon
diagnostics = initialize_netcdf(
nc, domains, output_grid, lat, lon, n_ensemble, device
)
time = convert_to_datetime(ds.time[-1])
time_units = time.strftime("hours since %Y-%m-%d %H:%M:%S")
nc["time"].units = time_units
nc["time"].calendar = "standard"
for batch_id in range(0, n_ensemble, batch_size):
logger.info(f"ensemble members {batch_id+1}-{batch_id+batch_size}/{n_ensemble}")
time = convert_to_datetime(ds.time[-1])
batch_size = min(batch_size, n_ensemble - batch_id)
x = torch.from_numpy(ds.values)[None].to(device)
x = model.normalize(x)
x = x.repeat(batch_size, 1, 1, 1, 1)
perturb(x, rank, batch_id, device)
# restart_dir = weather_event.properties.restart
# TODO: figure out if needed
# if restart_dir:
# path = get_checkpoint_path(rank, batch_id, restart_dir)
# # TODO use logger
# logger.info(f"Loading from restart from {path}")
# kwargs = torch.load(path)
# else:
# kwargs = dict(
# x=x,
# normalize=False,
# time=time,
# )
iterator = model(time, x, normalize=False)
# Check if stdout is connected to a terminal
if sys.stderr.isatty() and progress:
iterator = tqdm.tqdm(iterator, total=n_steps)
time_count = -1
# for time, data, restart in iterator:
for k, (time, data, _) in enumerate(iterator):
# if restart_frequency and k % restart_frequency == 0:
# save_restart(
# restart,
# rank,
# batch_id,
# path=os.path.join(output_path, "restart", time.isoformat()),
# )
# Saving the output
if output_frequency and k % output_frequency == 0:
time_count += 1
logger.debug(f"Saving data at step {k} of {n_steps}.")
nc["time"][time_count] = cftime.date2num(time, nc["time"].units)
update_netcdf(
regridder(data),
diagnostics,
domains,
batch_id,
time_count,
model,
lat,
lon,
ds.channel,
)
if k == n_steps:
break
# if restart_frequency is not None:
# save_restart(
# restart,
# rank,
# batch_id,
# path=os.path.join(output_path, "restart", "end"),
# )
finalize_netcdf(diagnostics, nc, domains, weather_event, model.channel_set)
def main(config=None):
logging.basicConfig(level=logging.INFO)
if config is None:
parser = argparse.ArgumentParser()
parser.add_argument("config")
parser.add_argument("--weather_model", default=None)
args = parser.parse_args()
config = args.config
# If config is a file
if os.path.exists(config):
config: EnsembleRun = EnsembleRun.parse_file(config)
# If string, assume JSON string
elif isinstance(config, str):
config: EnsembleRun = EnsembleRun.parse_obj(json.loads(config))
    # Otherwise the config is invalid
    else:
        raise ValueError(
            f"Passed config parameter {config} should be a "
            "valid file path or JSON string"
        )
# if args and args.weather_model:
# config.weather_model = args.weather_model
# Set up parallel
DistributedManager.initialize()
device = DistributedManager().device
group = torch.distributed.group.WORLD
logging.info(f"Earth-2 MIP config loaded {config}")
logging.info(f"Loading model onto device {device}")
model = get_model(config.weather_model, device=device)
logging.info(f"Constructing initializer data source")
perturb = get_initializer(
model,
config,
)
logging.info(f"Running inference")
run_inference(model, config, perturb, group)
def get_initializer(
model,
config,
):
def perturb(x, rank, batch_id, device):
shape = x.shape
if config.perturbation_strategy == PerturbationStrategy.gaussian:
noise = config.noise_amplitude * torch.normal(
torch.zeros(shape), torch.ones(shape)
).to(device)
elif config.perturbation_strategy == PerturbationStrategy.correlated:
noise = generate_noise_correlated(
shape,
reddening=config.noise_reddening,
device=device,
noise_amplitude=config.noise_amplitude,
)
        elif config.perturbation_strategy == PerturbationStrategy.bred_vector:
            noise = generate_bred_vector(
                x,
                model,
                config.noise_amplitude,
                time=config.weather_event.properties.start_time,
            )
        else:
            raise ValueError(
                f"Unknown perturbation strategy: {config.perturbation_strategy}"
            )
if rank == 0 and batch_id == 0: # first ens-member is deterministic
noise[0, :, :, :, :] = 0
x += noise
return x
return perturb
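# A minimal sketch (not used by the library) of the perturbation callback
# interface consumed by run_ensembles: it receives the normalized state
# tensor, the distributed rank, the batch id, and the device, and returns
# the (possibly mutated) state.
def _example_identity_perturb(x, rank, batch_id, device):
    return x  # leave the initial condition unperturbed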
def run_basic_inference(model: time_loop.TimeLoop, n: int, data_source, time):
"""Run a basic inference"""
ds = data_source[time].sel(channel=model.in_channel_names)
# TODO make the dtype flexible
x = torch.from_numpy(ds.values).cuda().type(torch.float)
# need a batch dimension of length 1
x = x[None]
arrays = []
times = []
for k, (time, data, _) in enumerate(model(time, x)):
arrays.append(data.cpu().numpy())
times.append(time)
if k == n:
break
stacked = np.stack(arrays)
coords = {**ds.coords}
coords["channel"] = model.out_channel_names
coords["time"] = times
return xarray.DataArray(
stacked, dims=["time", "history", "channel", "lat", "lon"], coords=coords
)
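# Usage sketch for run_basic_inference. The model name and date are
# placeholders, and a CUDA device is assumed (run_basic_inference calls
# .cuda() on the input).
def _example_basic_inference():
    model = get_model("sfno_73ch", device="cuda:0")
    data_source = initial_conditions.get_data_source(
        model.n_history, model.grid, model.channel_set
    )
    return run_basic_inference(
        model, n=10, data_source=data_source, time=datetime(2018, 1, 1)
    )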
def run_inference(
model: Inference,
config: EnsembleRun,
perturb: Any = None,
group: Any = None,
progress: bool = True,
# TODO add type hints
data_source: Any = None,
):
"""Run an ensemble inference for a given config and a perturb function
Args:
group: the torch distributed group to use for the calculation
progress: if True use tqdm to show a progress bar
data_source: a Mapping object indexed by datetime and returning an
xarray.Dataset object.
"""
if not perturb:
perturb = get_initializer(model, config)
if not group and torch.distributed.is_initialized():
group = torch.distributed.group.WORLD
weather_event = config.get_weather_event()
if not data_source:
data_source = initial_conditions.get_data_source(
model.n_history,
model.grid,
model.channel_set,
initial_condition_source=weather_event.properties.initial_condition_source,
netcdf=weather_event.properties.netcdf,
)
ds = data_source[weather_event.properties.start_time]
dist = DistributedManager()
n_ensemble_global = config.ensemble_members
n_ensemble = n_ensemble_global // dist.world_size
if n_ensemble == 0:
logger.warning("World size is larger than global number of ensembles.")
n_ensemble = n_ensemble_global
# Set random seed
seed = config.seed
torch.manual_seed(seed + dist.rank)
np.random.seed(seed + dist.rank)
date_obj = convert_to_datetime(ds.time[-1])
if config.output_dir:
date_str = "{:%Y_%m_%d_%H_%M_%S}".format(date_obj)
name = weather_event.properties.name
output_path = (
f"{config.output_dir}/"
f"Output.{config.weather_model}."
f"{name}.{date_str}"
)
else:
output_path = config.output_path
    # exist_ok avoids a race condition across ranks
    os.makedirs(output_path, exist_ok=True)
if dist.rank == 0:
# Only rank 0 copies config files over
config_path = os.path.join(output_path, "config.json")
with open(config_path, "w") as f:
f.write(config.json())
model.to(dist.device)
group_rank = torch.distributed.get_group_rank(group, dist.rank)
output_file_path = os.path.join(output_path, f"ensemble_out_{group_rank}.nc")
with DS(output_file_path, "w", format="NETCDF4") as nc:
# assign global attributes
nc.model = config.weather_model
nc.config = config.json()
nc.weather_event = weather_event.json()
nc.date_created = datetime.now().isoformat()
nc.history = " ".join(sys.argv)
nc.institution = "NVIDIA"
nc.Conventions = "CF-1.10"
run_ensembles(
weather_event=weather_event,
model=model,
perturb=perturb,
nc=nc,
domains=weather_event.domains,
ds=ds,
n_ensemble=n_ensemble,
n_steps=config.simulation_length,
output_frequency=config.output_frequency,
batch_size=config.ensemble_batch_size,
rank=dist.rank,
device=dist.device,
date_obj=date_obj,
restart_frequency=config.restart_frequency,
output_path=output_path,
output_grid=config.output_grid,
progress=progress,
)
if torch.distributed.is_initialized():
torch.distributed.barrier(group)
logger.info(f"Ensemble forecast finished, saved to: {output_file_path}")
if __name__ == "__main__":
main()
| earth2mip-main | earth2mip/inference_ensemble.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create-read-update-delete (CRUD) operations for the FCN model registry
The location of the registry is configured using `config.MODEL_REGISTRY`. Both
s3:// and local paths are supported.
The top-level structure of the registry is like this::
afno_26ch_v/
baseline_afno_26/
gfno_26ch_sc3_layers8_tt64/
hafno_baseline_26ch_edim512_mlp2/
modulus_afno_20/
sfno_73ch/
tfno_no-patching_lr5e-4_full_epochs/
The name of the model is the folder name. Each of these folders has the
following structure::
sfno_73ch/about.txt # optional information (e.g. source path)
sfno_73ch/global_means.npy
sfno_73ch/global_stds.npy
sfno_73ch/weights.tar # model checkpoint
sfno_73ch/metadata.json
The `metadata.json` file contains data necessary to use the model for forecasts::
{
"architecture": "sfno_73ch",
"n_history": 0,
"channel_set": "73var",
"grid": "721x1440",
"in_channels": [
0,
1
],
"out_channels": [
0,
1
]
}
Its schema is provided by the :py:class:`earth2mip.schema.Model`.
The checkpoint file `weights.tar` should have a dictionary of model weights and
parameters in the `model_state` key. For backwards compatibility with FCN
checkpoints produced as of March 1, 2023, the keys should include the
`module.` prefix. This checkpoint format may change in the future.
Scoring FCNs under active development
-------------------------------------
One can use fcn-mip to score models not packaged in fcn-mip using a metadata
file like this::
{
"architecture": "pickle",
...
}
This will load ``weights.tar`` using `torch.load`. This is not recommended for
long-time archival of model checkpoints but does allow scoring models under
active development. Once a reasonable skill is achieved the model's source code
can be stabilized and packaged within fcn-mip for long-term archival.
"""
import os
from earth2mip import schema
from earth2mip import filesystem
METADATA = "metadata.json"
class Package:
"""A model package
Simple file system operations and quick metadata access
"""
def __init__(self, root: str, seperator: str):
self.root = root
self.seperator = seperator
def get(self, path, recursive: bool = False):
return filesystem.download_cached(self._fullpath(path), recursive=recursive)
def _fullpath(self, path):
return self.root + self.seperator + path
def metadata(self) -> schema.Model:
metadata_path = self._fullpath(METADATA)
local_path = filesystem.download_cached(metadata_path)
with open(local_path) as f:
return schema.Model.parse_raw(f.read())
class ModelRegistry:
SEPERATOR: str = "/"
def __init__(self, path: str):
self.path = path
def list_models(self):
return [os.path.basename(f) for f in filesystem.ls(self.path)]
def get_model(self, name: str):
return Package(self.get_path(name), seperator=self.SEPERATOR)
def get_path(self, name, *args):
return self.SEPERATOR.join([self.path, name, *args])
def get_model_path(self, name: str):
return self.get_path(name)
def get_weight_path(self, name: str):
return self.get_path(name, "weights.tar")
def get_scale_path(self, name: str):
return self.get_path(name, "global_stds.npy")
def get_center_path(self, name: str):
return self.get_path(name, "global_means.npy")
def put_metadata(self, name: str, metadata: schema.Model):
metadata_path = self.get_path(name, METADATA)
filesystem.pipe(metadata_path, metadata.json().encode())
def get_metadata(self, name: str) -> schema.Model:
return self.get_model(name).metadata()
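# Usage sketch (the registry path is a placeholder): enumerate the packaged
# models and read the metadata of each one.
def _example_registry_usage():
    registry = ModelRegistry("/path/to/model-registry")
    for name in registry.list_models():
        print(name, registry.get_metadata(name).architecture)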
| earth2mip-main | earth2mip/model_registry.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Mapping, Any
import pydantic
from earth2mip import weather_events
from earth2mip.weather_events import InitialConditionSource, WeatherEvent
from enum import Enum
import datetime
__all__ = ["InitialConditionSource", "WeatherEvent"]
class Grid(Enum):
grid_721x1440 = "721x1440"
grid_720x1440 = "720x1440"
s2s_challenge = "s2s"
@property
def shape(self):
if self == Grid.grid_721x1440:
return (721, 1440)
elif self == Grid.grid_720x1440:
return (720, 1440)
elif self == Grid.s2s_challenge:
return (181, 360)
else:
raise ValueError(f"Unknown grid {self}")
# Enum of channels
class ChannelSet(Enum):
"""An Enum of standard sets of channels
These correspond to the post-processed outputs in .h5 files like this:
73var: /lustre/fsw/sw_climate_fno/test_datasets/73var-6hourly
34var: /lustre/fsw/sw_climate_fno/34Vars
This concept is needed to map from integer channel numbers (e.g. [0, 1, 2]
to physical variables).
"""
var34 = "34var"
var73 = "73var"
var_pangu = "var_pangu"
def list_channels(self) -> List[str]:
"""List channel names corresponding to the vocabulary"""
return _channels[self]
_channels = {
ChannelSet.var73: [
"u10m",
"v10m",
"u100m",
"v100m",
"t2m",
"sp",
"msl",
"tcwv",
"u50",
"u100",
"u150",
"u200",
"u250",
"u300",
"u400",
"u500",
"u600",
"u700",
"u850",
"u925",
"u1000",
"v50",
"v100",
"v150",
"v200",
"v250",
"v300",
"v400",
"v500",
"v600",
"v700",
"v850",
"v925",
"v1000",
"z50",
"z100",
"z150",
"z200",
"z250",
"z300",
"z400",
"z500",
"z600",
"z700",
"z850",
"z925",
"z1000",
"t50",
"t100",
"t150",
"t200",
"t250",
"t300",
"t400",
"t500",
"t600",
"t700",
"t850",
"t925",
"t1000",
"r50",
"r100",
"r150",
"r200",
"r250",
"r300",
"r400",
"r500",
"r600",
"r700",
"r850",
"r925",
"r1000",
],
ChannelSet.var_pangu: [
"z1000",
"z925",
"z850",
"z700",
"z600",
"z500",
"z400",
"z300",
"z250",
"z200",
"z150",
"z100",
"z50",
"q1000",
"q925",
"q850",
"q700",
"q600",
"q500",
"q400",
"q300",
"q250",
"q200",
"q150",
"q100",
"q50",
"t1000",
"t925",
"t850",
"t700",
"t600",
"t500",
"t400",
"t300",
"t250",
"t200",
"t150",
"t100",
"t50",
"u1000",
"u925",
"u850",
"u700",
"u600",
"u500",
"u400",
"u300",
"u250",
"u200",
"u150",
"u100",
"u50",
"v1000",
"v925",
"v850",
"v700",
"v600",
"v500",
"v400",
"v300",
"v250",
"v200",
"v150",
"v100",
"v50",
"msl",
"u10m",
"v10m",
"t2m",
],
ChannelSet.var34: [
"u10m",
"v10m",
"t2m",
"sp",
"msl",
"t850",
"u1000",
"v1000",
"z1000",
"u850",
"v850",
"z850",
"u500",
"v500",
"z500",
"t500",
"z50",
"r500",
"r850",
"tcwv",
"u100m",
"v100m",
"u250",
"v250",
"z250",
"t250",
"u100",
"v100",
"z100",
"t100",
"u900",
"v900",
"z900",
"t900",
],
}
class InferenceEntrypoint(pydantic.BaseModel):
"""
Attrs:
name: an entrypoint string like ``my_package:model_entrypoint``.
this points to a function ``model_entrypoint(package)`` which returns an
``Inference`` object given a package
kwargs: the arguments to pass to the constructor
"""
name: str = ""
kwargs: Mapping[Any, Any] = pydantic.Field(default_factory=dict)
class Model(pydantic.BaseModel):
"""Metadata for using a ERA5 time-stepper model
Attrs:
entrypoint: if provided, will be used to load a custom time-loop
implementation.
"""
n_history: int = 0
channel_set: ChannelSet = ChannelSet.var34
grid: Grid = Grid.grid_720x1440
in_channels: List[int] = pydantic.Field(default_factory=list)
out_channels: List[int] = pydantic.Field(default_factory=list)
architecture: str = ""
architecture_entrypoint: str = ""
time_step: datetime.timedelta = datetime.timedelta(hours=6)
entrypoint: Optional[InferenceEntrypoint] = None
class PerturbationStrategy(Enum):
correlated = "correlated"
gaussian = "gaussian"
bred_vector = "bred_vector"
class EnsembleRun(pydantic.BaseModel):
"""A configuration for running an ensemble weather forecast
Attributes:
weather_model: The name of the fully convolutional neural network (FCN) model to use for the forecast.
ensemble_members: The number of ensemble members to use in the forecast.
noise_amplitude: The amplitude of the Gaussian noise to add to the initial conditions.
        noise_reddening: The noise reddening amplitude; 2.0 was the default set by A.G. work.
simulation_length: The length of the simulation in timesteps.
output_frequency: The frequency at which to write the output to file, in timesteps.
use_cuda_graphs: Whether to use CUDA graphs to optimize the computation.
seed: The random seed for the simulation.
ensemble_batch_size: The batch size to use for the ensemble.
autocast_fp16: Whether to use automatic mixed precision (AMP) with FP16 data types.
perturbation_strategy: The strategy to use for perturbing the initial conditions.
forecast_name (optional): The name of the forecast to use (alternative to `weather_event`).
weather_event (optional): The weather event to use for the forecast (alternative to `forecast_name`).
output_dir (optional): The directory to save the output files in (alternative to `output_path`).
output_path (optional): The path to the output file (alternative to `output_dir`).
restart_frequency: if provided save at end and at the specified frequency. 0 = only save at end.
""" # noqa
weather_model: str
simulation_length: int
# TODO make perturbation_strategy an Enum (see ChannelSet)
perturbation_strategy: PerturbationStrategy = PerturbationStrategy.correlated
single_value_perturbation: bool = True
noise_reddening: float = 2.0
noise_amplitude: float = 0.05
output_frequency: int = 1
output_grid: Optional[Grid] = None
ensemble_members: int = 1
seed: int = 1
ensemble_batch_size: int = 1
    # alternatives for specifying the forecast
forecast_name: Optional[str] = None
weather_event: Optional[weather_events.WeatherEvent] = None
# alternative for specifying output
output_dir: Optional[str] = None
output_path: Optional[str] = None
restart_frequency: Optional[int] = None
def get_weather_event(self) -> weather_events.WeatherEvent:
if self.forecast_name:
return weather_events.read(self.forecast_name)
else:
return self.weather_event
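# A minimal EnsembleRun sketch (all values are placeholders): only
# weather_model and simulation_length are required; everything else falls
# back to the defaults documented above.
def _example_ensemble_run() -> EnsembleRun:
    return EnsembleRun.parse_obj(
        {
            "weather_model": "sfno_73ch",
            "simulation_length": 8,
            "ensemble_members": 4,
            "perturbation_strategy": "gaussian",
            "noise_amplitude": 0.05,
        }
    )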
| earth2mip-main | earth2mip/schema.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netCDF4 as nc
import einops
import torch
import pathlib
from earth2mip.schema import Grid
from earth2mip._config import Settings
class TempestRegridder(torch.nn.Module):
def __init__(self, file_path):
super().__init__()
dataset = nc.Dataset(file_path)
self.lat = dataset["latc_b"][:]
self.lon = dataset["lonc_b"][:]
i = dataset["row"][:] - 1
j = dataset["col"][:] - 1
M = dataset["S"][:]
i = i.data
j = j.data
M = M.data
self.M = torch.sparse_coo_tensor((i, j), M, [max(i) + 1, max(j) + 1]).float()
def to(self, device):
self.M = self.M.to(device)
return self
def forward(self, x):
xr = einops.rearrange(x, "b c x y -> b c (x y)")
yr = xr @ self.M.T
y = einops.rearrange(
yr, "b c (x y) -> b c x y", x=self.lat.size, y=self.lon.size
)
return y
class Identity(torch.nn.Module):
def forward(self, x):
return x
def _get_tempest_regridder(src: Grid, dest: Grid) -> TempestRegridder:
# TODO map data needs to be available for S2S scoring
config = Settings()
# TODO add instructions for how to make the tempest map file
map_file = (
pathlib.Path(config.MAP_FILES) / src.value / dest.value / "tempest_map.nc"
)
return TempestRegridder(map_file.as_posix())
def get_regridder(src: Grid, dest: Grid):
    if src == dest:
        return Identity()
    return _get_tempest_regridder(src, dest)
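# Usage sketch: regrid a (batch, channel, lat, lon) tensor between grids
# defined in earth2mip.schema. Non-identity pairs require the tempest map
# file to exist under Settings().MAP_FILES.
def _example_regrid(x: torch.Tensor) -> torch.Tensor:
    regridder = get_regridder(Grid.grid_721x1440, Grid.s2s_challenge)
    return regridder(x)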
| earth2mip-main | earth2mip/regrid.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Routines for opening fcn-mip hindcast outputs
"""
import datetime
import os
import json
import xarray
from zarr.storage import FSStore
import pandas as pd
from earth2mip import filesystem
from earth2mip.datasets.zarr_directory import NestedDirectoryStore
def open_forecast(root, group, chunks=None):
"""Open a fcn-mip forecast as single xarray object
The directory structure should contain items like this:
{root}/2018-01-01T00:00:00/mean.zarr/
{root}/2018-01-02T00:00:00/mean.zarr/
"""
if isinstance(root, str):
map_ = FSStore(url=root)
else:
map_ = root
config_path = os.path.join(root, "config.json")
local_config = filesystem.download_cached(config_path)
with open(local_config) as f:
config = json.load(f)
items = config["protocol"]["times"]
times = []
for f in items:
try:
datetime.datetime.fromisoformat(f)
except ValueError:
pass
else:
times.append(f)
times = sorted(times)
store = NestedDirectoryStore(
map=map_,
group=group,
directories=items,
concat_dim="initial_time",
static_coords=("lat", "lon"),
dim_rename={"time": "lead_time"},
)
# TODO this only works locally
example = xarray.open_zarr(os.path.join(root, f"{items[0]}/{group}"))
ds = xarray.open_zarr(store, chunks=None).assign_coords(
{dim: example[dim] for dim in store.static_coords}
)
ds["initial_time"] = pd.to_datetime(ds.initial_time)
if "time" in example.variables:
ds["lead_time"] = (ds.time - ds.initial_time).isel(initial_time=0)
ds = ds.rename(time="valid_time", lead_time="time")
return ds
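# Usage sketch (the root path is a placeholder): open every initial time of
# a hindcast directory as one lazy dataset with an "initial_time" dimension.
def _example_open_forecast():
    ds = open_forecast("/path/to/hindcast", group="ensemble.zarr")
    return ds.isel(initial_time=0)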
| earth2mip-main | earth2mip/datasets/hindcast.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| earth2mip-main | earth2mip/datasets/__init__.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from collections.abc import MutableMapping
import xarray
class NestedDirectoryStore(MutableMapping):
"""Reads data like this::
{root}/a/{group}/{variable}
{root}/b/{group}/{variable}
The data are assumed to have identical shape and chunking and require a
.zmetadata file. This store maps
{root}/a/{group}/{variable}/0 -> {group}/{variable}/0.0
"""
def __init__(
self,
map,
directories,
group,
concat_dim="initial_time",
static_coords=(),
dim_rename=None,
):
"""
Args:
map: a mutable mapping to base off
directories: a list of directories containing identical data
concat_dim: ``directories`` will be put in a dimension named ``concat_dim``
            static_coords: A list of variables that should not be
                concatenated; they are read from the first example.
dim_rename: if provided rename the dimensions of the source data
"""
self._map = map
self.concat_dim = concat_dim
self.group = group
self.dim_rename = dim_rename or {}
self.static_coords = static_coords
self._local = {}
self.directories = directories
ds = xarray.Dataset()
ds[self.concat_dim] = [self.concat_dim], directories
ds.to_zarr(self._local)
def _get_new_key_chunk(self, k):
chunk = k.split("/")[-1]
variable = "/".join(k.split("/")[:-1])
index, *sub_chunks = chunk.split(".")
time = self.directories[int(index)]
new_chunk = ".".join(sub_chunks)
full_path = f"{time}/{self.group}/{variable}/{new_chunk}"
return full_path
def _get_new_key(self, k):
if os.path.basename(k) == ".zarray":
time = self.directories[0]
# TODO parameterize "mean.zarr" or map it to a "group"
full_path = f"{time}/{self.group}/{os.path.dirname(k)}/.zarray"
return full_path
elif os.path.basename(k) == ".zattrs":
time = self.directories[0]
full_path = f"{time}/{self.group}/{os.path.dirname(k)}/.zarray"
return full_path
elif os.path.basename(k) == ".zgroup":
time = self.directories[0]
full_path = f"{time}/{self.group}/.zgroup"
return full_path
elif os.path.basename(k) == ".zmetadata":
return k
else:
return self._get_new_key_chunk(k)
def _modify_zarray(self, v):
config = json.loads(v)
config["chunks"] = [1, *config["chunks"]]
config["shape"] = [len(self.directories), *config["shape"]]
return json.dumps(config)
def _modify_zattrs(self, v):
config = json.loads(v)
xarray_dim_name = "_ARRAY_DIMENSIONS"
dims = config.get(xarray_dim_name, [])
dims_renamed = [self.dim_rename.get(dim, dim) for dim in dims]
config[xarray_dim_name] = [self.concat_dim, *dims_renamed]
return json.dumps(config)
def __getitem__(self, k):
if k.startswith(self.concat_dim):
return self._local[k]
key = self._get_new_key(k)
if os.path.basename(k) == ".zarray":
return self._modify_zarray(self._map[key])
elif os.path.basename(k) == ".zgroup":
return self._map[key]
elif os.path.basename(k) == ".zmetadata":
return json.dumps(self._get_metadata())
else:
return self._map[key]
    def __delitem__(self, k):
        k = self._get_new_key(k)
        del self._map[k]
def __iter__(self):
raise NotImplementedError()
def __contains__(self, k):
return (
(self._get_new_key(k) in self._map)
or (os.path.basename(k) == ".zmetadata")
or (k.startswith(self.concat_dim) and k in self._local)
)
    def __len__(self):
        raise NotImplementedError()
def __setitem__(self, k, v):
k = self._get_new_key(k)
self._map[k] = v
def _get_metadata(self):
meta = self._map[os.path.join(self.directories[0], self.group, ".zmetadata")]
meta = json.loads(meta)
assert meta["zarr_consolidated_format"] == 1
metadata_dict = meta["metadata"]
# use same class to modify the .zarray and other data
new_meta = {}
for k in self._local:
if os.path.basename(k) == ".zarray":
new_meta[k] = self._local[k].decode()
elif os.path.basename(k) == (".zattrs"):
new_meta[k] = self._local[k].decode()
for key in metadata_dict:
if os.path.dirname(key) in self.static_coords:
continue
if os.path.basename(key) == ".zarray":
new_meta[key] = self._modify_zarray(json.dumps(metadata_dict[key]))
elif os.path.basename(key) == (".zattrs"):
new_meta[key] = self._modify_zattrs(json.dumps(metadata_dict[key]))
else:
new_meta[key] = json.dumps(metadata_dict[key])
return {"metadata": new_meta, "zarr_consolidated_format": 1}
| earth2mip-main | earth2mip/datasets/zarr_directory.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
METADATA34 = pathlib.Path(__file__).parent / "data34var.json"
METADATA73 = pathlib.Path(__file__).parent / "data73var.json"
| earth2mip-main | earth2mip/datasets/gfs/__init__.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import os
import datetime
def filename_to_year(path: str) -> int:
filename = os.path.basename(path)
return int(filename[:4])
def datetime_range(
year: int, time_step: datetime.timedelta, n: int
) -> List[datetime.datetime]:
initial_time = datetime.datetime(year=year, month=1, day=1)
return [initial_time + time_step * i for i in range(n)]
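# Example: the first four 6-hourly timestamps of 2018, as used when assigning
# time coordinates to a yearly HDF5 file.
def _example_datetime_range() -> List[datetime.datetime]:
    return datetime_range(2018, datetime.timedelta(hours=6), n=4)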
| earth2mip-main | earth2mip/datasets/era5/time.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import datetime
import glob
import json
import os
import pathlib
import tempfile
from typing import Any, Iterable, Optional
import h5py
import numpy as np
import xarray
from earth2mip.datasets.era5 import time
__all__ = ["open_34_vars", "open_hdf5"]
METADATA = pathlib.Path(__file__).parent / "data.json"
def open_hdf5(*, path, f=None, metadata, time_step=datetime.timedelta(hours=6)):
dims = metadata["dims"]
h5_path = metadata["h5_path"]
ds = xarray.open_dataset(f or path, engine="h5netcdf", phony_dims="sort")
array = ds[h5_path]
ds = array.rename(dict(zip(array.dims, dims)))
year = time.filename_to_year(path)
n = array.shape[0]
ds = ds.assign_coords(
time=time.datetime_range(year, time_step=time_step, n=n), **metadata["coords"]
)
ds = ds.assign_attrs(metadata["attrs"], path=path)
return ds
@contextlib.contextmanager
def open_all_hdf5(root: str) -> Iterable[xarray.DataArray]:
"""A context manager to open hdf5 ERA5 data as a single logical xarray
Args:
root: A **local** directory where the dataset is stored. Metadata should
be stored at ``root/data.json``. HDF5 data will be read from
subdirectories, typically ``train``, ``test``, and
``out_of_sample``.
Returns:
an xarray dataset
"""
try:
metadata_path = pathlib.Path(root) / "data.json"
metadata = json.loads(metadata_path.read_text())
except FileNotFoundError:
metadata = json.loads(METADATA.read_text())
with tempfile.NamedTemporaryFile("wb") as f:
_create_virtual_dataset(root, f.name)
with xarray.open_dataset(f.name, chunks=None) as ds:
dims = ["year", "step"] + metadata["dims"][1:]
ds = ds.rename(dict(zip(ds.dims, dims)))
step = np.timedelta64(6, "h") * np.arange(ds.sizes["step"])
ds = ds.assign_coords(step=step).assign_coords(metadata["coords"])
yield ds.fields
def _create_virtual_dataset(root: str, virtual_dataset_path: str):
file_paths = glob.glob(root + "/*/*.h5")
file_paths = sorted(file_paths, key=os.path.basename)
# Open the first file to extract the dataset shape
with h5py.File(file_paths[0], "r") as f:
dataset_shape = f["fields"].shape
# Create the virtual dataset
with h5py.File(virtual_dataset_path, "w", libver="latest") as f:
# Define the virtual dataset layout
layout = h5py.VirtualLayout(shape=(len(file_paths),) + dataset_shape, dtype="f")
year_d = f.create_dataset("year", shape=len(file_paths), dtype="i")
for i, file_path in enumerate(file_paths):
# Define the virtual source dataset
source = h5py.VirtualSource(file_path, "fields", shape=dataset_shape)
# Assign the virtual source dataset to the virtual layout
layout[i, ...] = source
filename = os.path.basename(file_path)
base, _ = os.path.splitext(filename)
year_d[i] = int(base)
# Create the virtual dataset
f.create_virtual_dataset("fields", layout)
def open_34_vars(path: str, f: Optional[Any] = None) -> xarray.DataArray:
"""Open 34Vars hdf5 file
Args:
path: local path to hdf5 file
f: an optional file-like object to load the data from. Useful for
remote data and fsspec.
Examples:
>>> import earth2mip.datasets
>>> path = "/out_of_sample/2018.h5"
>>> datasets.era5.open_34_vars(path)
<xarray.DataArray 'fields' (time: 1460, channel: 34, lat: 721, lon: 1440)>
dask.array<array, shape=(1460, 34, 721, 1440), dtype=float32, chunksize=(1, 1, 721, 1440), chunktype=numpy.ndarray> # noqa
Coordinates:
* time (time) datetime64[ns] 2018-01-01 ... 2018-12-31T18:00:00
* lat (lat) float64 90.0 89.75 89.5 89.25 ... -89.25 -89.5 -89.75 -90.0
* lon (lon) float64 0.0 0.25 0.5 0.75 1.0 ... 359.0 359.2 359.5 359.8
* channel (channel) <U5 'u10' 'v10' 't2m' 'sp' ... 'v900' 'z900' 't900'
Attributes:
selene_path: /lustre/fsw/sw_climate_fno/34Var
description: ERA5 data at 6 hourly frequency with snapshots at 0000, 060...
path: /out_of_sample/2018.h5
"""
metadata = json.loads(METADATA.read_text())
return open_hdf5(path=path, f=f, metadata=metadata)
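# Usage sketch (the root path is a placeholder): open a multi-year local
# archive as a single virtual dataset and read one snapshot.
def _example_open_all_hdf5(root="/path/to/era5"):
    with open_all_hdf5(root) as fields:
        return fields.isel(year=0, step=0).load()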
| earth2mip-main | earth2mip/datasets/era5/__init__.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# %%
import xarray
import numpy as np
from earth2mip.datasets.hindcast import open_forecast
__all__ = ["apply", "compute_edges"]
def cleanup_metadata(fct_p):
fct_p = fct_p.drop("category_edge").rename(lat="latitude", lon="longitude")
fct_p = fct_p[["t2m"]]
fct_p = fct_p.sel(lead_time=[14, 28])
# TODO check why the latitude is off
fct_p = fct_p.reindex(
latitude=np.linspace(90, -90, fct_p.sizes["latitude"]),
longitude=np.linspace(0, 360, fct_p.sizes["longitude"], endpoint=False),
)
fct_p["lead_time"] = fct_p.lead_time * np.timedelta64(1, "D")
translate = np.vectorize(lambda x: "near normal" if x == "normal" else x)
fct_p["category"] = translate(fct_p.category)
return fct_p
def apply(path, tercile_edges, output):
ens = open_forecast(
path, group="ensemble.zarr", chunks={"initial_time": 1, "ensemble": 1}
)
# %% [markdown]
# Moderately expensive: run time = 90 seconds
#
# %%
tercile = xarray.open_dataset(tercile_edges)
terciles_as_forecast = tercile.sel(week=ens.initial_time.dt.week)
terciles_as_forecast
# %%
below_normal = (ens < terciles_as_forecast.isel(category_edge=0)).mean("ensemble")
above_normal = (ens >= terciles_as_forecast.isel(category_edge=1)).mean("ensemble")
normal = 1 - below_normal - above_normal
terciled = xarray.concat(
[below_normal, normal, above_normal],
dim=xarray.Variable("category", ["below normal", "normal", "above normal"]),
coords="minimal",
compat="override",
)
print(terciled)
# rename to match contest metadata
terciled = terciled.rename(initial_time="forecast_time")
terciled["lead_time"] = 14 * terciled.lead_time
terciled = cleanup_metadata(terciled)
terciled.to_netcdf(output)
def compute_edges(path, output):
ds = open_forecast(
path,
group="ensemble.zarr",
chunks={"initial_time": -1, "ensemble": -1, "lon": "auto"},
)
ds = ds[["t2m"]]
tercile = (
ds.groupby(ds.initial_time.dt.week)
.quantile(q=[1.0 / 3.0, 2.0 / 3.0], dim=["initial_time", "ensemble"])
.rename({"quantile": "category_edge"})
.astype("float32")
)
tercile = tercile.load()
tercile.to_netcdf(output)
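# Pipeline sketch (paths and file names are placeholders): derive weekly
# tercile edges from a hindcast, then convert the ensemble forecast into
# tercile probabilities in the contest format.
def _example_tercile_pipeline(root="/path/to/hindcast"):
    compute_edges(root, "tercile_edges.nc")
    apply(root, "tercile_edges.nc", "terciles.nc")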
| earth2mip-main | earth2mip/s2s/terciles.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip.s2s import terciles # noqa
from earth2mip.s2s import score # noqa
| earth2mip-main | earth2mip/s2s/__init__.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xarray
import xskillscore
import numpy as np
import pandas as pd
import altair as alt
def score(terciles: xarray.Dataset, truth: xarray.Dataset) -> pd.DataFrame:
"""area-weighted RPSS scores
Regions: global, northern hemisphere, and southern hemisphere
Args:
terciles: predicted terciles. must have `longitude`, `latitude`, `forecast_time` coordinates/dims.
truth: true terciles. same format as terciles
Returns:
dataframe with scores for different regions and climatology. Example::
lead_time forecast_time t2m valid_time week source region tp
0 21.0 2018-01-02 1.455134 2018-01-23 1.0 sfno Global NaN
1 35.0 2018-01-02 1.357457 2018-02-06 1.0 sfno Global NaN
2 21.0 2018-01-02 1.308716 2018-01-23 NaN clim Global 1.310107
3 35.0 2018-01-02 1.306281 2018-02-06 NaN clim Global 1.312259
4 21.0 2018-01-02 1.331612 2018-01-23 1.0 sfno Northern Hem. NaN
5 35.0 2018-01-02 1.211101 2018-02-06 1.0 sfno Northern Hem. NaN
6 21.0 2018-01-02 1.184829 2018-01-23 NaN clim Northern Hem. 1.237482
7 35.0 2018-01-02 1.180459 2018-02-06 NaN clim Northern Hem. 1.241959
8 21.0 2018-01-02 1.575785 2018-01-23 1.0 sfno Southern Hem. NaN
9 35.0 2018-01-02 1.497714 2018-02-06 1.0 sfno Southern Hem. NaN
10 21.0 2018-01-02 1.431765 2018-01-23 NaN clim Southern Hem. 1.381933
11 35.0 2018-01-02 1.430871 2018-02-06 NaN clim Southern Hem. 1.381818
""" # noqa
sfno = xarray.open_dataset(terciles)
obs = xarray.open_dataset(truth)
sfno, obs = xarray.align(sfno, obs)
assert obs.sizes["forecast_time"] > 0
clim = xarray.ones_like(obs) / 3.0
# %%
masks = {}
cos_lat = np.cos(np.deg2rad(obs.latitude))
masks["Global"] = cos_lat
masks["Northern Hem."] = cos_lat.where(obs.latitude > 0, 0.0)
masks["Southern Hem."] = cos_lat.where(obs.latitude < 0, 0.0)
scores = []
for mask in masks:
cos_lat = masks[mask]
iscores = {}
iscores["sfno"] = xskillscore.rps(
sfno,
obs,
category_edges=None,
input_distributions="p",
dim=["latitude", "longitude"],
weights=cos_lat,
)
iscores["clim"] = xskillscore.rps(
clim,
obs,
category_edges=None,
input_distributions="p",
dim=["latitude", "longitude"],
weights=cos_lat,
)
for key in iscores:
v = iscores[key]
scores.append(v.assign(source=key, region=mask))
df = pd.concat([d.to_dataframe() for d in scores])
df = df.reset_index()
df["lead_time"] = df.lead_time + pd.Timedelta("7D")
df["valid_time"] = df.lead_time + df.forecast_time
df["lead_time"] = df.lead_time / pd.Timedelta("1D")
return df
def plot(df, output, timeUnit="yearmonthdate"):
import vl_convert
alt.data_transformers.disable_max_rows()
# https://davidmathlogic.com/colorblind/#%23000000-%23E69F00-%2356B4E9-%23009E73-%23F0E442-%230072B2-%23D55E00-%23CC79A7
wong_palette = [
"#000000",
"#E69F00",
"#56B4E9",
"#009E73",
"#F0E442",
"#0072B2",
"#D55E00",
"#CC79A7",
]
# altair
def my_theme():
return {"config": {"range": {"category": wong_palette}}}
alt.themes.register("my_theme", my_theme)
alt.themes.enable("my_theme")
c = (
alt.Chart(df)
.mark_line()
.encode(
x=alt.X(field="valid_time", timeUnit=timeUnit),
y=alt.Y("mean(t2m)", title="RPSS T2M"),
column="region",
color="source",
row=alt.Row("lead_time", type="ordinal", title="Lead Time (days)"),
)
.properties(width=200, height=200 / 1.61)
)
png_data = vl_convert.vegalite_to_png(vl_spec=c.to_json(), scale=3)
with open(output, "wb") as f:
f.write(png_data)
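# Usage sketch (file names are placeholders): score takes paths to netCDF
# files of predicted and observed terciles and returns a tidy dataframe,
# which plot renders as an area-weighted RPSS chart.
def _example_score(terciles="terciles.nc", truth="observed_terciles.nc"):
    df = score(terciles, truth)
    plot(df, "rpss.png")
    return df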
| earth2mip-main | earth2mip/s2s/score.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from earth2mip import schema
from earth2mip.datasets.gfs import METADATA34, METADATA73
from modulus.utils.filesystem import LOCAL_CACHE
import json
import xarray
import numpy as np
import shutil
import pathlib
import os
import requests
import warnings
from dataclasses import dataclass
from typing import List, Optional, Union
from tqdm import tqdm
# Max byte check of any one field
# Will error if larger
MAX_BYTE_SIZE = 2000000
# Location to cache grib files
GFS_CACHE = LOCAL_CACHE + "/earth2mip/gfs"
@dataclass
class GFSChunk:
variable_name: str = "phoo"
meta_data: str = ""
start_byte: int = 0
    end_byte: Optional[int] = None
@property
def byte_range(self) -> int:
return self.end_byte - self.start_byte
@property
def channel_id(self) -> str:
return ":".join([self.variable_name, self.meta_data])
def gfs_available(
time: datetime.datetime,
) -> bool:
nearest_hour = time.hour - time.hour % 6
time_gfs = datetime.datetime(time.year, time.month, time.day, nearest_hour)
index_url = (
f"https://nomads.ncep.noaa.gov/pub/data/nccf/com/gfs/"
+ f'prod/gfs.{time_gfs.strftime("%Y%m%d")}/{time_gfs.strftime("%H")}'
+ f'/atmos/gfs.t{time_gfs.strftime("%H")}z.pgrb2.0p25.f000.idx'
)
try:
r = requests.get(index_url, timeout=5)
r.raise_for_status()
except requests.exceptions.RequestException:
return False
return True
def get_gfs_chunks(
time: datetime.datetime,
):
index_url = (
f"https://nomads.ncep.noaa.gov/pub/data/nccf/com/gfs/"
+ f'prod/gfs.{time.strftime("%Y%m%d")}/{time.strftime("%H")}'
+ f'/atmos/gfs.t{time.strftime("%H")}z.pgrb2.0p25.f000.idx'
)
try:
r = requests.get(index_url, timeout=5)
r.raise_for_status()
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)
    if len(r.text) == 0:
        raise ValueError(f"Empty index file at {index_url}")
index_lines = r.text.splitlines()
index_lines = index_lines[:-1]
output = [GFSChunk()]
for i, line in enumerate(index_lines):
lsplit = line.split(":")
if len(lsplit) < 7:
continue
chunk = GFSChunk(
variable_name=lsplit[3],
meta_data=lsplit[4],
start_byte=int(lsplit[1]),
end_byte=None,
)
output.append(chunk)
# Update previous chunk with end position based on start of current chunk
output[-2].end_byte = int(lsplit[1]) - 1
if MAX_BYTE_SIZE < output[-2].byte_range:
raise ValueError(
"Byte range in index field found to be too large."
+ f" Parsed byte range {output[-2].byte_range}, max byte"
+ f" range {MAX_BYTE_SIZE}"
)
# Pop place holder
output.pop(0)
return output
def get_gfs_grib_file(
time: datetime.datetime,
gfs_chunks: List[GFSChunk],
channel_id: str,
output_file: str,
):
gfs_url = (
f"https://nomads.ncep.noaa.gov/pub/data/nccf/com/gfs/"
+ f'prod/gfs.{time.strftime("%Y%m%d")}/{time.strftime("%H")}'
+ f'/atmos/gfs.t{time.strftime("%H")}z.pgrb2.0p25.f000'
)
# Get chunk data for this variable
gfs_chunk = None
for chunk in gfs_chunks:
if channel_id in chunk.channel_id:
gfs_chunk = chunk
break
if gfs_chunk is None:
raise ValueError(f"Variable {channel_id} not found in index")
start_str = str(gfs_chunk.start_byte) if gfs_chunk.start_byte else "0"
end_str = str(gfs_chunk.end_byte) if gfs_chunk.end_byte else ""
headers = {"Range": f"bytes={start_str}-{end_str}"}
# Send request to GFS
    try:
        with requests.get(gfs_url, headers=headers, stream=True, timeout=10) as r:
            r.raise_for_status()
            with open(f"{output_file}.tmp", "wb") as f:
                shutil.copyfileobj(r.raw, f)
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)
# Finally rename the file
try:
os.rename(f"{output_file}.tmp", f"{output_file}")
except FileNotFoundError:
raise FileNotFoundError(
f"{output_file}.tmp not found in GFS cache. "
+ "its likely failed to download"
)
def get(
time: Union[datetime.datetime, None],
channel_set: schema.ChannelSet,
) -> xarray.DataArray:
# If no time is provided, use current time
if time is None:
time = datetime.datetime.now()
# Check if most recent time is available, if not fall back 6 hours
if not gfs_available(time):
warnings.warn("Closest 6 hour interval not available, falling back 6 hours")
time = time - datetime.timedelta(hours=6)
nearest_hour = time.hour - time.hour % 6
time_gfs = datetime.datetime(time.year, time.month, time.day, nearest_hour)
if not gfs_available(time_gfs):
raise ValueError(
f"Nearest 6 hour time {time_gfs} is not available right now "
+ "(needs to be past 10 days)"
)
if channel_set == schema.ChannelSet.var34:
# move to earth2mip.channels
metadata = json.loads(METADATA34.read_text())
channels = metadata["coords"]["channel"]
gfs_channels = metadata["gfs_coords"]["channel"]
elif channel_set == schema.ChannelSet.var73:
# move to earth2mip.channels
metadata = json.loads(METADATA73.read_text())
channels = metadata["coords"]["channel"]
gfs_channels = metadata["gfs_coords"]["channel"]
else:
raise NotImplementedError(channel_set)
# Make temp grib folder
pathlib.Path(GFS_CACHE).mkdir(parents=True, exist_ok=True)
# Get index file
gfs_chunks = get_gfs_chunks(time_gfs)
# Loop through channels and download grib of each
print(f"Downloading {len(channels)} grib files:")
for idname, outname in zip(tqdm(gfs_channels), channels):
get_gfs_grib_file(time_gfs, gfs_chunks, idname, f"{GFS_CACHE}/{outname}.grb")
# Convert gribs to xarray dataset
data = np.empty((1, len(channels), 721, 1440))
gfsds = xarray.Dataset(
{"fields": (["time", "channel", "lat", "lon"], data)},
coords={
"time": [time_gfs],
"channel": metadata["coords"]["channel"],
"lat": metadata["coords"]["lat"],
"lon": metadata["coords"]["lon"],
},
)
print(f"Processing {len(channels)} grib files:")
for i, name in enumerate(tqdm(channels)):
ds = xarray.open_dataset(f"{GFS_CACHE}/{name}.grb", engine="cfgrib")
field = ds[list(ds.keys())[0]]
# If geopotential height multiply by gravity to get geopotential
if name[0] == "z":
field = field * 9.81
gfsds["fields"][0, i] = field
# Clean up
shutil.rmtree(GFS_CACHE)
return gfsds["fields"]
| earth2mip-main | earth2mip/initial_conditions/gfs.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import datetime
import json
import logging
import os
import warnings
from typing import Any
import s3fs
import xarray
import numpy as np
from earth2mip import config, filesystem, schema
from earth2mip.datasets import era5
__all__ = ["open_era5_xarray"]
logger = logging.getLogger(__name__)
# TODO move to earth2mip/datasets/era5?
@dataclasses.dataclass
class HDF5DataSource:
root: str
metadata: Any
n_history: int = 0
@classmethod
def from_path(cls, root: str, **kwargs: Any) -> "HDF5DataSource":
metadata_path = os.path.join(root, "data.json")
metadata_path = filesystem.download_cached(metadata_path)
with open(metadata_path) as mf:
metadata = json.load(mf)
return cls(root, metadata, **kwargs)
@property
def channel_names(self):
return self.metadata["coords"]["channel"]
@property
def time_means(self):
time_mean_path = os.path.join(self.root, "stats", "time_means.npy")
time_mean_path = filesystem.download_cached(time_mean_path)
return np.load(time_mean_path)
def __getitem__(self, time: datetime.datetime):
n_history = self.n_history
path = _get_path(self.root, time)
if path.startswith("s3://"):
fs = s3fs.S3FileSystem(
client_kwargs=dict(endpoint_url="https://pbss.s8k.io")
)
f = fs.open(path)
else:
f = None
logger.debug(f"Opening {path} for {time}.")
ds = era5.open_hdf5(path=path, f=f, metadata=self.metadata)
subset = ds.sel(time=slice(None, time))
# TODO remove n_history from this API?
subset = subset[-n_history - 1 :]
num_time = subset.sizes["time"]
if num_time != n_history + 1:
a = ds.time.min().values
b = ds.time.max().values
raise ValueError(
f"{num_time} found. Expected: {n_history + 1} ."
f"Time requested: {time}. Time range in data: {a} -- {b}."
)
return subset.load()
def _get_path(path: str, time) -> str:
filename = time.strftime("%Y.h5")
h5_files = filesystem.glob(os.path.join(path, "*/*.h5"))
files = {os.path.basename(f): f for f in h5_files}
return files[filename]
def open_era5_xarray(
time: datetime.datetime, channel_set: schema.ChannelSet
) -> xarray.DataArray:
warnings.warn(DeprecationWarning("This function will be removed"))
root = config.get_data_root(channel_set)
path = _get_path(root, time)
logger.debug(f"Opening {path} for {time}.")
if path.endswith(".h5"):
if path.startswith("s3://"):
fs = s3fs.S3FileSystem(
client_kwargs=dict(endpoint_url="https://pbss.s8k.io")
)
f = fs.open(path)
else:
f = None
if channel_set == schema.ChannelSet.var34:
ds = era5.open_34_vars(path, f=f)
else:
metadata_path = os.path.join(config.ERA5_HDF5_73, "data.json")
metadata_path = filesystem.download_cached(metadata_path)
with open(metadata_path) as mf:
metadata = json.load(mf)
ds = era5.open_hdf5(path=path, f=f, metadata=metadata)
elif path.endswith(".nc"):
ds = xarray.open_dataset(path).fields
return ds
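# Usage sketch (the root path is a placeholder): open a single initial
# condition from an HDF5 archive laid out as {root}/<split>/<year>.h5 with
# a data.json metadata file at the root.
def _example_hdf5_source(root="/path/to/era5"):
    source = HDF5DataSource.from_path(root)
    return source[datetime.datetime(2018, 1, 1)]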
| earth2mip-main | earth2mip/initial_conditions/era5.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import datetime
import xarray
import json
from earth2mip import filesystem, schema, config
import logging
import numpy as np
import h5py
logger = logging.getLogger(__name__)
def _get_path(path: str, time) -> str:
filename = time.strftime("%Y.h5")
h5_files = filesystem.glob(os.path.join(path, "*.h5"))
files = {os.path.basename(f): f for f in h5_files}
return files[filename]
def _get_time(time: datetime.datetime) -> int:
day_of_year = time.timetuple().tm_yday - 1
hour_of_day = time.timetuple().tm_hour
hours_since_jan_01 = 24 * day_of_year + hour_of_day
return int(hours_since_jan_01 / 6)
def _get_hdf5(path: str, metadata, time: datetime.datetime) -> xarray.DataArray:
dims = metadata["dims"]
h5_path = metadata["h5_path"]
variables = []
ic = _get_time(time)
with h5py.File(path, "r") as f:
for nm in h5_path:
if nm == "pl":
pl = f[nm][ic : ic + 1]
elif nm == "sl":
sl = f[nm][ic : ic + 1]
assert "pl" in locals() and "sl" in locals()
pl_list = []
for var_idx in range(pl.shape[1]):
pl_list.append(pl[:, var_idx])
pl = np.concatenate(pl_list, axis=1) # pressure level vars flattened
data = np.concatenate([pl, sl], axis=1)
ds = xarray.DataArray(
data,
dims=["time", "channel", "lat", "lon"],
coords={
"time": [time],
"channel": metadata["coords"]["channel"],
"lat": metadata["coords"]["lat"],
"lon": metadata["coords"]["lon"],
},
name="fields",
)
return ds
def get(time: datetime.datetime, channel_set: schema.ChannelSet) -> xarray.DataArray:
root = config.get_data_root(channel_set)
path = _get_path(root, time)
logger.debug(f"Opening {path} for {time}.")
metadata_path = os.path.join(config.ERA5_HDF5_73, "data.json")
metadata_path = filesystem.download_cached(metadata_path)
with open(metadata_path) as mf:
metadata = json.load(mf)
ds = _get_hdf5(path=path, metadata=metadata, time=time)
return ds
| earth2mip-main | earth2mip/initial_conditions/hrmip.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from earth2mip import config
import xarray
import datetime
from earth2mip import schema
import joblib
import numpy as np
from earth2mip.initial_conditions.era5 import open_era5_xarray, HDF5DataSource
from earth2mip.initial_conditions import ifs
from earth2mip.initial_conditions import cds
from earth2mip.initial_conditions import gfs
from earth2mip.initial_conditions import hrmip
# TODO remove this fcn-mip import
from earth2mip.datasets.era5 import METADATA
import json
__all__ = ["open_era5_xarray", "get", "get_data_source"]
def get_data_source(
n_history,
grid,
channel_set,
netcdf="",
initial_condition_source=schema.InitialConditionSource.era5,
):
if initial_condition_source == schema.InitialConditionSource.era5:
root = config.get_data_root(channel_set)
return HDF5DataSource.from_path(root)
else:
return LegacyDataSource(
n_history,
grid,
channel_set,
netcdf=netcdf,
initial_condition_source=initial_condition_source,
)
class LegacyDataSource:
def __init__(
self,
n_history,
grid,
channel_set,
netcdf="",
initial_condition_source=schema.InitialConditionSource.era5,
):
self.n_history = n_history
self.grid = grid
self.channel_set = channel_set
self.initial_condition_source = initial_condition_source
self.netcdf = ""
def __getitem__(self, time):
if self.netcdf:
return xarray.open_dataset(self.netcdf)["fields"]
else:
return ic(
n_history=self.n_history,
grid=self.grid,
time=time,
channel_set=self.channel_set,
source=self.initial_condition_source,
)
def get(
n_history: int,
time: datetime.datetime,
channel_set: schema.ChannelSet,
source: schema.InitialConditionSource = schema.InitialConditionSource.era5,
) -> xarray.DataArray:
if source == schema.InitialConditionSource.hrmip:
ds = hrmip.get(time, channel_set)
return ds
elif source == schema.InitialConditionSource.ifs:
if n_history > 0:
raise NotImplementedError("IFS initializations only work with n_history=0.")
ds = ifs.get(time, channel_set)
ds = ds.expand_dims("time", axis=0)
# move to earth2mip.channels
# TODO refactor interpolation to another place
metadata = json.loads(METADATA.read_text())
lat = np.array(metadata["coords"]["lat"])
lon = np.array(metadata["coords"]["lon"])
ds = ds.roll(lon=len(ds.lon) // 2, roll_coords=True)
ds["lon"] = ds.lon.where(ds.lon >= 0, ds.lon + 360)
assert min(ds.lon) >= 0, min(ds.lon)
return ds.interp(lat=lat, lon=lon, kwargs={"fill_value": "extrapolate"})
elif source == schema.InitialConditionSource.cds:
if n_history > 0:
raise NotImplementedError("CDS initializations only work with n_history=0.")
ds = cds.get(time, channel_set)
return ds
elif source == schema.InitialConditionSource.gfs:
if n_history > 0:
raise NotImplementedError("GFS initializations only work with n_history=0.")
return gfs.get(time, channel_set)
else:
raise NotImplementedError(source)
if config.LOCAL_CACHE:
memory = joblib.Memory(config.LOCAL_CACHE)
get = memory.cache(get)
def ic(
    time: datetime.datetime,
grid,
n_history: int,
channel_set: schema.ChannelSet,
source: schema.InitialConditionSource,
):
ds = get(n_history, time, channel_set, source)
# TODO collect grid logic in one place
if grid == schema.Grid.grid_720x1440:
return ds.isel(lat=slice(0, -1))
elif grid == schema.Grid.grid_721x1440:
return ds
else:
raise NotImplementedError(f"Grid {grid} not supported")
| earth2mip-main | earth2mip/initial_conditions/__init__.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import eccodes
from typing import List, Union
import datetime
import dataclasses
from earth2mip import schema
import xarray
import numpy as np
import tempfile
import os
from concurrent.futures import ThreadPoolExecutor
from cdsapi import Client
import logging
logging.getLogger("cdsapi").setLevel(logging.WARNING)
import urllib3
urllib3.disable_warnings(
urllib3.exceptions.InsecureRequestWarning
) # Hack to disable SSL warnings
# codes database: https://codes.ecmwf.int/grib/param-db/?filter=grib2
CHANNEL_TO_CODE = {
"z": 129,
"u": 131,
"v": 132,
"t": 130,
"q": 133,
"r": 157,
"t2m": 167,
"u10m": 165,
"v10m": 166,
"u100m": 228246,
"v100m": 228247,
"tcwv": 137,
"sp": 134,
"msl": 151,
# total precip
"tp": 228,
}
@dataclasses.dataclass
class PressureLevelCode:
id: int
level: int = 0
@dataclasses.dataclass
class SingleLevelCode:
id: int
def parse_channel(channel: str) -> Union[PressureLevelCode, SingleLevelCode]:
if channel in CHANNEL_TO_CODE:
return SingleLevelCode(CHANNEL_TO_CODE[channel])
else:
code = CHANNEL_TO_CODE[channel[0]]
level = int(channel[1:])
return PressureLevelCode(code, level=int(level))
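# For example (illustrative, using the code table above): a bare name maps to
# a single-level code, while a trailing number is parsed as a pressure level:
#
#   parse_channel("t2m")  -> SingleLevelCode(id=167)
#   parse_channel("z500") -> PressureLevelCode(id=129, level=500)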
@dataclasses.dataclass
class DataSource:
channel_names: List[str]
client: Client = dataclasses.field(
default_factory=lambda: Client(progress=False, quiet=False)
)
@property
def time_means(self):
raise NotImplementedError()
def __getitem__(self, time: datetime.datetime):
return _get_channels(self.client, time, self.channel_names)
def get(time: datetime.datetime, channel_set: schema.ChannelSet):
warnings.warn(
DeprecationWarning("Will be removed. Please use CDSDataSource instead.")
)
channels = channel_set.list_channels()
ds = DataSource(channels)
return ds[time]
def _get_cds_requests(codes, time, format):
grid = (0.25, 0.25)
area = (90, -180, -90, 180)
# create a list of arguments for each call to retrieve_channel_data
levels = set()
pressure_level_names = set()
single_level_names = set()
for v in codes:
if isinstance(v, PressureLevelCode): # it's a pressure level variable
levels.add(v.level)
pressure_level_names.add(v.id)
elif isinstance(v, SingleLevelCode): # it's a single level variable
single_level_names.add(v.id)
if pressure_level_names and levels:
yield (
"reanalysis-era5-pressure-levels",
{
"product_type": "reanalysis",
"variable": list(pressure_level_names),
"pressure_level": sorted(levels),
"year": time.strftime("%Y"),
"month": time.strftime("%m"),
"day": time.strftime("%d"),
"time": time.strftime("%H:%M"),
"area": area,
"grid": grid,
"format": format,
},
)
if single_level_names:
yield (
"reanalysis-era5-single-levels",
{
"product_type": "reanalysis",
"variable": sorted(single_level_names),
"year": time.strftime("%Y"),
"month": time.strftime("%m"),
"day": time.strftime("%d"),
"time": time.strftime("%H:%M"),
"area": area,
"grid": grid,
"format": format,
},
)
def _parse_files(
codes: List[Union[SingleLevelCode, PressureLevelCode]], files: List[str]
) -> xarray.DataArray:
"""Retrieve ``codes`` from a list of ``files``
Returns:
a data array of all the codes
"""
arrays = [None] * len(codes)
for path in files:
        # eccodes requires the GRIB file to be opened in binary mode
        with open(path, "rb") as f:
while True:
gid = eccodes.codes_grib_new_from_file(f)
if gid is None:
break
id = eccodes.codes_get(gid, "paramId")
level = eccodes.codes_get(gid, "level")
type_of_level = eccodes.codes_get(gid, "typeOfLevel")
if type_of_level == "surface":
code = SingleLevelCode(id)
else:
code = PressureLevelCode(id, level=level)
nlat = eccodes.codes_get(gid, "Nj")
nlon = eccodes.codes_get(gid, "Ni")
lat = eccodes.codes_get_array(gid, "latitudes").reshape(nlat, nlon)
lon = eccodes.codes_get_array(gid, "longitudes").reshape(nlat, nlon)
vals = eccodes.codes_get_values(gid).reshape(nlat, nlon)
eccodes.codes_release(gid)
try:
i = codes.index(code)
except ValueError:
continue
arrays[i] = vals
array = np.stack(arrays)
coords = {}
coords["lat"] = lat[:, 0]
coords["lon"] = lon[0, :]
return xarray.DataArray(array, dims=["channel", "lat", "lon"], coords=coords)
def _download_codes(client, codes, time):
with tempfile.TemporaryDirectory() as d:
        format = "grib"

        def download(arg):
            name, req = arg
            # mktemp(dir=d) already returns a unique absolute path inside d
            path = tempfile.mktemp(dir=d, suffix="." + format)
            client.retrieve(name, req, path)
            return path
requests = _get_cds_requests(codes, time, format)
with ThreadPoolExecutor(4) as pool:
files = pool.map(download, requests)
darray = _parse_files(codes, files)
return darray
def _get_channels(client, time: datetime.datetime, channels: List[str]):
codes = [parse_channel(c) for c in channels]
darray = _download_codes(client, codes, time)
return (
darray.assign_coords(channel=channels)
.assign_coords(time=time)
.expand_dims("time")
.transpose("time", "channel", "lat", "lon")
.assign_coords(lon=darray["lon"] + 180.0)
.roll(lon=1440 // 2)
)
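

# Example usage (a sketch; requires valid CDS API credentials, e.g. in
# ~/.cdsapirc, plus network access):
if __name__ == "__main__":
    ds = DataSource(["t2m", "z500"])
    arr = ds[datetime.datetime(2020, 1, 1)]
    print(arr.dims)  # ("time", "channel", "lat", "lon")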
| earth2mip-main | earth2mip/initial_conditions/cds.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from modulus.utils import filesystem
from earth2mip import schema
from earth2mip.datasets.era5 import METADATA
import json
import xarray
import numpy as np
def _get_filename(time: datetime.datetime, lead_time: str):
date_format = f"%Y%m%d/%Hz/0p4-beta/oper/%Y%m%d%H%M%S-{lead_time}-oper-fc.grib2"
return time.strftime(date_format)
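# For example (pure string formatting, no download involved):
#   _get_filename(datetime.datetime(2023, 1, 1), "0h")
#   -> "20230101/00z/0p4-beta/oper/20230101000000-0h-oper-fc.grib2"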
def _get_channel(c: str, **kwargs) -> xarray.DataArray:
"""
Parameters:
-----------
c: channel id
**kwargs: variables in ecmwf data
"""
# handle 2d inputs
if c in kwargs:
return kwargs[c]
else:
varcode, pressure_level = c[0], int(c[1:])
return kwargs[varcode].interp(isobaricInhPa=pressure_level)
def get(time: datetime.datetime, channel_set: schema.ChannelSet):
root = "https://ecmwf-forecasts.s3.eu-central-1.amazonaws.com/"
path = root + _get_filename(time, "0h")
local_path = filesystem.download_cached(path)
dataset_0h = xarray.open_dataset(local_path, engine="cfgrib")
# get t2m and other things from 12 hour forecast initialized 12 hours before
# The HRES is only initialized every 12 hours
path = root + _get_filename(time - datetime.timedelta(hours=12), "12h")
local_path = filesystem.download_cached(path)
forecast_12h = xarray.open_dataset(local_path, engine="cfgrib")
if channel_set == schema.ChannelSet.var34:
# move to earth2mip.channels
metadata = json.loads(METADATA.read_text())
channels = metadata["coords"]["channel"]
else:
raise NotImplementedError(channel_set)
channel_data = [
_get_channel(
c,
u10m=dataset_0h.u10,
v10m=dataset_0h.v10,
            # NOTE: the 10 m wind fields are substituted for the u100m/v100m
            # channels here.
            u100m=dataset_0h.u10,
            v100m=dataset_0h.v10,
sp=dataset_0h.sp,
t2m=forecast_12h.t2m,
msl=forecast_12h.msl,
tcwv=forecast_12h.tciwv,
t=dataset_0h.t,
u=dataset_0h.u,
v=dataset_0h.v,
r=dataset_0h.r,
z=dataset_0h.gh * 9.81,
)
for c in channels
]
array = np.stack([d for d in channel_data], axis=0)
darray = xarray.DataArray(
array,
dims=["channel", "lat", "lon"],
coords={
"channel": channels,
"lon": dataset_0h.longitude.values,
"lat": dataset_0h.latitude.values,
"time": time,
},
)
return darray
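

# Example usage (a sketch; downloads two GRIB files from the public ECMWF
# open-data bucket, so it needs network access and the cfgrib engine):
if __name__ == "__main__":
    arr = get(datetime.datetime(2023, 1, 1), schema.ChannelSet.var34)
    print(arr.shape)  # (channel, lat, lon)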
| earth2mip-main | earth2mip/initial_conditions/ifs.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import torch
import numpy as np
import xarray
import json
from earth2mip import registry, schema, networks, config, initial_conditions, geometry
from earth2mip.time_loop import TimeLoop
from earth2mip.schema import Grid
from modulus.models.fcn_mip_plugin import _fix_state_dict_keys
from modulus.models.dlwp import DLWP
from modulus.utils.filesystem import Package
from modulus.utils.sfno.zenith_angle import cos_zenith_angle
# TODO: Defined here explicitly for easier access. It will be imported from the
# modulus repo once this PR is merged: https://github.com/NVIDIA/modulus/pull/138
class _DLWPWrapper(torch.nn.Module):
def __init__(
self,
model,
lsm,
longrid,
latgrid,
topographic_height,
ll_to_cs_mapfile_path,
cs_to_ll_mapfile_path,
):
super(_DLWPWrapper, self).__init__()
self.model = model
self.lsm = lsm
self.longrid = longrid
self.latgrid = latgrid
self.topographic_height = topographic_height
# load map weights
self.input_map_wts = xarray.open_dataset(ll_to_cs_mapfile_path)
self.output_map_wts = xarray.open_dataset(cs_to_ll_mapfile_path)
@property
    def channel_names(self):
return ["t850", "z1000", "z700", "z500", "z300", "tcwv", "t2m"]
def prepare_input(self, input, time):
device = input.device
dtype = input.dtype
i = self.input_map_wts.row.values - 1
j = self.input_map_wts.col.values - 1
data = self.input_map_wts.S.values
M = torch.sparse_coo_tensor(np.array((i, j)), data).type(dtype).to(device)
bs, t, chan = input.shape[0], input.shape[1], input.shape[2]
input = input.reshape(bs * t * chan, -1) @ M.T
input = input.reshape(bs, t, chan, 6, 64, 64)
input_list = list(torch.split(input, 1, dim=1))
input_list = [tensor.squeeze(1) for tensor in input_list]
repeat_vals = (input.shape[0], -1, -1, -1, -1) # repeat along batch dimension
for i in range(len(input_list)):
tisr = np.maximum(
cos_zenith_angle(
time
- datetime.timedelta(hours=6 * (input.shape[0] - 1))
+ datetime.timedelta(hours=6 * i),
self.longrid,
self.latgrid,
),
0,
) - (
1 / np.pi
) # subtract mean value
tisr = (
torch.tensor(tisr, dtype=dtype)
.to(device)
.unsqueeze(dim=0)
.unsqueeze(dim=0)
) # add channel and batch size dimension
tisr = tisr.expand(*repeat_vals) # TODO - find better way to batch TISR
input_list[i] = torch.cat(
(input_list[i], tisr), dim=1
) # concat along channel dim
input_model = torch.cat(
input_list, dim=1
) # concat the time dimension into channels
lsm_tensor = torch.tensor(self.lsm, dtype=dtype).to(device).unsqueeze(dim=0)
lsm_tensor = lsm_tensor.expand(*repeat_vals)
topographic_height_tensor = (
torch.tensor((self.topographic_height - 3.724e03) / 8.349e03, dtype=dtype)
.to(device)
.unsqueeze(dim=0)
)
topographic_height_tensor = topographic_height_tensor.expand(*repeat_vals)
input_model = torch.cat(
(input_model, lsm_tensor, topographic_height_tensor), dim=1
)
return input_model
def prepare_output(self, output):
device = output.device
dtype = output.dtype
output = torch.split(output, output.shape[1] // 2, dim=1)
output = torch.stack(output, dim=1) # add time dimension back in
i = self.output_map_wts.row.values - 1
j = self.output_map_wts.col.values - 1
data = self.output_map_wts.S.values
M = torch.sparse_coo_tensor(np.array((i, j)), data).type(dtype).to(device)
output = output.reshape(output.shape[0], 2, output.shape[2], -1) @ M.T
output = output.reshape(output.shape[0], 2, output.shape[2], 721, 1440)
return output
def forward(self, x, time):
x = self.prepare_input(x, time)
y = self.model(x)
return self.prepare_output(y)
def load(package, *, pretrained=True, device="cuda"):
assert pretrained
# load static datasets
lsm = xarray.open_dataset(package.get("land_sea_mask_rs_cs.nc"))["lsm"].values
topographic_height = xarray.open_dataset(package.get("geopotential_rs_cs.nc"))[
"z"
].values
latlon_grids = xarray.open_dataset(package.get("latlon_grid_field_rs_cs.nc"))
latgrid, longrid = latlon_grids["latgrid"].values, latlon_grids["longrid"].values
# load maps
ll_to_cs_mapfile_path = package.get("map_LL721x1440_CS64.nc")
cs_to_ll_mapfile_path = package.get("map_CS64_LL721x1440.nc")
with torch.cuda.device(device):
# p = package.get("model.onnx")
with open(package.get("config.json")) as json_file:
config = json.load(json_file)
core_model = DLWP(
nr_input_channels=config["nr_input_channels"],
nr_output_channels=config["nr_output_channels"],
)
if pretrained:
weights_path = package.get("weights.pt")
weights = torch.load(weights_path)
fixed_weights = _fix_state_dict_keys(weights, add_module=False)
core_model.load_state_dict(fixed_weights)
model = _DLWPWrapper(
core_model,
lsm,
longrid,
latgrid,
topographic_height,
ll_to_cs_mapfile_path,
cs_to_ll_mapfile_path,
)
channel_names = ["t850", "z1000", "z700", "z500", "z300", "tcwv", "t2m"]
center = np.load(package.get("global_means.npy"))
scale = np.load(package.get("global_stds.npy"))
grid = schema.Grid.grid_721x1440
dt = datetime.timedelta(hours=12)
inference = networks.Inference(
model,
channels=None,
center=center,
scale=scale,
grid=grid,
channel_names=channel_names,
time_step=dt,
n_history=1,
)
inference.to(device)
return inference
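

# Example usage (a sketch; assumes a DLWP model package containing the files
# referenced above -- weights.pt, config.json, and the static netCDF inputs --
# is available in the earth2mip registry under a name such as "dlwp"):
#
#   package = registry.get_model("dlwp")
#   time_loop = load(package, pretrained=True, device="cuda:0")
#   print(time_loop.in_channel_names)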
| earth2mip-main | earth2mip/networks/dlwp.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import urllib
import warnings
import itertools
from typing import Optional, Tuple, Any, Iterator
import sys
import datetime
import torch
import einops
import numpy as np
import contextlib
from earth2mip import registry, ModelRegistry, model_registry
from earth2mip import filesystem, loaders, time_loop, schema
import modulus
from modulus.utils.sfno.zenith_angle import cos_zenith_angle
from modulus.distributed.manager import DistributedManager
from earth2mip.loaders import LoaderProtocol
if sys.version_info < (3, 10):
from importlib_metadata import EntryPoint
else:
from importlib.metadata import EntryPoint
__all__ = ["get_model"]
def depends_on_time(f):
"""
A function to detect if the function `f` takes an argument `time`.
Args:
f: a function.
Returns:
        bool: True if the function takes an argument named `time`, False otherwise.
"""
# check if model is a torchscript model
if isinstance(f, torch.jit.ScriptModule):
return False
else:
import inspect
signature = inspect.signature(f)
parameters = signature.parameters
return "time" in parameters
class Wrapper(torch.nn.Module):
"""Makes sure the parameter names are the same as the checkpoint"""
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, *args, **kwargs):
"""x: (batch, history, channel, x, y)"""
return self.module(*args, **kwargs)
class CosZenWrapper(torch.nn.Module):
def __init__(self, model, lon, lat):
super().__init__()
self.model = model
self.lon = lon
self.lat = lat
def forward(self, x, time):
lon_grid, lat_grid = np.meshgrid(self.lon, self.lat)
cosz = cos_zenith_angle(time, lon_grid, lat_grid)
cosz = cosz.astype(np.float32)
z = torch.from_numpy(cosz).to(device=x.device)
# assume no history
x = torch.cat([x, z[None, None]], dim=1)
return self.model(x)
class _SimpleModelAdapter(torch.nn.Module):
"""Takes model of (b, c, y, x) to (b, h, y, x) where h == 1"""
def __init__(self, model, time_dependent, has_history):
super().__init__()
self.model = model
self.time_dependent = time_dependent
self.has_history = has_history
def forward(self, x, time):
if not self.has_history:
x = x[:, 0]
if self.time_dependent:
y = self.model.forward(x, time)
else:
y = self.model.forward(x)
if not self.has_history:
y = y[:, None]
return y
class Inference(torch.nn.Module, time_loop.TimeLoop):
def __init__(
self,
model,
        center: np.ndarray,
        scale: np.ndarray,
grid: schema.Grid,
channels=None,
channel_set: Optional[schema.ChannelSet] = None,
n_history: int = 0,
time_step=datetime.timedelta(hours=6),
channel_names=None,
):
"""
Args:
            model: a model, with signature model(x, time) or model(x). With n_history == 0, x is a
                torch tensor with shape (batch, nchannel, lat, lon). With
                n_history > 0, x has the shape (batch, n_history + 1, nchannel, lat, lon).
`time` is a datetime object, which is passed if model.forward has time as an argument.
center: a 1d numpy array with shape (n_channels in data) containing
the means. The shape is NOT `len(channels)`.
scale: a 1d numpy array with shape (n_channels in data) containing
the stds. The shape is NOT `len(channels)`.
grid: metadata about the grid, which should be used to pass the
correct data to this object.
channels: a list of integers taken from [0, n_channels in data -
1]. This is used to subset the input data.
channel_set: optional metadata about the channel-set, that can be
used to figure out what the channel names are.
channel_names: if provided overrides the ``channel_set`` and
``channels``. If this is provided then mean/scale are assumed to
match this.
n_history: whether `model` was trained with history.
time_step: the time-step `model` was trained with.
""" # noqa
super().__init__()
self.channel_set = channel_set
self.time_dependent = depends_on_time(model.forward)
# TODO probably delete this line
# if not isinstance(model, modulus.Module):
# model = Wrapper(model)
# TODO extract this to another place
model = _SimpleModelAdapter(
model, time_dependent=self.time_dependent, has_history=n_history > 0
)
self.model = model
self.channel_set = channel_set
self.channel_names = channel_names
self.grid = grid
self.time_step = time_step
self.n_history = n_history
center = torch.from_numpy(np.squeeze(center)).float()
scale = torch.from_numpy(np.squeeze(scale)).float()
self.register_buffer("scale_org", scale)
self.register_buffer("center_org", center)
# infer channel names
if channel_names is not None:
self.in_channel_names = self.out_channel_names = channel_names
self.channels = list(range(len(channel_names)))
self.register_buffer("scale", scale[:, None, None])
self.register_buffer("center", center[:, None, None])
elif channel_set is not None:
data_channel_names = channel_set.list_channels()
self.channels = channels
self.in_channel_names = self.out_channel_names = [
data_channel_names[i] for i in channels
]
self.register_buffer("scale", scale[self.channels, None, None])
self.register_buffer("center", center[self.channels, None, None])
else:
raise ValueError(
"Cannot infer channel names. Please provide channel_names or both channels and channel_set." # noqa
)
@property
def n_history_levels(self) -> int:
"""The expected size of the second dimension"""
return self.n_history + 1
    @property
    def device(self) -> torch.device:
return self.scale.device
def normalize(self, x):
warnings.warn(
DeprecationWarning(
".normalize does not follow the TimeLoop API. It will be removed soon."
)
)
return (x - self.center_org[None, :, None, None]) / self.scale_org[
None, :, None, None
]
def run_steps(self, x, n, normalize=True, time=None):
warnings.warn(
DeprecationWarning(
".run_steps does not follow the TimeLoop API. It will be removed soon."
)
)
for _, data, _ in self.run_steps_with_restart(x, n, normalize, time):
yield data
def __call__(
self,
time: datetime.datetime,
x: torch.Tensor,
restart: Optional[Any] = None,
normalize=True,
) -> Iterator[Tuple[datetime.datetime, torch.Tensor, Any]]:
"""
Args:
x: an initial condition. has shape (B, n_history_levels,
len(in_channel_names), Y, X). (Y, X) should be consistent with
``grid``.
time: the datetime to start with
restart: if provided this restart information (typically some torch
Tensor) can be used to restart the time loop
Yields:
(time, output, restart) tuples. ``output`` is a tensor with
shape (B, len(out_channel_names), Y, X) which will be used for
diagnostics. Restart data should encode the state of the time
loop.
"""
if restart:
yield from self._iterate(**restart)
else:
yield from self._iterate(x=x, time=time, n=None, normalize=normalize)
def run_steps_with_restart(self, x, n, normalize=True, time=None):
warnings.warn(
DeprecationWarning(
".run_steps_with_restart does not follow the TimeLoop API. It will be removed soon." # noqa
)
)
x = x[:, :, self.channels]
yield from self._iterate(x, n, normalize=normalize, time=time)
def _iterate(self, x, n, normalize=True, time=None):
"""Yield (time, unnormalized data, restart) tuples
restart = (time, unnormalized data)
"""
if self.time_dependent and not time:
raise ValueError("Time dependent models require ``time``.")
time = time or datetime.datetime(1900, 1, 1)
with torch.no_grad():
            # sanity-check the expected (batch, time, channel, y, x) layout
_, n_time_levels, n_channels, _, _ = x.shape
assert n_time_levels == self.n_history + 1
if normalize:
x = (x - self.center) / self.scale
# yield initial time for convenience
restart = dict(x=x, normalize=False, time=time)
yield time, self.scale * x[:, -1] + self.center, restart
for i in range(n) if n else itertools.count():
x = self.model(x, time)
time = time + self.time_step
# create args and kwargs for future use
restart = dict(x=x, normalize=False, time=time)
out = self.scale * x[:, -1] + self.center
yield time, out, restart
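# Example (a sketch of the TimeLoop protocol implemented by ``Inference``):
# given an instance ``loop`` and an initial condition ``x`` of shape
# (batch, loop.n_history_levels, len(loop.in_channel_names), lat, lon),
#
#   start = datetime.datetime(2018, 1, 1)
#   for i, (t, output, restart) in enumerate(loop(start, x)):
#       print(t, output.shape)  # the first iteration yields the initial state
#       if i == 4:
#           break
#
# The ``restart`` payload from any step can be passed back as
# ``loop(start, x, restart=restart)`` to resume from that state.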
def _default_inference(package, metadata, device):
if metadata.architecture == "pickle":
loader = loaders.pickle
elif metadata.architecture_entrypoint:
ep = EntryPoint(name=None, group=None, value=metadata.architecture_entrypoint)
loader: LoaderProtocol = ep.load()
else:
raise NotImplementedError()
model = loader(package, pretrained=True)
center_path = package.get("global_means.npy")
scale_path = package.get("global_stds.npy")
inference = Inference(
model=model,
channels=metadata.in_channels,
center=np.load(center_path),
scale=np.load(scale_path),
grid=metadata.grid,
channel_set=metadata.channel_set,
n_history=metadata.n_history,
time_step=metadata.time_step,
)
inference.to(device)
return inference
def _load_package(package, metadata, device) -> time_loop.TimeLoop:
if metadata is None:
local_path = package.get("metadata.json")
with open(local_path) as f:
metadata = schema.Model.parse_raw(f.read())
if metadata.entrypoint:
ep = EntryPoint(name=None, group=None, value=metadata.entrypoint.name)
inference_loader = ep.load()
return inference_loader(package, device=device, **metadata.entrypoint.kwargs)
else:
warnings.warn("No loading entry point found, using default inferencer")
return _default_inference(package, metadata, device=device)
def get_model(
model: str,
registry: ModelRegistry = registry,
device="cpu",
metadata: Optional[schema.Model] = None,
) -> time_loop.TimeLoop:
"""
Function to construct an inference model and load the appropriate
checkpoints from the model registry
Parameters
----------
model : The model name to open in the ``registry``. If a url is passed (e.g.
s3://bucket/model), then this location will be opened directly.
Supported urls protocols include s3:// for PBSS access, and file:// for
local files.
registry: A model registry object. Defaults to the global model registry
metadata: If provided, this model metadata will be used to load the model.
By default this will be loaded from the file ``metadata.json`` in the
model package.
device: the device to load on, by default the 'cpu'
Returns
-------
Inference model
"""
url = urllib.parse.urlparse(model)
if url.scheme == "":
package = registry.get_model(model)
else:
package = model_registry.Package(root=model, seperator="/")
return _load_package(package, metadata, device)
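# Example (a sketch; assumes a model named "fcnv2_sm" exists in the configured
# registry):
#
#   time_loop = get_model("fcnv2_sm", device="cuda:0")
#
# A package can also be opened directly from a URL, e.g.
# ``get_model("file:///path/to/package")``.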
class Identity(torch.nn.Module):
def forward(self, x):
return x
def persistence(package, pretrained=True):
model = Identity()
    center = np.zeros((3))
    # unit scale so (x - center) / scale is a no-op (zeros would divide by zero)
    scale = np.ones((3))
grid = schema.Grid.grid_721x1440
return Inference(
model,
channel_names=["a", "b", "c"],
center=center,
scale=scale,
grid=grid,
n_history=0,
)
| earth2mip-main | earth2mip/networks/__init__.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FCN v2 Small adapter
This model is an outdated version of FCN v2 (SFNO); a more recent version is available in Modulus.
"""
from typing import List
import logging
import os
import datetime
import torch
import json
import pathlib
import numpy as np
import onnxruntime as ort
import dataclasses
from earth2mip import registry, schema, networks, config, initial_conditions, geometry
from modulus.models.fcn_mip_plugin import _fix_state_dict_keys
# TODO: Update to new arch in Modulus!
import earth2mip.networks.fcnv2 as fcnv2
def load(package, *, pretrained=True, device="cuda"):
assert pretrained
config_path = pathlib.Path(__file__).parent / "fcnv2" / "sfnonet.yaml"
params = fcnv2.YParams(config_path.as_posix(), "sfno_73ch")
params.img_crop_shape_x = 721
params.img_crop_shape_y = 1440
params.N_in_channels = 73
params.N_out_channels = 73
core_model = fcnv2.FourierNeuralOperatorNet(params).to(device)
local_center = np.load(package.get("global_means.npy"))
local_std = np.load(package.get("global_stds.npy"))
weights_path = package.get("weights.tar")
weights = torch.load(weights_path, map_location=device)
fixed_weights = _fix_state_dict_keys(weights["model_state"], add_module=False)
core_model.load_state_dict(fixed_weights)
grid = schema.Grid.grid_721x1440
channel_set = schema.ChannelSet.var73
dt = datetime.timedelta(hours=6)
inference = networks.Inference(
core_model,
channels=None,
center=local_center,
scale=local_std,
grid=grid,
channel_names=channel_set.list_channels(),
channel_set=channel_set,
time_step=dt,
)
inference.to(device)
return inference
| earth2mip-main | earth2mip/networks/fcnv2_sm.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pangu Weather adapter
adapted from https://raw.githubusercontent.com/ecmwf-lab/ai-models-panguweather/main/ai_models_panguweather/model.py
# (C) Copyright 2023 European Centre for Medium-Range Weather Forecasts.
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
"""
# %%
from typing import List
import logging
import os
import datetime
import torch
import numpy as np
import onnxruntime as ort
import dataclasses
from earth2mip import registry, schema, networks, config, initial_conditions, geometry
class PanguWeather:
# Download
download_url = (
"https://get.ecmwf.int/repository/test-data/ai-models/pangu-weather/{file}"
)
download_files = ["pangu_weather_24.onnx", "pangu_weather_6.onnx"]
# Input
area = [90, 0, -90, 360]
grid = [0.25, 0.25]
param_sfc = ["msl", "u10m", "v10m", "t2m"]
param_level_pl = (
["z", "q", "t", "u", "v"],
[1000, 925, 850, 700, 600, 500, 400, 300, 250, 200, 150, 100, 50],
)
# Output
expver = "pguw"
# providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
def __init__(self, path):
self.path = path
options = ort.SessionOptions()
options.enable_cpu_mem_arena = False
options.enable_mem_pattern = False
options.enable_mem_reuse = False
options.intra_op_num_threads = 1
        self.device_index = torch.cuda.current_device()
        # stat the model file up front so a missing path raises FileNotFoundError early
        os.stat(self.path)
providers = [
(
"CUDAExecutionProvider",
{
"device_id": self.device_index,
},
)
]
self.ort_session = ort.InferenceSession(
self.path,
sess_options=options,
providers=providers,
)
def __call__(self, fields_pl, fields_sfc):
assert fields_pl.dtype == torch.float32
assert fields_sfc.dtype == torch.float32
# from https://onnxruntime.ai/docs/api/python/api_summary.html
binding = self.ort_session.io_binding()
def bind_input(name, x):
x = x.contiguous()
binding.bind_input(
name=name,
device_type="cuda",
device_id=self.device_index,
element_type=np.float32,
shape=tuple(x.shape),
buffer_ptr=x.data_ptr(),
)
def bind_output(name, like):
x = torch.empty_like(like).contiguous()
binding.bind_output(
name=name,
device_type="cuda",
device_id=self.device_index,
element_type=np.float32,
shape=tuple(x.shape),
buffer_ptr=x.data_ptr(),
)
return x
bind_input("input", fields_pl)
bind_input("input_surface", fields_sfc)
output = bind_output("output", like=fields_pl)
output_sfc = bind_output("output_surface", like=fields_sfc)
self.ort_session.run_with_iobinding(binding)
return output, output_sfc
class PanguStacked:
def __init__(self, model: PanguWeather):
self.model = model
def channel_names(self):
variables, levels = self.model.param_level_pl
names = []
for v in variables:
for lev in levels:
names.append(v + str(lev))
for v in self.model.param_sfc:
names.append(v)
return names
def __call__(self, x):
return self.forward(x)
    def to(self, device=None):
        # the onnxruntime session is already bound to a device; nothing to move
        return self
def forward(self, x):
assert x.shape[0] == 1
assert x.shape[1] == len(self.channel_names())
pl_shape = (5, 13, 721, 1440)
nchan = pl_shape[0] * pl_shape[1]
pl = x[:, :nchan]
surface = x[:, nchan:]
        pl = pl.reshape(pl_shape)
sl = surface[0]
plo, slo = self.model(pl, sl)
        return torch.cat(
            [
                plo.reshape(1, nchan, 721, 1440),
                slo.reshape(1, x.size(1) - nchan, 721, 1440),
            ],
            dim=1,
        )
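# Channel layout implied by param_level_pl/param_sfc above: 5 pressure-level
# variables x 13 levels = 65 channels, followed by 4 surface channels, for 69
# in total. For example:
#
#   names = PanguStacked(model).channel_names()
#   len(names)                      # -> 69
#   names[0], names[13], names[65]  # -> ("z1000", "q1000", "msl")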
class PanguInference(torch.nn.Module):
n_history_levels = 1
time_step = datetime.timedelta(hours=6)
def __init__(self, model_6: PanguStacked, model_24: PanguStacked):
super().__init__()
self.model_6 = model_6
self.model_24 = model_24
self.channels = None
def to(self, device):
return self
def cuda(self, device=None):
return self
@property
def in_channel_names(self):
return self.channel_names
@property
def out_channel_names(self):
return self.channel_names
@property
def grid(self):
return schema.Grid.grid_721x1440
@property
def channel_set(self):
return schema.ChannelSet.var_pangu
@property
def channel_names(self):
return schema.ChannelSet.var_pangu.list_channels()
@property
def n_history(self):
return 0
def normalize(self, x):
# No normalization for pangu
return x
    def run_steps_with_restart(self, x, n, normalize=True, time=None):
        """Yield (time, unnormalized data, restart) tuples.

        Restart capability is not implemented, so the restart entry is None.
        """
        assert normalize, "normalize=False not supported"
        # yield at most n + 1 states (the initial condition plus n steps)
        for k, data in enumerate(self(time, x)):
            yield data
            if k == n:
                break
def __call__(self, time, x, restart=None):
"""Yield (time, unnormalized data, restart) tuples
restart = (time, unnormalized data)
"""
if restart:
raise NotImplementedError("Restart capability not implemented.")
# do not implement restart capability
restart_data = None
with torch.no_grad():
x0 = x[:, -1].clone()
yield time, x0, restart_data
time0 = time
while True:
x1 = x0
time1 = time0
for i in range(3):
time1 += datetime.timedelta(hours=6)
x1 = self.model_6(x1)
yield time1, x1, restart_data
time0 += datetime.timedelta(hours=24)
x0 = self.model_24(x0)
yield time0, x0, restart_data
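# Worked example of the sub-stepping above: starting from t0 the generator
# yields states at t0 (initial), t0+6h, +12h, +18h (three model_6 steps from
# the t0 state), then t0+24h (one model_24 step, also from the t0 state),
# then +30h, +36h, +42h, +48h, and so on. No output is ever more than three
# 6 h steps away from a 24 h anchor, which limits error accumulation from
# the short-step model.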
def load(package, *, pretrained=True, device="doesn't matter"):
"""Load the sub-stepped pangu weather inference"""
assert pretrained
p6 = package.get("pangu_weather_6.onnx")
p24 = package.get("pangu_weather_24.onnx")
model_6 = PanguStacked(PanguWeather(p6))
model_24 = PanguStacked(PanguWeather(p24))
return PanguInference(model_6, model_24)
def load_single_model(
package, *, time_step_hours: int = 24, pretrained=True, device="cuda:0"
):
"""Load a single time-step pangu weather"""
assert pretrained
with torch.cuda.device(device):
if time_step_hours == 6:
p = package.get("pangu_weather_6.onnx")
elif time_step_hours == 24:
p = package.get("pangu_weather_24.onnx")
else:
raise ValueError(f"time_step_hours must be 6 or 24, got {time_step_hours}")
model = PanguStacked(PanguWeather(p))
channel_names = model.channel_names()
center = np.zeros([len(channel_names)])
scale = np.ones([len(channel_names)])
grid = schema.Grid.grid_721x1440
dt = datetime.timedelta(hours=time_step_hours)
inference = networks.Inference(
model,
channels=None,
center=center,
scale=scale,
grid=grid,
channel_names=channel_names,
channel_set=schema.ChannelSet.var_pangu,
time_step=dt,
)
inference.to(device)
return inference
| earth2mip-main | earth2mip/networks/pangu.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
class ComplexReLU(nn.Module):
def __init__(self, negative_slope=0.0, mode="cartesian", bias_shape=None):
super(ComplexReLU, self).__init__()
# store parameters
self.mode = mode
if self.mode in ["modulus", "halfplane"]:
if bias_shape is not None:
self.bias = nn.Parameter(torch.zeros(bias_shape, dtype=torch.float32))
else:
self.bias = nn.Parameter(torch.zeros((1), dtype=torch.float32))
else:
bias = torch.zeros((1), dtype=torch.float32)
self.register_buffer("bias", bias)
self.negative_slope = negative_slope
self.act = nn.LeakyReLU(negative_slope=negative_slope)
def forward(self, z: torch.Tensor) -> torch.Tensor:
if self.mode == "cartesian":
zr = torch.view_as_real(z)
za = self.act(zr)
out = torch.view_as_complex(za)
elif self.mode == "modulus":
zabs = torch.sqrt(torch.square(z.real) + torch.square(z.imag))
out = self.act(zabs + self.bias) * torch.exp(1.0j * z.angle())
elif self.mode == "halfplane":
# bias is an angle parameter in this case
modified_angle = torch.angle(z) - self.bias
condition = torch.logical_and(
(0.0 <= modified_angle), (modified_angle < torch.pi / 2.0)
)
out = torch.where(condition, z, self.negative_slope * z)
elif self.mode == "real":
zr = torch.view_as_real(z)
outr = zr.clone()
outr[..., 0] = self.act(zr[..., 0])
out = torch.view_as_complex(outr)
else:
# identity
out = z
return out
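# Example (illustrative): in "cartesian" mode the activation acts on the real
# and imaginary parts independently. With the default negative_slope=0.0,
#
#   act = ComplexReLU(mode="cartesian")
#   z = torch.tensor([1.0 - 2.0j, -3.0 + 4.0j])
#   act(z)  # -> tensor([1.+0.j, 0.+4.j])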
class ComplexActivation(nn.Module):
def __init__(self, activation, mode="cartesian", bias_shape=None):
super(ComplexActivation, self).__init__()
# store parameters
self.mode = mode
if self.mode == "modulus":
if bias_shape is not None:
self.bias = nn.Parameter(torch.zeros(bias_shape, dtype=torch.float32))
else:
self.bias = nn.Parameter(torch.zeros((1), dtype=torch.float32))
else:
bias = torch.zeros((1), dtype=torch.float32)
self.register_buffer("bias", bias)
# real valued activation
self.act = activation
def forward(self, z: torch.Tensor) -> torch.Tensor:
if self.mode == "cartesian":
zr = torch.view_as_real(z)
za = self.act(zr)
out = torch.view_as_complex(za)
elif self.mode == "modulus":
zabs = torch.sqrt(torch.square(z.real) + torch.square(z.imag))
out = self.act(zabs + self.bias) * torch.exp(1.0j * z.angle())
else:
# identity
out = z
return out
| earth2mip-main | earth2mip/networks/fcnv2/activations.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .yparams import YParams
from .sfnonet import FourierNeuralOperatorNet
| earth2mip-main | earth2mip/networks/fcnv2/__init__.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from functools import partial
import torch
import torch.nn as nn
from torch_harmonics import *
class Preprocessor2D(nn.Module):
def __init__(self, params, img_size=(720, 1440)):
super(Preprocessor2D, self).__init__()
self.n_history = params.n_history
self.transform_to_nhwc = params.enable_nhwc
# self.poltor_decomp = params.poltor_decomp
# self.img_size = (params.img_shape_x, params.img_shape_y) if hasattr(params, "img_shape_x") and hasattr(params, "img_shape_y") else img_size
# self.input_grid = "equiangular"
# self.output_grid = "equiangular"
# process static features
static_features = None
# needed for sharding
start_x = params.img_local_offset_x
end_x = start_x + params.img_local_shape_x
start_y = params.img_local_offset_y
end_y = start_y + params.img_local_shape_y
if params.add_grid:
tx = torch.linspace(0, 1, params.img_shape_x + 1, dtype=torch.float32)[0:-1]
ty = torch.linspace(0, 1, params.img_shape_y + 1, dtype=torch.float32)[0:-1]
x_grid, y_grid = torch.meshgrid(tx, ty, indexing="ij")
x_grid, y_grid = x_grid.unsqueeze(0).unsqueeze(0), y_grid.unsqueeze(
0
).unsqueeze(0)
grid = torch.cat([x_grid, y_grid], dim=1)
# now shard:
grid = grid[:, :, start_x:end_x, start_y:end_y]
static_features = grid
# self.register_buffer("grid", grid)
if params.add_orography:
from utils.conditioning_inputs import get_orography
oro = torch.tensor(
get_orography(params.orography_path), dtype=torch.float32
)
oro = torch.reshape(oro, (1, 1, oro.shape[0], oro.shape[1]))
# shard
oro = oro[:, :, start_x:end_x, start_y:end_y]
if static_features is None:
static_features = oro
else:
static_features = torch.cat([static_features, oro], dim=1)
if params.add_landmask:
from utils.conditioning_inputs import get_land_mask
lsm = torch.tensor(get_land_mask(params.landmask_path), dtype=torch.long)
# one hot encode and move channels to front:
lsm = torch.permute(torch.nn.functional.one_hot(lsm), (2, 0, 1)).to(
torch.float32
)
lsm = torch.reshape(lsm, (1, lsm.shape[0], lsm.shape[1], lsm.shape[2]))
# shard
lsm = lsm[:, :, start_x:end_x, start_y:end_y]
if static_features is None:
static_features = lsm
else:
static_features = torch.cat([static_features, lsm], dim=1)
self.add_static_features = False
if static_features is not None:
self.add_static_features = True
self.register_buffer("static_features", static_features)
# if self.poltor_decomp:
# assert(hasattr(params, 'wind_channels'))
# wind_channels = torch.as_tensor(params.wind_channels)
# self.register_buffer("wind_channels", wind_channels)
# self.forward_transform = RealVectorSHT(*self.img_size, grid=self.input_grid).float()
# self.inverse_transform = InverseRealSHT(*self.img_size, grid=self.output_grid).float()
def _flatten_history(self, x, y):
# flatten input
if x.dim() == 5:
b_, t_, c_, h_, w_ = x.shape
x = torch.reshape(x, (b_, t_ * c_, h_, w_))
# flatten target
if (y is not None) and (y.dim() == 5):
b_, t_, c_, h_, w_ = y.shape
y = torch.reshape(y, (b_, t_ * c_, h_, w_))
return x, y
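    # Shape example for _flatten_history (shapes only): an input of shape
    # (B, T, C, H, W) with T = n_history + 1 time levels becomes
    # (B, T * C, H, W), i.e. the history axis is folded into channels.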
def _add_static_features(self, x, y):
# we need to replicate the grid for each batch:
static = torch.tile(self.static_features, dims=(x.shape[0], 1, 1, 1))
x = torch.cat([x, static], dim=1)
return x, y
def _nchw_to_nhwc(self, x, y):
x = x.to(memory_format=torch.channels_last)
if y is not None:
y = y.to(memory_format=torch.channels_last)
return x, y
def append_history(self, x1, x2):
# without history, just return the second tensor
# with grid if requested
if self.n_history == 0:
return x2
# if grid is added, strip it off first
if self.add_static_features:
nfeat = self.static_features.shape[1]
x1 = x1[:, :-nfeat, :, :]
# this is more complicated
if x1.dim() == 4:
b_, c_, h_, w_ = x1.shape
x1 = torch.reshape(
x1, (b_, (self.n_history + 1), c_ // (self.n_history + 1), h_, w_)
)
if x2.dim() == 4:
b_, c_, h_, w_ = x2.shape
x2 = torch.reshape(x2, (b_, 1, c_, h_, w_))
# append
res = torch.cat([x1[:, 1:, :, :, :], x2], dim=1)
# flatten again
b_, t_, c_, h_, w_ = res.shape
res = torch.reshape(res, (b_, t_ * c_, h_, w_))
return res
# def _poltor_decompose(self, x, y):
# b_, c_, h_, w_ = x.shape
# xu = x[:, self.wind_channels, :, :]
# xu = xu.reshape(b_, -1, 2, h_, w_)
# xu = self.inverse_transform(self.forward_transform(xu))
# xu = xu.reshape(b_, -1, h_, w_)
# x[:, self.wind_channels, :, :] = xu
# return x, y
# forward method for additional variable fiels in x and y,
# for example zenith angle:
# def forward(self, x, y, xz, yz):
# x = torch.cat([x, xz], dim=2)
#
# return x, y
def append_channels(self, x, xc):
if x.dim() == 4:
b_, c_, h_, w_ = x.shape
x = torch.reshape(
x, (b_, (self.n_history + 1), c_ // (self.n_history + 1), h_, w_)
)
xo = torch.cat([x, xc], dim=2)
if x.dim() == 4:
xo, _ = self._flatten_history(xo, None)
return xo
def forward(self, x, y=None, xz=None, yz=None):
if xz is not None:
x = self.append_channels(x, xz)
return self._forward(x, y)
def _forward(self, x, y):
# we always want to flatten the history, even if its a singleton
x, y = self._flatten_history(x, y)
if self.add_static_features:
x, y = self._add_static_features(x, y)
# if self.poltor_decomp:
# x, y = self._poltor_decompose(x, y)
if self.transform_to_nhwc:
x, y = self._nchw_to_nhwc(x, y)
return x, y
def get_preprocessor(params):
return Preprocessor2D(params)
| earth2mip-main | earth2mip/networks/fcnv2/preprocessor.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ruamel.yaml import YAML
import logging
class YParams:
"""Yaml file parser"""
def __init__(self, yaml_filename, config_name, print_params=False):
self._yaml_filename = yaml_filename
self._config_name = config_name
self.params = {}
if print_params:
print("------------------ Configuration ------------------")
with open(yaml_filename) as _file:
for key, val in YAML().load(_file)[config_name].items():
if print_params:
print(key, val)
if val == "None":
val = None
self.params[key] = val
self.__setattr__(key, val)
if print_params:
print("---------------------------------------------------")
def __getitem__(self, key):
return self.params[key]
def __setitem__(self, key, val):
self.params[key] = val
self.__setattr__(key, val)
def __contains__(self, key):
return key in self.params
def update_params(self, config):
for key, val in config.items():
self.params[key] = val
self.__setattr__(key, val)
def log(self):
logging.info("------------------ Configuration ------------------")
logging.info("Configuration file: " + str(self._yaml_filename))
logging.info("Configuration name: " + str(self._config_name))
for key, val in self.params.items():
logging.info(str(key) + " " + str(val))
logging.info("---------------------------------------------------")
| earth2mip-main | earth2mip/networks/fcnv2/yparams.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from collections import OrderedDict
from copy import Error, deepcopy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.fft
from torch.nn.modules.container import Sequential
from torch.utils.checkpoint import checkpoint, checkpoint_sequential
from torch.cuda import amp
from typing import Optional
import math
import warnings
from torch_harmonics import *
from earth2mip.networks.fcnv2.contractions import *
from earth2mip.networks.fcnv2.activations import *
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
@torch.jit.script
def drop_path(
x: torch.Tensor, drop_prob: float = 0.0, training: bool = False
) -> torch.Tensor:
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1.0 - drop_prob
shape = (x.shape[0],) + (1,) * (
x.ndim - 1
) # work with diff dim tensors, not just 2d ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class PatchEmbed(nn.Module):
def __init__(
self, img_size=(224, 224), patch_size=(16, 16), in_chans=3, embed_dim=768
):
super(PatchEmbed, self).__init__()
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
def forward(self, x):
# gather input
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
# new: B, C, H*W
x = self.proj(x).flatten(2)
return x
class MLP(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
output_bias=True,
drop_rate=0.0,
checkpointing=False,
):
super(MLP, self).__init__()
self.checkpointing = checkpointing
out_features = out_features or in_features
hidden_features = hidden_features or in_features
fc1 = nn.Conv2d(in_features, hidden_features, 1, bias=True)
act = act_layer()
fc2 = nn.Conv2d(hidden_features, out_features, 1, bias=output_bias)
if drop_rate > 0.0:
drop = nn.Dropout(drop_rate)
self.fwd = nn.Sequential(fc1, act, drop, fc2, drop)
else:
self.fwd = nn.Sequential(fc1, act, fc2)
@torch.jit.ignore
def checkpoint_forward(self, x):
return checkpoint(self.fwd, x)
def forward(self, x):
if self.checkpointing:
return self.checkpoint_forward(x)
else:
return self.fwd(x)
class RealFFT2(nn.Module):
"""
Helper routine to wrap FFT similarly to the SHT
"""
def __init__(self, nlat, nlon, lmax=None, mmax=None):
super(RealFFT2, self).__init__()
self.nlat = nlat
self.nlon = nlon
self.lmax = lmax or self.nlat
self.mmax = mmax or self.nlon // 2 + 1
self.num_batches = 1
assert self.lmax % 2 == 0
def forward(self, x):
# do batched FFT
xs = torch.split(x, x.shape[1] // self.num_batches, dim=1)
ys = []
for xt in xs:
yt = torch.fft.rfft2(xt, dim=(-2, -1), norm="ortho")
ys.append(
torch.cat(
(
yt[..., : math.ceil(self.lmax / 2), : self.mmax],
yt[..., -math.floor(self.lmax / 2) :, : self.mmax],
),
dim=-2,
)
)
# connect
y = torch.cat(ys, dim=1).contiguous()
# y = torch.fft.rfft2(x, dim=(-2, -1), norm="ortho")
# y = torch.cat((y[..., :math.ceil(self.lmax/2), :self.mmax], y[..., -math.floor(self.lmax/2):, :self.mmax]), dim=-2)
return y
class InverseRealFFT2(nn.Module):
"""
Helper routine to wrap FFT similarly to the SHT
"""
def __init__(self, nlat, nlon, lmax=None, mmax=None):
super(InverseRealFFT2, self).__init__()
self.nlat = nlat
self.nlon = nlon
self.lmax = lmax or self.nlat
self.mmax = mmax or self.nlon // 2 + 1
self.num_batches = 1
def forward(self, x):
# do batched FFT
xs = torch.split(x, x.shape[1] // self.num_batches, dim=1)
ys = []
for xt in xs:
ys.append(
torch.fft.irfft2(
xt, dim=(-2, -1), s=(self.nlat, self.nlon), norm="ortho"
)
)
out = torch.cat(ys, dim=1).contiguous()
# out = torch.fft.irfft2(x, dim=(-2, -1), s=(self.nlat, self.nlon), norm="ortho")
return out
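# Round-trip sketch (illustrative): with default (full) modes the inverse
# transform recovers the input up to floating-point error.
#
#   fwd = RealFFT2(nlat=32, nlon=64)
#   inv = InverseRealFFT2(nlat=32, nlon=64)
#   x = torch.randn(1, 3, 32, 64)
#   torch.allclose(inv(fwd(x)), x, atol=1e-5)  # -> True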
class SpectralConv2d(nn.Module):
"""
Spectral Convolution as utilized in
"""
def __init__(
self,
forward_transform,
inverse_transform,
hidden_size,
sparsity_threshold=0.0,
hard_thresholding_fraction=1,
use_complex_kernels=False,
compression=None,
rank=0,
bias=False,
):
super(SpectralConv2d, self).__init__()
self.hidden_size = hidden_size
self.sparsity_threshold = sparsity_threshold
self.hard_thresholding_fraction = hard_thresholding_fraction
self.scale = 1 / hidden_size**2
self.contract_handle = (
compl_contract2d_fwd_c if use_complex_kernels else compl_contract2d_fwd
)
self.forward_transform = forward_transform
self.inverse_transform = inverse_transform
self.output_dims = (self.inverse_transform.nlat, self.inverse_transform.nlon)
modes_lat = self.inverse_transform.lmax
modes_lon = self.inverse_transform.mmax
self.modes_lat = int(modes_lat * self.hard_thresholding_fraction)
self.modes_lon = int(modes_lon * self.hard_thresholding_fraction)
# new simple linear layer
self.w = nn.Parameter(
self.scale
* torch.randn(
self.hidden_size, self.hidden_size, self.modes_lat, self.modes_lon, 2
)
)
# optional bias
if bias:
self.b = nn.Parameter(
self.scale * torch.randn(1, self.hidden_size, *self.output_dims)
)
def forward(self, x):
dtype = x.dtype
# x = x.float()
B, C, H, W = x.shape
with amp.autocast(enabled=False):
x = x.to(torch.float32)
x = self.forward_transform(x)
x = torch.view_as_real(x)
x = x.to(dtype)
# do spectral conv
# modes[:, :, :self.modes_lat, :self.modes_lon, :] = self.contract_handle(x[:, :, :self.modes_lat, :self.modes_lon, :], self.wh)
# modes[:, :, -self.modes_lat:, :self.modes_lon, :] = self.contract_handle(x[:, :, -self.modes_lat:, :self.modes_lon, :], self.wl)
modes = self.contract_handle(x, self.w)
# finalize
        x = F.softshrink(modes, lambd=self.sparsity_threshold)

        with amp.autocast(enabled=False):
            # x is still in view_as_real layout; convert to complex exactly once
            x = x.to(torch.float32)
            x = torch.view_as_complex(x)
            x = self.inverse_transform(x)
x = x.to(dtype)
if hasattr(self, "b"):
x = x + self.b
return x
class SpectralConvS2(nn.Module):
"""
Spectral Convolution as utilized in
"""
def __init__(
self,
forward_transform,
inverse_transform,
hidden_size,
sparsity_threshold=0.0,
use_complex_kernels=False,
compression=None,
rank=128,
bias=False,
):
super(SpectralConvS2, self).__init__()
self.hidden_size = hidden_size
self.sparsity_threshold = sparsity_threshold
self.scale = 0.02
self.forward_transform = forward_transform
self.inverse_transform = inverse_transform
self.modes_lat = self.forward_transform.lmax
self.modes_lon = self.forward_transform.mmax
assert self.inverse_transform.lmax == self.modes_lat
assert self.inverse_transform.mmax == self.modes_lon
# remember the lower triangular indices
ii, jj = torch.tril_indices(self.modes_lat, self.modes_lon)
self.register_buffer("ii", ii)
self.register_buffer("jj", jj)
if compression == "tt":
self.rank = rank
# tensortrain coefficients
g1 = nn.Parameter(self.scale * torch.randn(self.hidden_size, self.rank, 2))
g2 = nn.Parameter(
self.scale * torch.randn(self.rank, self.hidden_size, self.rank, 2)
)
g3 = nn.Parameter(self.scale * torch.randn(self.rank, len(ii), 2))
self.w = nn.ParameterList([g1, g2, g3])
self.contract_handle = (
contract_tt # if use_complex_kernels else raise(NotImplementedError)
)
else:
self.w = nn.Parameter(
self.scale * torch.randn(self.hidden_size, self.hidden_size, len(ii), 2)
)
self.contract_handle = (
compl_contract_fwd_c if use_complex_kernels else compl_contract_fwd
)
if bias:
self.b = nn.Parameter(
self.scale * torch.randn(1, self.hidden_size, *self.output_dims)
)
def forward(self, x):
dtype = x.dtype
# x = x.float()
B, C, H, W = x.shape
with amp.autocast(enabled=False):
x = x.to(torch.float32)
x = self.forward_transform(x)
x = torch.view_as_real(x)
x = x.to(dtype)
# do spectral conv
modes = torch.zeros(x.shape, device=x.device)
modes[:, :, self.ii, self.jj, :] = self.contract_handle(
x[:, :, self.ii, self.jj, :], self.w
)
# finalize
x = F.softshrink(modes, lambd=self.sparsity_threshold)
with amp.autocast(enabled=False):
x = x.to(torch.float32)
x = torch.view_as_complex(x)
x = self.inverse_transform(x)
x = x.to(dtype)
if hasattr(self, "b"):
x = x + self.b
return x
class SpectralAttention2d(nn.Module):
"""
2d Spectral Attention layer
"""
def __init__(
self,
forward_transform,
inverse_transform,
embed_dim,
sparsity_threshold=0.0,
hidden_size_factor=2,
use_complex_network=True,
use_complex_kernels=False,
complex_activation="real",
bias=False,
spectral_layers=1,
drop_rate=0.0,
):
super(SpectralAttention2d, self).__init__()
self.embed_dim = embed_dim
self.sparsity_threshold = sparsity_threshold
self.hidden_size = int(hidden_size_factor * self.embed_dim)
self.scale = 0.02
self.spectral_layers = spectral_layers
self.mul_add_handle = (
compl_muladd2d_fwd_c if use_complex_kernels else compl_muladd2d_fwd
)
self.mul_handle = compl_mul2d_fwd_c if use_complex_kernels else compl_mul2d_fwd
self.modes_lat = forward_transform.lmax
self.modes_lon = forward_transform.mmax
# only storing the forward handle to be able to call it
self.forward_transform = forward_transform.forward
self.inverse_transform = inverse_transform.forward
assert inverse_transform.lmax == self.modes_lat
assert inverse_transform.mmax == self.modes_lon
# weights
w = [self.scale * torch.randn(self.embed_dim, self.hidden_size, 2)]
# w = [self.scale * torch.randn(self.embed_dim + 2*self.embed_freqs, self.hidden_size, 2)]
# w = [self.scale * torch.randn(self.embed_dim + 4*self.embed_freqs, self.hidden_size, 2)]
for l in range(1, self.spectral_layers):
w.append(self.scale * torch.randn(self.hidden_size, self.hidden_size, 2))
self.w = nn.ParameterList(w)
if bias:
self.b = nn.ParameterList(
[
self.scale * torch.randn(self.hidden_size, 1, 2)
for _ in range(self.spectral_layers)
]
)
self.wout = nn.Parameter(
self.scale * torch.randn(self.hidden_size, self.embed_dim, 2)
)
self.drop = nn.Dropout(drop_rate) if drop_rate > 0.0 else nn.Identity()
self.activation = ComplexReLU(
mode=complex_activation, bias_shape=(self.hidden_size, 1, 1)
)
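    # note: complex weights are stored as real tensors with a trailing dim of
    # 2 holding (real, imag); mul_handle / mul_add_handle carry out the
    # corresponding complex multiplies (see contractions.py)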
def forward_mlp(self, xr):
for l in range(self.spectral_layers):
if hasattr(self, "b"):
xr = self.mul_add_handle(
xr, self.w[l].to(xr.dtype), self.b[l].to(xr.dtype)
)
else:
xr = self.mul_handle(xr, self.w[l].to(xr.dtype))
xr = torch.view_as_complex(xr)
xr = self.activation(xr)
xr = self.drop(xr)
xr = torch.view_as_real(xr)
xr = self.mul_handle(xr, self.wout)
return xr
def forward(self, x):
dtype = x.dtype
# x = x.to(torch.float32)
# FWD transform
with amp.autocast(enabled=False):
x = x.to(torch.float32)
x = self.forward_transform(x)
x = torch.view_as_real(x)
# MLP
x = self.forward_mlp(x)
# BWD transform
with amp.autocast(enabled=False):
x = torch.view_as_complex(x)
x = self.inverse_transform(x)
x = x.to(dtype)
return x
class SpectralAttentionS2(nn.Module):
"""
    Geometrical Spectral Attention layer for the sphere (S2)
"""
def __init__(
self,
forward_transform,
inverse_transform,
embed_dim,
sparsity_threshold=0.0,
hidden_size_factor=2,
use_complex_network=True,
use_complex_kernels=False,
complex_activation="real",
bias=False,
spectral_layers=1,
drop_rate=0.0,
):
super(SpectralAttentionS2, self).__init__()
self.embed_dim = embed_dim
self.sparsity_threshold = sparsity_threshold
self.hidden_size = int(hidden_size_factor * self.embed_dim)
self.scale = 0.02
# self.mul_add_handle = compl_muladd1d_fwd_c if use_complex_kernels else compl_muladd1d_fwd
self.mul_add_handle = (
compl_muladd2d_fwd_c if use_complex_kernels else compl_muladd2d_fwd
)
# self.mul_handle = compl_mul1d_fwd_c if use_complex_kernels else compl_mul1d_fwd
self.mul_handle = compl_mul2d_fwd_c if use_complex_kernels else compl_mul2d_fwd
self.spectral_layers = spectral_layers
self.modes_lat = forward_transform.lmax
self.modes_lon = forward_transform.mmax
# only storing the forward handle to be able to call it
self.forward_transform = forward_transform.forward
self.inverse_transform = inverse_transform.forward
assert inverse_transform.lmax == self.modes_lat
assert inverse_transform.mmax == self.modes_lon
# weights
w = [self.scale * torch.randn(self.embed_dim, self.hidden_size, 2)]
# w = [self.scale * torch.randn(self.embed_dim + 4*self.embed_freqs, self.hidden_size, 2)]
for l in range(1, self.spectral_layers):
w.append(self.scale * torch.randn(self.hidden_size, self.hidden_size, 2))
self.w = nn.ParameterList(w)
if bias:
self.b = nn.ParameterList(
[
self.scale * torch.randn(2 * self.hidden_size, 1, 1, 2)
for _ in range(self.spectral_layers)
]
)
self.wout = nn.Parameter(
self.scale * torch.randn(self.hidden_size, self.embed_dim, 2)
)
self.drop = nn.Dropout(drop_rate) if drop_rate > 0.0 else nn.Identity()
self.activation = ComplexReLU(
mode=complex_activation, bias_shape=(self.hidden_size, 1, 1)
)
def forward_mlp(self, xr):
for l in range(self.spectral_layers):
if hasattr(self, "b"):
xr = self.mul_add_handle(
xr, self.w[l].to(xr.dtype), self.b[l].to(xr.dtype)
)
else:
xr = self.mul_handle(xr, self.w[l].to(xr.dtype))
xr = torch.view_as_complex(xr)
xr = self.activation(xr)
xr = self.drop(xr)
xr = torch.view_as_real(xr)
# final MLP
xr = self.mul_handle(xr, self.wout)
return xr
def forward(self, x):
dtype = x.dtype
# x = x.to(torch.float32)
# FWD transform
with amp.autocast(enabled=False):
x = x.to(torch.float32)
x = self.forward_transform(x)
x = torch.view_as_real(x)
# MLP
x = self.forward_mlp(x)
# BWD transform
with amp.autocast(enabled=False):
x = torch.view_as_complex(x)
x = self.inverse_transform(x)
x = x.to(dtype)
return x
| earth2mip-main | earth2mip/networks/fcnv2/layers.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from apex.normalization import FusedLayerNorm
# helpers
from earth2mip.networks.fcnv2.layers import trunc_normal_, DropPath, MLP
from earth2mip.networks.fcnv2.layers import SpectralAttentionS2, SpectralConvS2
from earth2mip.networks.fcnv2.layers import SpectralAttention2d, SpectralConv2d
import torch_harmonics as harmonics
# to fake the sht module with ffts
from earth2mip.networks.fcnv2.layers import RealFFT2, InverseRealFFT2
from earth2mip.networks.fcnv2.contractions import *
# from earth2mip.networks.fcnv2 import activations
from earth2mip.networks.fcnv2.activations import *
class SpectralFilterLayer(nn.Module):
def __init__(
self,
forward_transform,
inverse_transform,
embed_dim,
filter_type="linear",
sparsity_threshold=0.0,
use_complex_kernels=True,
hidden_size_factor=2,
compression=None,
rank=128,
complex_network=True,
complex_activation="real",
spectral_layers=1,
drop_rate=0.0,
):
super(SpectralFilterLayer, self).__init__()
if filter_type == "non-linear" and isinstance(
forward_transform, harmonics.RealSHT
):
self.filter = SpectralAttentionS2(
forward_transform,
inverse_transform,
embed_dim,
sparsity_threshold,
use_complex_network=complex_network,
use_complex_kernels=use_complex_kernels,
hidden_size_factor=hidden_size_factor,
complex_activation=complex_activation,
spectral_layers=spectral_layers,
drop_rate=drop_rate,
bias=False,
)
elif filter_type == "non-linear" and isinstance(forward_transform, RealFFT2):
self.filter = SpectralAttention2d(
forward_transform,
inverse_transform,
embed_dim,
sparsity_threshold,
use_complex_kernels=use_complex_kernels,
hidden_size_factor=hidden_size_factor,
complex_activation=complex_activation,
spectral_layers=spectral_layers,
drop_rate=drop_rate,
bias=False,
)
elif filter_type == "linear" and isinstance(forward_transform, RealSHT):
self.filter = SpectralConvS2(
forward_transform,
inverse_transform,
embed_dim,
sparsity_threshold,
use_complex_kernels=use_complex_kernels,
compression=compression,
rank=rank,
bias=False,
)
elif filter_type == "linear" and isinstance(forward_transform, RealFFT2):
self.filter = SpectralConv2d(
forward_transform,
inverse_transform,
embed_dim,
sparsity_threshold,
use_complex_kernels=use_complex_kernels,
compression=compression,
rank=rank,
bias=False,
)
        else:
            raise NotImplementedError(f"Unknown filter_type: {filter_type}")
def forward(self, x):
return self.filter(x)
class FourierNeuralOperatorBlock(nn.Module):
def __init__(
self,
forward_transform,
inverse_transform,
embed_dim,
filter_type="linear",
mlp_ratio=2.0,
drop_rate=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=(nn.LayerNorm, nn.LayerNorm),
# num_blocks = 8,
sparsity_threshold=0.0,
use_complex_kernels=True,
compression=None,
rank=128,
inner_skip="linear",
        outer_skip=None,  # None, "linear" or "identity"
concat_skip=False,
mlp_mode="none",
complex_network=True,
complex_activation="real",
spectral_layers=1,
checkpointing=False,
):
super(FourierNeuralOperatorBlock, self).__init__()
# norm layer
self.norm0 = norm_layer[0]() # ((h,w))
# convolution layer
self.filter_layer = SpectralFilterLayer(
forward_transform,
inverse_transform,
embed_dim,
filter_type,
sparsity_threshold,
use_complex_kernels=use_complex_kernels,
hidden_size_factor=mlp_ratio,
compression=compression,
rank=rank,
complex_network=complex_network,
complex_activation=complex_activation,
spectral_layers=spectral_layers,
drop_rate=drop_rate,
)
if inner_skip == "linear":
self.inner_skip = nn.Conv2d(embed_dim, embed_dim, 1, 1)
elif inner_skip == "identity":
self.inner_skip = nn.Identity()
self.concat_skip = concat_skip
if concat_skip and inner_skip is not None:
self.inner_skip_conv = nn.Conv2d(2 * embed_dim, embed_dim, 1, bias=False)
if filter_type == "linear":
self.act_layer = act_layer()
# dropout
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
# norm layer
self.norm1 = norm_layer[1]() # ((h,w))
if mlp_mode != "none":
mlp_hidden_dim = int(embed_dim * mlp_ratio)
self.mlp = MLP(
in_features=embed_dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop_rate=drop_rate,
checkpointing=checkpointing,
)
if outer_skip == "linear":
self.outer_skip = nn.Conv2d(embed_dim, embed_dim, 1, 1)
elif outer_skip == "identity":
self.outer_skip = nn.Identity()
if concat_skip and outer_skip is not None:
self.outer_skip_conv = nn.Conv2d(2 * embed_dim, embed_dim, 1, bias=False)
def forward(self, x):
residual = x
x = self.norm0(x)
x = self.filter_layer(x).contiguous()
if hasattr(self, "inner_skip"):
if self.concat_skip:
x = torch.cat((x, self.inner_skip(residual)), dim=1)
x = self.inner_skip_conv(x)
else:
x = x + self.inner_skip(residual)
if hasattr(self, "act_layer"):
x = self.act_layer(x)
x = self.norm1(x)
if hasattr(self, "mlp"):
x = self.mlp(x)
x = self.drop_path(x)
if hasattr(self, "outer_skip"):
if self.concat_skip:
x = torch.cat((x, self.outer_skip(residual)), dim=1)
x = self.outer_skip_conv(x)
else:
x = x + self.outer_skip(residual)
return x
# @torch.jit.ignore
# def checkpoint_forward(self, x):
# return checkpoint(self._forward, x)
# def forward(self, x):
# if self.checkpointing:
# return self.checkpoint_forward(x)
# else:
# return self._forward(x)
class FourierNeuralOperatorNet(nn.Module):
def __init__(
self,
params,
spectral_transform="sht",
filter_type="non-linear",
img_size=(721, 1440),
scale_factor=16,
in_chans=2,
out_chans=2,
embed_dim=256,
num_layers=12,
mlp_mode="none",
mlp_ratio=2.0,
drop_rate=0.0,
drop_path_rate=0.0,
num_blocks=16,
sparsity_threshold=0.0,
normalization_layer="instance_norm",
hard_thresholding_fraction=1.0,
use_complex_kernels=True,
big_skip=True,
compression=None,
rank=128,
complex_network=True,
complex_activation="real",
spectral_layers=3,
laplace_weighting=False,
checkpointing=False,
):
super(FourierNeuralOperatorNet, self).__init__()
self.params = params
self.spectral_transform = (
params.spectral_transform
if hasattr(params, "spectral_transform")
else spectral_transform
)
self.filter_type = (
params.filter_type if hasattr(params, "filter_type") else filter_type
)
self.img_size = (params.img_crop_shape_x, params.img_crop_shape_y)
self.scale_factor = (
params.scale_factor if hasattr(params, "scale_factor") else scale_factor
)
self.in_chans = (
params.N_in_channels if hasattr(params, "N_in_channels") else in_chans
)
self.out_chans = (
params.N_out_channels if hasattr(params, "N_out_channels") else out_chans
)
self.embed_dim = self.num_features = (
params.embed_dim if hasattr(params, "embed_dim") else embed_dim
)
self.num_layers = (
params.num_layers if hasattr(params, "num_layers") else num_layers
)
self.num_blocks = (
params.num_blocks if hasattr(params, "num_blocks") else num_blocks
)
self.hard_thresholding_fraction = (
params.hard_thresholding_fraction
if hasattr(params, "hard_thresholding_fraction")
else hard_thresholding_fraction
)
self.normalization_layer = (
params.normalization_layer
if hasattr(params, "normalization_layer")
else normalization_layer
)
self.mlp_mode = params.mlp_mode if hasattr(params, "mlp_mode") else mlp_mode
self.big_skip = params.big_skip if hasattr(params, "big_skip") else big_skip
self.compression = (
params.compression if hasattr(params, "compression") else compression
)
self.rank = params.rank if hasattr(params, "rank") else rank
self.complex_network = (
params.complex_network
if hasattr(params, "complex_network")
else complex_network
)
self.complex_activation = (
params.complex_activation
if hasattr(params, "complex_activation")
else complex_activation
)
self.spectral_layers = (
params.spectral_layers
if hasattr(params, "spectral_layers")
else spectral_layers
)
self.laplace_weighting = (
params.laplace_weighting
if hasattr(params, "laplace_weighting")
else laplace_weighting
)
self.checkpointing = (
params.checkpointing if hasattr(params, "checkpointing") else checkpointing
)
# compute downsampled image size
self.h = self.img_size[0] // self.scale_factor
self.w = self.img_size[1] // self.scale_factor
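        # e.g. with the default img_size=(721, 1440) and scale_factor=16 this
        # gives h = 45 and w = 90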
# dropout
self.pos_drop = nn.Dropout(p=drop_rate) if drop_rate > 0.0 else nn.Identity()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, self.num_layers)]
# pick norm layer
if self.normalization_layer == "layer_norm":
norm_layer0 = partial(
nn.LayerNorm,
normalized_shape=(self.img_size[0], self.img_size[1]),
eps=1e-6,
)
norm_layer1 = partial(
nn.LayerNorm, normalized_shape=(self.h, self.w), eps=1e-6
)
elif self.normalization_layer == "instance_norm":
norm_layer0 = partial(
nn.InstanceNorm2d,
num_features=self.embed_dim,
eps=1e-6,
affine=True,
track_running_stats=False,
)
norm_layer1 = norm_layer0
# elif self.normalization_layer == "batch_norm":
# norm_layer = partial(nn.InstanceNorm2d, num_features=self.embed_dim, eps=1e-6, affine=True, track_running_stats=False)
else:
raise NotImplementedError(
f"Error, normalization {self.normalization_layer} not implemented."
)
# ENCODER is just an MLP?
encoder_hidden_dim = self.embed_dim
encoder_act = nn.GELU
# encoder0 = nn.Conv2d(self.in_chans, encoder_hidden_dim, 1, bias=True)
# encoder1 = nn.Conv2d(encoder_hidden_dim, self.embed_dim, 1, bias=False)
# encoder_act = nn.GELU()
# self.encoder = nn.Sequential(encoder0, encoder_act, encoder1, norm_layer0())
self.encoder = MLP(
in_features=self.in_chans,
hidden_features=encoder_hidden_dim,
out_features=self.embed_dim,
output_bias=False,
act_layer=encoder_act,
drop_rate=0.0,
checkpointing=checkpointing,
)
# self.input_encoding = nn.Conv2d(self.in_chans, self.embed_dim, 1)
# self.pos_embed = nn.Parameter(torch.zeros(1, self.pos_embed_dim, self.img_size[0], self.img_size[1]))
self.pos_embed = nn.Parameter(
torch.zeros(1, self.embed_dim, self.img_size[0], self.img_size[1])
)
# prepare the SHT
modes_lat = int(self.h * self.hard_thresholding_fraction)
modes_lon = int((self.w // 2 + 1) * self.hard_thresholding_fraction)
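        # e.g. with h=45, w=90 and hard_thresholding_fraction=1.0 this gives
        # modes_lat = 45 and modes_lon = 46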
if self.spectral_transform == "sht":
self.trans_down = harmonics.RealSHT(
*self.img_size, lmax=modes_lat, mmax=modes_lon, grid="equiangular"
).float()
self.itrans_up = harmonics.InverseRealSHT(
*self.img_size, lmax=modes_lat, mmax=modes_lon, grid="equiangular"
).float()
self.trans = harmonics.RealSHT(
self.h, self.w, lmax=modes_lat, mmax=modes_lon, grid="legendre-gauss"
).float()
self.itrans = harmonics.InverseRealSHT(
self.h, self.w, lmax=modes_lat, mmax=modes_lon, grid="legendre-gauss"
).float()
# we introduce some ad-hoc rescaling of the weights to aid gradient computation:
sht_rescaling_factor = 1e5
self.trans_down.weights = self.trans_down.weights * sht_rescaling_factor
self.itrans_up.pct = self.itrans_up.pct / sht_rescaling_factor
self.trans.weights = self.trans.weights * sht_rescaling_factor
self.itrans.pct = self.itrans.pct / sht_rescaling_factor
elif self.spectral_transform == "fft":
self.trans_down = RealFFT2(
*self.img_size, lmax=modes_lat, mmax=modes_lon
).float()
self.itrans_up = InverseRealFFT2(
*self.img_size, lmax=modes_lat, mmax=modes_lon
).float()
self.trans = RealFFT2(
self.h, self.w, lmax=modes_lat, mmax=modes_lon
).float()
self.itrans = InverseRealFFT2(
self.h, self.w, lmax=modes_lat, mmax=modes_lon
).float()
        else:
            raise ValueError(f"Unknown spectral transform: {self.spectral_transform}")
self.blocks = nn.ModuleList([])
for i in range(self.num_layers):
first_layer = i == 0
last_layer = i == self.num_layers - 1
forward_transform = self.trans_down if first_layer else self.trans
inverse_transform = self.itrans_up if last_layer else self.itrans
inner_skip = "linear" if 0 < i < self.num_layers - 1 else None
outer_skip = "identity" if 0 < i < self.num_layers - 1 else None
mlp_mode = self.mlp_mode if not last_layer else "none"
if first_layer:
norm_layer = (norm_layer0, norm_layer1)
elif last_layer:
norm_layer = (norm_layer1, norm_layer0)
else:
norm_layer = (norm_layer1, norm_layer1)
block = FourierNeuralOperatorBlock(
forward_transform,
inverse_transform,
self.embed_dim,
filter_type=self.filter_type,
mlp_ratio=mlp_ratio,
drop_rate=drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
sparsity_threshold=sparsity_threshold,
use_complex_kernels=use_complex_kernels,
inner_skip=inner_skip,
outer_skip=outer_skip,
mlp_mode=mlp_mode,
compression=self.compression,
rank=self.rank,
complex_network=self.complex_network,
complex_activation=self.complex_activation,
spectral_layers=self.spectral_layers,
checkpointing=self.checkpointing,
)
self.blocks.append(block)
# DECODER is also an MLP
decoder_hidden_dim = self.embed_dim
decoder_act = nn.GELU
# decoder0 = nn.Conv2d(self.embed_dim + self.big_skip*self.in_chans, decoder_hidden_dim, 1, bias=True)
# decoder1 = nn.Conv2d(decoder_hidden_dim, self.out_chans, 1, bias=False)
# decoder_act = nn.GELU()
# self.decoder = nn.Sequential(decoder0, decoder_act, decoder1)
self.decoder = MLP(
in_features=self.embed_dim + self.big_skip * self.in_chans,
hidden_features=decoder_hidden_dim,
out_features=self.out_chans,
output_bias=False,
act_layer=decoder_act,
drop_rate=0.0,
checkpointing=checkpointing,
)
trunc_normal_(self.pos_embed, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
# nn.init.normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm) or isinstance(m, FusedLayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {"pos_embed", "cls_token"}
def forward_features(self, x):
# x = x + self.pos_embed
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
return x
def forward(self, x):
# save big skip
if self.big_skip:
residual = x
# encoder
x = self.encoder(x)
# do positional embedding
x = x + self.pos_embed
# forward features
x = self.forward_features(x)
# concatenate the big skip
if self.big_skip:
x = torch.cat((x, residual), dim=1)
# decoder
x = self.decoder(x)
return x
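# Hedged construction sketch (not from the original source; `params` only
# needs the attributes accessed unconditionally above, faked here with a
# plain namespace):
#
#   from types import SimpleNamespace
#   params = SimpleNamespace(img_crop_shape_x=721, img_crop_shape_y=1440)
#   model = FourierNeuralOperatorNet(params, in_chans=73, out_chans=73)
#   y = model(torch.randn(1, 73, 721, 1440))  # y: (1, 73, 721, 1440)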
| earth2mip-main | earth2mip/networks/fcnv2/sfnonet.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
def config_logger(log_level=logging.INFO):
logging.basicConfig(format=_format, level=log_level)
def log_to_file(
logger_name=None, log_level=logging.INFO, log_filename="tensorflow.log"
):
    log_dir = os.path.dirname(log_filename)
    if log_dir and not os.path.exists(log_dir):
        os.makedirs(log_dir)
if logger_name is not None:
log = logging.getLogger(logger_name)
else:
log = logging.getLogger()
fh = logging.FileHandler(log_filename)
fh.setLevel(log_level)
fh.setFormatter(logging.Formatter(_format))
log.addHandler(fh)
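# e.g. log_to_file(log_filename="logs/train.log") attaches a file handler to
# the root logger without removing any existing console handlers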
def log_versions():
import torch
import subprocess
logging.info("--------------- Versions ---------------")
try:
logging.info(
"git branch: " + str(subprocess.check_output(["git", "branch"]).strip())
)
logging.info(
"git hash: "
+ str(subprocess.check_output(["git", "rev-parse", "HEAD"]).strip())
)
    except Exception:
        # git may be unavailable or this may not be a git checkout
        pass
logging.info("Torch: " + str(torch.__version__))
logging.info("----------------------------------------")
class disable_logging(object):
def __init__(self, level=logging.ERROR):
logging.disable(level=level)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
logging.disable(level=logging.NOTSET)
| earth2mip-main | earth2mip/networks/fcnv2/logging_utils.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
# Helper routines for FNOs
@torch.jit.script
def compl_contract2d_fwd(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
tmp = torch.einsum("bixys,kixyr->srbkxy", a, b)
res = torch.stack(
[tmp[0, 0, ...] - tmp[1, 1, ...], tmp[1, 0, ...] + tmp[0, 1, ...]], dim=-1
)
return res
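# the stack above implements (a_r + i*a_i) * (b_r + i*b_i) on real tensors:
# the real part is a_r*b_r - a_i*b_i and the imaginary part is
# a_i*b_r + a_r*b_i, with the last axis of size 2 holding (real, imag)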
@torch.jit.script
def compl_contract2d_fwd_c(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
res = torch.einsum("bixy,kixy->bkxy", ac, bc)
return torch.view_as_real(res)
@torch.jit.script
def compl_contract_fwd(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
tmp = torch.einsum("bins,kinr->srbkn", a, b)
res = torch.stack(
[tmp[0, 0, ...] - tmp[1, 1, ...], tmp[1, 0, ...] + tmp[0, 1, ...]], dim=-1
)
return res
@torch.jit.script
def compl_contract_fwd_c(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
res = torch.einsum("bin,kin->bkn", ac, bc)
return torch.view_as_real(res)
@torch.jit.script
def compl_ttc1_c_fwd(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
res = torch.einsum("jt,bct->jbct", ac, bc)
return torch.view_as_real(res)
@torch.jit.script
def compl_ttc2_c_fwd(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
cc = torch.view_as_complex(c)
res = torch.einsum("oi,icj,jbct->bot", ac, bc, cc)
return torch.view_as_real(res)
def contract_tt(x, w):
y = compl_ttc1_c_fwd(w[2], x)
return compl_ttc2_c_fwd(w[0], w[1], y)
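# contract_tt applies a tensor-train factorization of the full weight: instead
# of a dense (out, in, n) complex tensor, three smaller cores g1 (out, rank),
# g2 (rank, in, rank) and g3 (rank, n) -- each stored with a trailing
# (real, imag) dim -- are contracted with the input, reducing the parameter
# count when rank << hidden_size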
# Helper routines for spherical MLPs
@torch.jit.script
def compl_mul1d_fwd(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
tmp = torch.einsum("bixs,ior->srbox", a, b)
res = torch.stack(
[tmp[0, 0, ...] - tmp[1, 1, ...], tmp[1, 0, ...] + tmp[0, 1, ...]], dim=-1
)
return res
@torch.jit.script
def compl_mul1d_fwd_c(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
resc = torch.einsum("bix,io->box", ac, bc)
res = torch.view_as_real(resc)
return res
@torch.jit.script
def compl_muladd1d_fwd(
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:
res = compl_mul1d_fwd(a, b) + c
return res
@torch.jit.script
def compl_muladd1d_fwd_c(
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:
tmpcc = torch.view_as_complex(compl_mul1d_fwd_c(a, b))
cc = torch.view_as_complex(c)
return torch.view_as_real(tmpcc + cc)
# for the real-valued case:
@torch.jit.script
def compl_mul1d_fwd_r(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
res = torch.einsum("bix,io->box", a, b)
return res
@torch.jit.script
def compl_muladd1d_fwd_r(
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:
tmp = compl_mul1d_fwd_r(a, b)
return tmp + c
# Helper routines for FFT MLPs
@torch.jit.script
def compl_mul2d_fwd(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
tmp = torch.einsum("bixys,ior->srboxy", a, b)
res = torch.stack(
[tmp[0, 0, ...] - tmp[1, 1, ...], tmp[1, 0, ...] + tmp[0, 1, ...]], dim=-1
)
return res
@torch.jit.script
def compl_mul2d_fwd_c(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
resc = torch.einsum("bixy,io->boxy", ac, bc)
res = torch.view_as_real(resc)
return res
@torch.jit.script
def compl_muladd2d_fwd(
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:
res = compl_mul2d_fwd(a, b) + c
return res
@torch.jit.script
def compl_muladd2d_fwd_c(
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:
tmpcc = torch.view_as_complex(compl_mul2d_fwd_c(a, b))
cc = torch.view_as_complex(c)
return torch.view_as_real(tmpcc + cc)
# for the real-valued case:
@torch.jit.script
def compl_mul2d_fwd_r(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
res = torch.einsum("bixy,io->boxy", a, b)
return res
@torch.jit.script
def compl_muladd2d_fwd_r(
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:
    tmp = compl_mul2d_fwd_r(a, b)
    return tmp + c
| earth2mip-main | earth2mip/networks/fcnv2/contractions.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| earth2mip-main | earth2mip/lagged_ensembles/__init__.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed
from collections import deque
async def yield_lagged_ensembles(
*,
observations,
forecast,
lags: int = 2,
n: int = 10,
):
"""Yield centered lagged ensembles
The forecast array has shape (len(observations), n)
The ensemble consist of runs initialized with an offset of (-lags, ..., 0,
...lags). The ensemble size is therefore ``2*lags + =`` for points within
the interior of the array.
Supports running in parallel using the ``rank`` and ``world_size`` flags
"""
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
rank = 0
world_size = 1
# example one. no garbage collection
nt = len(observations)
assert n < nt
# work trackers that will be used to determine when an ensemble is finished,
# and ensure that all data is processed
finished = set()
ensemble = {}
obs_buffer = deque([])
for i in range(n + world_size):
obs_buffer.append(await observations[i])
n_iter = int(nt // world_size)
assert nt % world_size == 0
buffers = None
for i0 in range(n_iter):
for k in range(world_size):
i = world_size * i0 + k
if i + n + 1 < nt:
obs_buffer.append(await observations[i + n + 1])
i = world_size * i0 + rank
nsteps = min(nt - world_size * i0 - 1, n)
lead_time = -1
async for y in forecast[i]:
lead_time += 1
j = i + lead_time
if lead_time > nsteps:
break
if torch.distributed.is_initialized():
buffers = [torch.empty_like(y) for _ in range(world_size)]
# TODO only gather from needed ranks (i - m)
torch.distributed.all_gather(buffers, y)
if y.device != torch.device("cpu"):
cpu_buffers = [
torch.empty_like(b, device="cpu", pin_memory=True)
for b in buffers
]
for cpu, gpu in zip(cpu_buffers, buffers):
cpu.copy_(gpu, non_blocking=True)
else:
cpu_buffers = buffers
else:
cpu_buffers = [y]
lead_time = j - i
# need to loop over ranks to ensure that number of iterations
# per rank is the same
for r in range(world_size):
for m in range(-lags, lags + 1):
ii = i0 * world_size + r
jj = ii + lead_time
if jj >= nt:
break
# Should this rank process the data or not?
i_owner = jj - lead_time - m
if i_owner % world_size != rank:
continue
k = (jj, lead_time + m)
store_me = cpu_buffers[r]
# ensemble[k][m]
ensemble.setdefault(k, {})[m] = store_me
# There are two options for this finishing criteria
# 1. if it work is not done in the next iteration, then we know
# we are done this would be implemented by
#
# if not done(j, lead_time + m, i + 1):
#
# 2. if the ensemble has the expected number of members
# 2 seems easier to parallelize and less subject to the
# looping we take, so is what we do here:
expected = num(n=n, ell=lead_time + m, j=jj, L=lags)
if jj < nt and len(ensemble[k]) == expected:
# sanity check that a single ensemble is not
# processed multiple times
if k in finished:
assert False, k
finished.add(k)
# need to synchronize to ensure cpu buffers are filled
# before yielding the complete ensemble
if torch.cuda.is_available():
torch.cuda.synchronize()
yield k, ensemble.pop(k), await observations[jj]
for _ in range(world_size):
obs_buffer.popleft()
assert not ensemble, len(ensemble)
def num(n, ell, j, L):
a = max(ell - j, ell - n, -L)
b = min(ell, L)
return b - a + 1
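# e.g. for an interior point (j >= ell + L, n >= ell + L, and ell >= L):
# a = -L and b = L, so num(...) == 2*L + 1, matching the ensemble size stated
# in the docstring of yield_lagged_ensembles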
def done(j, ell, i, lags, n):
"""Unused helper function wich says if lag ell and valid_time j are written
to in a given iteration `i` of the loop in lagged_average_simple
This is one way to implement the done criteria which is less easily
parallelized. I am leaving it in the code for educational value only.
"""
a = j - i - lags <= ell <= j - i + lags
b = n >= j - i >= 0
return a & b
| earth2mip-main | earth2mip/lagged_ensembles/core.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# SPDX-FileCopyrightText: All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import concurrent.futures
import datetime
import logging
from functools import partial
import argparse
import cupy
import pandas as pd
import torch
import xarray
from earth2mip import forecasts, _cli_utils
from earth2mip.initial_conditions.era5 import HDF5DataSource
from earth2mip.datasets.hindcast import open_forecast
from earth2mip.lagged_ensembles import core
from earth2mip.xarray import metrics
from earth2mip.xarray.utils import concat_dict, to_cupy
from earth2mip import config
# patch the proper scoring imports
use_cupy = True
if use_cupy:
import cupy as np
else:
import numpy as np
logger = logging.getLogger(__name__)
async def lagged_average_simple(
*,
observations,
run_forecast,
score,
lags=2,
n=10,
):
scores = {}
async for (j, l), ensemble, obs in core.yield_lagged_ensembles(
observations=observations,
forecast=run_forecast,
lags=lags,
n=n,
):
scores.setdefault(j, {})[l] = score(ensemble, obs)
return scores
def get_times_2018(nt):
times = [
datetime.datetime(2018, 1, 1) + k * datetime.timedelta(hours=12)
for k in range(nt)
]
return times
class Observations:
def __init__(self, times, pool, data_source, device=None):
self.pool = pool
self.device = device
self.times = times
self.data_source = data_source
def _get_time(self, time):
return self.data_source[time]
async def __getitem__(self, i):
"""
Returns (channel, lat, lon)
"""
time = self.times[i]
logger.debug("Loading %s", time)
loop = asyncio.get_running_loop()
return await loop.run_in_executor(self.pool, self._get_time, time)
def __len__(self):
return len(self.times)
def score(channel_names, ensemble, obs):
"""
Args:
        ensemble: dict mapping lag -> tensor of shape (c, ...)
        obs: (c, ...)
    Returns:
        (c,)
"""
import dask
dask.config.set(scheduler="single-threaded")
obs = to_cupy(obs.drop(["time", "channel"])).assign_coords(
time=obs.time, channel=obs.channel
)
lat = to_cupy(obs.lat)
out = {}
ens = torch.stack(list(ensemble.values()), dim=0)
coords = {**obs.coords}
coords["channel"] = channel_names
ensemble_xr = xarray.DataArray(
np.asarray(ens), dims=["ensemble", *obs.dims], coords=coords
)
# add ensemble dimension
# the convention is that ensemble member 0 is the deterministic (i.e. best)
# one
ensemble_xr = ensemble_xr.assign_coords(
ensemble=xarray.Variable(["ensemble"], list(ensemble))
)
ensemble_xr = ensemble_xr.chunk(lat=32)
obs = obs.chunk(lat=32)
# need to chunk to avoid OOMs
pred_align, obs_align = xarray.align(ensemble_xr, obs)
with metrics.properscoring_with_cupy():
out = metrics.score_ensemble(pred_align, obs_align, lat=lat)
mempool = cupy.get_default_memory_pool()
logger.debug(
"bytes used: %0.1f\ttotal: %0.1f",
mempool.used_bytes() / 2**30,
mempool.total_bytes() / 2**30,
)
return out
def collect_score(score, times) -> pd.DataFrame:
"""traverse the collected scores and collate into a data frame
score[j][l][series] is a DataArray of `series` for valid index `j` and lead
time `l`
"""
# save data with these columns
# time,valid_time,model,series,t850,u10m,v10m,t2m,z500,initial_time
dt = times[1] - times[0]
flat = {}
for j in score:
for ell in score[j]:
for series in score[j][ell]:
arr = score[j][ell][series]
arr = arr.copy()
try:
# is a cupy array
arr.data = arr.data.get()
except AttributeError:
# otherwise do nothing
pass
arr = arr.squeeze()
flat[(times[j] - ell * dt, ell * dt, series)] = arr
# idx = pd.MultiIndex.from_tuples(list(flat.keys()), names=['initial_time', 'time'])
combined = concat_dict(flat, key_names=["initial_time", "time", "series"])
df = combined.to_dataset(dim="channel").to_dataframe().reset_index()
df["valid_time"] = df["initial_time"] + df["time"]
del df["time"]
del df["key"]
return df
def main(args):
"""Run a lagged ensemble scoring
Can be run against either a fcn model (--model), a forecast directory as
output by earth2mip.time_collection (--forecast_dir), persistence forecast
(--persistence), or deterministic IFS (--ifs).
Saves data as csv files (1 per rank).
Examples:
torchrun --nproc_per_node 2 --nnodes 1 -m earth2mip.lagged_ensembles --model sfno_73ch --inits 10 --leads 5 --lags 4
""" # noqa
times = list(get_times_2018(args.inits))
FIELDS = ["u10m", "v10m", "z500", "t2m", "t850"]
pool = concurrent.futures.ThreadPoolExecutor()
data_source = HDF5DataSource.from_path(args.data or config.ERA5_HDF5_73)
obs = Observations(times=times, pool=pool, data_source=data_source, device="cpu")
try:
torch.distributed.init_process_group(backend="nccl", init_method="env://")
except ValueError:
pass
rank = torch.distributed.get_rank() if torch.distributed.is_initialized() else 0
device = torch.device("cuda", rank % torch.cuda.device_count())
if args.model:
timeloop = _cli_utils.model_from_args(args, device=device)
run_forecast = forecasts.TimeLoopForecast(
timeloop, times=times, observations=obs
)
elif args.forecast_dir:
run_forecast = forecasts.XarrayForecast(
open_forecast(args.forecast_dir, group="mean.zarr"),
times=times,
fields=FIELDS,
)
elif args.ifs:
# TODO fix this import error
# TODO convert ifs to zarr so we don't need custom code
from earth2mip.datasets.deterministic_ifs import open_deterministic_ifs
run_forecast = forecasts.XarrayForecast(open_deterministic_ifs(args.ifs))
elif args.persistence:
run_forecast = forecasts.Persistence
else:
raise ValueError(
"need to provide one of --persistence --ifs --forecast-dir or --model."
)
if rank == 0:
logging.basicConfig(level=logging.INFO)
scores_future = lagged_average_simple(
observations=obs,
score=partial(score, run_forecast.channel_names),
run_forecast=run_forecast,
lags=args.lags,
n=args.leads,
)
with torch.cuda.device(device):
scores = asyncio.run(scores_future)
df = collect_score(scores, times)
path = f"{args.output}.{rank:03d}.csv"
print(f"saving scores to {path}")
# remove headers from other ranks so it is easy to cat the files
df.to_csv(path, header=(rank == 0))
def parse_args():
parser = argparse.ArgumentParser(
description="Your CLI description here", usage=main.__doc__
)
parser.add_argument("--data", type=str, help="Path to data file")
_cli_utils.add_model_args(parser, required=False)
parser.add_argument("--forecast_dir", type=str, help="Path to forecast directory")
parser.add_argument("--ifs", type=str, default="", help="IFS parameter")
parser.add_argument("--persistence", action="store_true", help="Enable persistence")
parser.add_argument("--inits", type=int, default=10, help="Number of inits")
parser.add_argument("--lags", type=int, default=4, help="Number of lags")
parser.add_argument("--leads", type=int, default=54, help="Number of leads")
parser.add_argument("--output", type=str, default=".", help="Output directory")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
main(args)
| earth2mip-main | earth2mip/lagged_ensembles/__main__.py |