| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
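Each row pairs a source file with a fill-in-the-middle (FIM) split of its contents: concatenating prefix, middle and suffix recovers the original text, and fim_type records what kind of span was masked (identifier_name, identifier_body, conditional_block or random_line_split). A minimal sketch of how a row recombines; the row shown here is illustrative, not taken from the data:

```python
# Illustrative only: rebuild the original source from one FIM row.
row = {
    "file_name": "example.py",
    "prefix": "def add(a, b):\n    return ",
    "middle": "a + b",
    "suffix": "\n",
    "fim_type": "identifier_body",
}
original = row["prefix"] + row["middle"] + row["suffix"]
assert original == "def add(a, b):\n    return a + b\n"
```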
# Z_normal_8_2.py
1.15]),
9: np.array([ -1.22, -0.76, -0.43, -0.14, 0.14, 0.43, 0.76, 1.22]),
10: np.array([ -1.28, -0.84, -0.52, -0.25, 0, 0.25, 0.52, 0.84, 1.28]),
11: np.array([ -1.34, -0.91, -0.6, -0.35, -0.11, 0.11, 0.35, 0.6, 0.91, 1.34]),
12: np.array([ -1.38, -0.97, -0.67, -0.43, -0.21, 0, 0.21, 0.43, 0.67, 0.97, 1.38]),
13: np.array([ -1.43, -1.02, -0.74, -0.5, -0.29, -0.1, 0.1, 0.29, 0.5, 0.74, 1.02, 1.43]),
14: np.array([ -1.47, -1.07, -0.79, -0.57, -0.37, -0.18, 0, 0.18, 0.37, 0.57, 0.79, 1.07, 1.47]),
15: np.array([ -1.5, -1.11, -0.84, -0.62, -0.43, -0.25, -0.08, 0.08, 0.25, 0.43, 0.62, 0.84, 1.11, 1.5]),
16: np.array([ -1.53, -1.15, -0.89, -0.67, -0.49, -0.32, -0.16, 0, 0.16, 0.32, 0.49, 0.67, 0.89, 1.15, 1.53]),
17: np.array([ -1.56, -1.19, -0.93, -0.72, -0.54, -0.38, -0.22, -0.07, 0.07, 0.22, 0.38, 0.54, 0.72, 0.93, 1.19, 1.56]),
18: np.array([ -1.59, -1.22, -0.97, -0.76, -0.59, -0.43, -0.28, -0.14, 0, 0.14, 0.28, 0.43, 0.59, 0.76, 0.97, 1.22, 1.59]),
19: np.array([ -1.62, -1.25, -1, -0.8, -0.63, -0.48, -0.34, -0.2, -0.07, 0.07, 0.2, 0.34, 0.48, 0.63, 0.8, 1, 1.25, 1.62]),
20: np.array([ -1.64, -1.28, -1.04, -0.84, -0.67, -0.52, -0.39, -0.25, -0.13, 0, 0.13, 0.25, 0.39, 0.52, 0.67, 0.84, 1.04, 1.28, 1.64]),
}
return options[size]
def break_points_quantiles(size):
options=np.linspace(0, 1, size+1)[1:]
return options
#y_alphabets = break_points_quantiles(y_alphabet_size).tolist()
y_alphabets = break_points_gaussian(y_alphabet_size).tolist()
def hamming_distance1(string1, string2):
distance = 0
L = len(string1)
for i in range(L):
if string1[i] != string2[i]:
distance += 1
return distance
def hamming_distance(s1, s2):
if len(s1) != len(s2):
raise ValueError("Undefined for sequences of unequal length")
return sum(el1 != el2 for el1, el2 in zip(s1, s2))
"""------------- X-axis Distribution ------------- """
def x_distrubted_values(series):
mean=np.mean(series)
#median=sorted(series)[len(series) // 2]
return mean
"""------------- Index to Letter conversion ------------- """
def index_to_letter(idx):
"""Convert a numerical index to a char."""
if 0 <= idx < 20:
return chr(97 + idx)
else:
raise ValueError('A wrong idx value supplied.')
def normalize(x):
X = np.asanyarray(x)
if np.nanstd(X) < epsilon:
res = []
for entry in X:
if not np.isnan(entry):
res.append(0)
else:
res.append(np.nan)
return res
return (X - np.nanmean(X)) / np.nanstd(X)
def normal_distribution(x):
x = (x-min(x))/(max(x)-min(x))
return x
"""------------- 1- Normalize Data ------------- """
x1=normalize(x1)
plt.plot(x1)
plt.show()
"""------------- 5.2- Y_Alphabetize ------------- """
def alphabetize_ts(sub_section):
mean_val=x_distrubted_values(sub_section)
y_alpha_val=min(y_alphabets, key=lambda x:abs(x-mean_val))
y_alpha_idx=y_alphabets.index(y_alpha_val)
curr_word = index_to_letter(y_alpha_idx)
return(curr_word)
"""------------- 2- Segmentization Data ------------- """
def segment_ts(series,windowSize=window_size,skip_offset=skip_offset):
ts_len=len(x1)
mod = ts_len%windowSize
rnge=0
if(skip_offset==0):
ts_len=int((ts_len-mod-window_size)/1)
rnge=int(ts_len/window_size)
else:
ts_len=int(math.ceil((ts_len-mod-window_size)/skip_offset))
rnge=int(ts_len)
curr_count=0
words=list()
indices=list()
complete_indices=list()
for i in range(0, rnge):
sub_section = series[curr_count:(curr_count+windowSize)]
sub_section=normalize(sub_section)
#print(curr_count,(curr_count+windowSize))
#print(sub_section)
curr_word=""
chunk_size=int(len(sub_section)/word_lenth)
num=0
zlp=""
for j in range(0,word_lenth):
chunk = sub_section[num:num + chunk_size]
curr_word=alphabetize_ts(chunk)
zlp+=str(curr_word)
complete_indices.append(curr_count)
num+=chunk_size
words.append(zlp)
indices.append(curr_count)
curr_count=curr_count+skip_offset-1
temp_list=[]
temp_list.append(sub_section)
temp_df = pd.DataFrame(temp_list)
temp_df.insert(loc=0, column='keys', value=zlp)
temp_df.insert(loc=1, column='position', value=sorted(sub_section)[len(sub_section) // 2])
temp_df.insert(loc=2, column='scale_high', value=np.max(sub_section))
temp_df.insert(loc=3, column='scale_low', value=np.min(sub_section))
if(i==0):
df_sax =temp_df.copy()
else:
df_sax=df_sax.append(temp_df, ignore_index=True)
return (words,indices,df_sax)
alphabetize11,indices,df_sax=segment_ts(x1)
"""------------- SAX ------------- """
""" Complete Words """
def complete_word(series=x1,word_len=word_lenth,skip_len=skip_offset):
alphabetize,indices,df_sax=segment_ts(series)
complete_word=list()
complete
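The listing is truncated at this point by the export. As a rough sketch of how the pieces above are meant to chain together (it assumes the elided top of Z_normal_8_2.py defines x1, epsilon, y_alphabet_size, window_size, word_lenth and skip_offset, which the functions already reference):

```python
# Sketch only; relies on the definitions in the listing above plus the
# elided globals (x1, epsilon, y_alphabet_size, window_size, word_lenth, skip_offset).
x1 = normalize(x1)                          # 1- z-normalize the series
words, indices, df_sax = segment_ts(x1)     # 2- window, chunk and alphabetize
print(words[:5])                            # SAX words, e.g. ['cbba', 'abcd', ...]
print(hamming_distance(words[0], words[1])) # symbol-wise distance between two words
```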
// genstate.go
func newGenState() *genState {
return &genState{
// Mark the name that is used for the binary type as a reserved name
// within the output structs.
definedGlobals: map[string]bool{
ygot.BinaryTypeName: true,
ygot.EmptyTypeName: true,
},
uniqueDirectoryNames: make(map[string]string),
uniqueEnumeratedTypedefNames: make(map[string]string),
uniqueIdentityNames: make(map[string]string),
uniqueEnumeratedLeafNames: make(map[string]string),
uniqueProtoMsgNames: make(map[string]map[string]bool),
uniqueProtoPackages: make(map[string]string),
generatedUnions: make(map[string]bool),
}
}
// enumeratedUnionEntry takes an input YANG union yang.Entry and returns the set of enumerated
// values that should be generated for the entry. New yang.Entry instances are synthesised within
// the yangEnums returned such that enumerations can be generated directly from the output of
// this function in common with enumerations that are not within a union. The name of the enumerated
// value is calculated based on the original context, whether path compression is enabled based
// on the compressPaths boolean, and whether the name should not include underscores, as per the
// noUnderscores boolean.
func (s *genState) enumeratedUnionEntry(e *yang.Entry, compressPaths, noUnderscores bool) ([]*yangEnum, error) {
var es []*yangEnum
for _, t := range enumeratedUnionTypes(e.Type.Type) {
var en *yangEnum
switch {
case t.IdentityBase != nil:
en = &yangEnum{
name: s.identityrefBaseTypeFromIdentity(t.IdentityBase, noUnderscores),
entry: &yang.Entry{
Name: e.Name,
Type: &yang.YangType{
Name: e.Type.Name,
Kind: yang.Yidentityref,
IdentityBase: t.IdentityBase,
},
},
}
case t.Enum != nil:
var enumName string
if _, chBuiltin := yang.TypeKindFromName[t.Name]; chBuiltin {
enumName = s.resolveEnumName(e, compressPaths, noUnderscores)
} else {
var err error
enumName, err = s.resolveTypedefEnumeratedName(e, noUnderscores)
if err != nil {
return nil, err
}
}
en = &yangEnum{
name: enumName,
entry: &yang.Entry{
Name: e.Name,
Type: &yang.YangType{
Name: e.Type.Name,
Kind: yang.Yenum,
Enum: t.Enum,
},
Annotation: map[string]interface{}{"valuePrefix": traverseElementSchemaPath(e)},
},
}
}
es = append(es, en)
}
return es, nil
}
// buildDirectoryDefinitions extracts the yang.Entry instances from a map of
// entries that need struct or message definitions built for them. It resolves
// each yang.Entry to a yangDirectory which contains the elements that are
// needed for subsequent code generation. The name of the directory entry that
// is returned is based on the generatedLanguage that is supplied. The
// compressPaths and genFakeRoot arguments are used to determine how paths that
// are included within the generated structs are used. If the excludeState
// argument is set, those elements within the YANG schema that are marked config
// false (i.e., are read only) are excluded from the returned directories.
func (s *genState) buildDirectoryDefinitions(entries map[string]*yang.Entry, compressPaths, genFakeRoot bool, lang generatedLanguage, excludeState bool) (map[string]*yangDirectory, []error) {
var errs []error
mappedStructs := make(map[string]*yangDirectory)
for _, e := range entries {
// If we are excluding config false (state entries) then skip processing
// this element.
if excludeState && !isConfig(e) {
continue
}
if e.IsList() || e.IsDir() || isRoot(e) {
// This should be mapped to a struct in the generated code since it has
// child elements in the YANG schema.
elem := &yangDirectory{
entry: e,
}
// Encode the name of the struct according to the language specified
// within the input arguments.
switch lang {
case protobuf:
// In the case of protobuf the message name is simply the camel
// case name that is specified.
elem.name = s.protoMsgName(e, compressPaths)
case golang:
// For Go, we map the name of the struct to the path elements
// in CamelCase separated by underscores.
elem.name = s.goStructName(e, compressPaths, genFakeRoot)
default:
errs = append(errs, fmt.Errorf("unknown generating language specified for %s, got: %v", e.Name, lang))
continue
}
// Find the elements that should be rooted on this particular entity.
var fieldErr []error
elem.fields, fieldErr = findAllChildren(e, compressPaths, excludeState)
if fieldErr != nil {
errs = append(errs, fieldErr...)
continue
}
// Determine the path of the element from the schema.
elem.path = strings.Split(schemaTreePath(e), "/")
// Mark this struct as the fake root if it is specified to be.
if e.Node != nil && e.Node.NName() == rootElementNodeName {
elem.isFakeRoot = true
}
// Handle structures that will represent the container which is duplicated
// inside a list. This involves extracting the key elements of the list
// and returning a yangListAttr structure that describes how they should
// be represented.
if e.IsList() {
lattr, listErr := s.buildListKey(e, compressPaths)
if listErr != nil {
errs = append(errs, listErr...)
continue
}
elem.listAttr = lattr
}
mappedStructs[e.Path()] = elem
} else {
errs = append(errs, fmt.Errorf("%s was not an element mapped to a struct", e.Path()))
}
}
return mappedStructs, errs
}
// findEnumSet walks the list of enumerated value leaves and determines whether
// code generation is required for each enum. Particularly, it removes
// duplication between config and state containers when compressPaths is true.
// It also de-dups references to the same identity base, and type definitions.
// If noUnderscores is set to true, then underscores are omitted from the enum
// names to reflect to the preferred style of some generated languages.
func (s *genState) findEnumSet(entries map[string]*yang.Entry, compressPaths, noUnderscores bool) (map[string]*yangEnum, []error) {
validEnums := make(map[string]*yang.Entry)
var enumNames []string
var errs []error
if compressPaths {
// Don't generate output for an element that exists both in the config and state containers,
// i.e., /interfaces/interface/config/enum and /interfaces/interface/state/enum should not
// both have code generated for them. Since there may be containers underneath state then
// we cannot rely on state having a specific place in the tree, therefore, walk through the
// path and swap 'state' for 'config' where it is found allowing us to check whether the
// state leaf has a corresponding config leaf, and if so, to ignore it. Note that a schema
// that is a valid OpenConfig schema has only a single instance of 'config' or 'state' in
// the path, therefore the below algorithm replaces only one element.
for path, e := range entries {
parts := strings.Split(path, "/")
var newPath []string
for _, p := range parts {
if p == "state" {
p = "config"
}
newPath = append(newPath, p)
}
if path == joinPath(newPath) {
// If the path remains the same - i.e., we did not replace state with
// config, then the enumeration is valid, such that code should have
// code generated for it.
validEnums[path] = e
enumNames = append(enumNames, path)
} else {
// Else, if we changed the path, then we changed a state container for
// a config container, and we should check whether the config leaf
// exists. Only when it doesn't do we consider this enum.
if _, ok := entries[joinPath(newPath)]; !ok {
validEnums[path] = e
enumNames = append(enumNames, path)
}
}
}
} else {
// No de-duplication occurs when path compression is disabled.
validEnums = entries
for n := range validEnums {
enumNames = append(enumNames, n)
}
}
// Sort the name of the enums such that we have deterministic ordering. This allows the
// same entity to be used for code generation each time (avoiding flaky tests or scenarios
// where there are erroneous config/state differences).
sort.Strings(enumNames)
// Sort the list of enums such that we can ensure when there is deduplication then the same
// source entity is used for code generation.
genEnums := make(map[string]*yangEnum)
for _, eN := range enumNames {
e := validEnums[eN]
_, builtin := yang.TypeKindFromName[e.Type.Name]
switch {
case e.Type.Name == "union", len(e.Type.Type) > 0 && !builtin:
// Calculate any enumerated types that exist within a union, whether it
// is a directly defined union, or a non-builtin typedef.
es, err := s.enumeratedUnionEntry(e, compressPaths, noUnderscores)
if err != nil {
errs = append(errs, err)
continue
}
for _, en := range es {
if _, ok := genEnums[en.name]; !ok {
genEnums[en.name] = en
}
}
case e.Type.Name == "identityref":
// This is an identityref - we do not want to generate code for an
// identityref but rather for the base identity. This means that we reduce
// duplication across different enum types. Re-map the "path" that is to
// be used to the new identityref name.
if e.Type.IdentityBase == nil {
errs = append(errs, fmt.Errorf("entry %s was an identity with a nil base", e.Name))
continue
}
idBaseName := s.resolveIdentityRefBaseType(e, noUnderscores)
if _, ok := genEnums[idBaseName]; !ok {
genEnums[idBaseName] = &yangEnum{
name: idBaseName,
entry: e,
}
}
case e.Type.Name == "enumeration":
// We simply want to map this enumeration into a new name. Since we do
// de-duplication of re-used enumerated leaves at different points in
// the schema (e.g., if openconfig-bgp/container/enum-A can be instantiated
// in two places, then we do not want to have multiple enumerated types
// that represent this leaf), then we do not have errors if duplicates
// occur, we simply perform de-duplication at this stage.
enumName := s.resolveEnumName(e, compressPaths, noUnderscores)
if _, ok := genEnums[enumName]; !ok {
genEnums[enumName] = &yangEnum{
name: enumName,
entry: e,
}
}
default:
// This is a type which is defined through a typedef.
typeName, err := s.resolveTypedefEnumeratedName(e, noUnderscores)
if err != nil {
errs = append(errs, err)
continue
}
if _, ok := genEnums[typeName]; !ok {
genEnums[typeName] = &yangEnum{
name: typeName,
entry: e,
}
}
}
}
return genEnums, errs
}
// resolveIdentityRefBaseType calculates the mapped name of an identityref's
// base such that it can be used in generated code. The value that is returned
// is defining module name followed by the CamelCase-ified version of the
// base's name. This function wraps the identityrefBaseTypeFromIdentity
// function since it covers the common case that the caller is interested in
// determining the name from an identityref leaf, rather than directly from the
// identity. If the noUnderscores bool is set to true, underscores are omitted
// from the name returned such that the enumerated type name is compliant
// with language styles where underscores are not allowed in names.
func (s *genState) resolveIdentityRefBaseType(idr *yang.Entry, noUnderscores bool) string {
return s.identityrefBaseTypeFromIdentity(idr.Type.IdentityBase, noUnderscores)
}
// identityrefBaseTypeFromIdentity takes an input yang.Identity pointer and
// determines the name of the identity used within the generated code for it. The value
// returned is based on the defining module followed by the CamelCase-ified version
// of the identity's name. If noUnderscores is set to true, underscores are omitted
// from the name returned such that the enumerated type name is compliant with
// language styles where underscores are not allowed in names.
func (s *genState) identityrefBaseTypeFromIdentity(i *yang.Identity, noUnderscores bool) string {
definingModName := parentModulePrettyName(i)
// As per a typedef that includes an enumeration, there is a many to one
// relationship between leaves and an identity value, therefore, we want to
// reuse the existing name for the identity enumeration if one exists.
identityKey := fmt.Sprintf("%s/%s", definingModName, i.Name)
if definedName, ok := s.uniqueIdentityNames[identityKey]; ok {
return definedName
}
var name string
if noUnderscores {
name = fmt.Sprintf("%s%s", yang.CamelCase(definingModName), strings.Replace(yang.CamelCase(i.Name), "_", "", -1))
} else {
name = fmt.Sprintf("%s_%s", yang.CamelCase(definingModName), yang.CamelCase(i.Name))
}
// The name of an identityref base type must be unique within the entire generated
// code, so the context of name generation is global.
uniqueName := makeNameUnique(name, s.definedGlobals)
s.uniqueIdentityNames[identityKey] = uniqueName
return uniqueName
}
// resolveEnumName takes a yang.Entry and resolves its name into the type name
// that will be used in the generated code. Whilst a leaf may only be used
// in a single context (i.e., at its own path), resolveEnumName may be called
// multiple times, and hence de-duplication of unique name generation is required.
// If noUnderscores is set to true, then underscores are omitted from the
// output name.
func (s *genState) resolveEnumName(e *yang.Entry, compressPaths, noUnderscores bool) string {
// It is possible, given a particular enumerated leaf, for it to appear
// multiple times in the schema. For example, through being defined in
// a grouping which is instantiated in two places. In these cases, the
// enumerated values must be the same since the path to the node - i.e.,
// module/hierarchy/of/containers/leaf-name must be unique, since we
// cannot have multiple modules of the same name, and paths within the
// module must be unique. To this end, we check whether we are generating
// an enumeration for exactly the same node, and if so, re-use the name
// of the enumeration that has been generated. This improves usability
// for the end user by avoiding multiple enumerated types.
//
// The path that is used for the enumeration is therefore taking the goyang
// "Node" hierarchy - we walk back up the tree until such time as we find
// a node that is not within the same module (parentModulePrettyName(parent) !=
// parentModulePrettyName(currentNode)), and use this as the unique path.
definingModName := parentModulePrettyName(e.Node)
var identifierPathElem []string
for elem := e.Node; elem.ParentNode() != nil && parentModulePrettyName(elem) == definingModName; elem = elem.ParentNode() {
identifierPathElem = append(identifierPathElem, elem.NName())
}
// Since the path elements are compiled from leaf back to root, then reverse them to
// form the path, this is not strictly required, but aids debugging of the elements.
var identifierPath string
for i := len(identifierPathElem) - 1; i >= 0; i-- {
identifierPath = fmt.Sprintf("%s/%s", identifierPath, identifierPathElem[i])
}
// For leaves that have an enumeration within a typedef that is within a union,
// we do not want to just use the place in the schema definition for de-duplication,
// since it becomes confusing for the user to have non-contextual names within
// this context. We therefore rewrite the identifier path to have the context
// that we are in. By default, we just use the name of the node, but in OpenConfig
// schemas we rely on the grandparent name.
if !isYANGBaseType(e.Type) {
idPfx := e.Name
if compressPaths && e.Parent != nil && e.Parent.Parent != nil {
idPfx = e.Parent.Parent.Name
}
identifierPath = fmt.Sprintf("%s%s", idPfx, identifierPath)
}
// If the leaf had already been encountered, then return the previously generated
// name, rather than generating a new name.
if definedName, ok := s.uniqueEnumeratedLeafNames[identifierPath]; ok {
return definedName
}
if compressPaths {
// If we compress paths then the name of this enum is of the form
// ModuleName_GrandParent_Leaf - we use GrandParent since Parent is
// State or Config so would not be unique. The proposed name is
// handed to makeNameUnique to ensure that it does not clash with
// other defined names.
name := fmt.Sprintf("%s_%s_%s", yang.CamelCase(definingModName), yang.CamelCase(e.Parent.Parent.Name), yang.CamelCase(e.Name))
if noUnderscores {
name = strings.Replace(name, "_", "", -1)
}
uniqueName := makeNameUnique(name, s.definedGlobals)
s.uniqueEnumeratedLeafNames[identifierPath] = uniqueName
return uniqueName
}
// If we don't compress the paths, then we write out the entire path.
var nbuf bytes.Buffer
for i, p := range traverseElementSchemaPath(e) {
if i != 0 && !noUnderscores {
nbuf.WriteRune('_')
}
nbuf.WriteString(yang.CamelCase(p))
}
uniqueName := makeNameUnique(nbuf.String(), s.definedGlobals)
s.uniqueEnumeratedLeafNames[identifierPath] = uniqueName
return uniqueName
}
// flappybird.js
Frame = parent.lastJumpFrame;
}
this.b.y += this.v;
if(this.b.y < 0) this.b.v = 0;
if(this.b.y - Const.BIRD_RADIUS >= Const.SCREEN_HEIGHT) this.valid = false;
this.g = parent.h + this.b.dis(parent.b);
this.h = nextCenter.dis(this.b) + (nextCenter.y - this.b.y)*(nextCenter.y - this.b.y)*0.01;
this.f = this.g + this.h;
},
OP : function(frame, jump) {
this.frame = frame;
this.jump = jump;
}
};
XHH.Point.prototype = {
dis : function(point) {
return Math.sqrt((this.x - point.x)*(this.x - point.x) + (this.y - point.y)*(this.y - point.y))
}
};
XHH.Node.prototype = {
toOP : function() {
return new XHH.OP(this.frame, this.jump);
}
};
XHH.Bird.prototype = {
jump : function() {
if(this.isDead) return;
this.vy = -Const.BIRD_JUMP_SPEED;
},
update : function() {
if(!this.isDead)
this.x += this.vx;
this.y += this.vy;
if(this.y < 0) {
this.y = 0;
this.vy = 0;
}
if(this.y > Const.SCREEN_HEIGHT - this.r) {
this.y = Const.SCREEN_HEIGHT - this.r;
return;
}
this.vy += Const.G;
},
die : function() {
this.isDead = true;
this.vy = 0;
}
};
XHH.Obstacle.prototype = {
/**
*
* @param {XHH.Bird} bird
*/
hit : function(bird) {
var left = this.x - this.width / 2;
var right = this.x + this.width / 2;
var bottom = this.dir == 1 ? 0 : Const.SCREEN_HEIGHT - this.height;
var top = bottom + this.height;
if(this.dir == 1) {
if(bird.x >= left - Const.BIRD_RADIUS && bird.x <= right + Const.BIRD_RADIUS && bird.y <= top) return true;
if(bird.x >= left && bird.x < right && bird.y - Const.BIRD_RADIUS <= top) return true;
}else{
if(bird.x >= left - Const.BIRD_RADIUS && bird.x <= right + Const.BIRD_RADIUS && bird.y >= bottom) return true;
if(bird.x >= left && bird.x <= right && bird.y + Const.BIRD_RADIUS >= bottom) return true;
}
var bc = new XHH.Point(bird.x, bird.y);
var lc = new XHH.Point(left, this.dir == 1 ? top : bottom);
var rc = new XHH.Point(right, this.dir == 1 ? top : bottom);
if(lc.dis(bc) <= Const.BIRD_RADIUS) return true;
if(rc.dis(bc) <= Const.BIRD_RADIUS) return true;
return false;
}
}
XHH.Game.prototype = {
random : function() {
var x = Math.abs(Math.sin(this.seed++)) * 100;
return x - Math.floor(x);
},
createObstacle : function() {
for(var i=0;i<Const.OBST_COUNT;i++) {
var ht_up = Math.floor(this.random() * (Const.OBST_MAX_HEIGHT - Const.OBST_MIN_HEIGHT)) + Const.OBST_MIN_HEIGHT;
var ht_dw = Const.SCREEN_HEIGHT - Const.PASS_HEIGHT - ht_up;
var x = Const.OBST_START_X + i*Const.OBST_MARGIN;
var obst_up = new XHH.Obstacle(x, ht_up, 1);
var obst_dw = new XHH.Obstacle(x, ht_dw, -1);
this.obsts.push(obst_up);
this.obsts.push(obst_dw);
}
},
gameOver : function(){
this.isGameOver = true;
this.gameOverTime = new Date().getTime();
this.bird.die();
this.saveRecord();
},
checkGameOver : function() {
// hit the floor
if(this.bird.y >= Const.SCREEN_HEIGHT - this.bird.r) return true;
// at most 3*2 obstacles in the view
var passed = false;
for(var i=0;i<3*2;i++)
{
var obst = this.obsts[this.obstIndex + i];
if(obst.hit(this.bird)) {
console.log('obst ' + (this.obstIndex + i) + ' hitted the bird!');
return true;
}
if(this.bird.x > obst.x && !obst.passed) {
obst.passed = passed = true;
}
}
if(passed) {
this.score++;
if(this.score > this.record) this.record = this.score;
}
return false;
},
hitTest : function(pt) {
for(var i=0;i<6*2;i++)
{
var obst = this.obsts[this.obstIndex + i];
if(obst.hit(pt)) return true;
}
return false;
},
update : function() {
if(!this.isGameStarted) return;
this.bird.update();
if(this.isGameOver) return;
this.left += this.vx;
if (this.checkGameOver())
this.gameOver();
var obst_lm = this.obsts[this.obstIndex];
// left most obstacle was out of view
if(obst_lm.x + obst_lm.width/2 < this.left)
this.obstIndex+=2;
| if(this.isCOM) {
if(this.ops.length == 0 && this.lastFound) {
this.lastFound = this.AStar();
}
if(this.ops.length != 0) {
while(this.ops[0].frame < this.frame) this.ops.shift();
if(this.ops[0].frame == this.frame) {
this.ops.shift();
this.bird.jump();
}
}
}
this.frame++;
},
drawBird : function() {
ctx.beginPath();
ctx.strokeStyle = "#FFFFFF";
ctx.fillStyle = "#FF0000";
ctx.arc(this.bird.x - this.left, this.bird.y, this.bird.r, 0, 2*Math.PI);
ctx.fill();
//ctx.endPath();
},
drawTraj : function() {
for(var i=0;i<this.traj.length;i++)
{
var p = this.traj[i].b;
ctx.beginPath();
ctx.fillStyle = "#0000FF";
ctx.arc(p.x - this.left, p.y, this.bird.r, 0, 2*Math.PI);
ctx.fill();
}
},
drawObst : function(obst) {
var x = obst.x - this.left - obst.width/2;
var y = obst.dir == 1 ? 0 : Const.SCREEN_HEIGHT - obst.height;
var x_s = x + obst.width/3;
var w_l = obst.width/3;
var w_r = obst.width/3*2;
var grd=this.ctx.createLinearGradient(x,y,x_s,y);
grd.addColorStop(0,"#75BA6E");
grd.addColorStop(1,"#DDF0D8");
this.ctx.fillStyle = grd;
this.ctx.fillRect(x, y, w_l, obst.height);
var grd=this.ctx.createLinearGradient(x_s,y,x + obst.width, y);
grd.addColorStop(0,"#DDF0D8");
grd.addColorStop(1,"#318C27");
this.ctx.fillStyle = grd;
this.ctx.fillRect(x_s, y, w_r, obst.height);
this.ctx.beginPath();
this.ctx.strokeStyle = "291B09";
this.ctx.lineWidth = 2;
this.ctx.rect(x,y,obst.width,obst.height);
this.ctx.stroke();
this.ctx.beginPath();
this.ctx.strokeStyle = "291B09";
this.ctx.lineWidth = 3;
this.ctx.rect(x,obst.dir == 1 ? y + obst.height - Const.OBST_HEAD_HEIGHT : y, obst.width, Const.OBST_HEAD_HEIGHT);
this.ctx.stroke();
},
drawObsts : function() {
// at most 3*2 obstacles in the view
for(var i=0;i<3*2;i++)
{
var obst = this.obsts[this.obstIndex + i];
this.drawObst(obst);
}
},
render : function() {
this.update();
this.ctx.clearRect(0,0,Const.SCREEN_WIDTH,Const.SCREEN_HEIGHT);
this.drawObsts();
this.drawTraj();
this.drawBird();
},
getRecord : function() {
var record = localStorage.getItem("record");
return record ? record : 0;
},
saveRecord : function() {
localStorage.setItem("record", this.record);
},
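// AStar: frame-by-frame search over jump / no-jump successor states (XHH.Node),
// ordered by f = g + h toward the centre of the next gap; on success it fills
// this.ops with the scheduled jumps and this.traj with the planned positions.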
AStar : function() {
var bx = new XHH.Point(this.bird.x, this.bird.y);
var it = null, ib = null;
for(var i=0;i<3*2;i++)
{
var obst = this.obsts[this.obstIndex + i];
if(obst.x > bx.x && obst.dir == 1 && it == null) it = obst;
if(obst.x > bx.x && obst.dir == -1 && ib == null) ib = obst;
}
var center = new XHH.Point(it.x + this.bird.r*2, it.height + Const.PASS_HEIGHT/2 + this.bird.r);
console.log("A* current = " + bx.x + "," + bx.y + " target = " + center.x + "," + center.y);
var q = new PriorityQueue({ comparator: function(a, b) { return a.f - b.f; }});
var parent = {
parent : null,
b : bx,
g : 0,
h : bx.dis(center),
v : this.bird.vy,
frame : this.frame,
lastJumpFrame : this.frame,
jump : 0,
toOP : function() { return new XHH.OP(this.frame, this.jump)}
};
var n0 = new XHH.Node(parent, false, center);
//var n1 = new XHH.Node(parent, true, center);
var startTime = new Date().getTime();
if(n0.valid && !this.hitTest(n0.b)) q.queue(n0);
//if(n1.valid && !this.hitTest(n1.b)) q.queue(n1);
var created = q.length;
var expended = 0;
var found = false;
while(q.length != 0) {
var p = q.dequeue();
expended ++;
// goal reached
if(p.b.dis(center) < 32) {
console.log("found!");
this.ops = [];
this.traj = [];
this.ops.push(p.toOP());
this.traj.push(p);
var pp = p.parent;
while(pp) {
if(pp.jump) this.ops.push(pp.toOP());
this.traj.push(pp);
pp = pp.parent;
}
this.ops.reverse();
found = true;
break;
}
n0 = new XHH.Node(p, false, center);
if(n0.valid && !this.hitTest(n0.b)) { q.queue(n0); created++; }
if(p.frame - p.lastJumpFrame >= Const.JUMP_INTERVAL)
{
n1 = new XHH.Node(p, true, center);
if(n1.valid && !this.hitTest(n1.b)) { q.queue(n1); created++; }
}
if(expended > 4e5) break;
}
var endTime = new Date().getTime();
console.log("found = " + found + " created = " + created + " expended = " + expended + " time = " + (endTime - startTime));
return found;
},
start : function(isCOM) {
this.isCOM = isCOM;
this.isGameStarted = true;
if(isCOM) {
this.lastFound = this.AStar();
}
},
init : function(seed, ctx) {
this.seed = seed ? seed : 0;
this.ctx = ctx;
this.obstIndex = 0;
this.vx = Const.X_VOL;
this.obsts = [];
this.left = 0;
this.score = 0;
this.isCOM = false;
this.record = this.getRecord();
this.obstIndex = 0;
this.bird = new XHH.Bird();
this.isGameOver = false;
this.isGameStarted = false;
this.createObstacle();
this.ops = [];
this.traj = [];
this.lastFound = false;
this.frame = 0;
},
jump : function() {
if(this.isGameOver && (new Date().getTime() - this.gameOverTime > 500)){
this.init(this.seed, this.ctx);
} else if(!this.isGameStarted) {
this.start(false);
this.bird.jump();
}
// raft.go
// configuration change (if any). Config changes are only allowed to
// be proposed if the leader's applied index is greater than this
// value.
// (Used in 3A conf change)
PendingConfIndex uint64
actualElectionTimeout int
//// only will be useful if self is a leader
//appendCount map[uint64]uint64
}
func (r *Raft) generateElectionTimeout() int {
min := r.electionTimeout
max := r.electionTimeout * 2 - 1
return rand.Intn(max-min+1) + min
}
// newRaft return a raft peer with the given config
func newRaft(c *Config) *Raft {
if err := c.validate(); err != nil {
panic(err.Error())
}
peer2Progress := make(map[uint64]*Progress, len(c.peers))
peer2Vote := make(map[uint64]bool, len(c.peers))
for _, s := range c.peers {
peer2Vote[s] = false
peer2Progress[s] = &Progress{0, 0}
}
rand.Seed(time.Now().UnixNano())
hardState, _, _ := c.Storage.InitialState()
return &Raft{id: c.ID, Term: hardState.Term, Vote: hardState.Vote, RaftLog: newLog(c.Storage), State: StateFollower, Prs: peer2Progress, votes: peer2Vote, Lead: 0, heartbeatTimeout: c.HeartbeatTick, electionTimeout: c.ElectionTick, heartbeatElapsed: 0, electionElapsed: 0, actualElectionTimeout: 0}
}
// sendAppend sends an append RPC with new entries (if any) and the
// current commit index to the given peer. Returns true if a message was sent.
func (r *Raft) sendAppend(to uint64) bool {
r.sendAppendEntries(to)
return true
}
// sendHeartbeat sends a heartbeat RPC to the given peer.
func (r *Raft) sendHeartbeat(to uint64) {
m := pb.Message{MsgType: pb.MessageType_MsgHeartbeat, From: r.id, To: to, Term: r.Term}
r.sendMsg(m)
}
// tick advances the internal logical clock by a single tick.
func (r *Raft) tick() {
r.heartbeatElapsed++
r.electionElapsed++
if r.State == StateLeader {
if r.heartbeatElapsed == r.heartbeatTimeout {
r.broadcastMsgHeartbeat()
}
} else if r.State == StateCandidate {
if r.electionElapsed > r.actualElectionTimeout {
m := pb.Message{MsgType: pb.MessageType_MsgHup, From: r.id, To: r.id}
r.sendMsgLocally(m)
}
} else if r.State == StateFollower {
if r.electionElapsed > r.actualElectionTimeout {
m := pb.Message{MsgType: pb.MessageType_MsgHup, From: r.id, To: r.id}
r.sendMsgLocally(m)
}
}
}
// becomeFollower transform this peer's state to Follower
func (r *Raft) becomeFollower(term uint64, lead uint64) {
r.State = StateFollower
r.Term = term
r.Lead = lead
r.electionElapsed = 0
r.actualElectionTimeout = r.generateElectionTimeout()
}
// becomeCandidate transform this peer's state to candidate
func (r *Raft) becomeCandidate() {
r.State = StateCandidate
r.Term++
r.Lead = 0
r.electionElapsed = 0
r.actualElectionTimeout = r.generateElectionTimeout()
r.votes = map[uint64]bool{}
}
func (r *Raft) handleBeat() {
r.broadcastMsgHeartbeat()
}
func (r *Raft) initializeProgressFirstTime() {
for _, p := range r.Prs {
p.Match = 0
p.Next = r.RaftLog.LastIndex() + 1
}
}
func (r *Raft) initializeProgressSecondTime() {
for i, p := range r.Prs {
if i != r.id {
p.Next++
}
}
}
func (r *Raft) sendInitialAppend() {
r.initializeProgressFirstTime()
// send msgAppend(no-op); not sure if this should be here
propMsg := pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Term: r.Term, Index: r.RaftLog.LastIndex() + 1}}}
r.sendMsgLocally(propMsg)
}
// becomeLeader transform this peer's state to leader
func (r *Raft) becomeLeader() {
// NOTE: Leader should propose a noop entry on its term
r.State = StateLeader
r.Vote = 0
for p, _ := range r.Prs {
if p == r.id {
continue
}
r.Prs[p].Match = 0
r.Prs[p].Next = r.RaftLog.LastIndex() + 1
}
//r.initializeProgress()
// send heartbeat
//m := pb.Message{MsgType: pb.MessageType_MsgBeat, From: r.id, To: r.id}
//r.sendMsgLocally(m)
// send noop message
r.sendInitialAppend()
r.electionElapsed = 0
}
// helpers
func (r *Raft) broadcastMsgHeartbeat() {
for p, _ := range r.Prs {
// skip self
if p == r.id {
continue
}
r.sendHeartbeat(p)
}
}
// the message will go over the network
func (r *Raft) sendMsg(m pb.Message) {
r.msgs = append(r.msgs, m)
}
// the message will NOT go over the network
func (r *Raft) sendMsgLocally(m pb.Message) {
r.Step(m)
}
func (r *Raft) grantVoting(m pb.Message) {
r.Vote = m.From
r.State = StateFollower
r.Term = m.Term
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: false, Term: r.Term}
r.sendMsg(newMsg)
}
func (r *Raft) rejectVoting(m pb.Message) {
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: true, Term: r.Term}
r.sendMsg(newMsg)
}
// including self
func (r *Raft) hasVotedForAnotherCandidate(m pb.Message) bool {
if !(r.Vote == 0 || r.Vote == m.From) {
return true
}
return false
}
func (r *Raft) hasVotedForSelf() bool {
if r.Vote == r.id {
return true
} else {
return false
}
}
func (r *Raft) hasMoreCommittedLogsThanCandidate(m pb.Message) bool {
lastCommittedLog, err := r.RaftLog.lastCommittedLog()
if err != nil {
return false
}
if m.LogTerm < lastCommittedLog.Term {
return true
} else if m.LogTerm == lastCommittedLog.Term {
if m.Index < lastCommittedLog.Index {
return true
}
}
return false
}
func (r *Raft) hasEarlierLogThanCandidate(m pb.Message) bool {
// determine if the candidate has later term & log than self
currentLogTerm, _ := r.RaftLog.Term(r.RaftLog.LastIndex())
if m.LogTerm < currentLogTerm {
return false
} else if m.LogTerm == currentLogTerm {
if m.Index < r.RaftLog.LastIndex() {
return false
}
}
return true
}
func (r *Raft) updateFollowerState(m pb.Message) {
r.Vote = 0
r.Lead = 0
r.Term = m.Term
r.State = StateFollower
}
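// handleVoting decides whether to grant or reject a MsgRequestVote: a higher term
// first demotes this peer to follower, and the vote is then granted only if no
// conflicting vote has been cast and the candidate's log is at least as up to date.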
func (r *Raft) handleVoting(m pb.Message) {
//DPrintf("current Term %d, current Index %d, message Term %d, message Index %d", r.Term, r.RaftLog.LastIndex(), m.LogTerm, m.Index)
// ensure the candidate has all committed logs
//if r.hasMoreCommittedLogsThanCandidate(m) {
// r.rejectVoting(m)
// return
//}
// determine if the raft has already voted
if m.Term > r.Term {
//if r.hasVotedForAnotherCandidate(m) && !r.hasVotedForSelf() {
// r.rejectVoting(m)
// return
//} else {
// r.grantVoting(m)
// return
//}
r.updateFollowerState(m)
}
if m.Term == r.Term {
if r.hasVotedForAnotherCandidate(m) {
r.rejectVoting(m)
return
} else {
if r.hasEarlierLogThanCandidate(m) && !r.hasMoreCommittedLogsThanCandidate(m){
r.grantVoting(m)
return
} else {
r.rejectVoting(m)
return
| dateFollowerState(m | identifier_name |
raft.go | r.Prs {
// skip self
if p == r.id {
continue
}
r.sendHeartbeat(p)
}
}
// the message will go over the network
func (r *Raft) sendMsg(m pb.Message) {
r.msgs = append(r.msgs, m)
}
// the message will NOT go over the network
func (r *Raft) sendMsgLocally(m pb.Message) {
r.Step(m)
}
func (r *Raft) grantVoting(m pb.Message) {
r.Vote = m.From
r.State = StateFollower
r.Term = m.Term
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: false, Term: r.Term}
r.sendMsg(newMsg)
}
func (r *Raft) rejectVoting(m pb.Message) {
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: true, Term: r.Term}
r.sendMsg(newMsg)
}
// including self
func (r *Raft) hasVotedForAnotherCandidate(m pb.Message) bool {
if !(r.Vote == 0 || r.Vote == m.From) {
return true
}
return false
}
func (r *Raft) hasVotedForSelf() bool {
if r.Vote == r.id {
return true
} else {
return false
}
}
func (r *Raft) hasMoreCommittedLogsThanCandidate(m pb.Message) bool {
lastCommittedLog, err := r.RaftLog.lastCommittedLog()
if err != nil {
return false
}
if m.LogTerm < lastCommittedLog.Term {
return true
} else if m.LogTerm == lastCommittedLog.Term {
if m.Index < lastCommittedLog.Index {
return true
}
}
return false
}
func (r *Raft) hasEarlierLogThanCandidate(m pb.Message) bool {
// determine if the candidate has later term & log than self
currentLogTerm, _ := r.RaftLog.Term(r.RaftLog.LastIndex())
if m.LogTerm < currentLogTerm {
return false
} else if m.LogTerm == currentLogTerm {
if m.Index < r.RaftLog.LastIndex() {
return false
}
}
return true
}
func (r *Raft) updateFollowerState(m pb.Message) {
r.Vote = 0
r.Lead = 0
r.Term = m.Term
r.State = StateFollower
}
func (r *Raft) handleVoting(m pb.Message) {
//DPrintf("current Term %d, current Index %d, message Term %d, message Index %d", r.Term, r.RaftLog.LastIndex(), m.LogTerm, m.Index)
// ensure the candidate has all committed logs
//if r.hasMoreCommittedLogsThanCandidate(m) {
// r.rejectVoting(m)
// return
//}
// determine if the raft has already voted
if m.Term > r.Term {
//if r.hasVotedForAnotherCandidate(m) && !r.hasVotedForSelf() {
// r.rejectVoting(m)
// return
//} else {
// r.grantVoting(m)
// return
//}
r.updateFollowerState(m)
}
if m.Term == r.Term {
if r.hasVotedForAnotherCandidate(m) {
r.rejectVoting(m)
return
} else {
if r.hasEarlierLogThanCandidate(m) && !r.hasMoreCommittedLogsThanCandidate(m){
r.grantVoting(m)
return
} else {
r.rejectVoting(m)
return
}
}
} else {
r.rejectVoting(m)
return
}
}
func (r *Raft) startVoting() {
r.becomeCandidate()
r.votes[r.id] = true
r.Vote = r.id
if r.tallyAndWin() {
r.becomeLeader()
return
}
r.electionElapsed = 0
for p, _ := range r.Prs {
if p == r.id {
continue
}
// preparations
logIndex := r.RaftLog.LastIndex()
logTerm, _ := r.RaftLog.Term(logIndex)
m := pb.Message{MsgType: pb.MessageType_MsgRequestVote, From: r.id, To: p, Term: r.Term, LogTerm: logTerm, Index: logIndex}
r.sendMsg(m)
}
}
func (r *Raft) tallyAndWin() bool {
countAccept := 0
//countReject := 0
for _, v := range r.votes {
if v == true {
countAccept++
}
}
if countAccept > len(r.Prs)-countAccept {
return true
} else {
return false
}
}
func (r *Raft) tallyAndLose() bool {
countReject := 0
//countReject := 0
for _, v := range r.votes {
if v == false {
countReject++
}
}
if countReject > len(r.Prs)-countReject {
return true
} else {
return false
}
}
func (r *Raft) handleVotingResponse(m pb.Message) {
if !m.Reject {
r.votes[m.From] = true
} else {
r.votes[m.From] = false
if m.Term > r.Term {
r.State = StateFollower
r.Term = m.Term
r.electionElapsed = 0
}
}
// when more than half servers have voted, we tally
if len(r.votes) > len(r.Prs)-len(r.votes) {
if r.tallyAndWin() {
r.becomeLeader()
}
if r.tallyAndLose() {
// we don't specify leader here
r.becomeFollower(r.Term, 0)
}
}
}
func (r *Raft) handleMsgHeartbeat(m pb.Message) {
}
// Step the entrance of handle message, see `MessageType`
// on `eraftpb.proto` for what msgs should be handled
func (r *Raft) Step(m pb.Message) error {
// Your Code Here (2A).
switch r.State {
case StateFollower:
switch m.MsgType {
case pb.MessageType_MsgRequestVote:
r.handleVoting(m)
case pb.MessageType_MsgHup:
r.startVoting()
case pb.MessageType_MsgHeartbeat:
r.handleMsgHeartbeat(m)
case pb.MessageType_MsgAppend:
r.handleAppendEntries(m)
}
case StateCandidate:
switch m.MsgType {
case pb.MessageType_MsgRequestVote:
r.handleVoting(m)
case pb.MessageType_MsgHup:
r.startVoting()
case pb.MessageType_MsgRequestVoteResponse:
r.handleVotingResponse(m)
case pb.MessageType_MsgAppend:
r.handleAppendEntries(m)
}
case StateLeader:
switch m.MsgType {
case pb.MessageType_MsgHeartbeat:
r.handleHeartbeat(m)
case pb.MessageType_MsgHup:
case pb.MessageType_MsgRequestVote:
r.handleVoting(m)
case pb.MessageType_MsgBeat:
r.handleBeat()
case pb.MessageType_MsgAppend:
r.handleAppendEntries(m)
case pb.MessageType_MsgPropose:
r.handlePropose(m)
case pb.MessageType_MsgAppendResponse:
r.handleAppendResponse(m)
}
}
return nil
}
func (r *Raft) handlePropose(m pb.Message) {
// update match & next for the leader + followers
r.RaftLog.appendLog(m.Entries)
r.Prs[r.id].Match = r.RaftLog.LastIndex()
r.Prs[r.id].Next = r.RaftLog.LastIndex() + 1
//r.initializeProgressSecondTime()
// if there is only one node in the scene
if len(r.Prs) == 1 {
r.RaftLog.committed = r.Prs[r.id].Match
}
r.broadcastAppendEntries()
}
func (r *Raft) sendAppendEntries(to uint64) {
// preparations
// notice Index for the algorithm is different from Index for the language
nextLogIndex := r.Prs[to].Next
prevLogIndex := nextLogIndex - 1
prevLogTerm, _ := r.RaftLog.Term(prevLogIndex)
//// if there is nothing to send, we append an NO-OP entry
//if nextLogIndex == r.RaftLog.LastIndex() + 1 {
// // update match & next for the leader
// r.RaftLog.appendLog([]*pb.Entry{{Term: r.Term, Index: r.RaftLog.LastIndex() + 1}})
// r.Prs[r.id].Match = r.RaftLog.LastIndex() | // r.Prs[r.id].Next = r.RaftLog.LastIndex() + 1
//}
// convert array of objects to array of pointers
entriesToAppend := r.RaftLog.entries[nextLogIndex-1:] | random_line_split |
|
raft.go | // configuration change (if any). Config changes are only allowed to
// be proposed if the leader's applied index is greater than this
// value.
// (Used in 3A conf change)
PendingConfIndex uint64
actualElectionTimeout int
//// only will be useful if self is a leader
//appendCount map[uint64]uint64
}
func (r *Raft) generateElectionTimeout() int {
min := r.electionTimeout
max := r.electionTimeout * 2 - 1
return rand.Intn(max-min+1) + min
}
// newRaft return a raft peer with the given config
func newRaft(c *Config) *Raft {
if err := c.validate(); err != nil {
panic(err.Error())
}
peer2Progress := make(map[uint64]*Progress, len(c.peers))
peer2Vote := make(map[uint64]bool, len(c.peers))
for _, s := range c.peers {
peer2Vote[s] = false
peer2Progress[s] = &Progress{0, 0}
}
rand.Seed(time.Now().UnixNano())
hardState, _, _ := c.Storage.InitialState()
return &Raft{id: c.ID, Term: hardState.Term, Vote: hardState.Vote, RaftLog: newLog(c.Storage), State: StateFollower, Prs: peer2Progress, votes: peer2Vote, Lead: 0, heartbeatTimeout: c.HeartbeatTick, electionTimeout: c.ElectionTick, heartbeatElapsed: 0, electionElapsed: 0, actualElectionTimeout: 0}
}
// sendAppend sends an append RPC with new entries (if any) and the
// current commit index to the given peer. Returns true if a message was sent.
func (r *Raft) sendAppend(to uint64) bool {
r.sendAppendEntries(to)
return true
}
// sendHeartbeat sends a heartbeat RPC to the given peer.
func (r *Raft) sendHeartbeat(to uint64) {
m := pb.Message{MsgType: pb.MessageType_MsgHeartbeat, From: r.id, To: to, Term: r.Term}
r.sendMsg(m)
}
// tick advances the internal logical clock by a single tick.
func (r *Raft) tick() {
r.heartbeatElapsed++
r.electionElapsed++
if r.State == StateLeader {
if r.heartbeatElapsed == r.heartbeatTimeout {
r.broadcastMsgHeartbeat()
}
} else if r.State == StateCandidate {
if r.electionElapsed > r.actualElectionTimeout {
m := pb.Message{MsgType: pb.MessageType_MsgHup, From: r.id, To: r.id}
r.sendMsgLocally(m)
}
} else if r.State == StateFollower {
if r.electionElapsed > r.actualElectionTimeout {
m := pb.Message{MsgType: pb.MessageType_MsgHup, From: r.id, To: r.id}
r.sendMsgLocally(m)
}
}
}
// becomeFollower transform this peer's state to Follower
func (r *Raft) becomeFollower(term uint64, lead uint64) {
r.State = StateFollower
r.Term = term
r.Lead = lead
r.electionElapsed = 0
r.actualElectionTimeout = r.generateElectionTimeout()
}
// becomeCandidate transform this peer's state to candidate
func (r *Raft) becomeCandidate() {
r.State = StateCandidate
r.Term++
r.Lead = 0
r.electionElapsed = 0
r.actualElectionTimeout = r.generateElectionTimeout()
r.votes = map[uint64]bool{}
}
func (r *Raft) handleBeat() {
r.broadcastMsgHeartbeat()
}
func (r *Raft) initializeProgressFirstTime() {
for _, p := range r.Prs {
p.Match = 0
p.Next = r.RaftLog.LastIndex() + 1
}
}
func (r *Raft) initializeProgressSecondTime() {
for i, p := range r.Prs {
if i != r.id {
p.Next++
}
}
}
func (r *Raft) sendInitialAppend() {
r.initializeProgressFirstTime()
// send msgAppend(no-op); not sure if this should be here
propMsg := pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Term: r.Term, Index: r.RaftLog.LastIndex() + 1}}}
r.sendMsgLocally(propMsg)
}
// becomeLeader transform this peer's state to leader
func (r *Raft) becomeLeader() {
// NOTE: Leader should propose a noop entry on its term
r.State = StateLeader
r.Vote = 0
for p, _ := range r.Prs {
if p == r.id {
continue
}
r.Prs[p].Match = 0
r.Prs[p].Next = r.RaftLog.LastIndex() + 1
}
//r.initializeProgress()
// send heartbeat
//m := pb.Message{MsgType: pb.MessageType_MsgBeat, From: r.id, To: r.id}
//r.sendMsgLocally(m)
// send noop message
r.sendInitialAppend()
r.electionElapsed = 0
}
// helpers
func (r *Raft) broadcastMsgHeartbeat() {
for p, _ := range r.Prs {
// skip self
if p == r.id {
continue
}
r.sendHeartbeat(p)
}
}
// the message will go over the network
func (r *Raft) sendMsg(m pb.Message) {
r.msgs = append(r.msgs, m)
}
// the message will NOT go over the network
func (r *Raft) sendMsgLocally(m pb.Message) {
r.Step(m)
}
func (r *Raft) grantVoting(m pb.Message) {
r.Vote = m.From
r.State = StateFollower
r.Term = m.Term
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: false, Term: r.Term}
r.sendMsg(newMsg)
}
func (r *Raft) rejectVoting(m pb.Message) {
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: true, Term: r.Term}
r.sendMsg(newMsg)
}
// including self
func (r *Raft) hasVotedForAnotherCandidate(m pb.Message) bool {
if !(r.Vote == 0 || r.Vote == m.From) {
return true
}
return false
}
func (r *Raft) hasVotedForSelf() bool {
if r.Vote == r.id {
return true
} else {
return false
}
}
func (r *Raft) hasMoreCommittedLogsThanCandidate(m pb.Message) bool {
lastCommittedLog, err := r.RaftLog.lastCommittedLog()
if err != nil {
return false
}
if m.LogTerm < lastCommittedLog.Term {
return true
} else if m.LogTerm == lastCommittedLog.Term {
if m.Index < lastCommittedLog.Index {
return true
}
}
return false
}
func (r *Raft) hasEarlierLogThanCandidate(m pb.Message) bool {
// determine if the candidate has later term & log than self
currentLogTerm, _ := r.RaftLog.Term(r.RaftLog.LastIndex())
if m.LogTerm < currentLogTerm {
return false
} else if m.LogTerm == currentLogTerm {
if m.Index < r.RaftLog.LastIndex() {
return false
}
}
return true
}
func (r *Raft) updateFollowerState(m pb.Message) {
r.Vote = 0
r.Lead = 0
r.Term = m.Term
r.State = StateFollower
}
func (r *Raft) handleVoting(m pb.Message) {
//DPrintf("current Term %d, current Index %d, message Term %d, message Index %d", r.Term, r.RaftLog.LastIndex(), m.LogTerm, m.Index)
// ensure the candidate has all committed logs
//if r.hasMoreCommittedLogsThanCandidate(m) {
// r.rejectVoting(m)
// return
//}
// determine if the raft has already voted
if m.Term > r.Term {
//if r.hasVotedForAnotherCandidate(m) && !r.hasVotedForSelf() {
// r.rejectVoting(m)
// return
//} else {
// r.grantVoting(m)
// return
//}
r.updateFollowerState(m)
}
if m.Term == r.Term {
if r.hasVotedForAnotherCandidate(m) {
| lse {
if r.hasEarlierLogThanCandidate(m) && !r.hasMoreCommittedLogsThanCandidate(m){
r.grantVoting(m)
return
} else {
r.rejectVoting(m)
return
| r.rejectVoting(m)
return
} e | conditional_block |
raft.go | // configuration change (if any). Config changes are only allowed to
// be proposed if the leader's applied index is greater than this
// value.
// (Used in 3A conf change)
PendingConfIndex uint64
actualElectionTimeout int
//// only will be useful if self is a leader
//appendCount map[uint64]uint64
}
func (r *Raft) generateElectionTimeout() int {
min := r.electionTimeout
max := r.electionTimeout * 2 - 1
return rand.Intn(max-min+1) + min
}
// newRaft return a raft peer with the given config
func newRaft(c *Config) *Raft {
if err := c.validate(); err != nil {
panic(err.Error())
}
peer2Progress := make(map[uint64]*Progress, len(c.peers))
peer2Vote := make(map[uint64]bool, len(c.peers))
for _, s := range c.peers {
peer2Vote[s] = false
peer2Progress[s] = &Progress{0, 0}
}
rand.Seed(time.Now().UnixNano())
hardState, _, _ := c.Storage.InitialState()
return &Raft{id: c.ID, Term: hardState.Term, Vote: hardState.Vote, RaftLog: newLog(c.Storage), State: StateFollower, Prs: peer2Progress, votes: peer2Vote, Lead: 0, heartbeatTimeout: c.HeartbeatTick, electionTimeout: c.ElectionTick, heartbeatElapsed: 0, electionElapsed: 0, actualElectionTimeout: 0}
}
// sendAppend sends an append RPC with new entries (if any) and the
// current commit index to the given peer. Returns true if a message was sent.
func (r *Raft) sendAppend(to uint64) bool {
r.sendAppendEntries(to)
return true
}
// sendHeartbeat sends a heartbeat RPC to the given peer.
func (r *Raft) sendHeartbeat(to uint64) {
m := pb.Message{MsgType: pb.MessageType_MsgHeartbeat, From: r.id, To: to, Term: r.Term}
r.sendMsg(m)
}
// tick advances the internal logical clock by a single tick.
func (r *Raft) tick() {
r.heartbeatElapsed++
r.electionElapsed++
if r.State == StateLeader {
if r.heartbeatElapsed == r.heartbeatTimeout {
r.broadcastMsgHeartbeat()
}
} else if r.State == StateCandidate {
if r.electionElapsed > r.actualElectionTimeout {
m := pb.Message{MsgType: pb.MessageType_MsgHup, From: r.id, To: r.id}
r.sendMsgLocally(m)
}
} else if r.State == StateFollower {
if r.electionElapsed > r.actualElectionTimeout {
m := pb.Message{MsgType: pb.MessageType_MsgHup, From: r.id, To: r.id}
r.sendMsgLocally(m)
}
}
}
// becomeFollower transform this peer's state to Follower
func (r *Raft) becomeFollower(term uint64, lead uint64) {
r.State = StateFollower
r.Term = term
r.Lead = lead
r.electionElapsed = 0
r.actualElectionTimeout = r.generateElectionTimeout()
}
// becomeCandidate transform this peer's state to candidate
func (r *Raft) becomeCandidate() {
| func (r *Raft) handleBeat() {
r.broadcastMsgHeartbeat()
}
func (r *Raft) initializeProgressFirstTime() {
for _, p := range r.Prs {
p.Match = 0
p.Next = r.RaftLog.LastIndex() + 1
}
}
func (r *Raft) initializeProgressSecondTime() {
for i, p := range r.Prs {
if i != r.id {
p.Next++
}
}
}
func (r *Raft) sendInitialAppend() {
r.initializeProgressFirstTime()
// send msgAppend(no-op); not sure if this should be here
propMsg := pb.Message{From: 1, To: 1, MsgType: pb.MessageType_MsgPropose, Entries: []*pb.Entry{{Term: r.Term, Index: r.RaftLog.LastIndex() + 1}}}
r.sendMsgLocally(propMsg)
}
// becomeLeader transform this peer's state to leader
func (r *Raft) becomeLeader() {
// NOTE: Leader should propose a noop entry on its term
r.State = StateLeader
r.Vote = 0
for p, _ := range r.Prs {
if p == r.id {
continue
}
r.Prs[p].Match = 0
r.Prs[p].Next = r.RaftLog.LastIndex() + 1
}
//r.initializeProgress()
// send heartbeat
//m := pb.Message{MsgType: pb.MessageType_MsgBeat, From: r.id, To: r.id}
//r.sendMsgLocally(m)
// send noop message
r.sendInitialAppend()
r.electionElapsed = 0
}
// helpers
func (r *Raft) broadcastMsgHeartbeat() {
for p, _ := range r.Prs {
// skip self
if p == r.id {
continue
}
r.sendHeartbeat(p)
}
}
// the message will go over the network
func (r *Raft) sendMsg(m pb.Message) {
r.msgs = append(r.msgs, m)
}
// the message will NOT go over the network
func (r *Raft) sendMsgLocally(m pb.Message) {
r.Step(m)
}
func (r *Raft) grantVoting(m pb.Message) {
r.Vote = m.From
r.State = StateFollower
r.Term = m.Term
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: false, Term: r.Term}
r.sendMsg(newMsg)
}
func (r *Raft) rejectVoting(m pb.Message) {
newMsg := pb.Message{MsgType: pb.MessageType_MsgRequestVoteResponse, From: r.id, To: m.From, Reject: true, Term: r.Term}
r.sendMsg(newMsg)
}
// including self
func (r *Raft) hasVotedForAnotherCandidate(m pb.Message) bool {
if !(r.Vote == 0 || r.Vote == m.From) {
return true
}
return false
}
func (r *Raft) hasVotedForSelf() bool {
if r.Vote == r.id {
return true
} else {
return false
}
}
func (r *Raft) hasMoreCommittedLogsThanCandidate(m pb.Message) bool {
lastCommittedLog, err := r.RaftLog.lastCommittedLog()
if err != nil {
return false
}
if m.LogTerm < lastCommittedLog.Term {
return true
} else if m.LogTerm == lastCommittedLog.Term {
if m.Index < lastCommittedLog.Index {
return true
}
}
return false
}
func (r *Raft) hasEarlierLogThanCandidate(m pb.Message) bool {
// determine if the candidate has later term & log than self
currentLogTerm, _ := r.RaftLog.Term(r.RaftLog.LastIndex())
if m.LogTerm < currentLogTerm {
return false
} else if m.LogTerm == currentLogTerm {
if m.Index < r.RaftLog.LastIndex() {
return false
}
}
return true
}
func (r *Raft) updateFollowerState(m pb.Message) {
r.Vote = 0
r.Lead = 0
r.Term = m.Term
r.State = StateFollower
}
func (r *Raft) handleVoting(m pb.Message) {
//DPrintf("current Term %d, current Index %d, message Term %d, message Index %d", r.Term, r.RaftLog.LastIndex(), m.LogTerm, m.Index)
// ensure the candidate has all committed logs
//if r.hasMoreCommittedLogsThanCandidate(m) {
// r.rejectVoting(m)
// return
//}
// determine if the raft has already voted
if m.Term > r.Term {
//if r.hasVotedForAnotherCandidate(m) && !r.hasVotedForSelf() {
// r.rejectVoting(m)
// return
//} else {
// r.grantVoting(m)
// return
//}
r.updateFollowerState(m)
}
if m.Term == r.Term {
if r.hasVotedForAnotherCandidate(m) {
r.rejectVoting(m)
return
} else {
if r.hasEarlierLogThanCandidate(m) && !r.hasMoreCommittedLogsThanCandidate(m){
r.grantVoting(m)
return
} else {
r.rejectVoting(m)
return
| r.State = StateCandidate
r.Term++
r.Lead = 0
r.electionElapsed = 0
r.actualElectionTimeout = r.generateElectionTimeout()
r.votes = map[uint64]bool{}
}
| identifier_body |
SEODWARF_S2_TURB_fMask.py | required=True,
help="path to folder where results are stored",
metavar='<string>')
params = vars(parser.parse_args())
inFolder = params['inFolder']
outFolder = params['outFolder']
# recover list of bands to be processed
bandNum10m = [4,3,2,8]
bandNumAll = ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09', 'B10', 'B11', 'B12']
bandList = []
allFiles = glob.glob(inFolder + '\*\GRANULE\*\IMG_DATA\*.jp2')
for b in range(len(bandNumAll)):
for file in allFiles:
if file.endswith('_%s.jp2' % bandNumAll[b]):
bandList.append(file)
# recover metadata file
os.listdir(inFolder + '/GRANULE/')
MTD_MSIL1C = inFolder + '/MTD_MSIL1C.xml'
MTD_TL = inFolder + '/GRANULE/' + os.listdir(inFolder + '/GRANULE/')[0] + '/MTD_TL.xml'
# --- read metadata ---
print('Read metadata files')
# search tile name in input folder
match = re.search('([A-Z][1-9][1-9][A-Z][A-Z][A-Z])', inFolder)
tile = match.group(0)
EPSG_code = 'EPSG_326' + tile[1:3]
dataset = gdal.Open('SENTINEL2_L1C:%s:10m:%s' % (MTD_MSIL1C, EPSG_code), GA_ReadOnly)
if dataset is None:
print('Unable to open image')
sys.exit(1)
MTD = dataset.GetMetadata()
wkt_projection =dataset.GetProjection()
geotransform = dataset.GetGeoTransform()
# print MTD
# read metadata per band
bandB = dataset.GetRasterBand(3)
bandG = dataset.GetRasterBand(2)
bandR = dataset.GetRasterBand(1)
bandIR = dataset.GetRasterBand(4)
MTD_B = bandB.GetMetadata()
MTD_G = bandG.GetMetadata()
MTD_R = bandR.GetMetadata()
MTD_IR = bandIR.GetMetadata()
# --- recover values from metadata ---
ULX = geotransform[0]
ULY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
nl = dataset.RasterYSize
nc = dataset.RasterXSize
DATE = MTD['GENERATION_TIME'][0:10]
HOUR = MTD['GENERATION_TIME'][11:16]
if MTD['DATATAKE_1_SPACECRAFT_NAME'] == 'Sentinel-2A':
sensor = 'S2A'
elif MTD['DATATAKE_1_SPACECRAFT_NAME'] == 'Sentinel-2B':
sensor = 'S2B'
imageNameLong = MTD['PRODUCT_URI']
# pos_ = [pos for pos, char in enumerate(imageNameLong) if char == '_']
# tile = imageNameLong[pos_[4]+1:pos_[4]+7]
dst_earth_sun = float(MTD['REFLECTANCE_CONVERSION_U'])
QUANT = int(MTD['QUANTIFICATION_VALUE'])
ESUN = [MTD_R['SOLAR_IRRADIANCE'], MTD_G['SOLAR_IRRADIANCE'], MTD_B['SOLAR_IRRADIANCE'], MTD_IR['SOLAR_IRRADIANCE']]
# read xml file and get extra metadata
root = ET.parse(MTD_TL).getroot()
tmp = root.find('.//Mean_Sun_Angle/ZENITH_ANGLE')
thetas = 90 - float(tmp.text)
print('DONE\n')
# --- fMask cloud masking ---
# print('Cloud mask with fMask')
# create ouput image name and output folder
imageName = sensor + '_' + DATE + '_' + tile
outFolder2 = outFolder + '/' + imageName
"""
if not os.path.exists(outFolder2):
os.mkdir(outFolder2)
out_cloud_Mask = outFolder2 + '/cloud_FMask.tif'
if not os.path.isfile(out_cloud_Mask): # if mask doesn't already exist
os.chdir('C:\Users\Olivier\Anaconda2\Scripts') # change path to fmask folder
# create virtual raster
outVRT = outFolder2 + '/allbands.vrt'
cmd = 'gdalbuildvrt -resolution user -tr 20 20 -separate ' + outVRT
for b in bandList:
cmd = cmd + ' ' + b
subprocess.call(cmd, shell=True)
# create angle image
outAngles = outFolder2 + '/angles.img'
cmd = 'python fmask_sentinel2makeAnglesImage.py -i ' + MTD_TL + ' -o ' + outAngles
subprocess.call(cmd, shell=True)
# create mask
outFMASK = outFolder2 + '/cloud.img'
cmd = 'python fmask_sentinel2Stacked.py -a ' + outVRT + ' -z ' + outAngles + ' -o ' + outFMASK
subprocess.call(cmd, shell=True)
# resample mask
cmd = 'gdalwarp -tr 10 10 -ot Byte ' + outFMASK + ' ' + out_cloud_Mask
subprocess.call(cmd, shell=True)
print('DONE\n')
else:
print('Cloud masking already done\n')
"""
# --- DOS1 correction ---
# rasterize input shapefile mask
print('DOS1 atmospheric correction')
# check if DOS1 correction has already been applied
if not os.path.exists(outFolder2 + '/TOC'):
os.mkdir(outFolder2 + '/TOC')
DOS_red = outFolder2+'/TOC/' + imageName + '_B04_TOC.tif'
if not os.path.isfile(DOS_red): # if outFile does not already exist
pathMaskShp = 'C:/Users/ucfadko/Desktop/Coastline_EUROPE_UTMZ33N/Coastline_EUROPE_UTMZ33N.shp'
outLW_Mask = outFolder2 + '/LW_mask.tif'
# check if shapefile exists
if not os.path.isfile(pathMaskShp):
print('Coastline shapefile is not in the right folder')
sys.exit(1)
Xmin = ULX
Xmax = ULX + nc*pixelWidth
Ymin = ULY - nl*pixelWidth
Ymax = ULY
print ('Water/Land mask creation')
cmd = 'gdal_rasterize -a id -ot Byte -te ' + str(Xmin) + ' ' + str(Ymin) + ' ' + str(Xmax) + ' ' + str(Ymax) + ' -tr ' + str(pixelWidth)+ ' ' + str(pixelWidth) + ' ' + pathMaskShp + ' ' + outLW_Mask
# print cmd
subprocess.call(cmd, shell=True)
print ('DONE\n')
# read land/water mask
ds_LW_mask = gdal.Open(outLW_Mask, GA_ReadOnly)
if ds_LW_mask is None:
print('Unable to open land/water mask')
sys.exit(1)
LW_mask = ds_LW_mask.GetRasterBand(1).ReadAsArray(0, 0, ds_LW_mask.RasterXSize, ds_LW_mask.RasterYSize)
"""
# read cloud mask
ds_cloud_mask = gdal.Open(out_cloud_Mask, GA_ReadOnly)
if ds_cloud_mask is None:
print('Unable to open cloud mask')
sys.exit(1)
cloud_mask = ds_cloud_mask.GetRasterBand(1).ReadAsArray(0, 0, ds_cloud_mask.RasterXSize, ds_cloud_mask.RasterYSize)
"""
# loop through bands (beware order of bands is R,G,B,IR - so band[1] is red band
for b in range( dataset.RasterCount ):
# read raster band
band = dataset.GetRasterBand(b+1).ReadAsArray(0, 0, dataset.RasterXSize, dataset.RasterYSize)
# apply masks
band = numpy.where((LW_mask==0), band, 0) # we keep pixels flagged as water (cloud_mask==5) and snow (cloud_mask==4) as turbid waters can be flagged as snow
# PREVIOUS LINE WITH CLOUD MASK : band = numpy.where((( cloud_mask==5) | (cloud_mask==4)) & (LW_mask==0), band, 0) # we keep pixels flagged as water (cloud_mask==5) and snow (cloud_mask==4) as turbid waters can be flagged as snow
# band = numpy.where(LW_mask==0, band, 0)
# convert DN to TOA reflectance
band = band.astype(float)
band = band / QUANT
# convert TOA reflectance to TOA radiance
band = (band * float(ESUN[b]) * math.cos(math.radians(thetas))) / (math.pi * math.pow(dst_earth_sun,2))
# convert 2 | help="path to folder containing S2 data (first folder, not IMG_DATA)",
metavar='<string>')
parser.add_argument("-outFolder", | random_line_split |
|
SEODWARF_S2_TURB_fMask.py | , not IMG_DATA)",
metavar='<string>')
parser.add_argument("-outFolder",
required=True,
help="path to folder where results are stored",
metavar='<string>')
params = vars(parser.parse_args())
inFolder = params['inFolder']
outFolder = params['outFolder']
# recover list of bands to be processed
bandNum10m = [4,3,2,8]
bandNumAll = ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09', 'B10', 'B11', 'B12']
bandList = []
allFiles = glob.glob(inFolder + '\*\GRANULE\*\IMG_DATA\*.jp2')
for b in range(len(bandNumAll)):
for file in allFiles:
if file.endswith('_%s.jp2' % bandNumAll[b]):
bandList.append(file)
# recover metadata file
os.listdir(inFolder + '/GRANULE/')
MTD_MSIL1C = inFolder + '/MTD_MSIL1C.xml'
MTD_TL = inFolder + '/GRANULE/' + os.listdir(inFolder + '/GRANULE/')[0] + '/MTD_TL.xml'
# --- read metadata ---
print('Read metadata files')
# search tile name in input folder
match = re.search('([A-Z][1-9][1-9][A-Z][A-Z][A-Z])', inFolder)
tile = match.group(0)
EPSG_code = 'EPSG_326' + tile[1:3]
dataset = gdal.Open('SENTINEL2_L1C:%s:10m:%s' % (MTD_MSIL1C, EPSG_code), GA_ReadOnly)
if dataset is None:
print('Unable to open image')
sys.exit(1)
MTD = dataset.GetMetadata()
wkt_projection =dataset.GetProjection()
geotransform = dataset.GetGeoTransform()
# print MTD
# read metadata per band
bandB = dataset.GetRasterBand(3)
bandG = dataset.GetRasterBand(2)
bandR = dataset.GetRasterBand(1)
bandIR = dataset.GetRasterBand(4)
MTD_B = bandB.GetMetadata()
MTD_G = bandG.GetMetadata()
MTD_R = bandR.GetMetadata()
MTD_IR = bandIR.GetMetadata()
# --- recover values from metadata ---
ULX = geotransform[0]
ULY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
nl = dataset.RasterYSize
nc = dataset.RasterXSize
DATE = MTD['GENERATION_TIME'][0:10]
HOUR = MTD['GENERATION_TIME'][11:16]
if MTD['DATATAKE_1_SPACECRAFT_NAME'] == 'Sentinel-2A':
sensor = 'S2A'
elif MTD['DATATAKE_1_SPACECRAFT_NAME'] == 'Sentinel-2B':
sensor = 'S2B'
imageNameLong = MTD['PRODUCT_URI']
# pos_ = [pos for pos, char in enumerate(imageNameLong) if char == '_']
# tile = imageNameLong[pos_[4]+1:pos_[4]+7]
dst_earth_sun = float(MTD['REFLECTANCE_CONVERSION_U'])
QUANT = int(MTD['QUANTIFICATION_VALUE'])
ESUN = [MTD_R['SOLAR_IRRADIANCE'], MTD_G['SOLAR_IRRADIANCE'], MTD_B['SOLAR_IRRADIANCE'], MTD_IR['SOLAR_IRRADIANCE']]
# read xml file and get extra metadata
root = ET.parse(MTD_TL).getroot()
tmp = root.find('.//Mean_Sun_Angle/ZENITH_ANGLE')
thetas = 90 - float(tmp.text)
print('DONE\n')
# --- fMask cloud masking ---
# print('Cloud mask with fMask')
# create ouput image name and output folder
imageName = sensor + '_' + DATE + '_' + tile
outFolder2 = outFolder + '/' + imageName
"""
if not os.path.exists(outFolder2):
os.mkdir(outFolder2)
out_cloud_Mask = outFolder2 + '/cloud_FMask.tif'
if not os.path.isfile(out_cloud_Mask): # if mask doesn't already exist
os.chdir('C:\Users\Olivier\Anaconda2\Scripts') # change path to fmask folder
# create virtual raster
outVRT = outFolder2 + '/allbands.vrt'
cmd = 'gdalbuildvrt -resolution user -tr 20 20 -separate ' + outVRT
for b in bandList:
cmd = cmd + ' ' + b
subprocess.call(cmd, shell=True)
# create angle image
outAngles = outFolder2 + '/angles.img'
cmd = 'python fmask_sentinel2makeAnglesImage.py -i ' + MTD_TL + ' -o ' + outAngles
subprocess.call(cmd, shell=True)
# create mask
outFMASK = outFolder2 + '/cloud.img'
cmd = 'python fmask_sentinel2Stacked.py -a ' + outVRT + ' -z ' + outAngles + ' -o ' + outFMASK
subprocess.call(cmd, shell=True)
# resample mask
cmd = 'gdalwarp -tr 10 10 -ot Byte ' + outFMASK + ' ' + out_cloud_Mask
subprocess.call(cmd, shell=True)
print('DONE\n')
else:
print('Cloud masking already done\n')
"""
# --- DOS1 correction ---
# rasterize input shapefile mask
print('DOS1 atmospheric correction')
# check if DOS1 correction has already been applied
if not os.path.exists(outFolder2 + '/TOC'):
|
DOS_red = outFolder2+'/TOC/' + imageName + '_B04_TOC.tif'
if not os.path.isfile(DOS_red): # if outFile does not already exist
pathMaskShp = 'C:/Users/ucfadko/Desktop/Coastline_EUROPE_UTMZ33N/Coastline_EUROPE_UTMZ33N.shp'
outLW_Mask = outFolder2 + '/LW_mask.tif'
# check if shapefile exists
if not os.path.isfile(pathMaskShp):
print('Coastline shapefile is not in the right folder')
sys.exit(1)
Xmin = ULX
Xmax = ULX + nc*pixelWidth
Ymin = ULY - nl*pixelWidth
Ymax = ULY
print ('Water/Land mask creation')
cmd = 'gdal_rasterize -a id -ot Byte -te ' + str(Xmin) + ' ' + str(Ymin) + ' ' + str(Xmax) + ' ' + str(Ymax) + ' -tr ' + str(pixelWidth)+ ' ' + str(pixelWidth) + ' ' + pathMaskShp + ' ' + outLW_Mask
# print cmd
subprocess.call(cmd, shell=True)
print ('DONE\n')
# read land/water mask
ds_LW_mask = gdal.Open(outLW_Mask, GA_ReadOnly)
if ds_LW_mask is None:
print('Unable to open land/water mask')
sys.exit(1)
LW_mask = ds_LW_mask.GetRasterBand(1).ReadAsArray(0, 0, ds_LW_mask.RasterXSize, ds_LW_mask.RasterYSize)
"""
# read cloud mask
ds_cloud_mask = gdal.Open(out_cloud_Mask, GA_ReadOnly)
if ds_cloud_mask is None:
print('Unable to open cloud mask')
sys.exit(1)
cloud_mask = ds_cloud_mask.GetRasterBand(1).ReadAsArray(0, 0, ds_cloud_mask.RasterXSize, ds_cloud_mask.RasterYSize)
"""
# loop through bands (beware order of bands is R,G,B,IR - so band[1] is red band
for b in range( dataset.RasterCount ):
# read raster band
band = dataset.GetRasterBand(b+1).ReadAsArray(0, 0, dataset.RasterXSize, dataset.RasterYSize)
# apply masks
band = numpy.where((LW_mask==0), band, 0) # we keep pixels flagged as water (cloud_mask==5) and snow (cloud_mask==4) as turbid waters can be flagged as snow
# PREVIOUS LINE WITH CLOUD MASK : band = numpy.where((( cloud_mask==5) | (cloud_mask==4)) & (LW_mask==0), band, 0) # we keep pixels flagged as water (cloud_mask==5) and snow (cloud_mask==4) as turbid waters can be flagged as snow
# band = numpy.where(LW_mask==0, band, 0)
# convert DN to TOA reflectance
band = band.astype(float)
band = band / QUANT
# convert TOA reflectance to TOA radiance
band = (band * float(ESUN[b]) * math.cos(math.radians(thetas))) / (math.pi * math.pow(dst_earth_sun,2))
# convert 2D array to 1D array, discard zeros and | os.mkdir(outFolder2 + '/TOC') | conditional_block |
canvas.go | },
}
c.SetBounds(bounds)
var shader *glhf.Shader
mainthread.Call(func() {
var err error
shader, err = glhf.NewShader(
canvasVertexFormat,
canvasUniformFormat,
canvasVertexShader,
canvasFragmentShader,
)
if err != nil {
panic(errors.Wrap(err, "failed to create Canvas, there's a bug in the shader"))
}
})
c.shader = shader
return c
}
// MakeTriangles creates a specialized copy of the supplied Triangles that draws onto this Canvas.
//
// TrianglesPosition, TrianglesColor and TrianglesPicture are supported.
func (c *Canvas) MakeTriangles(t pixel.Triangles) pixel.TargetTriangles {
return &canvasTriangles{
GLTriangles: NewGLTriangles(c.shader, t),
dst: c,
}
}
// MakePicture create a specialized copy of the supplied Picture that draws onto this Canvas.
//
// PictureColor is supported.
func (c *Canvas) MakePicture(p pixel.Picture) pixel.TargetPicture {
if cp, ok := p.(*canvasPicture); ok {
return &canvasPicture{
GLPicture: cp.GLPicture,
dst: c,
}
}
if gp, ok := p.(GLPicture); ok {
return &canvasPicture{
GLPicture: gp,
dst: c,
}
}
return &canvasPicture{
GLPicture: NewGLPicture(p),
dst: c,
}
}
// SetMatrix sets a Matrix that every point will be projected by.
func (c *Canvas) SetMatrix(m pixel.Matrix) {
for i := range m {
c.mat[i] = float32(m[i])
}
}
// SetColorMask sets a color that every color in triangles or a picture will be multiplied by.
func (c *Canvas) SetColorMask(col color.Color) {
rgba := pixel.Alpha(1)
if col != nil {
rgba = pixel.ToRGBA(col)
}
c.col = mgl32.Vec4{
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
}
}
// SetComposeMethod sets a Porter-Duff composition method to be used in the following draws onto
// this Canvas.
func (c *Canvas) SetComposeMethod(cmp pixel.ComposeMethod) {
c.cmp = cmp
}
// SetBounds resizes the Canvas to the new bounds. Old content will be preserved.
func (c *Canvas) SetBounds(bounds pixel.Rect) {
c.gf.SetBounds(bounds)
if c.sprite == nil {
c.sprite = pixel.NewSprite(nil, pixel.Rect{})
}
c.sprite.Set(c, c.Bounds())
//c.sprite.SetMatrix(pixel.IM.Moved(c.Bounds().Center()))
}
// Bounds returns the rectangular bounds of the Canvas.
func (c *Canvas) Bounds() pixel.Rect {
return c.gf.Bounds()
}
// SetSmooth sets whether stretched Pictures drawn onto this Canvas should be drawn smooth or
// pixely.
func (c *Canvas) SetSmooth(smooth bool) {
c.smooth = smooth
}
// Smooth returns whether stretched Pictures drawn onto this Canvas are set to be drawn smooth or
// pixely.
func (c *Canvas) Smooth() bool {
return c.smooth
}
// must be manually called inside mainthread
func (c *Canvas) setGlhfBounds() {
_, _, bw, bh := intBounds(c.gf.Bounds())
glhf.Bounds(0, 0, bw, bh)
}
// must be manually called inside mainthread
func setBlendFunc(cmp pixel.ComposeMethod) {
switch cmp {
case pixel.ComposeOver:
glhf.BlendFunc(glhf.One, glhf.OneMinusSrcAlpha)
case pixel.ComposeIn:
glhf.BlendFunc(glhf.DstAlpha, glhf.Zero)
case pixel.ComposeOut:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.Zero)
case pixel.ComposeAtop:
glhf.BlendFunc(glhf.DstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposeRover:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.One)
case pixel.ComposeRin:
glhf.BlendFunc(glhf.Zero, glhf.SrcAlpha)
case pixel.ComposeRout:
glhf.BlendFunc(glhf.Zero, glhf.OneMinusSrcAlpha)
case pixel.ComposeRatop:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.SrcAlpha)
case pixel.ComposeXor:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposePlus:
glhf.BlendFunc(glhf.One, glhf.One)
case pixel.ComposeCopy:
glhf.BlendFunc(glhf.One, glhf.Zero)
default:
panic(errors.New("Canvas: invalid compose method"))
}
}
// Clear fills the whole Canvas with a single color.
func (c *Canvas) Clear(color color.Color) {
c.gf.Dirty()
rgba := pixel.ToRGBA(color)
// color masking
rgba = rgba.Mul(pixel.RGBA{
R: float64(c.col[0]),
G: float64(c.col[1]),
B: float64(c.col[2]),
A: float64(c.col[3]),
})
mainthread.CallNonBlock(func() {
c.setGlhfBounds()
c.gf.Frame().Begin()
glhf.Clear(
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
)
c.gf.Frame().End()
})
}
// Color returns the color of the pixel over the given position inside the Canvas.
func (c *Canvas) Color(at pixel.Vec) pixel.RGBA {
return c.gf.Color(at)
}
// Texture returns the underlying OpenGL Texture of this Canvas.
//
// Implements GLPicture interface.
func (c *Canvas) Texture() *glhf.Texture {
return c.gf.Texture()
}
// Frame returns the underlying OpenGL Frame of this Canvas.
func (c *Canvas) Frame() *glhf.Frame {
return c.gf.frame
}
// SetPixels replaces the content of the Canvas with the provided pixels. The provided slice must be
// an alpha-premultiplied RGBA sequence of correct length (4 * width * height).
func (c *Canvas) SetPixels(pixels []uint8) {
c.gf.Dirty()
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
tex.SetPixels(0, 0, tex.Width(), tex.Height(), pixels)
tex.End()
})
}
// Pixels returns an alpha-premultiplied RGBA sequence of the content of the Canvas.
func (c *Canvas) Pixels() []uint8 {
var pixels []uint8
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
pixels = tex.Pixels(0, 0, tex.Width(), tex.Height())
tex.End()
})
return pixels
}
// Draw draws the content of the Canvas onto another Target, transformed by the given Matrix, just
// like if it was a Sprite containing the whole Canvas.
func (c *Canvas) Draw(t pixel.Target, matrix pixel.Matrix) {
c.sprite.Draw(t, matrix)
}
// DrawColorMask draws the content of the Canvas onto another Target, transformed by the given
// Matrix and multiplied by the given mask, just like if it was a Sprite containing the whole Canvas.
//
// If the color mask is nil, a fully opaque white mask will be used causing no effect.
func (c *Canvas) DrawColorMask(t pixel.Target, matrix pixel.Matrix, mask color.Color) |
type canvasTriangles struct {
*GLTriangles
dst *Canvas
}
func (ct *canvasTriangles) draw(tex *glhf.Texture, bounds pixel.Rect) {
ct.dst.gf.Dirty()
// save the current state vars to avoid race condition
cmp := ct.dst.cmp
mat := ct.dst.mat
col := ct.dst.col
smt := ct.dst.smooth
mainthread.CallNonBlock(func() {
ct.dst.setGlhfBounds()
setBlendFunc(cmp)
frame := ct.dst.gf.Frame()
shader := ct.dst.shader
frame.Begin()
shader.Begin()
dstBounds := ct.dst.Bounds()
shader.SetUniformAttr(canvasBounds, mgl32.Vec4{
float32(dstBounds.Min.X),
float32(dstBounds.Min.Y),
float32(dstBounds.W()),
float32(dstBounds.H()),
})
shader.SetUniformAttr(canvasTransform, mat)
shader.SetUniformAttr(canvasColorMask, col)
if tex == nil {
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
} else {
tex.Begin()
bx, by, bw, bh := intBounds(bounds)
shader.SetUniformAttr(canvasTexBounds, mgl32.Vec4{
float32(bx),
float32(by),
float32(bw),
float32(bh),
})
if tex.Smooth() != smt {
tex.SetSmooth(smt)
}
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
tex.End()
}
shader.End()
frame.End()
})
}
func (ct *canvasTriangles) Draw() {
ct.draw(nil, pixel.Rect{})
}
type canvasPicture struct {
GLPicture
dst *Canvas
}
func (cp *canvasPicture) | {
c.sprite.DrawColorMask(t, matrix, mask)
} | identifier_body |
canvas.go | 1},
}
c.SetBounds(bounds)
var shader *glhf.Shader
mainthread.Call(func() {
var err error
shader, err = glhf.NewShader(
canvasVertexFormat,
canvasUniformFormat,
canvasVertexShader,
canvasFragmentShader,
)
if err != nil {
panic(errors.Wrap(err, "failed to create Canvas, there's a bug in the shader"))
}
})
c.shader = shader
return c
}
// MakeTriangles creates a specialized copy of the supplied Triangles that draws onto this Canvas.
//
// TrianglesPosition, TrianglesColor and TrianglesPicture are supported.
func (c *Canvas) MakeTriangles(t pixel.Triangles) pixel.TargetTriangles {
return &canvasTriangles{
GLTriangles: NewGLTriangles(c.shader, t),
dst: c,
}
}
// MakePicture create a specialized copy of the supplied Picture that draws onto this Canvas.
//
// PictureColor is supported.
func (c *Canvas) MakePicture(p pixel.Picture) pixel.TargetPicture {
if cp, ok := p.(*canvasPicture); ok {
return &canvasPicture{
GLPicture: cp.GLPicture,
dst: c,
}
}
if gp, ok := p.(GLPicture); ok {
return &canvasPicture{
GLPicture: gp,
dst: c,
}
}
return &canvasPicture{
GLPicture: NewGLPicture(p),
dst: c,
}
}
// SetMatrix sets a Matrix that every point will be projected by.
func (c *Canvas) SetMatrix(m pixel.Matrix) {
for i := range m {
c.mat[i] = float32(m[i])
}
}
// SetColorMask sets a color that every color in triangles or a picture will be multiplied by.
func (c *Canvas) SetColorMask(col color.Color) {
rgba := pixel.Alpha(1)
if col != nil {
rgba = pixel.ToRGBA(col)
}
c.col = mgl32.Vec4{
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
}
}
// SetComposeMethod sets a Porter-Duff composition method to be used in the following draws onto
// this Canvas.
func (c *Canvas) SetComposeMethod(cmp pixel.ComposeMethod) {
c.cmp = cmp
}
// SetBounds resizes the Canvas to the new bounds. Old content will be preserved.
func (c *Canvas) SetBounds(bounds pixel.Rect) {
c.gf.SetBounds(bounds)
if c.sprite == nil {
c.sprite = pixel.NewSprite(nil, pixel.Rect{})
}
c.sprite.Set(c, c.Bounds())
//c.sprite.SetMatrix(pixel.IM.Moved(c.Bounds().Center()))
}
// Bounds returns the rectangular bounds of the Canvas.
func (c *Canvas) Bounds() pixel.Rect {
return c.gf.Bounds()
}
// SetSmooth sets whether stretched Pictures drawn onto this Canvas should be drawn smooth or
// pixely.
func (c *Canvas) SetSmooth(smooth bool) {
c.smooth = smooth
}
// Smooth returns whether stretched Pictures drawn onto this Canvas are set to be drawn smooth or
// pixely.
func (c *Canvas) Smooth() bool {
return c.smooth
}
// must be manually called inside mainthread
func (c *Canvas) setGlhfBounds() {
_, _, bw, bh := intBounds(c.gf.Bounds())
glhf.Bounds(0, 0, bw, bh)
}
// must be manually called inside mainthread
func setBlendFunc(cmp pixel.ComposeMethod) {
switch cmp {
case pixel.ComposeOver:
glhf.BlendFunc(glhf.One, glhf.OneMinusSrcAlpha)
case pixel.ComposeIn:
glhf.BlendFunc(glhf.DstAlpha, glhf.Zero)
case pixel.ComposeOut:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.Zero)
case pixel.ComposeAtop:
glhf.BlendFunc(glhf.DstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposeRover:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.One)
case pixel.ComposeRin:
glhf.BlendFunc(glhf.Zero, glhf.SrcAlpha)
case pixel.ComposeRout:
glhf.BlendFunc(glhf.Zero, glhf.OneMinusSrcAlpha)
case pixel.ComposeRatop:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.SrcAlpha)
case pixel.ComposeXor:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposePlus:
glhf.BlendFunc(glhf.One, glhf.One)
case pixel.ComposeCopy:
glhf.BlendFunc(glhf.One, glhf.Zero)
default:
panic(errors.New("Canvas: invalid compose method"))
}
}
// Clear fills the whole Canvas with a single color.
func (c *Canvas) Clear(color color.Color) {
c.gf.Dirty()
rgba := pixel.ToRGBA(color)
// color masking
rgba = rgba.Mul(pixel.RGBA{
R: float64(c.col[0]),
G: float64(c.col[1]),
B: float64(c.col[2]),
A: float64(c.col[3]),
})
mainthread.CallNonBlock(func() {
c.setGlhfBounds()
c.gf.Frame().Begin()
glhf.Clear(
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
)
c.gf.Frame().End() | func (c *Canvas) Color(at pixel.Vec) pixel.RGBA {
return c.gf.Color(at)
}
// Texture returns the underlying OpenGL Texture of this Canvas.
//
// Implements GLPicture interface.
func (c *Canvas) Texture() *glhf.Texture {
return c.gf.Texture()
}
// Frame returns the underlying OpenGL Frame of this Canvas.
func (c *Canvas) Frame() *glhf.Frame {
return c.gf.frame
}
// SetPixels replaces the content of the Canvas with the provided pixels. The provided slice must be
// an alpha-premultiplied RGBA sequence of correct length (4 * width * height).
func (c *Canvas) SetPixels(pixels []uint8) {
c.gf.Dirty()
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
tex.SetPixels(0, 0, tex.Width(), tex.Height(), pixels)
tex.End()
})
}
// Pixels returns an alpha-premultiplied RGBA sequence of the content of the Canvas.
func (c *Canvas) Pixels() []uint8 {
var pixels []uint8
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
pixels = tex.Pixels(0, 0, tex.Width(), tex.Height())
tex.End()
})
return pixels
}
// Draw draws the content of the Canvas onto another Target, transformed by the given Matrix, just
// like if it was a Sprite containing the whole Canvas.
func (c *Canvas) Draw(t pixel.Target, matrix pixel.Matrix) {
c.sprite.Draw(t, matrix)
}
// DrawColorMask draws the content of the Canvas onto another Target, transformed by the given
// Matrix and multiplied by the given mask, just like if it was a Sprite containing the whole Canvas.
//
// If the color mask is nil, a fully opaque white mask will be used causing no effect.
func (c *Canvas) DrawColorMask(t pixel.Target, matrix pixel.Matrix, mask color.Color) {
c.sprite.DrawColorMask(t, matrix, mask)
}
type canvasTriangles struct {
*GLTriangles
dst *Canvas
}
func (ct *canvasTriangles) draw(tex *glhf.Texture, bounds pixel.Rect) {
ct.dst.gf.Dirty()
// save the current state vars to avoid race condition
cmp := ct.dst.cmp
mat := ct.dst.mat
col := ct.dst.col
smt := ct.dst.smooth
mainthread.CallNonBlock(func() {
ct.dst.setGlhfBounds()
setBlendFunc(cmp)
frame := ct.dst.gf.Frame()
shader := ct.dst.shader
frame.Begin()
shader.Begin()
dstBounds := ct.dst.Bounds()
shader.SetUniformAttr(canvasBounds, mgl32.Vec4{
float32(dstBounds.Min.X),
float32(dstBounds.Min.Y),
float32(dstBounds.W()),
float32(dstBounds.H()),
})
shader.SetUniformAttr(canvasTransform, mat)
shader.SetUniformAttr(canvasColorMask, col)
if tex == nil {
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
} else {
tex.Begin()
bx, by, bw, bh := intBounds(bounds)
shader.SetUniformAttr(canvasTexBounds, mgl32.Vec4{
float32(bx),
float32(by),
float32(bw),
float32(bh),
})
if tex.Smooth() != smt {
tex.SetSmooth(smt)
}
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
tex.End()
}
shader.End()
frame.End()
})
}
func (ct *canvasTriangles) Draw() {
ct.draw(nil, pixel.Rect{})
}
type canvasPicture struct {
GLPicture
dst *Canvas
}
func (cp *canvasPicture) Draw | })
}
// Color returns the color of the pixel over the given position inside the Canvas. | random_line_split |
canvas.go | 1},
}
c.SetBounds(bounds)
var shader *glhf.Shader
mainthread.Call(func() {
var err error
shader, err = glhf.NewShader(
canvasVertexFormat,
canvasUniformFormat,
canvasVertexShader,
canvasFragmentShader,
)
if err != nil {
panic(errors.Wrap(err, "failed to create Canvas, there's a bug in the shader"))
}
})
c.shader = shader
return c
}
// MakeTriangles creates a specialized copy of the supplied Triangles that draws onto this Canvas.
//
// TrianglesPosition, TrianglesColor and TrianglesPicture are supported.
func (c *Canvas) MakeTriangles(t pixel.Triangles) pixel.TargetTriangles {
return &canvasTriangles{
GLTriangles: NewGLTriangles(c.shader, t),
dst: c,
}
}
// MakePicture create a specialized copy of the supplied Picture that draws onto this Canvas.
//
// PictureColor is supported.
func (c *Canvas) MakePicture(p pixel.Picture) pixel.TargetPicture {
if cp, ok := p.(*canvasPicture); ok {
return &canvasPicture{
GLPicture: cp.GLPicture,
dst: c,
}
}
if gp, ok := p.(GLPicture); ok {
return &canvasPicture{
GLPicture: gp,
dst: c,
}
}
return &canvasPicture{
GLPicture: NewGLPicture(p),
dst: c,
}
}
// SetMatrix sets a Matrix that every point will be projected by.
func (c *Canvas) SetMatrix(m pixel.Matrix) {
for i := range m {
c.mat[i] = float32(m[i])
}
}
// SetColorMask sets a color that every color in triangles or a picture will be multiplied by.
func (c *Canvas) SetColorMask(col color.Color) {
rgba := pixel.Alpha(1)
if col != nil {
rgba = pixel.ToRGBA(col)
}
c.col = mgl32.Vec4{
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
}
}
// SetComposeMethod sets a Porter-Duff composition method to be used in the following draws onto
// this Canvas.
func (c *Canvas) SetComposeMethod(cmp pixel.ComposeMethod) {
c.cmp = cmp
}
// SetBounds resizes the Canvas to the new bounds. Old content will be preserved.
func (c *Canvas) SetBounds(bounds pixel.Rect) {
c.gf.SetBounds(bounds)
if c.sprite == nil {
c.sprite = pixel.NewSprite(nil, pixel.Rect{})
}
c.sprite.Set(c, c.Bounds())
//c.sprite.SetMatrix(pixel.IM.Moved(c.Bounds().Center()))
}
// Bounds returns the rectangular bounds of the Canvas.
func (c *Canvas) Bounds() pixel.Rect {
return c.gf.Bounds()
}
// SetSmooth sets whether stretched Pictures drawn onto this Canvas should be drawn smooth or
// pixely.
func (c *Canvas) SetSmooth(smooth bool) {
c.smooth = smooth
}
// Smooth returns whether stretched Pictures drawn onto this Canvas are set to be drawn smooth or
// pixely.
func (c *Canvas) Smooth() bool {
return c.smooth
}
// must be manually called inside mainthread
func (c *Canvas) setGlhfBounds() {
_, _, bw, bh := intBounds(c.gf.Bounds())
glhf.Bounds(0, 0, bw, bh)
}
// must be manually called inside mainthread
func setBlendFunc(cmp pixel.ComposeMethod) {
switch cmp {
case pixel.ComposeOver:
glhf.BlendFunc(glhf.One, glhf.OneMinusSrcAlpha)
case pixel.ComposeIn:
glhf.BlendFunc(glhf.DstAlpha, glhf.Zero)
case pixel.ComposeOut:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.Zero)
case pixel.ComposeAtop:
glhf.BlendFunc(glhf.DstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposeRover:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.One)
case pixel.ComposeRin:
glhf.BlendFunc(glhf.Zero, glhf.SrcAlpha)
case pixel.ComposeRout:
glhf.BlendFunc(glhf.Zero, glhf.OneMinusSrcAlpha)
case pixel.ComposeRatop:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.SrcAlpha)
case pixel.ComposeXor:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposePlus:
glhf.BlendFunc(glhf.One, glhf.One)
case pixel.ComposeCopy:
glhf.BlendFunc(glhf.One, glhf.Zero)
default:
panic(errors.New("Canvas: invalid compose method"))
}
}
// Clear fills the whole Canvas with a single color.
func (c *Canvas) Clear(color color.Color) {
c.gf.Dirty()
rgba := pixel.ToRGBA(color)
// color masking
rgba = rgba.Mul(pixel.RGBA{
R: float64(c.col[0]),
G: float64(c.col[1]),
B: float64(c.col[2]),
A: float64(c.col[3]),
})
mainthread.CallNonBlock(func() {
c.setGlhfBounds()
c.gf.Frame().Begin()
glhf.Clear(
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
)
c.gf.Frame().End()
})
}
// Color returns the color of the pixel over the given position inside the Canvas.
func (c *Canvas) Color(at pixel.Vec) pixel.RGBA {
return c.gf.Color(at)
}
// Texture returns the underlying OpenGL Texture of this Canvas.
//
// Implements GLPicture interface.
func (c *Canvas) Texture() *glhf.Texture {
return c.gf.Texture()
}
// Frame returns the underlying OpenGL Frame of this Canvas.
func (c *Canvas) Frame() *glhf.Frame {
return c.gf.frame
}
// SetPixels replaces the content of the Canvas with the provided pixels. The provided slice must be
// an alpha-premultiplied RGBA sequence of correct length (4 * width * height).
func (c *Canvas) SetPixels(pixels []uint8) {
c.gf.Dirty()
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
tex.SetPixels(0, 0, tex.Width(), tex.Height(), pixels)
tex.End()
})
}
// Pixels returns an alpha-premultiplied RGBA sequence of the content of the Canvas.
func (c *Canvas) Pixels() []uint8 {
var pixels []uint8
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
pixels = tex.Pixels(0, 0, tex.Width(), tex.Height())
tex.End()
})
return pixels
}
// Draw draws the content of the Canvas onto another Target, transformed by the given Matrix, just
// like if it was a Sprite containing the whole Canvas.
func (c *Canvas) Draw(t pixel.Target, matrix pixel.Matrix) {
c.sprite.Draw(t, matrix)
}
// DrawColorMask draws the content of the Canvas onto another Target, transformed by the given
// Matrix and multiplied by the given mask, just like if it was a Sprite containing the whole Canvas.
//
// If the color mask is nil, a fully opaque white mask will be used causing no effect.
func (c *Canvas) | (t pixel.Target, matrix pixel.Matrix, mask color.Color) {
c.sprite.DrawColorMask(t, matrix, mask)
}
type canvasTriangles struct {
*GLTriangles
dst *Canvas
}
func (ct *canvasTriangles) draw(tex *glhf.Texture, bounds pixel.Rect) {
ct.dst.gf.Dirty()
// save the current state vars to avoid race condition
cmp := ct.dst.cmp
mat := ct.dst.mat
col := ct.dst.col
smt := ct.dst.smooth
mainthread.CallNonBlock(func() {
ct.dst.setGlhfBounds()
setBlendFunc(cmp)
frame := ct.dst.gf.Frame()
shader := ct.dst.shader
frame.Begin()
shader.Begin()
dstBounds := ct.dst.Bounds()
shader.SetUniformAttr(canvasBounds, mgl32.Vec4{
float32(dstBounds.Min.X),
float32(dstBounds.Min.Y),
float32(dstBounds.W()),
float32(dstBounds.H()),
})
shader.SetUniformAttr(canvasTransform, mat)
shader.SetUniformAttr(canvasColorMask, col)
if tex == nil {
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
} else {
tex.Begin()
bx, by, bw, bh := intBounds(bounds)
shader.SetUniformAttr(canvasTexBounds, mgl32.Vec4{
float32(bx),
float32(by),
float32(bw),
float32(bh),
})
if tex.Smooth() != smt {
tex.SetSmooth(smt)
}
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
tex.End()
}
shader.End()
frame.End()
})
}
func (ct *canvasTriangles) Draw() {
ct.draw(nil, pixel.Rect{})
}
type canvasPicture struct {
GLPicture
dst *Canvas
}
func (cp *canvasPicture) | DrawColorMask | identifier_name |
canvas.go | },
}
c.SetBounds(bounds)
var shader *glhf.Shader
mainthread.Call(func() {
var err error
shader, err = glhf.NewShader(
canvasVertexFormat,
canvasUniformFormat,
canvasVertexShader,
canvasFragmentShader,
)
if err != nil {
panic(errors.Wrap(err, "failed to create Canvas, there's a bug in the shader"))
}
})
c.shader = shader
return c
}
// MakeTriangles creates a specialized copy of the supplied Triangles that draws onto this Canvas.
//
// TrianglesPosition, TrianglesColor and TrianglesPicture are supported.
func (c *Canvas) MakeTriangles(t pixel.Triangles) pixel.TargetTriangles {
return &canvasTriangles{
GLTriangles: NewGLTriangles(c.shader, t),
dst: c,
}
}
// MakePicture create a specialized copy of the supplied Picture that draws onto this Canvas.
//
// PictureColor is supported.
func (c *Canvas) MakePicture(p pixel.Picture) pixel.TargetPicture {
if cp, ok := p.(*canvasPicture); ok {
return &canvasPicture{
GLPicture: cp.GLPicture,
dst: c,
}
}
if gp, ok := p.(GLPicture); ok {
return &canvasPicture{
GLPicture: gp,
dst: c,
}
}
return &canvasPicture{
GLPicture: NewGLPicture(p),
dst: c,
}
}
// SetMatrix sets a Matrix that every point will be projected by.
func (c *Canvas) SetMatrix(m pixel.Matrix) {
for i := range m |
}
// SetColorMask sets a color that every color in triangles or a picture will be multiplied by.
func (c *Canvas) SetColorMask(col color.Color) {
rgba := pixel.Alpha(1)
if col != nil {
rgba = pixel.ToRGBA(col)
}
c.col = mgl32.Vec4{
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
}
}
// SetComposeMethod sets a Porter-Duff composition method to be used in the following draws onto
// this Canvas.
func (c *Canvas) SetComposeMethod(cmp pixel.ComposeMethod) {
c.cmp = cmp
}
// SetBounds resizes the Canvas to the new bounds. Old content will be preserved.
func (c *Canvas) SetBounds(bounds pixel.Rect) {
c.gf.SetBounds(bounds)
if c.sprite == nil {
c.sprite = pixel.NewSprite(nil, pixel.Rect{})
}
c.sprite.Set(c, c.Bounds())
//c.sprite.SetMatrix(pixel.IM.Moved(c.Bounds().Center()))
}
// Bounds returns the rectangular bounds of the Canvas.
func (c *Canvas) Bounds() pixel.Rect {
return c.gf.Bounds()
}
// SetSmooth sets whether stretched Pictures drawn onto this Canvas should be drawn smooth or
// pixely.
func (c *Canvas) SetSmooth(smooth bool) {
c.smooth = smooth
}
// Smooth returns whether stretched Pictures drawn onto this Canvas are set to be drawn smooth or
// pixely.
func (c *Canvas) Smooth() bool {
return c.smooth
}
// must be manually called inside mainthread
func (c *Canvas) setGlhfBounds() {
_, _, bw, bh := intBounds(c.gf.Bounds())
glhf.Bounds(0, 0, bw, bh)
}
// must be manually called inside mainthread
func setBlendFunc(cmp pixel.ComposeMethod) {
switch cmp {
case pixel.ComposeOver:
glhf.BlendFunc(glhf.One, glhf.OneMinusSrcAlpha)
case pixel.ComposeIn:
glhf.BlendFunc(glhf.DstAlpha, glhf.Zero)
case pixel.ComposeOut:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.Zero)
case pixel.ComposeAtop:
glhf.BlendFunc(glhf.DstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposeRover:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.One)
case pixel.ComposeRin:
glhf.BlendFunc(glhf.Zero, glhf.SrcAlpha)
case pixel.ComposeRout:
glhf.BlendFunc(glhf.Zero, glhf.OneMinusSrcAlpha)
case pixel.ComposeRatop:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.SrcAlpha)
case pixel.ComposeXor:
glhf.BlendFunc(glhf.OneMinusDstAlpha, glhf.OneMinusSrcAlpha)
case pixel.ComposePlus:
glhf.BlendFunc(glhf.One, glhf.One)
case pixel.ComposeCopy:
glhf.BlendFunc(glhf.One, glhf.Zero)
default:
panic(errors.New("Canvas: invalid compose method"))
}
}
// Clear fills the whole Canvas with a single color.
func (c *Canvas) Clear(color color.Color) {
c.gf.Dirty()
rgba := pixel.ToRGBA(color)
// color masking
rgba = rgba.Mul(pixel.RGBA{
R: float64(c.col[0]),
G: float64(c.col[1]),
B: float64(c.col[2]),
A: float64(c.col[3]),
})
mainthread.CallNonBlock(func() {
c.setGlhfBounds()
c.gf.Frame().Begin()
glhf.Clear(
float32(rgba.R),
float32(rgba.G),
float32(rgba.B),
float32(rgba.A),
)
c.gf.Frame().End()
})
}
// Color returns the color of the pixel over the given position inside the Canvas.
func (c *Canvas) Color(at pixel.Vec) pixel.RGBA {
return c.gf.Color(at)
}
// Texture returns the underlying OpenGL Texture of this Canvas.
//
// Implements GLPicture interface.
func (c *Canvas) Texture() *glhf.Texture {
return c.gf.Texture()
}
// Frame returns the underlying OpenGL Frame of this Canvas.
func (c *Canvas) Frame() *glhf.Frame {
return c.gf.frame
}
// SetPixels replaces the content of the Canvas with the provided pixels. The provided slice must be
// an alpha-premultiplied RGBA sequence of correct length (4 * width * height).
func (c *Canvas) SetPixels(pixels []uint8) {
c.gf.Dirty()
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
tex.SetPixels(0, 0, tex.Width(), tex.Height(), pixels)
tex.End()
})
}
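// A small illustrative sketch of building a buffer of the required length
// (4 * width * height) for SetPixels; the dimensions and color are assumed.
//
//	w, h := 320, 240
//	pix := make([]uint8, 4*w*h)
//	for i := 0; i < len(pix); i += 4 {
//		pix[i+0], pix[i+1], pix[i+2], pix[i+3] = 0xff, 0x00, 0x00, 0xff // premultiplied opaque red
//	}
//	canvas.SetPixels(pix)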
// Pixels returns an alpha-premultiplied RGBA sequence of the content of the Canvas.
func (c *Canvas) Pixels() []uint8 {
var pixels []uint8
mainthread.Call(func() {
tex := c.Texture()
tex.Begin()
pixels = tex.Pixels(0, 0, tex.Width(), tex.Height())
tex.End()
})
return pixels
}
// Draw draws the content of the Canvas onto another Target, transformed by the given Matrix, just
// like if it was a Sprite containing the whole Canvas.
func (c *Canvas) Draw(t pixel.Target, matrix pixel.Matrix) {
c.sprite.Draw(t, matrix)
}
// DrawColorMask draws the content of the Canvas onto another Target, transformed by the given
// Matrix and multiplied by the given mask, just like if it was a Sprite containing the whole Canvas.
//
// If the color mask is nil, a fully opaque white mask will be used causing no effect.
func (c *Canvas) DrawColorMask(t pixel.Target, matrix pixel.Matrix, mask color.Color) {
c.sprite.DrawColorMask(t, matrix, mask)
}
type canvasTriangles struct {
*GLTriangles
dst *Canvas
}
func (ct *canvasTriangles) draw(tex *glhf.Texture, bounds pixel.Rect) {
ct.dst.gf.Dirty()
// save the current state vars to avoid race condition
cmp := ct.dst.cmp
mat := ct.dst.mat
col := ct.dst.col
smt := ct.dst.smooth
mainthread.CallNonBlock(func() {
ct.dst.setGlhfBounds()
setBlendFunc(cmp)
frame := ct.dst.gf.Frame()
shader := ct.dst.shader
frame.Begin()
shader.Begin()
dstBounds := ct.dst.Bounds()
shader.SetUniformAttr(canvasBounds, mgl32.Vec4{
float32(dstBounds.Min.X),
float32(dstBounds.Min.Y),
float32(dstBounds.W()),
float32(dstBounds.H()),
})
shader.SetUniformAttr(canvasTransform, mat)
shader.SetUniformAttr(canvasColorMask, col)
if tex == nil {
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
} else {
tex.Begin()
bx, by, bw, bh := intBounds(bounds)
shader.SetUniformAttr(canvasTexBounds, mgl32.Vec4{
float32(bx),
float32(by),
float32(bw),
float32(bh),
})
if tex.Smooth() != smt {
tex.SetSmooth(smt)
}
ct.vs.Begin()
ct.vs.Draw()
ct.vs.End()
tex.End()
}
shader.End()
frame.End()
})
}
func (ct *canvasTriangles) Draw() {
ct.draw(nil, pixel.Rect{})
}
type canvasPicture struct {
GLPicture
dst *Canvas
}
func (cp *canvasPicture) | {
c.mat[i] = float32(m[i])
} | conditional_block |
mod.rs | builder for a fletcher.
///
/// One can use it for specifying a fletcher algorithm, which can be used for checksumming.
///
/// Example:
/// ```
/// # use delsum_lib::fletcher::Fletcher;
/// let adler32 = Fletcher::<u32>::with_options()
/// .width(32)
/// .init(1)
/// .module(65521)
/// .check(0x091e01de)
/// .name("adler32")
/// .build()
/// .is_ok();
/// ```
#[derive(Clone, Debug)]
pub struct FletcherBuilder<Sum: Modnum> {
width: Option<usize>,
module: Option<Sum>,
init: Option<Sum>,
addout: Option<Sum::Double>,
swap: Option<bool>,
input_endian: Option<Endian>,
output_endian: Option<Endian>,
wordsize: Option<usize>,
check: Option<Sum::Double>,
name: Option<String>,
}
impl<S: Modnum> FletcherBuilder<S> {
/// Sets the width of the type (both sums included, must be even, mandatory)
pub fn width(&mut self, w: usize) -> &mut Self {
self.width = Some(w);
self
}
/// Sets the module of both sums (mandatory)
pub fn module(&mut self, m: S) -> &mut Self {
self.module = Some(m);
self
}
/// Sets the initial value
///
/// Contains one value for the regular sum.
pub fn init(&mut self, i: S) -> &mut Self {
self.init = Some(i);
self
}
/// Sets a value that gets added after the checksum is finished
///
/// Contains separate values for both sums, the cumulative one is bitshifted
pub fn addout(&mut self, o: S::Double) -> &mut Self {
self.addout = Some(o);
self
}
/// Normally, the cumulative sum is saved on the higher bits and the normal sum in the lower bits.
/// Setting this option to true swaps the positions.
pub fn swap(&mut self, s: bool) -> &mut Self |
/// The endian of the words of the input file
pub fn inendian(&mut self, e: Endian) -> &mut Self {
self.input_endian = Some(e);
self
}
/// The number of bits in a word of the input file
pub fn wordsize(&mut self, n: usize) -> &mut Self {
self.wordsize = Some(n);
self
}
/// The endian of the checksum
pub fn outendian(&mut self, e: Endian) -> &mut Self {
self.output_endian = Some(e);
self
}
/// Checks whether c is the same as the checksum of "123456789" on creation
pub fn check(&mut self, c: S::Double) -> &mut Self {
self.check = Some(c);
self
}
/// A name to be displayed
pub fn name(&mut self, n: &str) -> &mut Self {
self.name = Some(String::from(n));
self
}
/// Returns the Fletcher object after verifying correctness
pub fn build(&self) -> Result<Fletcher<S>, CheckBuilderErr> {
let init = self.init.unwrap_or_else(S::zero);
let addout = self.addout.unwrap_or_else(S::Double::zero);
// note: we only store the half width because it is more useful to us
let hwidth = match self.width {
None => return Err(CheckBuilderErr::MissingParameter("width")),
Some(w) => {
if w % 2 != 0 || w > addout.bits() {
return Err(CheckBuilderErr::ValueOutOfRange("width"));
} else {
w / 2
}
}
};
let mask = (S::Double::one() << hwidth) - S::Double::one();
let module = self.module.unwrap_or_else(S::zero);
let wordsize = self.wordsize.unwrap_or(8);
if wordsize == 0 || wordsize % 8 != 0 || wordsize > 64 {
return Err(CheckBuilderErr::ValueOutOfRange("wordsize"));
}
let wordspec = WordSpec {
input_endian: self.input_endian.unwrap_or(Endian::Big),
wordsize,
output_endian: self.output_endian.unwrap_or(Endian::Big),
};
let mut fletch = Fletcher {
hwidth,
module,
init,
addout,
swap: self.swap.unwrap_or(false),
wordspec,
mask,
name: self.name.clone(),
};
let (mut s, mut c) = fletch.from_compact(addout);
if !module.is_zero() {
s = s % module;
c = c % module;
fletch.init = init % module;
} else {
fletch.init = init;
};
fletch.addout = fletch.to_compact((s, c));
match self.check {
Some(chk) => {
if fletch.digest(&b"123456789"[..]).unwrap() != chk {
println!("{:x?}", fletch.digest(&b"123456789"[..]).unwrap());
Err(CheckBuilderErr::CheckFail)
} else {
Ok(fletch)
}
}
None => Ok(fletch),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Fletcher<Sum: Modnum> {
hwidth: usize,
module: Sum,
init: Sum,
addout: Sum::Double,
swap: bool,
wordspec: WordSpec,
mask: Sum::Double,
name: Option<String>,
}
impl<Sum: Modnum> Display for Fletcher<Sum> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match &self.name {
Some(n) => write!(f, "{}", n),
None => {
write!(
f,
"fletcher width={} module={:#x} init={:#x} addout={:#x} swap={}",
2 * self.hwidth,
self.module,
self.init,
self.addout,
self.swap
)?;
if self.wordspec.word_bytes() != 1 {
write!(
f,
" in_endian={} wordsize={}",
self.wordspec.input_endian, self.wordspec.wordsize
)?;
};
if self.hwidth * 2 > 8 {
write!(f, " out_endian={}", self.wordspec.output_endian)?;
};
Ok(())
}
}
}
}
impl<Sum: Modnum> Fletcher<Sum> {
/// Creates a `FletcherBuilder`, see `FletcherBuilder` documentation for more details.
pub fn with_options() -> FletcherBuilder<Sum> {
FletcherBuilder {
width: None,
module: None,
init: None,
addout: None,
swap: None,
input_endian: None,
output_endian: None,
wordsize: None,
check: None,
name: None,
}
}
fn from_compact(&self, x: Sum::Double) -> (Sum, Sum) {
let l = Sum::from_double(x & self.mask);
let h = Sum::from_double((x >> self.hwidth) & self.mask);
if self.swap {
(h, l)
} else {
(l, h)
}
}
fn to_compact(&self, (s, c): (Sum, Sum)) -> Sum::Double {
let (l, h) = if self.swap { (c, s) } else { (s, c) };
(Sum::Double::from(l) & self.mask) ^ (Sum::Double::from(h) & self.mask) << self.hwidth
}
}
impl<Sum: Modnum> FromStr for FletcherBuilder<Sum> {
/// See documentation of FromStr on Fletcher<Sum>
fn from_str(s: &str) -> Result<FletcherBuilder<Sum>, CheckBuilderErr> {
let mut fletch = Fletcher::<Sum>::with_options();
for x in KeyValIter::new(s) {
let (current_key, current_val) = match x {
Err(key) => return Err(CheckBuilderErr::MalformedString(key)),
Ok(s) => s,
};
let fletch_op = match current_key.as_str() {
"width" => usize::from_str(¤t_val).ok().map(|x| fletch.width(x)),
"module" => Sum::from_hex(¤t_val).ok().map(|x| fletch.module(x)),
"init" => Sum::from_hex(¤t_val).ok().map(|x| fletch.init(x)),
"addout" => Sum::Double::from_hex(¤t_val)
.ok()
.map(|x| fletch.addout(x)),
"swap" => bool::from_str(¤t_val).ok().map(|x| fletch.swap(x)),
"in | {
self.swap = Some(s);
self
} | identifier_body |
mod.rs | A builder for a fletcher.
///
/// One can use it for specifying a fletcher algorithm, which can be used for checksumming.
///
/// Example:
/// ```
/// # use delsum_lib::fletcher::Fletcher;
/// let adler32 = Fletcher::<u32>::with_options()
/// .width(32)
/// .init(1)
/// .module(65521)
/// .check(0x091e01de)
/// .name("adler32")
/// .build()
/// .is_ok();
/// ```
#[derive(Clone, Debug)]
pub struct FletcherBuilder<Sum: Modnum> {
width: Option<usize>,
module: Option<Sum>,
init: Option<Sum>,
addout: Option<Sum::Double>,
swap: Option<bool>,
input_endian: Option<Endian>,
output_endian: Option<Endian>,
wordsize: Option<usize>,
check: Option<Sum::Double>,
name: Option<String>,
}
impl<S: Modnum> FletcherBuilder<S> {
/// Sets the width of the type (both sums included, must be even, mandatory)
pub fn width(&mut self, w: usize) -> &mut Self {
self.width = Some(w);
self
}
/// Sets the module of both sums (mandatory)
pub fn module(&mut self, m: S) -> &mut Self {
self.module = Some(m);
self
}
/// Sets the initial value
///
/// Contains one value for the regular sum.
pub fn init(&mut self, i: S) -> &mut Self {
self.init = Some(i);
self
}
/// Sets a value that gets added after the checksum is finished
///
/// Contains separate values for both sums, the cumulative one is bitshifted
pub fn addout(&mut self, o: S::Double) -> &mut Self {
self.addout = Some(o);
self
}
/// Normally, the cumulative sum is saved on the higher bits and the normal sum in the lower bits.
/// Setting this option to true swaps the positions.
pub fn swap(&mut self, s: bool) -> &mut Self {
self.swap = Some(s);
self
}
/// The endian of the words of the input file
pub fn | (&mut self, e: Endian) -> &mut Self {
self.input_endian = Some(e);
self
}
/// The number of bits in a word of the input file
pub fn wordsize(&mut self, n: usize) -> &mut Self {
self.wordsize = Some(n);
self
}
/// The endian of the checksum
pub fn outendian(&mut self, e: Endian) -> &mut Self {
self.output_endian = Some(e);
self
}
/// Checks whether c is the same as the checksum of "123456789" on creation
pub fn check(&mut self, c: S::Double) -> &mut Self {
self.check = Some(c);
self
}
/// A name to be displayed
pub fn name(&mut self, n: &str) -> &mut Self {
self.name = Some(String::from(n));
self
}
/// Returns the Fletcher object after verifying correctness
pub fn build(&self) -> Result<Fletcher<S>, CheckBuilderErr> {
let init = self.init.unwrap_or_else(S::zero);
let addout = self.addout.unwrap_or_else(S::Double::zero);
// note: we only store the half width because it is more useful to us
let hwidth = match self.width {
None => return Err(CheckBuilderErr::MissingParameter("width")),
Some(w) => {
if w % 2 != 0 || w > addout.bits() {
return Err(CheckBuilderErr::ValueOutOfRange("width"));
} else {
w / 2
}
}
};
let mask = (S::Double::one() << hwidth) - S::Double::one();
let module = self.module.unwrap_or_else(S::zero);
let wordsize = self.wordsize.unwrap_or(8);
if wordsize == 0 || wordsize % 8 != 0 || wordsize > 64 {
return Err(CheckBuilderErr::ValueOutOfRange("wordsize"));
}
let wordspec = WordSpec {
input_endian: self.input_endian.unwrap_or(Endian::Big),
wordsize,
output_endian: self.output_endian.unwrap_or(Endian::Big),
};
let mut fletch = Fletcher {
hwidth,
module,
init,
addout,
swap: self.swap.unwrap_or(false),
wordspec,
mask,
name: self.name.clone(),
};
let (mut s, mut c) = fletch.from_compact(addout);
if !module.is_zero() {
s = s % module;
c = c % module;
fletch.init = init % module;
} else {
fletch.init = init;
};
fletch.addout = fletch.to_compact((s, c));
match self.check {
Some(chk) => {
if fletch.digest(&b"123456789"[..]).unwrap() != chk {
println!("{:x?}", fletch.digest(&b"123456789"[..]).unwrap());
Err(CheckBuilderErr::CheckFail)
} else {
Ok(fletch)
}
}
None => Ok(fletch),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Fletcher<Sum: Modnum> {
hwidth: usize,
module: Sum,
init: Sum,
addout: Sum::Double,
swap: bool,
wordspec: WordSpec,
mask: Sum::Double,
name: Option<String>,
}
impl<Sum: Modnum> Display for Fletcher<Sum> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match &self.name {
Some(n) => write!(f, "{}", n),
None => {
write!(
f,
"fletcher width={} module={:#x} init={:#x} addout={:#x} swap={}",
2 * self.hwidth,
self.module,
self.init,
self.addout,
self.swap
)?;
if self.wordspec.word_bytes() != 1 {
write!(
f,
" in_endian={} wordsize={}",
self.wordspec.input_endian, self.wordspec.wordsize
)?;
};
if self.hwidth * 2 > 8 {
write!(f, " out_endian={}", self.wordspec.output_endian)?;
};
Ok(())
}
}
}
}
impl<Sum: Modnum> Fletcher<Sum> {
/// Creates a `FletcherBuilder`, see `FletcherBuilder` documentation for more details.
pub fn with_options() -> FletcherBuilder<Sum> {
FletcherBuilder {
width: None,
module: None,
init: None,
addout: None,
swap: None,
input_endian: None,
output_endian: None,
wordsize: None,
check: None,
name: None,
}
}
fn from_compact(&self, x: Sum::Double) -> (Sum, Sum) {
let l = Sum::from_double(x & self.mask);
let h = Sum::from_double((x >> self.hwidth) & self.mask);
if self.swap {
(h, l)
} else {
(l, h)
}
}
fn to_compact(&self, (s, c): (Sum, Sum)) -> Sum::Double {
let (l, h) = if self.swap { (c, s) } else { (s, c) };
(Sum::Double::from(l) & self.mask) ^ (Sum::Double::from(h) & self.mask) << self.hwidth
}
}
impl<Sum: Modnum> FromStr for FletcherBuilder<Sum> {
/// See documentation of FromStr on Fletcher<Sum>
fn from_str(s: &str) -> Result<FletcherBuilder<Sum>, CheckBuilderErr> {
let mut fletch = Fletcher::<Sum>::with_options();
for x in KeyValIter::new(s) {
let (current_key, current_val) = match x {
Err(key) => return Err(CheckBuilderErr::MalformedString(key)),
Ok(s) => s,
};
let fletch_op = match current_key.as_str() {
"width" => usize::from_str(¤t_val).ok().map(|x| fletch.width(x)),
"module" => Sum::from_hex(¤t_val).ok().map(|x| fletch.module(x)),
"init" => Sum::from_hex(¤t_val).ok().map(|x| fletch.init(x)),
"addout" => Sum::Double::from_hex(¤t_val)
.ok()
.map(|x| fletch.addout(x)),
"swap" => bool::from_str(¤t_val).ok().map(|x| fletch.swap(x)),
"in | inendian | identifier_name |
mod.rs | fmt::Formatter<'_>) -> std::fmt::Result {
match &self.name {
Some(n) => write!(f, "{}", n),
None => {
write!(
f,
"fletcher width={} module={:#x} init={:#x} addout={:#x} swap={}",
2 * self.hwidth,
self.module,
self.init,
self.addout,
self.swap
)?;
if self.wordspec.word_bytes() != 1 {
write!(
f,
" in_endian={} wordsize={}",
self.wordspec.input_endian, self.wordspec.wordsize
)?;
};
if self.hwidth * 2 > 8 {
write!(f, " out_endian={}", self.wordspec.output_endian)?;
};
Ok(())
}
}
}
}
impl<Sum: Modnum> Fletcher<Sum> {
/// Creates a `FletcherBuilder`, see `FletcherBuilder` documentation for more details.
pub fn with_options() -> FletcherBuilder<Sum> {
FletcherBuilder {
width: None,
module: None,
init: None,
addout: None,
swap: None,
input_endian: None,
output_endian: None,
wordsize: None,
check: None,
name: None,
}
}
fn from_compact(&self, x: Sum::Double) -> (Sum, Sum) {
let l = Sum::from_double(x & self.mask);
let h = Sum::from_double((x >> self.hwidth) & self.mask);
if self.swap {
(h, l)
} else {
(l, h)
}
}
fn to_compact(&self, (s, c): (Sum, Sum)) -> Sum::Double {
let (l, h) = if self.swap { (c, s) } else { (s, c) };
(Sum::Double::from(l) & self.mask) ^ (Sum::Double::from(h) & self.mask) << self.hwidth
}
}
impl<Sum: Modnum> FromStr for FletcherBuilder<Sum> {
/// See documentation of FromStr on Fletcher<Sum>
fn from_str(s: &str) -> Result<FletcherBuilder<Sum>, CheckBuilderErr> {
let mut fletch = Fletcher::<Sum>::with_options();
for x in KeyValIter::new(s) {
let (current_key, current_val) = match x {
Err(key) => return Err(CheckBuilderErr::MalformedString(key)),
Ok(s) => s,
};
let fletch_op = match current_key.as_str() {
"width" => usize::from_str(¤t_val).ok().map(|x| fletch.width(x)),
"module" => Sum::from_hex(¤t_val).ok().map(|x| fletch.module(x)),
"init" => Sum::from_hex(¤t_val).ok().map(|x| fletch.init(x)),
"addout" => Sum::Double::from_hex(¤t_val)
.ok()
.map(|x| fletch.addout(x)),
"swap" => bool::from_str(¤t_val).ok().map(|x| fletch.swap(x)),
"in_endian" => Endian::from_str(¤t_val)
.ok()
.map(|x| fletch.inendian(x)),
"wordsize" => usize::from_str(¤t_val)
.ok()
.map(|x| fletch.wordsize(x)),
"out_endian" => Endian::from_str(¤t_val)
.ok()
.map(|x| fletch.outendian(x)),
"check" => Sum::Double::from_hex(¤t_val)
.ok()
.map(|x| fletch.check(x)),
"name" => Some(fletch.name(¤t_val)),
_ => return Err(CheckBuilderErr::UnknownKey(current_key)),
};
match fletch_op {
Some(f) => fletch = f.clone(),
None => return Err(CheckBuilderErr::MalformedString(current_key)),
}
}
Ok(fletch)
}
type Err = CheckBuilderErr;
}
impl<Sum: Modnum> FromStr for Fletcher<Sum> {
/// Construct a new fletcher sum algorithm from a string.
/// Note that all parameters except width are in hexadecimal.
///
/// Example:
///
/// ```
/// # use delsum_lib::fletcher::Fletcher;
/// # use std::str::FromStr;
/// Fletcher::<u32>::from_str("width=32 init=1 module=0xfff1 name=\"adler-32\"").is_ok();
/// ```
fn from_str(s: &str) -> Result<Fletcher<Sum>, CheckBuilderErr> {
FletcherBuilder::<Sum>::from_str(s)?.build()
}
type Err = CheckBuilderErr;
}
impl<S: Modnum> Digest for Fletcher<S> {
type Sum = S::Double;
fn init(&self) -> Self::Sum {
self.to_compact((self.init, S::zero()))
}
fn dig_word(&self, sum: Self::Sum, word: u64) -> Self::Sum {
let (mut s, mut c) = self.from_compact(sum);
let modword = S::mod_from(word, &self.module);
s = S::add_mod(s, &modword, &self.module);
c = S::add_mod(c, &s, &self.module);
self.to_compact((s, c))
}
fn finalize(&self, sum: Self::Sum) -> Self::Sum {
self.add(sum, &self.addout)
}
fn to_bytes(&self, s: Self::Sum) -> Vec<u8> {
self.wordspec.output_to_bytes(s, 2 * self.hwidth)
}
fn wordspec(&self) -> WordSpec {
self.wordspec
}
}
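// A small worked example of the running sums kept by dig_word, assuming
// module = 255, init = 1 and the input bytes [0x01, 0x02, 0x03]:
//   start:     s = 1, c = 0
//   word 0x01: s = 2, c = 2
//   word 0x02: s = 4, c = 6
//   word 0x03: s = 7, c = 13
// finalize() then adds `addout`, and to_compact packs s into the low half and c
// into the high half (reversed when `swap` is set).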
impl<S: Modnum> LinearCheck for Fletcher<S> {
type Shift = S;
fn init_shift(&self) -> Self::Shift {
S::zero()
}
fn inc_shift(&self, shift: Self::Shift) -> Self::Shift {
S::add_mod(shift, &S::one(), &self.module)
}
fn shift(&self, sum: Self::Sum, shift: &Self::Shift) -> Self::Sum {
let (s, mut c) = self.from_compact(sum);
let shift_diff = S::mul_mod(s, shift, &self.module);
c = S::add_mod(c, &shift_diff, &self.module);
self.to_compact((s, c))
}
fn add(&self, sum_a: Self::Sum, sum_b: &Self::Sum) -> Self::Sum {
let (sa, ca) = self.from_compact(sum_a);
let (sb, cb) = self.from_compact(*sum_b);
let sum_s = sa.add_mod(&sb, &self.module);
let sum_c = ca.add_mod(&cb, &self.module);
self.to_compact((sum_s, sum_c))
}
fn negate(&self, sum: Self::Sum) -> Self::Sum {
let (s, c) = self.from_compact(sum);
self.to_compact((s.neg_mod(&self.module), c.neg_mod(&self.module)))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::checksum::tests::{check_example, test_find, test_prop, test_shifts};
use std::str::FromStr;
#[test]
fn adler32() {
let adel = Fletcher::<u16>::with_options()
.width(32)
.init(1)
.module(65521)
.check(0x091e01de)
.build()
.unwrap();
test_shifts(&adel);
test_find(&adel);
test_prop(&adel);
check_example(&adel, 0x81bfd25f);
let nobel = Fletcher::with_options()
.width(32)
.init(1u32)
.module(65521)
.check(0x091e01de)
.build()
.unwrap();
test_shifts(&nobel);
test_find(&nobel);
test_prop(&adel);
check_example(&nobel, 0x81bfd25f);
}
#[test]
fn fletcher16() {
let f16 = Fletcher::with_options()
.width(16)
.module(0xffu8)
.check(0x1ede)
.build()
.unwrap();
test_shifts(&f16);
test_find(&f16);
test_prop(&f16);
check_example(&f16, 0x7815);
}
#[test]
fn fletcher8() {
let f8 = Fletcher::<u8>::from_str("width=8 module=f init=0 addout=0 swap=false check=0xc")
.unwrap();
test_shifts(&f8);
test_prop(&f8);
check_example(&f8, 0x6); | }
} | random_line_split |
|
mod.rs | builder for a fletcher.
///
/// One can use it for specifying a fletcher algorithm, which can be used for checksumming.
///
/// Example:
/// ```
/// # use delsum_lib::fletcher::Fletcher;
/// let adler32 = Fletcher::<u32>::with_options()
/// .width(32)
/// .init(1)
/// .module(65521)
/// .check(0x091e01de)
/// .name("adler32")
/// .build()
/// .is_ok();
/// ```
#[derive(Clone, Debug)]
pub struct FletcherBuilder<Sum: Modnum> {
width: Option<usize>,
module: Option<Sum>,
init: Option<Sum>,
addout: Option<Sum::Double>,
swap: Option<bool>,
input_endian: Option<Endian>,
output_endian: Option<Endian>,
wordsize: Option<usize>,
check: Option<Sum::Double>,
name: Option<String>,
}
impl<S: Modnum> FletcherBuilder<S> {
/// Sets the width of the type (both sums included, must be even, mandatory)
pub fn width(&mut self, w: usize) -> &mut Self {
self.width = Some(w);
self
}
/// Sets the module of both sums (mandatory)
pub fn module(&mut self, m: S) -> &mut Self {
self.module = Some(m);
self
}
/// Sets the initial value
///
/// Contains one value for the regular sum.
pub fn init(&mut self, i: S) -> &mut Self {
self.init = Some(i);
self
}
/// Sets a value that gets added after the checksum is finished
///
/// Contains separate values for both sums, the cumulative one is bitshifted
pub fn addout(&mut self, o: S::Double) -> &mut Self {
self.addout = Some(o);
self
}
/// Normally, the cumulative sum is saved on the higher bits and the normal sum in the lower bits.
/// Setting this option to true swaps the positions.
pub fn swap(&mut self, s: bool) -> &mut Self {
self.swap = Some(s);
self
}
/// The endian of the words of the input file
pub fn inendian(&mut self, e: Endian) -> &mut Self {
self.input_endian = Some(e);
self
}
/// The number of bits in a word of the input file
pub fn wordsize(&mut self, n: usize) -> &mut Self {
self.wordsize = Some(n);
self
}
/// The endian of the checksum
pub fn outendian(&mut self, e: Endian) -> &mut Self {
self.output_endian = Some(e);
self
}
/// Checks whether c is the same as the checksum of "123456789" on creation
pub fn check(&mut self, c: S::Double) -> &mut Self {
self.check = Some(c);
self
}
/// A name to be displayed
pub fn name(&mut self, n: &str) -> &mut Self {
self.name = Some(String::from(n));
self
}
/// Returns the Fletcher object after verifying correctness
pub fn build(&self) -> Result<Fletcher<S>, CheckBuilderErr> {
let init = self.init.unwrap_or_else(S::zero);
let addout = self.addout.unwrap_or_else(S::Double::zero);
// note: we only store the half width because it is more useful to us
let hwidth = match self.width {
None => return Err(CheckBuilderErr::MissingParameter("width")),
Some(w) => {
if w % 2 != 0 || w > addout.bits() {
return Err(CheckBuilderErr::ValueOutOfRange("width"));
} else {
w / 2
}
}
};
let mask = (S::Double::one() << hwidth) - S::Double::one();
let module = self.module.unwrap_or_else(S::zero);
let wordsize = self.wordsize.unwrap_or(8);
if wordsize == 0 || wordsize % 8 != 0 || wordsize > 64 {
return Err(CheckBuilderErr::ValueOutOfRange("wordsize"));
}
let wordspec = WordSpec {
input_endian: self.input_endian.unwrap_or(Endian::Big),
wordsize,
output_endian: self.output_endian.unwrap_or(Endian::Big),
};
let mut fletch = Fletcher {
hwidth,
module,
init,
addout,
swap: self.swap.unwrap_or(false),
wordspec,
mask,
name: self.name.clone(),
};
let (mut s, mut c) = fletch.from_compact(addout);
if !module.is_zero() {
s = s % module;
c = c % module;
fletch.init = init % module;
} else | ;
fletch.addout = fletch.to_compact((s, c));
match self.check {
Some(chk) => {
if fletch.digest(&b"123456789"[..]).unwrap() != chk {
println!("{:x?}", fletch.digest(&b"123456789"[..]).unwrap());
Err(CheckBuilderErr::CheckFail)
} else {
Ok(fletch)
}
}
None => Ok(fletch),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Fletcher<Sum: Modnum> {
hwidth: usize,
module: Sum,
init: Sum,
addout: Sum::Double,
swap: bool,
wordspec: WordSpec,
mask: Sum::Double,
name: Option<String>,
}
impl<Sum: Modnum> Display for Fletcher<Sum> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match &self.name {
Some(n) => write!(f, "{}", n),
None => {
write!(
f,
"fletcher width={} module={:#x} init={:#x} addout={:#x} swap={}",
2 * self.hwidth,
self.module,
self.init,
self.addout,
self.swap
)?;
if self.wordspec.word_bytes() != 1 {
write!(
f,
" in_endian={} wordsize={}",
self.wordspec.input_endian, self.wordspec.wordsize
)?;
};
if self.hwidth * 2 > 8 {
write!(f, " out_endian={}", self.wordspec.output_endian)?;
};
Ok(())
}
}
}
}
impl<Sum: Modnum> Fletcher<Sum> {
/// Creates a `FletcherBuilder`, see `FletcherBuilder` documentation for more details.
pub fn with_options() -> FletcherBuilder<Sum> {
FletcherBuilder {
width: None,
module: None,
init: None,
addout: None,
swap: None,
input_endian: None,
output_endian: None,
wordsize: None,
check: None,
name: None,
}
}
fn from_compact(&self, x: Sum::Double) -> (Sum, Sum) {
let l = Sum::from_double(x & self.mask);
let h = Sum::from_double((x >> self.hwidth) & self.mask);
if self.swap {
(h, l)
} else {
(l, h)
}
}
fn to_compact(&self, (s, c): (Sum, Sum)) -> Sum::Double {
let (l, h) = if self.swap { (c, s) } else { (s, c) };
(Sum::Double::from(l) & self.mask) ^ (Sum::Double::from(h) & self.mask) << self.hwidth
}
}
impl<Sum: Modnum> FromStr for FletcherBuilder<Sum> {
/// See documentation of FromStr on Fletcher<Sum>
fn from_str(s: &str) -> Result<FletcherBuilder<Sum>, CheckBuilderErr> {
let mut fletch = Fletcher::<Sum>::with_options();
for x in KeyValIter::new(s) {
let (current_key, current_val) = match x {
Err(key) => return Err(CheckBuilderErr::MalformedString(key)),
Ok(s) => s,
};
let fletch_op = match current_key.as_str() {
"width" => usize::from_str(¤t_val).ok().map(|x| fletch.width(x)),
"module" => Sum::from_hex(¤t_val).ok().map(|x| fletch.module(x)),
"init" => Sum::from_hex(¤t_val).ok().map(|x| fletch.init(x)),
"addout" => Sum::Double::from_hex(¤t_val)
.ok()
.map(|x| fletch.addout(x)),
"swap" => bool::from_str(¤t_val).ok().map(|x| fletch.swap(x)),
"in | {
fletch.init = init;
} | conditional_block |
RF.py | (300000/1000 - 0.5 + 0.25)/0.25 = 1199
pop1 = dg_finger1; pop2 = dg_finger2; pop3 = dg_finger3; pop4 = dg_finger4; pop5 = dg_finger5
windows1 = []; windows2 = []; windows3 = []; windows4 = []; windows5 = []
while len(pop1) >= 500:
temp1 = pop1[0:500]; temp2 = pop2[0:500]; temp3 = pop3[0:500]
temp4 = pop4[0:500]; temp5 = pop5[0:500]
windows1.append(temp1); windows2.append(temp2); windows3.append(temp3)
windows4.append(temp4); windows5.append(temp5)
for pop_amount in range(250):
pop1.pop(0); pop2.pop(0); pop3.pop(0); pop4.pop(0); pop5.pop(0)
# make arrays to track how much each finger changes in each time window
change1 = []; change2 = []; change3 =[]; change4 = []; change5 =[]
for window in windows1:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change1.append(temp_change)
for window in windows2:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change2.append(temp_change)
for window in windows3:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change3.append(temp_change)
for window in windows4:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change4.append(temp_change)
for window in windows5:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change5.append(temp_change)
# find where there are changes over a time frame for at least one of the fingers
nonzero_indicies = []
for index in range(len(change1)):
if (change1[index] + change2[index] + change3[index] + change4[index] + change5[index]) != 0:
nonzero_indicies.append(index)
# every time interval has some change in at least one finger --> no need to reduce
# make labels by finding highest change in each interval
list_of_changes = []
for i in range(len(change1)):
temp = []
temp.append(change1[i])
temp.append(change2[i])
temp.append(change3[i])
temp.append(change4[i])
temp.append(change5[i])
list_of_changes.append(temp)
labels_list = []
for five_set in list_of_changes:
labels_list.append(five_set.index(max(five_set)))
# one hot encode these labels
labels_df = pd.DataFrame(data = labels_list)
labels = np.array(labels_df)
labels = labels.reshape(len(labels_list),)
labels_one_hot = to_categorical(labels)
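# For illustration, to_categorical turns the integer finger labels into one-hot rows,
# e.g. with 5 classes (values assumed): to_categorical([2, 0], num_classes=5)
#   -> [[0., 0., 1., 0., 0.],
#       [1., 0., 0., 0., 0.]]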
"""Prepare Inputs"""
# find which channels are the most variant over the entire time scale
full_ecog_p1_list = full_ecog_p1.tolist()
ecog_df = pd.DataFrame(full_ecog_p1_list)
big_list_of_channels = []
for i in range(ecog_df.shape[1]):
big_list_of_channels.append(ecog_df[i].tolist())
channel_changes = []
for channel in big_list_of_channels:
temp = 0
for i in range(len(channel)-1):
temp += abs(channel[i+1] - channel[i])
channel_changes.append(temp)
# all channels have similar change over entire time scale
# filter the data
numerator, denominator = scipy.signal.butter(5,(2*200)/1000)
for i in range(62):
ecog_df[i] = scipy.signal.lfilter(numerator,denominator,ecog_df[i].tolist())
# get into arrays consistent with outputs
for i in range(len(ecog_df)):
full_ecog_p1_list[i] = ecog_df.loc[i].tolist()
np.shape(full_ecog_p1_list)
ECOG_windows = np.zeros((len(labels_one_hot),500,62))
count = 0
while len(full_ecog_p1_list) >= 500:
ECOG_windows[count,:,:] = full_ecog_p1_list[0:500]
for pop_amount in range(250):
full_ecog_p1_list.pop(0)
count += 1
np.shape(ECOG_windows)
## CALCULATE FEATURES
def bandpower(x, fmin, fmax):
f, Pxx = scipy.signal.periodogram(x, fs=1000)
ind_min = np.argmax(f > fmin) - 1
ind_max = np.argmax(f > fmax) - 1
return np.trapz(Pxx[ind_min: ind_max], f[ind_min: ind_max])
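# Quick sanity check (hypothetical signal): a 10 Hz unit sine sampled at 1000 Hz should put
# nearly all of its power in the 8-12 Hz band and almost none in 75-115 Hz.
#   t = np.arange(0, 1, 1/1000)
#   sig = np.sin(2 * np.pi * 10 * t)
#   bandpower(sig, 8, 12)    # ~0.5, the mean-square power of a unit-amplitude sine
#   bandpower(sig, 75, 115)  # ~0.0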
def line_length(x):
return(sum(abs(np.diff(x))))
def area(x):
return(sum(abs(x)))
def energy(x):
return(sum(np.square(x)))
def dc_gain(x):
return(np.mean(x))
def zero_crossings(x):
return(sum(x > np.mean(x)))
def peak_volt(x):
return(np.max(x))
def variance(x):
return(np.std(x)**2)
feat_names = ['BP 8-12', 'BP 18-24', 'BP 75-115', 'BP 125-159', 'BP 160-180', 'Line Length', 'Area', 'Energy', 'DC Gain', 'Zero Crossings', 'Peak Voltage', 'Variance']
n_feats = 12
n_channels = 62
batch_size = 40
batch_ct = len(change1);
features = np.zeros((batch_ct, n_channels, n_feats))
for chan in range(n_channels):
for idx in range(batch_ct):
x = ECOG_windows[idx,:,chan]
features[idx,chan,0] = bandpower(x, 8, 12)
features[idx,chan,1] = bandpower(x, 18, 24)
features[idx,chan,2] = bandpower(x, 75, 115)
features[idx,chan,3] = bandpower(x, 125, 159)
features[idx,chan,4] = bandpower(x, 160, 180)
features[idx,chan,5] = line_length(x)
features[idx,chan,6] = area(x)
features[idx,chan,7] = energy(x)
features[idx,chan,8] = dc_gain(x)
features[idx,chan,9] = zero_crossings(x)
features[idx,chan,10] = peak_volt(x)
features[idx,chan,11] = variance(x)
pd.DataFrame(features[:,chan,:],columns=feat_names).to_csv('Features/feats_500ms_'+str(chan),index=False)
# reduce dimensionality of feature matrix | for chan in range(n_channels):
for feat in range(n_feats):
features_2d[idx, chan + feat*n_channels] = features[idx,chan,feat]
# scale
scl = MinMaxScaler()
features_2d = scl.fit_transform(pd.DataFrame(features_2d))
"""Final Preprocessing and Train Test Split """
X_train = features_2d[0:960]
Y_train = labels_list[0:960]
X_test = features_2d[960:]
Y_test = labels_list[960:]
# shuffle data
X_train_shuff, Y_train_shuff = shuffle(X_train,Y_train)
X_test_shuff, Y_test_shuff = shuffle(X_test,Y_test)
print(np.shape(X_train))
print(np.shape(Y_train))
print(np.shape(X_test))
print(np.shape(Y_test))
"""Train Model
"""
# train model
rf_model = RandomForestClassifier(max_leaf_nodes=70)
rf_model.fit(X_train_shuff, Y_train_shuff)
# get predictions
train_predictions = rf_model.predict(X_train_shuff)
test_predictions = rf_model.predict(X_test_shuff)
# get accuracy
train_score = rf_model.score(X_train_shuff, Y_train_shuff)
print('Train Accuracy:')
print(train_score)
test_score = rf_model.score(X_test_shuff, Y_test_shuff)
print('Test Accuracy:')
print(test_score)
# Optimize Max Leaf Nodes
max_leaves = [2,3,4,5,6,7,8,10,12,15,20,25,30,40,50,75,100,150,200,250]
train_scores = []
test_scores = []
for max_leaf_nodes in max_leaves:
# shuffle train test set each time???
model = RandomForestClassifier(max_leaf_nodes=max_leaf_nodes)
model.fit | features_2d = np.zeros((batch_ct, n_channels*n_feats))
for idx in range(batch_ct): | random_line_split |
RF.py | 300000/1000 - 0.5 + 0.25)/0.25 = 1199
pop1 = dg_finger1; pop2 = dg_finger2; pop3 = dg_finger3; pop4 = dg_finger4; pop5 = dg_finger5
windows1 = []; windows2 = []; windows3 = []; windows4 = []; windows5 = []
while len(pop1) >= 500:
temp1 = pop1[0:500]; temp2 = pop2[0:500]; temp3 = pop3[0:500]
temp4 = pop4[0:500]; temp5 = pop5[0:500]
windows1.append(temp1); windows2.append(temp2); windows3.append(temp3)
windows4.append(temp4); windows5.append(temp5)
for pop_amount in range(250):
pop1.pop(0); pop2.pop(0); pop3.pop(0); pop4.pop(0); pop5.pop(0)
# make arrays to track how much each finger changes in each time window
change1 = []; change2 = []; change3 =[]; change4 = []; change5 =[]
for window in windows1:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change1.append(temp_change)
for window in windows2:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change2.append(temp_change)
for window in windows3:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change3.append(temp_change)
for window in windows4:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change4.append(temp_change)
for window in windows5:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change5.append(temp_change)
# find where there are changes over a time frame for at least one of the fingers
nonzero_indicies = []
for index in range(len(change1)):
if (change1[index] + change2[index] + change3[index] + change4[index] + change5[index]) != 0:
nonzero_indicies.append(index)
# every time interval has some change in at least one finger --> no need to reduce
# make labels by finding highest change in each interval
list_of_changes = []
for i in range(len(change1)):
temp = []
temp.append(change1[i])
temp.append(change2[i])
temp.append(change3[i])
temp.append(change4[i])
temp.append(change5[i])
list_of_changes.append(temp)
labels_list = []
for five_set in list_of_changes:
labels_list.append(five_set.index(max(five_set)))
# one hot encode these labels
labels_df = pd.DataFrame(data = labels_list)
labels = np.array(labels_df)
labels = labels.reshape(len(labels_list),)
labels_one_hot = to_categorical(labels)
"""Prepare Inputs"""
# find which channels are the most variant over the entire time scale
full_ecog_p1_list = full_ecog_p1.tolist()
ecog_df = pd.DataFrame(full_ecog_p1_list)
big_list_of_channels = []
for i in range(ecog_df.shape[1]):
big_list_of_channels.append(ecog_df[i].tolist())
channel_changes = []
for channel in big_list_of_channels:
temp = 0
for i in range(len(channel)-1):
temp += abs(channel[i+1] - channel[i])
channel_changes.append(temp)
# all channels have similar change over entire time scale
# filter the data
numerator, denominator = scipy.signal.butter(5,(2*200)/1000)
for i in range(62):
ecog_df[i] = scipy.signal.lfilter(numerator,denominator,ecog_df[i].tolist())
# get into arrays consistent with outputs
for i in range(len(ecog_df)):
full_ecog_p1_list[i] = ecog_df.loc[i].tolist()
np.shape(full_ecog_p1_list)
ECOG_windows = np.zeros((len(labels_one_hot),500,62))
count = 0
while len(full_ecog_p1_list) >= 500:
ECOG_windows[count,:,:] = full_ecog_p1_list[0:500]
for pop_amount in range(250):
full_ecog_p1_list.pop(0)
count += 1
np.shape(ECOG_windows)
## CALCULATE FEATURES
def bandpower(x, fmin, fmax):
f, Pxx = scipy.signal.periodogram(x, fs=1000)
ind_min = np.argmax(f > fmin) - 1
ind_max = np.argmax(f > fmax) - 1
return np.trapz(Pxx[ind_min: ind_max], f[ind_min: ind_max])
def line_length(x):
return(sum(abs(np.diff(x))))
def area(x):
return(sum(abs(x)))
def energy(x):
return(sum(np.square(x)))
def dc_gain(x):
return(np.mean(x))
def zero_crossings(x):
return(sum(x > np.mean(x)))
def peak_volt(x):
|
def variance(x):
return(np.std(x)**2)
feat_names = ['BP 8-12', 'BP 18-24', 'BP 75-115', 'BP 125-159', 'BP 160-180', 'Line Length', 'Area', 'Energy', 'DC Gain', 'Zero Crossings', 'Peak Voltage', 'Variance']
n_feats = 12
n_channels = 62
batch_size = 40
batch_ct = len(change1);
features = np.zeros((batch_ct, n_channels, n_feats))
for chan in range(n_channels):
for idx in range(batch_ct):
x = ECOG_windows[idx,:,chan]
features[idx,chan,0] = bandpower(x, 8, 12)
features[idx,chan,1] = bandpower(x, 18, 24)
features[idx,chan,2] = bandpower(x, 75, 115)
features[idx,chan,3] = bandpower(x, 125, 159)
features[idx,chan,4] = bandpower(x, 160, 180)
features[idx,chan,5] = line_length(x)
features[idx,chan,6] = area(x)
features[idx,chan,7] = energy(x)
features[idx,chan,8] = dc_gain(x)
features[idx,chan,9] = zero_crossings(x)
features[idx,chan,10] = peak_volt(x)
features[idx,chan,11] = variance(x)
pd.DataFrame(features[:,chan,:],columns=feat_names).to_csv('Features/feats_500ms_'+str(chan),index=False)
# reduce dimensionality of feature matrix
features_2d = np.zeros((batch_ct, n_channels*n_feats))
for idx in range(batch_ct):
for chan in range(n_channels):
for feat in range(n_feats):
features_2d[idx, chan + feat*n_channels] = features[idx,chan,feat]
# scale
scl = MinMaxScaler()
features_2d = scl.fit_transform(pd.DataFrame(features_2d))
"""Final Preprocessing and Train Test Split """
X_train = features_2d[0:960]
Y_train = labels_list[0:960]
X_test = features_2d[960:]
Y_test = labels_list[960:]
# shuffle data
X_train_shuff, Y_train_shuff = shuffle(X_train,Y_train)
X_test_shuff, Y_test_shuff = shuffle(X_test,Y_test)
print(np.shape(X_train))
print(np.shape(Y_train))
print(np.shape(X_test))
print(np.shape(Y_test))
"""Train Model
"""
# train model
rf_model = RandomForestClassifier(max_leaf_nodes=70)
rf_model.fit(X_train_shuff, Y_train_shuff)
# get predictions
train_predictions = rf_model.predict(X_train_shuff)
test_predictions = rf_model.predict(X_test_shuff)
# get accuracy
train_score = rf_model.score(X_train_shuff, Y_train_shuff)
print('Train Accuracy:')
print(train_score)
test_score = rf_model.score(X_test_shuff, Y_test_shuff)
print('Test Accuracy:')
print(test_score)
# Optimize Max Leaf Nodes
max_leaves = [2,3,4,5,6,7,8,10,12,15,20,25,30,40,50,75,100,150,200,250]
train_scores = []
test_scores = []
for max_leaf_nodes in max_leaves:
# shuffle train test set each time???
model = RandomForestClassifier(max_leaf_nodes=max_leaf_nodes)
model | return(np.max(x)) | identifier_body |
RF.py | 300000/1000 - 0.5 + 0.25)/0.25 = 1199
pop1 = dg_finger1; pop2 = dg_finger2; pop3 = dg_finger3; pop4 = dg_finger4; pop5 = dg_finger5
windows1 = []; windows2 = []; windows3 = []; windows4 = []; windows5 = []
while len(pop1) >= 500:
temp1 = pop1[0:500]; temp2 = pop2[0:500]; temp3 = pop3[0:500]
temp4 = pop4[0:500]; temp5 = pop5[0:500]
windows1.append(temp1); windows2.append(temp2); windows3.append(temp3)
windows4.append(temp4); windows5.append(temp5)
for pop_amount in range(250):
pop1.pop(0); pop2.pop(0); pop3.pop(0); pop4.pop(0); pop5.pop(0)
# make arrays to track how much each finger changes in each time window
change1 = []; change2 = []; change3 =[]; change4 = []; change5 =[]
for window in windows1:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change1.append(temp_change)
for window in windows2:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change2.append(temp_change)
for window in windows3:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change3.append(temp_change)
for window in windows4:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change4.append(temp_change)
for window in windows5:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change5.append(temp_change)
# find where there are changes over a time frame for at least one of the fingers
nonzero_indicies = []
for index in range(len(change1)):
if (change1[index] + change2[index] + change3[index] + change4[index] + change5[index]) != 0:
nonzero_indicies.append(index)
# every time interval has some change in at least one finger --> no need to reduce
# make labels by finding highest change in each interval
list_of_changes = []
for i in range(len(change1)):
temp = []
temp.append(change1[i])
temp.append(change2[i])
temp.append(change3[i])
temp.append(change4[i])
temp.append(change5[i])
list_of_changes.append(temp)
labels_list = []
for five_set in list_of_changes:
labels_list.append(five_set.index(max(five_set)))
# one hot encode these labels
labels_df = pd.DataFrame(data = labels_list)
labels = np.array(labels_df)
labels = labels.reshape(len(labels_list),)
labels_one_hot = to_categorical(labels)
"""Prepare Inputs"""
# find which channels are the most variant over the entire time scale
full_ecog_p1_list = full_ecog_p1.tolist()
ecog_df = pd.DataFrame(full_ecog_p1_list)
big_list_of_channels = []
for i in range(ecog_df.shape[1]):
big_list_of_channels.append(ecog_df[i].tolist())
channel_changes = []
for channel in big_list_of_channels:
temp = 0
for i in range(len(channel)-1):
temp += abs(channel[i+1] - channel[i])
channel_changes.append(temp)
# all channels have similar change over entire time scale
# filter the data
numerator, denominator = scipy.signal.butter(5,(2*200)/1000)
for i in range(62):
ecog_df[i] = scipy.signal.lfilter(numerator,denominator,ecog_df[i].tolist())
# get into arrays consistent with outputs
for i in range(len(ecog_df)):
full_ecog_p1_list[i] = ecog_df.loc[i].tolist()
np.shape(full_ecog_p1_list)
ECOG_windows = np.zeros((len(labels_one_hot),500,62))
count = 0
while len(full_ecog_p1_list) >= 500:
ECOG_windows[count,:,:] = full_ecog_p1_list[0:500]
for pop_amount in range(250):
full_ecog_p1_list.pop(0)
count += 1
np.shape(ECOG_windows)
## CALCULATE FEATURES
def bandpower(x, fmin, fmax):
f, Pxx = scipy.signal.periodogram(x, fs=1000)
ind_min = np.argmax(f > fmin) - 1
ind_max = np.argmax(f > fmax) - 1
return np.trapz(Pxx[ind_min: ind_max], f[ind_min: ind_max])
def | (x):
return(sum(abs(np.diff(x))))
def area(x):
return(sum(abs(x)))
def energy(x):
return(sum(np.square(x)))
def dc_gain(x):
return(np.mean(x))
def zero_crossings(x):
return(sum(x > np.mean(x)))
def peak_volt(x):
return(np.max(x))
def variance(x):
return(np.std(x)**2)
feat_names = ['BP 8-12', 'BP 18-24', 'BP 75-115', 'BP 125-159', 'BP 160-180', 'Line Length', 'Area', 'Energy', 'DC Gain', 'Zero Crossings', 'Peak Voltage', 'Variance']
n_feats = 12
n_channels = 62
batch_size = 40
batch_ct = len(change1);
features = np.zeros((batch_ct, n_channels, n_feats))
for chan in range(n_channels):
for idx in range(batch_ct):
x = ECOG_windows[idx,:,chan]
features[idx,chan,0] = bandpower(x, 8, 12)
features[idx,chan,1] = bandpower(x, 18, 24)
features[idx,chan,2] = bandpower(x, 75, 115)
features[idx,chan,3] = bandpower(x, 125, 159)
features[idx,chan,4] = bandpower(x, 160, 180)
features[idx,chan,5] = line_length(x)
features[idx,chan,6] = area(x)
features[idx,chan,7] = energy(x)
features[idx,chan,8] = dc_gain(x)
features[idx,chan,9] = zero_crossings(x)
features[idx,chan,10] = peak_volt(x)
features[idx,chan,11] = variance(x)
pd.DataFrame(features[:,chan,:],columns=feat_names).to_csv('Features/feats_500ms_'+str(chan),index=False)
# reduce dimensionality of feature matrix
features_2d = np.zeros((batch_ct, n_channels*n_feats))
for idx in range(batch_ct):
for chan in range(n_channels):
for feat in range(n_feats):
features_2d[idx, chan + feat*n_channels] = features[idx,chan,feat]
# scale
scl = MinMaxScaler()
features_2d = scl.fit_transform(pd.DataFrame(features_2d))
"""Final Preprocessing and Train Test Split """
X_train = features_2d[0:960]
Y_train = labels_list[0:960]
X_test = features_2d[960:]
Y_test = labels_list[960:]
# shuffle data
X_train_shuff, Y_train_shuff = shuffle(X_train,Y_train)
X_test_shuff, Y_test_shuff = shuffle(X_test,Y_test)
print(np.shape(X_train))
print(np.shape(Y_train))
print(np.shape(X_test))
print(np.shape(Y_test))
"""Train Model
"""
# train model
rf_model = RandomForestClassifier(max_leaf_nodes=70)
rf_model.fit(X_train_shuff, Y_train_shuff)
# get predictions
train_predictions = rf_model.predict(X_train_shuff)
test_predictions = rf_model.predict(X_test_shuff)
# get accuracy
train_score = rf_model.score(X_train_shuff, Y_train_shuff)
print('Train Accuracy:')
print(train_score)
test_score = rf_model.score(X_test_shuff, Y_test_shuff)
print('Test Accuracy:')
print(test_score)
# Optimize Max Leaf Nodes
max_leaves = [2,3,4,5,6,7,8,10,12,15,20,25,30,40,50,75,100,150,200,250]
train_scores = []
test_scores = []
for max_leaf_nodes in max_leaves:
# shuffle train test set each time???
model = RandomForestClassifier(max_leaf_nodes=max_leaf_nodes)
model | line_length | identifier_name |
RF.py | 300000/1000 - 0.5 + 0.25)/0.25 = 1199
pop1 = dg_finger1; pop2 = dg_finger2; pop3 = dg_finger3; pop4 = dg_finger4; pop5 = dg_finger5
windows1 = []; windows2 = []; windows3 = []; windows4 = []; windows5 = []
while len(pop1) >= 500:
temp1 = pop1[0:500]; temp2 = pop2[0:500]; temp3 = pop3[0:500]
temp4 = pop4[0:500]; temp5 = pop5[0:500]
windows1.append(temp1); windows2.append(temp2); windows3.append(temp3)
windows4.append(temp4); windows5.append(temp5)
for pop_amount in range(250):
pop1.pop(0); pop2.pop(0); pop3.pop(0); pop4.pop(0); pop5.pop(0)
# make arrays to track how much each finger changes in each time window
change1 = []; change2 = []; change3 =[]; change4 = []; change5 =[]
for window in windows1:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change1.append(temp_change)
for window in windows2:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change2.append(temp_change)
for window in windows3:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change3.append(temp_change)
for window in windows4:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change4.append(temp_change)
for window in windows5:
temp_change = 0
for i in range(len(window)-1):
temp_change+= abs(window[i+1] - window[i])
change5.append(temp_change)
# find where there are changes over a time frame for at least one of the fingers
nonzero_indicies = []
for index in range(len(change1)):
if (change1[index] + change2[index] + change3[index] + change4[index] + change5[index]) != 0:
nonzero_indicies.append(index)
# every time interval has some change in at least one finger --> no need to reduce
# make labels by finding highest change in each interval
list_of_changes = []
for i in range(len(change1)):
temp = []
temp.append(change1[i])
temp.append(change2[i])
temp.append(change3[i])
temp.append(change4[i])
temp.append(change5[i])
list_of_changes.append(temp)
labels_list = []
for five_set in list_of_changes:
labels_list.append(five_set.index(max(five_set)))
# one hot encode these labels
labels_df = pd.DataFrame(data = labels_list)
labels = np.array(labels_df)
labels = labels.reshape(len(labels_list),)
labels_one_hot = to_categorical(labels)
"""Prepare Inputs"""
# find which channels are the most variant over the entire time scale
full_ecog_p1_list = full_ecog_p1.tolist()
ecog_df = pd.DataFrame(full_ecog_p1_list)
big_list_of_channels = []
for i in range(ecog_df.shape[1]):
big_list_of_channels.append(ecog_df[i].tolist())
channel_changes = []
for channel in big_list_of_channels:
temp = 0
for i in range(len(channel)-1):
|
channel_changes.append(temp)
# all channels have similar change over entire time scale
# filter the data
numerator, denominator = scipy.signal.butter(5,(2*200)/1000)
for i in range(62):
ecog_df[i] = scipy.signal.lfilter(numerator,denominator,ecog_df[i].tolist())
# get into arrays consistent with outputs
for i in range(len(ecog_df)):
full_ecog_p1_list[i] = ecog_df.loc[i].tolist()
np.shape(full_ecog_p1_list)
ECOG_windows = np.zeros((len(labels_one_hot),500,62))
count = 0
while len(full_ecog_p1_list) >= 500:
ECOG_windows[count,:,:] = full_ecog_p1_list[0:500]
for pop_amount in range(250):
full_ecog_p1_list.pop(0)
count += 1
np.shape(ECOG_windows)
## CALCULATE FEATURES
def bandpower(x, fmin, fmax):
f, Pxx = scipy.signal.periodogram(x, fs=1000)
ind_min = np.argmax(f > fmin) - 1
ind_max = np.argmax(f > fmax) - 1
return np.trapz(Pxx[ind_min: ind_max], f[ind_min: ind_max])
def line_length(x):
return(sum(abs(np.diff(x))))
def area(x):
return(sum(abs(x)))
def energy(x):
return(sum(np.square(x)))
def dc_gain(x):
return(np.mean(x))
def zero_crossings(x):
return(sum(x > np.mean(x)))
def peak_volt(x):
return(np.max(x))
def variance(x):
return(np.std(x)**2)
feat_names = ['BP 8-12', 'BP 18-24', 'BP 75-115', 'BP 125-159', 'BP 160-180', 'Line Length', 'Area', 'Energy', 'DC Gain', 'Zero Crossings', 'Peak Voltage', 'Variance']
n_feats = 12
n_channels = 62
batch_size = 40
batch_ct = len(change1);
features = np.zeros((batch_ct, n_channels, n_feats))
for chan in range(n_channels):
for idx in range(batch_ct):
x = ECOG_windows[idx,:,chan]
features[idx,chan,0] = bandpower(x, 8, 12)
features[idx,chan,1] = bandpower(x, 18, 24)
features[idx,chan,2] = bandpower(x, 75, 115)
features[idx,chan,3] = bandpower(x, 125, 159)
features[idx,chan,4] = bandpower(x, 160, 180)
features[idx,chan,5] = line_length(x)
features[idx,chan,6] = area(x)
features[idx,chan,7] = energy(x)
features[idx,chan,8] = dc_gain(x)
features[idx,chan,9] = zero_crossings(x)
features[idx,chan,10] = peak_volt(x)
features[idx,chan,11] = variance(x)
pd.DataFrame(features[:,chan,:],columns=feat_names).to_csv('Features/feats_500ms_'+str(chan),index=False)
# reduce dimensionality of feature matrix
features_2d = np.zeros((batch_ct, n_channels*n_feats))
for idx in range(batch_ct):
for chan in range(n_channels):
for feat in range(n_feats):
features_2d[idx, chan + feat*n_channels] = features[idx,chan,feat]
# scale
scl = MinMaxScaler()
features_2d = scl.fit_transform(pd.DataFrame(features_2d))
"""Final Preprocessing and Train Test Split """
X_train = features_2d[0:960]
Y_train = labels_list[0:960]
X_test = features_2d[960:]
Y_test = labels_list[960:]
# shuffle data
X_train_shuff, Y_train_shuff = shuffle(X_train,Y_train)
X_test_shuff, Y_test_shuff = shuffle(X_test,Y_test)
print(np.shape(X_train))
print(np.shape(Y_train))
print(np.shape(X_test))
print(np.shape(Y_test))
"""Train Model
"""
# train model
rf_model = RandomForestClassifier(max_leaf_nodes=70)
rf_model.fit(X_train_shuff, Y_train_shuff)
# get predictions
train_predictions = rf_model.predict(X_train_shuff)
test_predictions = rf_model.predict(X_test_shuff)
# get accuracy
train_score = rf_model.score(X_train_shuff, Y_train_shuff)
print('Train Accuracy:')
print(train_score)
test_score = rf_model.score(X_test_shuff, Y_test_shuff)
print('Test Accuracy:')
print(test_score)
# Optimize Max Leaf Nodes
max_leaves = [2,3,4,5,6,7,8,10,12,15,20,25,30,40,50,75,100,150,200,250]
train_scores = []
test_scores = []
for max_leaf_nodes in max_leaves:
# shuffle train test set each time???
model = RandomForestClassifier(max_leaf_nodes=max_leaf_nodes)
model | temp += abs(channel[i+1] - channel[i]) | conditional_block |
cmake_templates.py | ${${files}})\n')
#for outputting an executable
TExecutable = Template("add_executable(${project} $${SOURCES} $${HEADERS})\n")
#for outputting a shared library
TSharedLib = Template("add_library(${project} SHARED $${SOURCES} $${HEADERS})\n")
#for outputting a static library
TStaticLib = Template("add_library(${project} STATIC $${SOURCES} $${HEADERS})\n")
#for outputting a collection of code files to an object file
TObjectLib = Template("add_library(${project} OBJECT $${SOURCES})\n")
#template for appending a cmake variable to another cmake variable
TAppendVariable = Template("set( ${var} $${${var}} $${${appendedval}})\n")
#template for appending a python variable to a cmake variable
TAppendPythonVariable = Template("set( ${var} $${${var}} ${appendedval})\n")
#template for setting cmake variable
TMakeVariable = Template('set (${var} ${value})\n')
#template for adding a link directory
TLinkDirectory = Template('link_directories("${dir}")')
#template for targeting link libs
TTargetLinkLibs = Template("""if(NOT LIBS STREQUAL "")
target_link_libraries(${name} $${LIBS})
endif()
""")
#for linking a framework on the mac
TLinkFramework = Template("""find_library(${framework}_LIB ${framework})
MARK_AS_ADVANCED(${framework}_LIB)
set(LIBS $${LIBS} $${${framework}_LIB})""")
#for linking a system library
TLinkSystemLib = Template("""find_package(${framework} REQUIRED)
include_directories($${${framework_upper}_INCLUDE_DIRS})
set(LIBS $${LIBS} $${${framework_upper}_LIBRARIES})""")
#for linking objects into this module
TLinkObject = Template("set(LIBS $${LIBS} $$<TARGET_OBJECTS:${object}>)")
#template for executable output
TExecutableOutput = Template('set(EXECUTABLE_OUTPUT_PATH "${dir}")\n')
#template for runtime output
TRuntimeOutput = Template('set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${dir}")\n')
#template for library output
TLibraryoutput = Template('set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${dir}")\nset(LIBRARY_OUTPUT_PATH "${dir}")\n')
#template for including a submodule
TSubmoduleInclude = Template('add_subdirectory(${dir})')
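# Illustration of how these templates expand (project/variable names are made up);
# note that "$$" inside a Template escapes a literal "$" so CMake variables survive substitution.
#   TExecutable.substitute(dict(project="my_app"))
#     -> 'add_executable(my_app ${SOURCES} ${HEADERS})\n'
#   TAppendVariable.substitute(dict(var="SOURCES", appendedval="src_main"))
#     -> 'set( SOURCES ${SOURCES} ${src_main})\n'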
#-----Helper Functions----
def WriteToFile(f, output, condition = False, conditionID = ""):
f.write(output if not condition else WrapInGuard(conditionID, output))
def InsertEnvVariable(s):
return Template(s).substitute(os.environ)
def ContainsEnvVariable(s):
return ("$" in s)
#removes all characters that may cause issues with cmake
#such as ${} characters for environment variables
def Strip(s):
|
#-----Write Functions-----
#Puts innerbody into TIfGuard template with the given condition
#then returns the string
def WrapInGuard(condition, innerbody):
return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))
def WriteProjectSettings(f, section):
#defaults
if "UseFolders" not in section.data: section.data["UseFolders"] = "OFF"
#output
output = TProjectSettings.substitute(section.data)
f.write(output)
#writes required CMAKE variables to the file
def WriteRequiredVariables(f):
#all required variables go here to initialise
variables = [
dict(var="INCLUDES", value='""'),
dict(var="SOURCES", value='""'),
dict(var="LIBS", value='""')
]
#write them to file
for v in variables:
f.write(TMakeVariable.substitute(v))
#definitions such as #defines
def WriteDefinitions(f, sections):
#first write the one which is not platform specific
for s in sections:
defs = s.data[":"]
#gather definitions to be output
output = ""
for d in defs:
output += TDefinition.substitute(dict(definition=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#project include directories
def WriteIncludeDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
#gather definitions to be output
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
headerID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#add include directory
output = TIncludeDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#glob all header files
output = THeaderGlob.substitute(dict(dir=d, header_id=headerID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append to HEADERS variable
output = TAppendVariable.substitute(dict(var="HEADERS", appendedval=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Header Files" + localDir, files=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
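# Rough shape of the CMake emitted for a single include entry, assuming rootDir="C:/proj"
# and an entry "include/math" (the glob line itself depends on THeaderGlob, defined elsewhere):
#   include_directories("C:/proj/include/math")
#   file(GLOB _include_math ...)
#   set( HEADERS ${HEADERS} ${_include_math})
#   source_group("Header Files\\include\\math" FILES ${_include_math})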
#project source directories
def WriteSourceDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
sourceID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#glob all source files
output = TSourceGlob.substitute(dict(dir=d, source_id=sourceID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append globbed source files to SOURCES cmake variable
output = TAppendVariable.substitute(dict(var="SOURCES", appendedval=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Source Files" + localDir, files=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#includes local library directories
def WriteProjectLibDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = d if d.startswith('/') else "/"+d
d = rootDir + d
#include lib directory
output = TLinkDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#adds all libs to the LIBS cmake var
def WriteLinkLibs(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
libs = s.data[":"]
output = ""
for l in libs:
if "-framework" in l:
frameworkName = l.replace("-framework ", "")
frameworkName = frameworkName.strip()
output = TLinkFramework.substitute(dict(framework=frameworkName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-system" in l:
systemLibName = l.replace("-system ", "")
systemLibName = systemLibName.strip()
output = TLinkSystemLib.substitute(dict(framework=systemLibName,framework_upper=systemLibName.upper())) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-object" in l:
objectLibName = l.replace("-object ", "")
objectLibName = objectLibName.strip()
output = TLinkObject.substitute(dict(object=objectLibName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
else:
#add to LIBS cmake var
output = TAppendPythonVariable.substitute(dict(var="LIBS", appendedval=l))
WriteToFile(f,output, s.HasCondition(), s.condition)
#Writes the cmake runtime/lib etc. outputs
def WriteOutputs(f, rootDir, sections):
for s in sections:
if "Executable" in s.data:
runtime = s.data["Executable"]
#insert any environment variables
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else "/"+runtime
runtime = rootDir + | chars = "${}"
for i in range(0,len(chars)):
s=s.replace(chars[i],"")
return s | identifier_body |
cmake_templates.py | ${${files}})\n')
#for outputting an executable
TExecutable = Template("add_executable(${project} $${SOURCES} $${HEADERS})\n")
#for outputting a shared library
TSharedLib = Template("add_library(${project} SHARED $${SOURCES} $${HEADERS})\n")
#for outputting a static library
TStaticLib = Template("add_library(${project} STATIC $${SOURCES} $${HEADERS})\n")
#for outputting a collection of code files to an object file
TObjectLib = Template("add_library(${project} OBJECT $${SOURCES})\n")
#template for appending a cmake variable to another cmake variable
TAppendVariable = Template("set( ${var} $${${var}} $${${appendedval}})\n")
#template for appending a python variable to a cmake variable
TAppendPythonVariable = Template("set( ${var} $${${var}} ${appendedval})\n")
#template for setting cmake variable
TMakeVariable = Template('set (${var} ${value})\n')
#template for adding a link directory
TLinkDirectory = Template('link_directories("${dir}")')
#template for targeting link libs
TTargetLinkLibs = Template("""if(NOT LIBS STREQUAL "")
target_link_libraries(${name} $${LIBS})
endif()
""")
#for linking a framework on the mac
TLinkFramework = Template("""find_library(${framework}_LIB ${framework})
MARK_AS_ADVANCED(${framework}_LIB)
set(LIBS $${LIBS} $${${framework}_LIB})""")
#for linking a system library
TLinkSystemLib = Template("""find_package(${framework} REQUIRED)
include_directories($${${framework_upper}_INCLUDE_DIRS})
set(LIBS $${LIBS} $${${framework_upper}_LIBRARIES})""")
#for linking objects into this module
TLinkObject = Template("set(LIBS $${LIBS} $$<TARGET_OBJECTS:${object}>)")
#template for executable output path
TExecutableOutput = Template('set(EXECUTABLE_OUTPUT_PATH "${dir}")\n')
#template for runtime output directory
TRuntimeOutput = Template('set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${dir}")\n')
#template for library output
TLibraryoutput = Template('set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${dir}")\nset(LIBRARY_OUTPUT_PATH "${dir}")\n')
#template for including a submodule
TSubmoduleInclude = Template('add_subdirectory(${dir})')
#-----Helper Functions----
def WriteToFile(f, output, condition = False, conditionID = ""):
f.write(output if not condition else WrapInGuard(conditionID, output))
def InsertEnvVariable(s):
return Template(s).substitute(os.environ)
def ContainsEnvVariable(s):
return ("$" in s)
#removes all characters that may cause issues with cmake
#such as ${} characters for environment variables
def Strip(s):
chars = "${}"
for i in range(0,len(chars)):
s=s.replace(chars[i],"")
return s
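# Example: Strip("${MY_SDK}/include") returns "MY_SDK/include"; only the
# literal characters '$', '{' and '}' are removed.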
#-----Write Functions-----
#Puts innerbody into TIfGuard template with the given condition
#then returns the string
def WrapInGuard(condition, innerbody):
return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))
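# TIfGuard is defined earlier in this module; assuming it expands to an
# if(<condition>) ... endif() block, WrapInGuard("WIN32", body) wraps the
# generated lines in a guard that CMake only evaluates on Windows.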
def WriteProjectSettings(f, section):
#defaults
if "UseFolders" not in section.data: section.data["UseFolders"] = "OFF"
#output
output = TProjectSettings.substitute(section.data)
f.write(output)
#writes required CMAKE variables to the file
def WriteRequiredVariables(f):
#all required variables go here to initialise
variables = [
dict(var="INCLUDES", value='""'),
dict(var="SOURCES", value='""'),
dict(var="LIBS", value='""')
]
#write them to file
for v in variables:
f.write(TMakeVariable.substitute(v))
#definitions such as #defines
def WriteDefinitions(f, sections):
#first write the one which is not platform specific
for s in sections:
defs = s.data[":"]
#gather definitions to be output
output = ""
for d in defs:
output += TDefinition.substitute(dict(definition=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#project include directories
def WriteIncludeDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
#gather definitions to be output
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
headerID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#add include directory
output = TIncludeDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#glob all header files
output = THeaderGlob.substitute(dict(dir=d, header_id=headerID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append to HEADERS variable
output = TAppendVariable.substitute(dict(var="HEADERS", appendedval=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Header Files" + localDir, files=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
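# Worked example with the hypothetical entry "include/render": localDir becomes
# "/include/render" and headerID becomes "_include_render", so the loop emits an
# include_directories() call, a header glob into ${_include_render}, an append to
# HEADERS, and a source_group() filter named "Header Files\include\render".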
#project source directories
def | (f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
sourceID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#glob all source files
output = TSourceGlob.substitute(dict(dir=d, source_id=sourceID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append globbed source files to SOURCES cmake variable
output = TAppendVariable.substitute(dict(var="SOURCES", appendedval=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Source Files" + localDir, files=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#includes local library directories
def WriteProjectLibDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = d if d.startswith('/') else "/"+d
d = rootDir + d
#include lib directory
output = TLinkDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#adds all libs to the LIBS cmake var
def WriteLinkLibs(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
libs = s.data[":"]
output = ""
for l in libs:
if "-framework" in l:
frameworkName = l.replace("-framework ", "")
frameworkName = frameworkName.strip()
output = TLinkFramework.substitute(dict(framework=frameworkName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-system" in l:
systemLibName = l.replace("-system ", "")
systemLibName = systemLibName.strip()
output = TLinkSystemLib.substitute(dict(framework=systemLibName,framework_upper=systemLibName.upper())) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-object" in l:
objectLibName = l.replace("-object ", "")
objectLibName = objectLibName.strip()
output = TLinkObject.substitute(dict(object=objectLibName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
else:
#add to LIBS cmake var
output = TAppendPythonVariable.substitute(dict(var="LIBS", appendedval=l))
WriteToFile(f,output, s.HasCondition(), s.condition)
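# Entries are dispatched on their prefix (names below are illustrative):
#   "-framework Cocoa"  -> find_library() block (macOS frameworks)
#   "-system OpenGL"    -> find_package() block via TLinkSystemLib
#   "-object corelib"   -> links corelib's object files via TLinkObject
#   "z"                 -> appended verbatim to the LIBS variable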
#Writes the cmake runtime/lib etc. outputs
def WriteOutputs(f, rootDir, sections):
for s in sections:
if "Executable" in s.data:
runtime = s.data["Executable"]
#insert any environment variables
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else "/"+runtime
runtime = rootDir | WriteSourceDirectories | identifier_name |
cmake_templates.py | #template for exectuable output
TExecutableOutput = Template('set(EXECUTABLE_OUTPUT_PATH "${dir}")\n')
#template for runtime output directory
TRuntimeOutput = Template('set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${dir}")\n')
#template for library output
TLibraryoutput = Template('set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${dir}")\nset(LIBRARY_OUTPUT_PATH "${dir}")\n')
#template for including a submodule
TSubmoduleInclude = Template('add_subdirectory(${dir})')
#-----Helper Functions----
def WriteToFile(f, output, condition = False, conditionID = ""):
f.write(output if not condition else WrapInGuard(conditionID, output))
def InsertEnvVariable(s):
return Template(s).substitute(os.environ)
def ContainsEnvVariable(s):
return ("$" in s)
#removes all characters that may cause issues with cmake
#such as ${} characters for environment variables
def Strip(s):
chars = "${}"
for i in range(0,len(chars)):
s=s.replace(chars[i],"")
return s
#-----Write Functions-----
#Puts innerbody into TIfGuard template with the given condition
#then returns the string
def WrapInGuard(condition, innerbody):
return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))
def WriteProjectSettings(f, section):
#defaults
if "UseFolders" not in section.data: section.data["UseFolders"] = "OFF"
#output
output = TProjectSettings.substitute(section.data)
f.write(output)
#writes required CMAKE variables to the file
def WriteRequiredVariables(f):
#all required variables go here to initialise
variables = [
dict(var="INCLUDES", value='""'),
dict(var="SOURCES", value='""'),
dict(var="LIBS", value='""')
]
#write them to file
for v in variables:
f.write(TMakeVariable.substitute(v))
#definitions such as #defines
def WriteDefinitions(f, sections):
#first write the one which is not platform specific
for s in sections:
defs = s.data[":"]
#gather definitions to be output
output = ""
for d in defs:
output += TDefinition.substitute(dict(definition=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#project include directories
def WriteIncludeDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
#gather definitions to be output
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
headerID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#add include directory
output = TIncludeDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#glob all header files
output = THeaderGlob.substitute(dict(dir=d, header_id=headerID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append to HEADERS variable
output = TAppendVariable.substitute(dict(var="HEADERS", appendedval=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Header Files" + localDir, files=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#project source directories
def WriteSourceDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
sourceID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#glob all source files
output = TSourceGlob.substitute(dict(dir=d, source_id=sourceID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append globbed source files to SOURCES cmake variable
output = TAppendVariable.substitute(dict(var="SOURCES", appendedval=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Source Files" + localDir, files=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#includes local library directories
def WriteProjectLibDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = d if d.startswith('/') else "/"+d
d = rootDir + d
#include lib directory
output = TLinkDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#adds all libs to the LIBS cmake var
def WriteLinkLibs(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
libs = s.data[":"]
output = ""
for l in libs:
if "-framework" in l:
frameworkName = l.replace("-framework ", "")
frameworkName = frameworkName.strip()
output = TLinkFramework.substitute(dict(framework=frameworkName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-system" in l:
systemLibName = l.replace("-system ", "")
systemLibName = systemLibName.strip()
output = TLinkSystemLib.substitute(dict(framework=systemLibName,framework_upper=systemLibName.upper())) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-object" in l:
objectLibName = l.replace("-object ", "")
objectLibName = objectLibName.strip()
output = TLinkObject.substitute(dict(object=objectLibName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
else:
#add to LIBS cmake var
output = TAppendPythonVariable.substitute(dict(var="LIBS", appendedval=l))
WriteToFile(f,output, s.HasCondition(), s.condition)
#Writes the cmake runtime/lib etc. outputs
def WriteOutputs(f, rootDir, sections):
for s in sections:
if "Executable" in s.data:
runtime = s.data["Executable"]
#insert any environment variables
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else "/"+runtime
runtime = rootDir + runtime
output = TRuntimeOutput.substitute(dict(dir=runtime))
WriteToFile(f,output, s.HasCondition(), s.condition)
if "Runtime" in s.data:
runtime = s.data["Runtime"]
#insert any environment variables
if ContainsEnvVariable(runtime):
runtime = InsertEnvVariable(runtime)
else:
runtime = runtime if runtime.startswith('/') else "/"+runtime
runtime = rootDir + runtime
output = TExecutableOutput.substitute(dict(dir=runtime))
WriteToFile(f,output, s.HasCondition(), s.condition)
if "Libs" in s.data:
print("LIBS OUTPUT BEING SET")
statics = s.data["Libs"]
#insert any environment variables
if ContainsEnvVariable(statics):
statics = InsertEnvVariable(statics)
else:
statics = statics if statics.startswith('/') else "/"+statics
statics = rootDir + statics
output = TLibraryoutput.substitute(dict(dir=statics))
WriteToFile(f,output, s.HasCondition(), s.condition)
#Writes the module output section of the CmakeLists file
def WriteModuleOutput(f, rootDir, m):
name = m.settings.data["Name"] #name of lib/exe
t = m.settings.data["Type"] #build type (lib/exe)
if "exe" in t:
f.write(TExecutable.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif "shared" in t:
f.write(TSharedLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif "static" in t:
f.write(TStaticLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name)))
elif "object" in t:
| f.write(TObjectLib.substitute(dict(project=name)))
f.write(TTargetLinkLibs.substitute(dict(name=name))) | conditional_block |
|
cmake_templates.py | ${${files}})\n')
#for outputting an executable
TExecutable = Template("add_executable(${project} $${SOURCES} $${HEADERS})\n")
#for outputting a shared library
TSharedLib = Template("add_library(${project} SHARED $${SOURCES} $${HEADERS})\n")
#for outputting a static library
TStaticLib = Template("add_library(${project} STATIC $${SOURCES} $${HEADERS})\n")
#for outputting a collection of code files to an object file
TObjectLib = Template("add_library(${project} OBJECT $${SOURCES})\n")
#template for appending a cmake variable to another cmake variable
TAppendVariable = Template("set( ${var} $${${var}} $${${appendedval}})\n")
#template for appending a python variable to a cmake variable
TAppendPythonVariable = Template("set( ${var} $${${var}} ${appendedval})\n")
#template for setting cmake variable
TMakeVariable = Template('set (${var} ${value})\n')
#template for adding a link directory
TLinkDirectory = Template('link_directories("${dir}")')
#template for targeting link libs
TTargetLinkLibs = Template("""if(NOT LIBS STREQUAL "")
target_link_libraries(${name} $${LIBS})
endif()
""")
#for linking a framework on the mac
TLinkFramework = Template("""find_library(${framework}_LIB ${framework})
MARK_AS_ADVANCED(${framework}_LIB)
set(LIBS $${LIBS} $${${framework}_LIB})""")
#for linking a system library
TLinkSystemLib = Template("""find_package(${framework} REQUIRED)
include_directories($${${framework_upper}_INCLUDE_DIRS})
set(LIBS $${LIBS} $${${framework_upper}_LIBRARIES})""")
#for linking objects into this module
TLinkObject = Template("set(LIBS $${LIBS} $$<TARGET_OBJECTS:${object}>)")
#template for executable output path
TExecutableOutput = Template('set(EXECUTABLE_OUTPUT_PATH "${dir}")\n')
#template for runtime output directory
TRuntimeOutput = Template('set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${dir}")\n')
#template for library output
TLibraryoutput = Template('set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${dir}")\nset(LIBRARY_OUTPUT_PATH "${dir}")\n')
#template for including a submodule
TSubmoduleInclude = Template('add_subdirectory(${dir})')
#-----Helper Functions----
def WriteToFile(f, output, condition = False, conditionID = ""):
f.write(output if not condition else WrapInGuard(conditionID, output))
def InsertEnvVariable(s):
return Template(s).substitute(os.environ)
def ContainsEnvVariable(s):
return ("$" in s)
#removes all characters that may cause issues with cmake
#such as ${} characters for environment variables
def Strip(s):
chars = "${}"
for i in range(0,len(chars)):
s=s.replace(chars[i],"")
return s
#-----Write Functions-----
#Puts innerbody into TIfGuard template with the given condition
#then returns the string
def WrapInGuard(condition, innerbody):
return TIfGuard.substitute(dict(condition=condition, innerbody=innerbody))
def WriteProjectSettings(f, section):
#defaults
if "UseFolders" not in section.data: section.data["UseFolders"] = "OFF"
#output
output = TProjectSettings.substitute(section.data)
f.write(output)
#writes required CMAKE variables to the file
def WriteRequiredVariables(f):
#all required variables go here to initialise
variables = [
dict(var="INCLUDES", value='""'),
dict(var="SOURCES", value='""'),
dict(var="LIBS", value='""')
]
#write them to file
for v in variables:
f.write(TMakeVariable.substitute(v))
#definitions such as #defines
def WriteDefinitions(f, sections):
#first write the one which is not platform specific
for s in sections:
defs = s.data[":"]
#gather definitions to be output
output = ""
for d in defs:
output += TDefinition.substitute(dict(definition=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#project include directories
def WriteIncludeDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
#gather definitions to be output
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
headerID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#add include directory
output = TIncludeDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#glob all header files
output = THeaderGlob.substitute(dict(dir=d, header_id=headerID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append to HEADERS variable
output = TAppendVariable.substitute(dict(var="HEADERS", appendedval=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Header Files" + localDir, files=headerID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#project source directories
def WriteSourceDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
localDir = d if d.startswith("/") else "/"+d
sourceID = Strip(localDir.replace('/','_'))
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = rootDir + localDir
#glob all source files
output = TSourceGlob.substitute(dict(dir=d, source_id=sourceID)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#append globbed source files to SOURCES cmake variable
output = TAppendVariable.substitute(dict(var="SOURCES", appendedval=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#make source group so they appear in filters
localDir = Strip(localDir.replace('/','\\\\'))
output = TSourceGroup.substitute(dict(folder="Source Files" + localDir, files=sourceID))
WriteToFile(f,output, s.HasCondition(), s.condition)
#includes local library directories
def WriteProjectLibDirectories(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
dirs = s.data[":"]
output = ""
for d in dirs:
#insert any environment variables
if ContainsEnvVariable(d):
d = InsertEnvVariable(d)
else:
d = d if d.startswith('/') else "/"+d
d = rootDir + d
#include lib directory
output = TLinkDirectory.substitute(dict(dir=d)) + "\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
#adds all libs to the LIBS cmake var
def WriteLinkLibs(f, rootDir, sections):
#first write the one which is not platform specific
for s in sections:
libs = s.data[":"]
output = ""
for l in libs:
if "-framework" in l:
frameworkName = l.replace("-framework ", "")
frameworkName = frameworkName.strip()
output = TLinkFramework.substitute(dict(framework=frameworkName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-system" in l:
systemLibName = l.replace("-system ", "")
systemLibName = systemLibName.strip()
output = TLinkSystemLib.substitute(dict(framework=systemLibName,framework_upper=systemLibName.upper())) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
elif "-object" in l:
objectLibName = l.replace("-object ", "")
objectLibName = objectLibName.strip()
output = TLinkObject.substitute(dict(object=objectLibName)) +"\n"
WriteToFile(f,output, s.HasCondition(), s.condition)
else:
#add to LIBS cmake var
output = TAppendPythonVariable.substitute(dict(var="LIBS", appendedval=l))
WriteToFile(f,output, s.HasCondition(), s.condition)
#Writes the cmake runtime/lib etc. outputs
def WriteOutputs(f, rootDir, sections):
for s in sections:
if "Executable" in s.data:
runtime = s.data["Executable"]
#insert any environment variables
if ContainsEnvVariable(runtime): | else:
runtime = runtime if runtime.startswith('/') else "/"+runtime
runtime = rootDir + | runtime = InsertEnvVariable(runtime) | random_line_split |
__init__.py | (flag):
"""
Turn debugging on or off.
@param flag (boolean) True means output debugging, False means no.
"""
global debug
debug = flag
XLM.XLM_Object.debug = flag
XLM.xlm_library.debug = flag
XLM.ms_stack_transformer.debug = flag
XLM.stack_transformer.debug = flag
XLM.excel2007.debug = flag
####################################################################
def _extract_xlm(maldoc):
"""
Run olevba on the given file and extract the XLM macro code lines.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (str) The XLM macro cells extracted from running olevba on the
given file. None will be returned on error.
"""
# Run olevba on the given file.
olevba_out = None
FNULL = open(os.devnull, 'w')
try:
cmd = "timeout 30 olevba -c \"" + str(maldoc) + "\""
olevba_out = subprocess.check_output(cmd, shell=True, stderr=FNULL)
except Exception as e:
color_print.output('r', "ERROR: Running olevba on " + str(maldoc) + " failed. " + str(e))
return None
# Not handling encrypted Excel files.
if (b"FILEPASS record: file is password protected" in olevba_out):
color_print.output('y', "WARNING: " + str(maldoc) + " is password protected. Not emulating.")
return None
# Pull out the chunks containing the XLM lines.
chunk_pat = b"in file: xlm_macro \- OLE stream: 'xlm_macro'\n(?:\- ){39}\n(.+)"
chunks = re.findall(chunk_pat, olevba_out, re.DOTALL)
# Pull out all the XLM lines from each chunk.
r = b""
xlm_pat = br"' \d\d\d\d {1,10}\d{1,6} [^\n]+\n"
for chunk in chunks:
for line in re.findall(xlm_pat, chunk):
r += line
# Convert characters so this can be parsed.
try:
r = XLM.utils.to_str(r)
except UnicodeDecodeError:
r = XLM.utils.strip_unprintable(r)
# Did we find XLM?
if (len(r.strip()) == 0):
color_print.output('y', "WARNING: No XLM found.")
return None
# Done. Return XLM lines.
return r
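# The pattern above matches one XLM cell per line of olevba's plugin_biff
# output, shaped roughly like (illustrative): ' 0001     12 =HALT()
# i.e. a leading quote, a 4-digit number, whitespace, another number and the
# raw XLM formula text.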
####################################################################
def _guess_xlm_sheet(workbook):
"""
Guess the sheet containing the XLM macros by finding the sheet with the
most unresolved "#NAME" cells.
@param workbook (ExcelSheet object) The Excel spreadsheet to check.
@return (str) The name of the sheet that might contain the XLM macros.
"""
# TODO: If plugin_biff.py used by olevba to dump XLM includes sheet names this
# function will no longer be needed.
# Look at each sheet.
xlm_sheet = None
unresolved_count = -1
for curr_sheet_name in workbook.sheet_names():
curr_sheet = workbook.sheet_by_name(curr_sheet_name)
curr_unresolved_count = 0
for cell_value in list(curr_sheet.cells.values()):
cell_value = cell_value.strip()
if (len(cell_value) == 0):
continue
if (cell_value.strip() == "#NAME?"):
curr_unresolved_count += 1
if (curr_unresolved_count > unresolved_count):
unresolved_count = curr_unresolved_count
xlm_sheet = curr_sheet_name
# Return the sheet with the most '#NAME' cells.
return xlm_sheet
####################################################################
def _merge_XLM_cells(maldoc, xlm_cells):
"""
Merge the given XLM cells into the value cells read from the
given Excel file.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@param xlm_cells (dict) A dict of XLM formula objects (XLM_Object objects) where
dict[ROW][COL] gives the XLM cell at (ROW, COL).
@return (tuple) A 3 element tuple where the 1st element is the updated ExcelWorkbook and
2nd element is a list of 2 element tuples containing the XLM cell indices on success and
the 3rd element is the XLM sheet object, (None, None, None) on error.
"""
# Read in the Excel workbook data.
color_print.output('g', "Merging XLM macro cells with data cells ...")
workbook = excel.read_excel_sheets(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading in Excel file " + str(maldoc) + " failed.")
return (None, None, None)
# Guess the name of the sheet containing the XLM macros.
xlm_sheet_name = _guess_xlm_sheet(workbook)
if debug:
print("XLM Sheet:")
print(xlm_sheet_name)
print("")
# Insert the XLM macros into the XLM sheet.
xlm_sheet = workbook.sheet_by_name(xlm_sheet_name)
xlm_cell_indices = []
if debug:
print("=========== START MERGE ==============")
rows = xlm_cells.keys()
for row in rows:
cols = xlm_cells[row].keys()
for col in cols:
xlm_cell = xlm_cells[row][col]
if debug:
print((row, col))
print(xlm_cell)
cell_index = (row, col)
xlm_sheet.cells[cell_index] = xlm_cell
xlm_cell_indices.append(cell_index)
# Debug.
if debug:
print("=========== DONE MERGE ==============")
print(workbook)
# Done. Return the indices of the added XLM cells and the updated
# workbook.
color_print.output('g', "Merged XLM macro cells with data cells.")
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def _read_workbook_2007(maldoc):
"""
Read in an Excel 2007+ workbook and the XLM macros in the workbook.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) A 3 element tuple where the 1st element is the workbook object,
the 2nd element is a list of XLM cell indices ((row, column) tuples) and the 3rd
element is a sheet element for the sheet with XLM macros.
"""
# Read in the 2007+ cells.
color_print.output('g', "Analyzing Excel 2007+ file ...")
workbook_info = XLM.excel2007.read_excel_2007_XLM(maldoc)
color_print.output('g', "Extracted XLM from ZIP archive.")
if (workbook_info is None):
return (None, None, None)
if (len(workbook_info) == 0):
color_print.output('y', "WARNING: No XLM macros found.")
return (None, None, None)
if debug:
print("=========== START 2007+ CONTENTS ==============")
for sheet in workbook_info.keys():
print("\n------")
print(sheet)
print("")
for c in workbook_info[sheet].keys():
print(str(c) + " ---> " + str(workbook_info[sheet][c]))
print("=========== DONE 2007+ CONTENTS ==============")
# Figure out which sheet probably has the XLM macros.
xlm_sheet_name = None
max_formulas = -1
for sheet in workbook_info.keys():
if (len(workbook_info[sheet]) > max_formulas):
max_formulas = len(workbook_info[sheet])
xlm_sheet_name = sheet
# Parse each formula and add it to a sheet object.
xlm_cells = {}
for cell_index in workbook_info[xlm_sheet_name].keys():
# Value only cell?
row = cell_index[0]
col = cell_index[1]
if (row not in xlm_cells):
xlm_cells[row] = {}
raw_formula = workbook_info[xlm_sheet_name][cell_index][0]
if (raw_formula is None):
# Do we have a value?
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
# Just save the value in the cell.
xlm_cells[row][col] = formula_val
continue
# Parse the formula into an XLM object.
formula_str = b"=" + raw_formula
formula = XLM.ms_stack_transformer.parse_ms_xlm(formula_str)
# Set the value of the formula if we know it.
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
formula.value = formula_val
# Save the XLM object.
formula.update_cell_id(cell_index)
xlm_cells[row][col] = | set_debug | identifier_name |
|
__init__.py | (cmd, shell=True, stderr=FNULL)
except Exception as e:
color_print.output('r', "ERROR: Running olevba on " + str(maldoc) + " failed. " + str(e))
return None
# Not handling encrypted Excel files.
if (b"FILEPASS record: file is password protected" in olevba_out):
color_print.output('y', "WARNING: " + str(maldoc) + " is password protected. Not emulating.")
return None
# Pull out the chunks containing the XLM lines.
chunk_pat = b"in file: xlm_macro \- OLE stream: 'xlm_macro'\n(?:\- ){39}\n(.+)"
chunks = re.findall(chunk_pat, olevba_out, re.DOTALL)
# Pull out all the XLM lines from each chunk.
r = b""
xlm_pat = br"' \d\d\d\d {1,10}\d{1,6} [^\n]+\n"
for chunk in chunks:
for line in re.findall(xlm_pat, chunk):
r += line
# Convert characters so this can be parsed.
try:
r = XLM.utils.to_str(r)
except UnicodeDecodeError:
r = XLM.utils.strip_unprintable(r)
# Did we find XLM?
if (len(r.strip()) == 0):
color_print.output('y', "WARNING: No XLM found.")
return None
# Done. Return XLM lines.
return r
####################################################################
def _guess_xlm_sheet(workbook):
"""
Guess the sheet containing the XLM macros by finding the sheet with the
most unresolved "#NAME" cells.
@param workbook (ExcelSheet object) The Excel spreadsheet to check.
@return (str) The name of the sheet that might contain the XLM macros.
"""
# TODO: If plugin_biff.py used by olevba to dump XLM includes sheet names this
# function will no longer be needed.
# Look at each sheet.
xlm_sheet = None
unresolved_count = -1
for curr_sheet_name in workbook.sheet_names():
curr_sheet = workbook.sheet_by_name(curr_sheet_name)
curr_unresolved_count = 0
for cell_value in list(curr_sheet.cells.values()):
cell_value = cell_value.strip()
if (len(cell_value) == 0):
continue
if (cell_value.strip() == "#NAME?"):
curr_unresolved_count += 1
if (curr_unresolved_count > unresolved_count):
unresolved_count = curr_unresolved_count
xlm_sheet = curr_sheet_name
# Return the sheet with the most '#NAME' cells.
return xlm_sheet
####################################################################
def _merge_XLM_cells(maldoc, xlm_cells):
"""
Merge the given XLM cells into the value cells read from the
given Excel file.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@param xlm_cells (dict) A dict of XLM formula objects (XLM_Object objects) where
dict[ROW][COL] gives the XLM cell at (ROW, COL).
@return (tuple) A 3 element tuple where the 1st element is the updated ExcelWorkbook and
2nd element is a list of 2 element tuples containing the XLM cell indices on success and
the 3rd element is the XLM sheet object, (None, None, None) on error.
"""
# Read in the Excel workbook data.
color_print.output('g', "Merging XLM macro cells with data cells ...")
workbook = excel.read_excel_sheets(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading in Excel file " + str(maldoc) + " failed.")
return (None, None, None)
# Guess the name of the sheet containing the XLM macros.
xlm_sheet_name = _guess_xlm_sheet(workbook)
if debug:
print("XLM Sheet:")
print(xlm_sheet_name)
print("")
# Insert the XLM macros into the XLM sheet.
xlm_sheet = workbook.sheet_by_name(xlm_sheet_name)
xlm_cell_indices = []
if debug:
print("=========== START MERGE ==============")
rows = xlm_cells.keys()
for row in rows:
cols = xlm_cells[row].keys()
for col in cols:
xlm_cell = xlm_cells[row][col]
if debug:
print((row, col))
print(xlm_cell)
cell_index = (row, col)
xlm_sheet.cells[cell_index] = xlm_cell
xlm_cell_indices.append(cell_index)
# Debug.
if debug:
print("=========== DONE MERGE ==============")
print(workbook)
# Done. Return the indices of the added XLM cells and the updated
# workbook.
color_print.output('g', "Merged XLM macro cells with data cells.")
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def _read_workbook_2007(maldoc):
"""
Read in an Excel 2007+ workbook and the XLM macros in the workbook.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) A 3 element tuple where the 1st element is the workbook object,
the 2nd element is a list of XLM cell indices ((row, column) tuples) and the 3rd
element is a sheet element for the sheet with XLM macros.
"""
# Read in the 2007+ cells.
color_print.output('g', "Analyzing Excel 2007+ file ...")
workbook_info = XLM.excel2007.read_excel_2007_XLM(maldoc)
color_print.output('g', "Extracted XLM from ZIP archive.")
if (workbook_info is None):
return (None, None, None)
if (len(workbook_info) == 0):
color_print.output('y', "WARNING: No XLM macros found.")
return (None, None, None)
if debug:
print("=========== START 2007+ CONTENTS ==============")
for sheet in workbook_info.keys():
print("\n------")
print(sheet)
print("")
for c in workbook_info[sheet].keys():
print(str(c) + " ---> " + str(workbook_info[sheet][c]))
print("=========== DONE 2007+ CONTENTS ==============")
# Figure out which sheet probably has the XLM macros.
xlm_sheet_name = None
max_formulas = -1
for sheet in workbook_info.keys():
if (len(workbook_info[sheet]) > max_formulas):
max_formulas = len(workbook_info[sheet])
xlm_sheet_name = sheet
# Parse each formula and add it to a sheet object.
xlm_cells = {}
for cell_index in workbook_info[xlm_sheet_name].keys():
# Value only cell?
row = cell_index[0]
col = cell_index[1]
if (row not in xlm_cells):
xlm_cells[row] = {}
raw_formula = workbook_info[xlm_sheet_name][cell_index][0]
if (raw_formula is None):
# Do we have a value?
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
# Just save the value in the cell.
xlm_cells[row][col] = formula_val
continue
# Parse the formula into an XLM object.
formula_str = b"=" + raw_formula
formula = XLM.ms_stack_transformer.parse_ms_xlm(formula_str)
# Set the value of the formula if we know it.
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
formula.value = formula_val
# Save the XLM object.
formula.update_cell_id(cell_index)
xlm_cells[row][col] = formula
color_print.output('g', "Parsed MS XLM macros.")
# Merge the XLM cells with the value cells into a single unified spreadsheet
# object.
workbook, xlm_cell_indices, xlm_sheet = _merge_XLM_cells(maldoc, xlm_cells)
if (workbook is None):
color_print.output('r', "ERROR: Merging XLM cells failed. Emulation aborted.")
return (None, None, None)
# Done.
return (workbook, xlm_cell_indices, xlm_sheet)
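# Shape of workbook_info as consumed above: a dict keyed by sheet name whose
# values map (row, col) tuples to (raw_formula_bytes_or_None, cell_value)
# pairs, e.g. {"Macro1": {(1, 2): (b"RUN(A1)", None)}} -- example values only.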
####################################################################
def _read_workbook_97(maldoc):
"""
Read in an Excel 97 workbook and the XLM macros in the workbook.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) A 3 element tuple where the 1st element is the workbook object,
the 2nd element is a list of XLM cell indices ((row, column) tuples) and the 3rd
element is a sheet element for the sheet with XLM macros.
""" | random_line_split |
||
__init__.py | # Not handling encrypted Excel files.
if (b"FILEPASS record: file is password protected" in olevba_out):
color_print.output('y', "WARNING: " + str(maldoc) + " is password protected. Not emulating.")
return None
# Pull out the chunks containing the XLM lines.
chunk_pat = b"in file: xlm_macro \- OLE stream: 'xlm_macro'\n(?:\- ){39}\n(.+)"
chunks = re.findall(chunk_pat, olevba_out, re.DOTALL)
# Pull out all the XLM lines from each chunk.
r = b""
xlm_pat = br"' \d\d\d\d {1,10}\d{1,6} [^\n]+\n"
for chunk in chunks:
for line in re.findall(xlm_pat, chunk):
r += line
# Convert characters so this can be parsed.
try:
r = XLM.utils.to_str(r)
except UnicodeDecodeError:
r = XLM.utils.strip_unprintable(r)
# Did we find XLM?
if (len(r.strip()) == 0):
color_print.output('y', "WARNING: No XLM found.")
return None
# Done. Return XLM lines.
return r
####################################################################
def _guess_xlm_sheet(workbook):
"""
Guess the sheet containing the XLM macros by finding the sheet with the
most unresolved "#NAME" cells.
@param workbook (ExcelSheet object) The Excel spreadsheet to check.
@return (str) The name of the sheet that might contain the XLM macros.
"""
# TODO: If plugin_biff.py used by olevba to dump XLM includes sheet names this
# function will no longer be needed.
# Look at each sheet.
xlm_sheet = None
unresolved_count = -1
for curr_sheet_name in workbook.sheet_names():
curr_sheet = workbook.sheet_by_name(curr_sheet_name)
curr_unresolved_count = 0
for cell_value in list(curr_sheet.cells.values()):
cell_value = cell_value.strip()
if (len(cell_value) == 0):
continue
if (cell_value.strip() == "#NAME?"):
curr_unresolved_count += 1
if (curr_unresolved_count > unresolved_count):
unresolved_count = curr_unresolved_count
xlm_sheet = curr_sheet_name
# Return the sheet with the most '#NAME' cells.
return xlm_sheet
####################################################################
def _merge_XLM_cells(maldoc, xlm_cells):
"""
Merge the given XLM cells into the value cells read from the
given Excel file.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@param xlm_cells (dict) A dict of XLM formula objects (XLM_Object objects) where
dict[ROW][COL] gives the XLM cell at (ROW, COL).
@return (tuple) A 3 element tuple where the 1st element is the updated ExcelWorkbook and
2nd element is a list of 2 element tuples containing the XLM cell indices on success and
the 3rd element is the XLM sheet object, (None, None, None) on error.
"""
# Read in the Excel workbook data.
color_print.output('g', "Merging XLM macro cells with data cells ...")
workbook = excel.read_excel_sheets(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading in Excel file " + str(maldoc) + " failed.")
return (None, None, None)
# Guess the name of the sheet containing the XLM macros.
xlm_sheet_name = _guess_xlm_sheet(workbook)
if debug:
print("XLM Sheet:")
print(xlm_sheet_name)
print("")
# Insert the XLM macros into the XLM sheet.
xlm_sheet = workbook.sheet_by_name(xlm_sheet_name)
xlm_cell_indices = []
if debug:
print("=========== START MERGE ==============")
rows = xlm_cells.keys()
for row in rows:
cols = xlm_cells[row].keys()
for col in cols:
xlm_cell = xlm_cells[row][col]
if debug:
print((row, col))
print(xlm_cell)
cell_index = (row, col)
xlm_sheet.cells[cell_index] = xlm_cell
xlm_cell_indices.append(cell_index)
# Debug.
if debug:
print("=========== DONE MERGE ==============")
print(workbook)
# Done. Return the indices of the added XLM cells and the updated
# workbook.
color_print.output('g', "Merged XLM macro cells with data cells.")
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def _read_workbook_2007(maldoc):
"""
Read in an Excel 2007+ workbook and the XLM macros in the workbook.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) A 3 element tuple where the 1st element is the workbook object,
the 2nd element is a list of XLM cell indices ((row, column) tuples) and the 3rd
element is a sheet element for the sheet with XLM macros.
"""
# Read in the 2007+ cells.
color_print.output('g', "Analyzing Excel 2007+ file ...")
workbook_info = XLM.excel2007.read_excel_2007_XLM(maldoc)
color_print.output('g', "Extracted XLM from ZIP archive.")
if (workbook_info is None):
return (None, None, None)
if (len(workbook_info) == 0):
color_print.output('y', "WARNING: No XLM macros found.")
return (None, None, None)
if debug:
print("=========== START 2007+ CONTENTS ==============")
for sheet in workbook_info.keys():
print("\n------")
print(sheet)
print("")
for c in workbook_info[sheet].keys():
print(str(c) + " ---> " + str(workbook_info[sheet][c]))
print("=========== DONE 2007+ CONTENTS ==============")
# Figure out which sheet probably has the XLM macros.
xlm_sheet_name = None
max_formulas = -1
for sheet in workbook_info.keys():
if (len(workbook_info[sheet]) > max_formulas):
max_formulas = len(workbook_info[sheet])
xlm_sheet_name = sheet
# Parse each formula and add it to a sheet object.
xlm_cells = {}
for cell_index in workbook_info[xlm_sheet_name].keys():
# Value only cell?
row = cell_index[0]
col = cell_index[1]
if (row not in xlm_cells):
xlm_cells[row] = {}
raw_formula = workbook_info[xlm_sheet_name][cell_index][0]
if (raw_formula is None):
# Do we have a value?
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
# Just save the value in the cell.
xlm_cells[row][col] = formula_val
continue
# Parse the formula into an XLM object.
formula_str = b"=" + raw_formula
formula = XLM.ms_stack_transformer.parse_ms_xlm(formula_str)
# Set the value of the formula if we know it.
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
formula.value = formula_val
# Save the XLM object.
formula.update_cell_id(cell_index)
xlm_cells[row][col] = formula
color_print.output('g', "Parsed MS XLM macros.")
# Merge the XLM cells with the value cells into a single unified spreadsheet
# object.
workbook, xlm_cell_indices, xlm_sheet = _merge_XLM_cells(maldoc, xlm_cells)
if (workbook is None):
color_print.output('r', "ERROR: Merging XLM cells failed. Emulation aborted.")
return (None, None, None)
| """
Run olevba on the given file and extract the XLM macro code lines.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (str) The XLM macro cells extracted from running olevba on the
given file. None will be returned on error.
"""
# Run olevba on the given file.
olevba_out = None
FNULL = open(os.devnull, 'w')
try:
cmd = "timeout 30 olevba -c \"" + str(maldoc) + "\""
olevba_out = subprocess.check_output(cmd, shell=True, stderr=FNULL)
except Exception as e:
color_print.output('r', "ERROR: Running olevba on " + str(maldoc) + " failed. " + str(e))
return None
| identifier_body |
|
__init__.py | ba on the
given file. None will be returned on error.
"""
# Run olevba on the given file.
olevba_out = None
FNULL = open(os.devnull, 'w')
try:
cmd = "timeout 30 olevba -c \"" + str(maldoc) + "\""
olevba_out = subprocess.check_output(cmd, shell=True, stderr=FNULL)
except Exception as e:
color_print.output('r', "ERROR: Running olevba on " + str(maldoc) + " failed. " + str(e))
return None
# Not handling encrypted Excel files.
if (b"FILEPASS record: file is password protected" in olevba_out):
color_print.output('y', "WARNING: " + str(maldoc) + " is password protected. Not emulating.")
return None
# Pull out the chunks containing the XLM lines.
chunk_pat = b"in file: xlm_macro \- OLE stream: 'xlm_macro'\n(?:\- ){39}\n(.+)"
chunks = re.findall(chunk_pat, olevba_out, re.DOTALL)
# Pull out all the XLM lines from each chunk.
r = b""
xlm_pat = br"' \d\d\d\d {1,10}\d{1,6} [^\n]+\n"
for chunk in chunks:
for line in re.findall(xlm_pat, chunk):
r += line
# Convert characters so this can be parsed.
try:
r = XLM.utils.to_str(r)
except UnicodeDecodeError:
r = XLM.utils.strip_unprintable(r)
# Did we find XLM?
if (len(r.strip()) == 0):
color_print.output('y', "WARNING: No XLM found.")
return None
# Done. Return XLM lines.
return r
####################################################################
def _guess_xlm_sheet(workbook):
"""
Guess the sheet containing the XLM macros by finding the sheet with the
most unresolved "#NAME" cells.
@param workbook (ExcelSheet object) The Excel spreadsheet to check.
@return (str) The name of the sheet that might contain the XLM macros.
"""
# TODO: If plugin_biff.py used by olevba to dump XLM includes sheet names this
# function will no longer be needed.
# Look at each sheet.
xlm_sheet = None
unresolved_count = -1
for curr_sheet_name in workbook.sheet_names():
curr_sheet = workbook.sheet_by_name(curr_sheet_name)
curr_unresolved_count = 0
for cell_value in list(curr_sheet.cells.values()):
cell_value = cell_value.strip()
if (len(cell_value) == 0):
continue
if (cell_value.strip() == "#NAME?"):
curr_unresolved_count += 1
if (curr_unresolved_count > unresolved_count):
unresolved_count = curr_unresolved_count
xlm_sheet = curr_sheet_name
# Return the sheet with the most '#NAME' cells.
return xlm_sheet
####################################################################
def _merge_XLM_cells(maldoc, xlm_cells):
"""
Merge the given XLM cells into the value cells read from the
given Excel file.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@param xlm_cells (dict) A dict of XLM formula objects (XLM_Object objects) where
dict[ROW][COL] gives the XLM cell at (ROW, COL).
@return (tuple) A 3 element tuple where the 1st element is the updated ExcelWorkbook and
2nd element is a list of 2 element tuples containing the XLM cell indices on success and
the 3rd element is the XLM sheet object, (None, None, None) on error.
"""
# Read in the Excel workbook data.
color_print.output('g', "Merging XLM macro cells with data cells ...")
workbook = excel.read_excel_sheets(maldoc)
if (workbook is None):
color_print.output('r', "ERROR: Reading in Excel file " + str(maldoc) + " failed.")
return (None, None, None)
# Guess the name of the sheet containing the XLM macros.
xlm_sheet_name = _guess_xlm_sheet(workbook)
if debug:
print("XLM Sheet:")
print(xlm_sheet_name)
print("")
# Insert the XLM macros into the XLM sheet.
xlm_sheet = workbook.sheet_by_name(xlm_sheet_name)
xlm_cell_indices = []
if debug:
print("=========== START MERGE ==============")
rows = xlm_cells.keys()
for row in rows:
cols = xlm_cells[row].keys()
for col in cols:
xlm_cell = xlm_cells[row][col]
if debug:
|
cell_index = (row, col)
xlm_sheet.cells[cell_index] = xlm_cell
xlm_cell_indices.append(cell_index)
# Debug.
if debug:
print("=========== DONE MERGE ==============")
print(workbook)
# Done. Return the indices of the added XLM cells and the updated
# workbook.
color_print.output('g', "Merged XLM macro cells with data cells.")
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def _read_workbook_2007(maldoc):
"""
Read in an Excel 2007+ workbook and the XLM macros in the workbook.
@param maldoc (str) The fully qualified name of the Excel file to
analyze.
@return (tuple) A 3 element tuple where the 1st element is the workbook object,
the 2nd element is a list of XLM cell indices ((row, column) tuples) and the 3rd
element is a sheet element for the sheet with XLM macros.
"""
# Read in the 2007+ cells.
color_print.output('g', "Analyzing Excel 2007+ file ...")
workbook_info = XLM.excel2007.read_excel_2007_XLM(maldoc)
color_print.output('g', "Extracted XLM from ZIP archive.")
if (workbook_info is None):
return (None, None, None)
if (len(workbook_info) == 0):
color_print.output('y', "WARNING: No XLM macros found.")
return (None, None, None)
if debug:
print("=========== START 2007+ CONTENTS ==============")
for sheet in workbook_info.keys():
print("\n------")
print(sheet)
print("")
for c in workbook_info[sheet].keys():
print(str(c) + " ---> " + str(workbook_info[sheet][c]))
print("=========== DONE 2007+ CONTENTS ==============")
# Figure out which sheet probably has the XLM macros.
xlm_sheet_name = None
max_formulas = -1
for sheet in workbook_info.keys():
if (len(workbook_info[sheet]) > max_formulas):
max_formulas = len(workbook_info[sheet])
xlm_sheet_name = sheet
# Parse each formula and add it to a sheet object.
xlm_cells = {}
for cell_index in workbook_info[xlm_sheet_name].keys():
# Value only cell?
row = cell_index[0]
col = cell_index[1]
if (row not in xlm_cells):
xlm_cells[row] = {}
raw_formula = workbook_info[xlm_sheet_name][cell_index][0]
if (raw_formula is None):
# Do we have a value?
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
# Just save the value in the cell.
xlm_cells[row][col] = formula_val
continue
# Parse the formula into an XLM object.
formula_str = b"=" + raw_formula
formula = XLM.ms_stack_transformer.parse_ms_xlm(formula_str)
# Set the value of the formula if we know it.
formula_val = workbook_info[xlm_sheet_name][cell_index][1]
if (formula_val is not None):
formula.value = formula_val
# Save the XLM object.
formula.update_cell_id(cell_index)
xlm_cells[row][col] = formula
color_print.output('g', "Parsed MS XLM macros.")
# Merge the XLM cells with the value cells into a single unified spreadsheet
# object.
workbook, xlm_cell_indices, xlm_sheet = _merge_XLM_cells(maldoc, xlm_cells)
if (workbook is None):
color_print.output('r', "ERROR: Merging XLM cells failed. Emulation aborted.")
return (None, None, None)
# Done.
return (workbook, xlm_cell_indices, xlm_sheet)
####################################################################
def _read_workbook_97(maldoc):
"""
Read in an Excel 97 workbook and the XLM macros in the workbook.
@param | print((row, col))
print(xlm_cell) | conditional_block |
rainstorm.rs | c_float;
pub fn frexpf(n: c_float, value: &mut c_int) -> c_float;
pub fn fmaxf(a: c_float, b: c_float) -> c_float;
pub fn fminf(a: c_float, b: c_float) -> c_float;
pub fn fmodf(a: c_float, b: c_float) -> c_float;
pub fn nextafterf(x: c_float, y: c_float) -> c_float;
pub fn hypotf(x: c_float, y: c_float) -> c_float;
pub fn ldexpf(x: c_float, n: c_int) -> c_float;
pub fn logbf(n: c_float) -> c_float;
pub fn log1pf(n: c_float) -> c_float;
pub fn ilogbf(n: c_float) -> c_int;
pub fn modff(n: c_float, iptr: &mut c_float) -> c_float;
pub fn sinhf(n: c_float) -> c_float;
pub fn tanf(n: c_float) -> c_float;
pub fn tanhf(n: c_float) -> c_float;
pub fn tgammaf(n: c_float) -> c_float;
/*#[cfg(unix)]
pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float;
#[cfg(windows)]
#[link_name="__lgammaf_r"]
pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float;*/
}
}
#[no_mangle]
pub static mut NOCMD_ENABLED: bool = false;
#[no_mangle]
pub static mut REAL_INIT: *const () = 0 as *const();
#[no_mangle]
pub static mut REAL_CREATEMOVE: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_EXTRAMOUSESAMPLE: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_RUNCOMMAND: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_SERVERCMDKEYVALUES: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_NETCHANNEL_SENDDATAGRAM: *const () = 0 as *const ();
#[no_mangle]
pub static mut CINPUT_PTR: *mut sdk::CInput = 0 as *mut sdk::CInput;
struct CString(*const libc::c_char);
impl CString {
pub fn new(src: &'static [u8]) -> Option<CString> {
let slice = src.repr();
if unsafe { *((slice.data as uint + (slice.len - 1)) as *const u8) == 0 } {
Some(CString(slice.data as *const libc::c_char))
} else {
None
}
}
pub unsafe fn new_raw(src: *const u8) -> CString {
CString(src as *const libc::c_char)
}
}
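// Usage sketch: the wrapped pointer must come from a NUL-terminated static
// byte string, e.g. CString::new(b"sv_cheats\0") yields Some(..), while
// CString::new(b"sv_cheats") yields None because the final byte is not 0.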
#[no_mangle]
pub extern "C" fn rainstorm_getivengineclient() -> sdk::raw::IVEngineClientPtr {
unsafe { (*(cheats::CHEAT_MANAGER)).get_gamepointers().ivengineclient.get_ptr() }
}
pub struct GamePointers {
ivengineclient: sdk::IVEngineClient,
icliententitylist: sdk::IClientEntityList,
ibaseclientdll: sdk::IBaseClientDLL,
ienginetrace: sdk::IEngineTrace,
appsysfactory: Option<sdk::AppSysFactory>,
ivmodelinfo: sdk::IVModelInfo,
icvar: Option<sdk::ICvar>,
iuniformrandomstream: sdk::IUniformRandomStream,
globals: Option<*mut sdk::CGlobalVarsBase>
}
impl GamePointers {
pub fn load() -> GamePointers {
log!("Loading GamePointers...\n");
GamePointers {
ivengineclient: sdk::get_ivengineclient(),
ibaseclientdll: sdk::get_ibaseclientdll(),
icliententitylist: sdk::get_icliententitylist(),
ienginetrace: sdk::get_ienginetrace(),
ivmodelinfo: sdk::get_ivmodelinfo(),
appsysfactory: None,
icvar: None,
iuniformrandomstream: sdk::get_iuniformrandomstream(),
globals: None
}
}
}
pub unsafe fn locate_cinput() -> Option<*mut sdk::CInput> {
let start_addr = REAL_CREATEMOVE as *const ();
log!("Locating CInput from CreateMove at {}\n", start_addr);
let result = utils::search_memory(start_addr, 100, &[0x8B, 0x0D]);
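// 0x8B 0x0D encodes `mov ecx, dword ptr [imm32]` on x86; the four bytes that
// follow hold the absolute address of the global CInput pointer, which is what
// the offset-by-2 dereference below reads.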
//let result = utils::search_memory(((result1 as uint) + 2) as *const (), 100, &[0x8B, 0x0D]);
match result {
Some(ptr) => {
let load_instruction_operand = ((ptr as uint) + 2) as *const *const *mut sdk::CInput;
log!("CInput load found at {}\n", load_instruction_operand);
let cinput_ptr_ptr = *load_instruction_operand;
log!("CInput pointer: {}\n", cinput_ptr_ptr);
log!("CInput found at {}\n", *cinput_ptr_ptr);
Some((*cinput_ptr_ptr))
},
None => {
log!("CInput not found?!?\n");
None
}
}
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_preinithook(app_sys_factory: sdk::AppSysFactoryPtr, _physics_factory: *mut (), globals: *mut sdk::CGlobalVarsBase) {
log!("pre-init hook running\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).preinit(app_sys_factory, globals);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_postinithook() {
log!("Post-init hook running...\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).postinit();
} else {
log!("Cheat manager not found!\n");
libc::exit(1);
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_pre_createmove(sequence_number: *mut libc::c_int, input_sample_frametime: *mut libc::c_float, active: *mut bool) {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).pre_createmove(sequence_number, input_sample_frametime, active);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_process_usercmd(cmd: &mut sdk::CUserCmd) {
if cheats::CHEAT_MANAGER.is_not_null() {
maybe_hook_inetchannel((*cheats::CHEAT_MANAGER).get_gamepointers());
(*cheats::CHEAT_MANAGER).process_usercmd(cmd);
} else {
quit!("Cheat manager not found!\n");
}; | #[no_mangle]
pub unsafe extern "C" fn rainstorm_extramousesample(input_sample_frametime: libc::c_float, active: bool) {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).extramousesample(input_sample_frametime, active);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub extern "C" fn rainstorm_command_cb(c_arguments: *const libc::c_char) {
let arguments_str = unsafe { core::str::raw::c_str_to_static_slice(c_arguments) };
log!("Command callback: {}\n", arguments_str);
let mut parts_iter = arguments_str.split(' ');
let command = parts_iter.next().expect("No command type specified!");
let parts: collections::Vec<&str> = parts_iter.collect();
unsafe {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).handle_command(command, parts.as_slice());
}
}
}
#[no_mangle]
pub extern "C" fn rainstorm_init(log_fd: libc::c_int, hooked_init_trampoline: *const (), hooked_createmove_trampoline: *const (),
hooked_extramousesample_trampoline: *const (), hooked_runcommand_trampoline: *const ()) {
unsafe { let _ = logging::set_fd(log_fd).unwrap(); }
log!("Rainstorm starting up!\n");
cheats::cheatmgr_setup();
unsafe {
let mut ibaseclientdll_hooker = vmthook::VMTHooker::new((*cheats::CHEAT_MANAGER).get_gamepointers().ibaseclientdll.get_ptr().to_uint() as *mut *const ());
REAL_INIT = ibaseclientdll_hooker.get_orig_method(0);
REAL_CREATEMOVE = ibaseclientdll_hooker.get_orig_method(21);
REAL_EXTRAMOUSESAMPLE = ibaseclientdll_hooker.get_orig_method(22);
ibaseclientdll_hooker.hook(0, hooked_init_trampoline);
ibaseclientdll_hooker.hook(21, hooked_createmove_trampoline);
ibaseclientdll | }
| random_line_split |
rainstorm.rs | angle]
pub extern "C" fn rainstorm_getivengineclient() -> sdk::raw::IVEngineClientPtr {
unsafe { (*(cheats::CHEAT_MANAGER)).get_gamepointers().ivengineclient.get_ptr() }
}
pub struct GamePointers {
ivengineclient: sdk::IVEngineClient,
icliententitylist: sdk::IClientEntityList,
ibaseclientdll: sdk::IBaseClientDLL,
ienginetrace: sdk::IEngineTrace,
appsysfactory: Option<sdk::AppSysFactory>,
ivmodelinfo: sdk::IVModelInfo,
icvar: Option<sdk::ICvar>,
iuniformrandomstream: sdk::IUniformRandomStream,
globals: Option<*mut sdk::CGlobalVarsBase>
}
impl GamePointers {
pub fn load() -> GamePointers {
log!("Loading GamePointers...\n");
GamePointers {
ivengineclient: sdk::get_ivengineclient(),
ibaseclientdll: sdk::get_ibaseclientdll(),
icliententitylist: sdk::get_icliententitylist(),
ienginetrace: sdk::get_ienginetrace(),
ivmodelinfo: sdk::get_ivmodelinfo(),
appsysfactory: None,
icvar: None,
iuniformrandomstream: sdk::get_iuniformrandomstream(),
globals: None
}
}
}
pub unsafe fn locate_cinput() -> Option<*mut sdk::CInput> {
let start_addr = REAL_CREATEMOVE as *const ();
log!("Locating CInput from CreateMove at {}\n", start_addr);
let result = utils::search_memory(start_addr, 100, &[0x8B, 0x0D]);
//let result = utils::search_memory(((result1 as uint) + 2) as *const (), 100, &[0x8B, 0x0D]);
match result {
Some(ptr) => {
let load_instruction_operand = ((ptr as uint) + 2) as *const *const *mut sdk::CInput;
log!("CInput load found at {}\n", load_instruction_operand);
let cinput_ptr_ptr = *load_instruction_operand;
log!("CInput pointer: {}\n", cinput_ptr_ptr);
log!("CInput found at {}\n", *cinput_ptr_ptr);
Some((*cinput_ptr_ptr))
},
None => {
log!("CInput not found?!?\n");
None
}
}
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_preinithook(app_sys_factory: sdk::AppSysFactoryPtr, _physics_factory: *mut (), globals: *mut sdk::CGlobalVarsBase) {
log!("pre-init hook running\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).preinit(app_sys_factory, globals);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_postinithook() {
log!("Post-init hook running...\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).postinit();
} else {
log!("Cheat manager not found!\n");
libc::exit(1);
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_pre_createmove(sequence_number: *mut libc::c_int, input_sample_frametime: *mut libc::c_float, active: *mut bool) {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).pre_createmove(sequence_number, input_sample_frametime, active);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_process_usercmd(cmd: &mut sdk::CUserCmd) {
if cheats::CHEAT_MANAGER.is_not_null() {
maybe_hook_inetchannel((*cheats::CHEAT_MANAGER).get_gamepointers());
(*cheats::CHEAT_MANAGER).process_usercmd(cmd);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_extramousesample(input_sample_frametime: libc::c_float, active: bool) {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).extramousesample(input_sample_frametime, active);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub extern "C" fn rainstorm_command_cb(c_arguments: *const libc::c_char) {
let arguments_str = unsafe { core::str::raw::c_str_to_static_slice(c_arguments) };
log!("Command callback: {}\n", arguments_str);
let mut parts_iter = arguments_str.split(' ');
let command = parts_iter.next().expect("No command type specified!");
let parts: collections::Vec<&str> = parts_iter.collect();
unsafe {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).handle_command(command, parts.as_slice());
}
}
}
#[no_mangle]
pub extern "C" fn rainstorm_init(log_fd: libc::c_int, hooked_init_trampoline: *const (), hooked_createmove_trampoline: *const (),
hooked_extramousesample_trampoline: *const (), hooked_runcommand_trampoline: *const ()) {
unsafe { let _ = logging::set_fd(log_fd).unwrap(); }
log!("Rainstorm starting up!\n");
cheats::cheatmgr_setup();
unsafe {
let mut ibaseclientdll_hooker = vmthook::VMTHooker::new((*cheats::CHEAT_MANAGER).get_gamepointers().ibaseclientdll.get_ptr().to_uint() as *mut *const ());
REAL_INIT = ibaseclientdll_hooker.get_orig_method(0);
REAL_CREATEMOVE = ibaseclientdll_hooker.get_orig_method(21);
REAL_EXTRAMOUSESAMPLE = ibaseclientdll_hooker.get_orig_method(22);
ibaseclientdll_hooker.hook(0, hooked_init_trampoline);
ibaseclientdll_hooker.hook(21, hooked_createmove_trampoline);
ibaseclientdll_hooker.hook(22, hooked_extramousesample_trampoline);
// let mut ivengineclient_hooker = vmthook::VMTHooker::new((*cheats::CHEAT_MANAGER).get_gamepointers().ivengineclient.get_ptr().to_uint() as *mut *const ());
// REAL_SERVERCMDKEYVALUES = ivengineclient_hooker.get_orig_method(185);
// ivengineclient_hooker.hook(185, sdk::raw::get_hooked_servercmdkeyvalues());
CINPUT_PTR = locate_cinput().expect("Failed to locate CInput pointer (signature not found)");
let mut hooker = vmthook::VMTHooker::new(CINPUT_PTR as *mut *const ());
hooker.hook(8, sdk::get_hooked_getusercmd());
let mut iprediction_hooker = vmthook::VMTHooker::new(sdk::raw::getptr_iprediction().to_uint() as *mut *const ());
REAL_RUNCOMMAND = iprediction_hooker.get_orig_method(17);
iprediction_hooker.hook(17, sdk::raw::get_hooked_runcommand());
};
}
/// If we haven't seen this INetChannel before, hook it.
fn maybe_hook_inetchannel(ptrs: &GamePointers) {
static mut LAST_NETCHANNEL: Option<sdk::raw::INetChannelPtr> = None;
unsafe {
let inetchannel = sdk::raw::get_current_inetchannel(ptrs.ivengineclient.get_ptr());
//log!("chan: {}\n", inetchannel.to_uint());
let is_new_channel = match LAST_NETCHANNEL {
Some(last) => { inetchannel != last },
None => true
};
LAST_NETCHANNEL = Some(inetchannel);
if !is_new_channel {
//log!("Not patching old netchannel");
return;
}
let mut hooker = vmthook::VMTHooker::new(inetchannel.to_uint() as *mut *const ());
REAL_NETCHANNEL_SENDDATAGRAM = hooker.get_orig_method(46);
hooker.hook(46, ::sdk::raw::get_netchannel_senddatagram_trampoline().to_uint() as *const ());
log!("senddatagram: {}\n", hooker.get_orig_method(46));
};
}
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
#[lang = "begin_unwind"]
extern fn begin_unwind(fmt: &core::fmt::Arguments, file: &str, line: uint) -> ! | {
log!("Failed at line {} of {}!\n", line, file);
let _ = logging::log_fmt(fmt).ok(); // if we fail here, god help us
unsafe { libc::exit(42); }
} | identifier_body |
|
rainstorm.rs | _float;
pub fn frexpf(n: c_float, value: &mut c_int) -> c_float;
pub fn fmaxf(a: c_float, b: c_float) -> c_float;
pub fn fminf(a: c_float, b: c_float) -> c_float;
pub fn fmodf(a: c_float, b: c_float) -> c_float;
pub fn nextafterf(x: c_float, y: c_float) -> c_float;
pub fn hypotf(x: c_float, y: c_float) -> c_float;
pub fn ldexpf(x: c_float, n: c_int) -> c_float;
pub fn logbf(n: c_float) -> c_float;
pub fn log1pf(n: c_float) -> c_float;
pub fn ilogbf(n: c_float) -> c_int;
pub fn modff(n: c_float, iptr: &mut c_float) -> c_float;
pub fn sinhf(n: c_float) -> c_float;
pub fn tanf(n: c_float) -> c_float;
pub fn tanhf(n: c_float) -> c_float;
pub fn tgammaf(n: c_float) -> c_float;
/*#[cfg(unix)]
pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float;
#[cfg(windows)]
#[link_name="__lgammaf_r"]
pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float;*/
}
}
#[no_mangle]
pub static mut NOCMD_ENABLED: bool = false;
#[no_mangle]
pub static mut REAL_INIT: *const () = 0 as *const();
#[no_mangle]
pub static mut REAL_CREATEMOVE: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_EXTRAMOUSESAMPLE: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_RUNCOMMAND: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_SERVERCMDKEYVALUES: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_NETCHANNEL_SENDDATAGRAM: *const () = 0 as *const ();
#[no_mangle]
pub static mut CINPUT_PTR: *mut sdk::CInput = 0 as *mut sdk::CInput;
struct CString(*const libc::c_char);
impl CString {
pub fn new(src: &'static [u8]) -> Option<CString> {
let slice = src.repr();
if unsafe { *((slice.data as uint + (slice.len - 1)) as *const u8) == 0 } {
Some(CString(slice.data as *const libc::c_char))
} else {
None
}
}
pub unsafe fn new_raw(src: *const u8) -> CString {
CString(src as *const libc::c_char)
}
}
#[no_mangle]
pub extern "C" fn rainstorm_getivengineclient() -> sdk::raw::IVEngineClientPtr {
unsafe { (*(cheats::CHEAT_MANAGER)).get_gamepointers().ivengineclient.get_ptr() }
}
pub struct GamePointers {
ivengineclient: sdk::IVEngineClient,
icliententitylist: sdk::IClientEntityList,
ibaseclientdll: sdk::IBaseClientDLL,
ienginetrace: sdk::IEngineTrace,
appsysfactory: Option<sdk::AppSysFactory>,
ivmodelinfo: sdk::IVModelInfo,
icvar: Option<sdk::ICvar>,
iuniformrandomstream: sdk::IUniformRandomStream,
globals: Option<*mut sdk::CGlobalVarsBase>
}
impl GamePointers {
pub fn load() -> GamePointers {
log!("Loading GamePointers...\n");
GamePointers {
ivengineclient: sdk::get_ivengineclient(),
ibaseclientdll: sdk::get_ibaseclientdll(),
icliententitylist: sdk::get_icliententitylist(),
ienginetrace: sdk::get_ienginetrace(),
ivmodelinfo: sdk::get_ivmodelinfo(),
appsysfactory: None,
icvar: None,
iuniformrandomstream: sdk::get_iuniformrandomstream(),
globals: None
}
}
}
pub unsafe fn locate_cinput() -> Option<*mut sdk::CInput> {
let start_addr = REAL_CREATEMOVE as *const ();
log!("Locating CInput from CreateMove at {}\n", start_addr);
let result = utils::search_memory(start_addr, 100, &[0x8B, 0x0D]);
//let result = utils::search_memory(((result1 as uint) + 2) as *const (), 100, &[0x8B, 0x0D]);
match result {
Some(ptr) => {
let load_instruction_operand = ((ptr as uint) + 2) as *const *const *mut sdk::CInput;
log!("CInput load found at {}\n", load_instruction_operand);
let cinput_ptr_ptr = *load_instruction_operand;
log!("CInput pointer: {}\n", cinput_ptr_ptr);
log!("CInput found at {}\n", *cinput_ptr_ptr);
Some((*cinput_ptr_ptr))
},
None => {
log!("CInput not found?!?\n");
None
}
}
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_preinithook(app_sys_factory: sdk::AppSysFactoryPtr, _physics_factory: *mut (), globals: *mut sdk::CGlobalVarsBase) {
log!("pre-init hook running\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).preinit(app_sys_factory, globals);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_postinithook() {
log!("Post-init hook running...\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).postinit();
} else {
log!("Cheat manager not found!\n");
libc::exit(1);
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_pre_createmove(sequence_number: *mut libc::c_int, input_sample_frametime: *mut libc::c_float, active: *mut bool) {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).pre_createmove(sequence_number, input_sample_frametime, active);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_process_usercmd(cmd: &mut sdk::CUserCmd) {
if cheats::CHEAT_MANAGER.is_not_null() {
maybe_hook_inetchannel((*cheats::CHEAT_MANAGER).get_gamepointers());
(*cheats::CHEAT_MANAGER).process_usercmd(cmd);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_extramousesample(input_sample_frametime: libc::c_float, active: bool) {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).extramousesample(input_sample_frametime, active);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub extern "C" fn | (c_arguments: *const libc::c_char) {
let arguments_str = unsafe { core::str::raw::c_str_to_static_slice(c_arguments) };
log!("Command callback: {}\n", arguments_str);
let mut parts_iter = arguments_str.split(' ');
let command = parts_iter.next().expect("No command type specified!");
let parts: collections::Vec<&str> = parts_iter.collect();
unsafe {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).handle_command(command, parts.as_slice());
}
}
}
#[no_mangle]
pub extern "C" fn rainstorm_init(log_fd: libc::c_int, hooked_init_trampoline: *const (), hooked_createmove_trampoline: *const (),
hooked_extramousesample_trampoline: *const (), hooked_runcommand_trampoline: *const ()) {
unsafe { let _ = logging::set_fd(log_fd).unwrap(); }
log!("Rainstorm starting up!\n");
cheats::cheatmgr_setup();
unsafe {
let mut ibaseclientdll_hooker = vmthook::VMTHooker::new((*cheats::CHEAT_MANAGER).get_gamepointers().ibaseclientdll.get_ptr().to_uint() as *mut *const ());
REAL_INIT = ibaseclientdll_hooker.get_orig_method(0);
REAL_CREATEMOVE = ibaseclientdll_hooker.get_orig_method(21);
REAL_EXTRAMOUSESAMPLE = ibaseclientdll_hooker.get_orig_method(22);
ibaseclientdll_hooker.hook(0, hooked_init_trampoline);
ibaseclientdll_hooker.hook(21, hooked_createmove_trampoline);
ibaseclientdll | rainstorm_command_cb | identifier_name |
rainstorm.rs | _float;
pub fn frexpf(n: c_float, value: &mut c_int) -> c_float;
pub fn fmaxf(a: c_float, b: c_float) -> c_float;
pub fn fminf(a: c_float, b: c_float) -> c_float;
pub fn fmodf(a: c_float, b: c_float) -> c_float;
pub fn nextafterf(x: c_float, y: c_float) -> c_float;
pub fn hypotf(x: c_float, y: c_float) -> c_float;
pub fn ldexpf(x: c_float, n: c_int) -> c_float;
pub fn logbf(n: c_float) -> c_float;
pub fn log1pf(n: c_float) -> c_float;
pub fn ilogbf(n: c_float) -> c_int;
pub fn modff(n: c_float, iptr: &mut c_float) -> c_float;
pub fn sinhf(n: c_float) -> c_float;
pub fn tanf(n: c_float) -> c_float;
pub fn tanhf(n: c_float) -> c_float;
pub fn tgammaf(n: c_float) -> c_float;
/*#[cfg(unix)]
pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float;
#[cfg(windows)]
#[link_name="__lgammaf_r"]
pub fn lgammaf_r(n: c_float, sign: &mut c_int) -> c_float;*/
}
}
#[no_mangle]
pub static mut NOCMD_ENABLED: bool = false;
#[no_mangle]
pub static mut REAL_INIT: *const () = 0 as *const();
#[no_mangle]
pub static mut REAL_CREATEMOVE: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_EXTRAMOUSESAMPLE: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_RUNCOMMAND: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_SERVERCMDKEYVALUES: *const () = 0 as *const ();
#[no_mangle]
pub static mut REAL_NETCHANNEL_SENDDATAGRAM: *const () = 0 as *const ();
#[no_mangle]
pub static mut CINPUT_PTR: *mut sdk::CInput = 0 as *mut sdk::CInput;
struct CString(*const libc::c_char);
impl CString {
pub fn new(src: &'static [u8]) -> Option<CString> {
let slice = src.repr();
if unsafe { *((slice.data as uint + (slice.len - 1)) as *const u8) == 0 } {
Some(CString(slice.data as *const libc::c_char))
} else {
None
}
}
pub unsafe fn new_raw(src: *const u8) -> CString {
CString(src as *const libc::c_char)
}
}
#[no_mangle]
pub extern "C" fn rainstorm_getivengineclient() -> sdk::raw::IVEngineClientPtr {
unsafe { (*(cheats::CHEAT_MANAGER)).get_gamepointers().ivengineclient.get_ptr() }
}
pub struct GamePointers {
ivengineclient: sdk::IVEngineClient,
icliententitylist: sdk::IClientEntityList,
ibaseclientdll: sdk::IBaseClientDLL,
ienginetrace: sdk::IEngineTrace,
appsysfactory: Option<sdk::AppSysFactory>,
ivmodelinfo: sdk::IVModelInfo,
icvar: Option<sdk::ICvar>,
iuniformrandomstream: sdk::IUniformRandomStream,
globals: Option<*mut sdk::CGlobalVarsBase>
}
impl GamePointers {
pub fn load() -> GamePointers {
log!("Loading GamePointers...\n");
GamePointers {
ivengineclient: sdk::get_ivengineclient(),
ibaseclientdll: sdk::get_ibaseclientdll(),
icliententitylist: sdk::get_icliententitylist(),
ienginetrace: sdk::get_ienginetrace(),
ivmodelinfo: sdk::get_ivmodelinfo(),
appsysfactory: None,
icvar: None,
iuniformrandomstream: sdk::get_iuniformrandomstream(),
globals: None
}
}
}
pub unsafe fn locate_cinput() -> Option<*mut sdk::CInput> {
let start_addr = REAL_CREATEMOVE as *const ();
log!("Locating CInput from CreateMove at {}\n", start_addr);
let result = utils::search_memory(start_addr, 100, &[0x8B, 0x0D]);
//let result = utils::search_memory(((result1 as uint) + 2) as *const (), 100, &[0x8B, 0x0D]);
match result {
Some(ptr) => {
let load_instruction_operand = ((ptr as uint) + 2) as *const *const *mut sdk::CInput;
log!("CInput load found at {}\n", load_instruction_operand);
let cinput_ptr_ptr = *load_instruction_operand;
log!("CInput pointer: {}\n", cinput_ptr_ptr);
log!("CInput found at {}\n", *cinput_ptr_ptr);
Some((*cinput_ptr_ptr))
},
None => {
log!("CInput not found?!?\n");
None
}
}
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_preinithook(app_sys_factory: sdk::AppSysFactoryPtr, _physics_factory: *mut (), globals: *mut sdk::CGlobalVarsBase) {
log!("pre-init hook running\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).preinit(app_sys_factory, globals);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_postinithook() {
log!("Post-init hook running...\n");
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).postinit();
} else {
log!("Cheat manager not found!\n");
libc::exit(1);
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_pre_createmove(sequence_number: *mut libc::c_int, input_sample_frametime: *mut libc::c_float, active: *mut bool) {
if cheats::CHEAT_MANAGER.is_not_null() | else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_process_usercmd(cmd: &mut sdk::CUserCmd) {
if cheats::CHEAT_MANAGER.is_not_null() {
maybe_hook_inetchannel((*cheats::CHEAT_MANAGER).get_gamepointers());
(*cheats::CHEAT_MANAGER).process_usercmd(cmd);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub unsafe extern "C" fn rainstorm_extramousesample(input_sample_frametime: libc::c_float, active: bool) {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).extramousesample(input_sample_frametime, active);
} else {
quit!("Cheat manager not found!\n");
};
}
#[no_mangle]
pub extern "C" fn rainstorm_command_cb(c_arguments: *const libc::c_char) {
let arguments_str = unsafe { core::str::raw::c_str_to_static_slice(c_arguments) };
log!("Command callback: {}\n", arguments_str);
let mut parts_iter = arguments_str.split(' ');
let command = parts_iter.next().expect("No command type specified!");
let parts: collections::Vec<&str> = parts_iter.collect();
unsafe {
if cheats::CHEAT_MANAGER.is_not_null() {
(*cheats::CHEAT_MANAGER).handle_command(command, parts.as_slice());
}
}
}
#[no_mangle]
pub extern "C" fn rainstorm_init(log_fd: libc::c_int, hooked_init_trampoline: *const (), hooked_createmove_trampoline: *const (),
hooked_extramousesample_trampoline: *const (), hooked_runcommand_trampoline: *const ()) {
unsafe { let _ = logging::set_fd(log_fd).unwrap(); }
log!("Rainstorm starting up!\n");
cheats::cheatmgr_setup();
unsafe {
let mut ibaseclientdll_hooker = vmthook::VMTHooker::new((*cheats::CHEAT_MANAGER).get_gamepointers().ibaseclientdll.get_ptr().to_uint() as *mut *const ());
REAL_INIT = ibaseclientdll_hooker.get_orig_method(0);
REAL_CREATEMOVE = ibaseclientdll_hooker.get_orig_method(21);
REAL_EXTRAMOUSESAMPLE = ibaseclientdll_hooker.get_orig_method(22);
ibaseclientdll_hooker.hook(0, hooked_init_trampoline);
ibaseclientdll_hooker.hook(21, hooked_createmove_trampoline);
ibaseclientdll | {
(*cheats::CHEAT_MANAGER).pre_createmove(sequence_number, input_sample_frametime, active);
} | conditional_block |
ImageMap-dbg.js | >Properties
* <ul>
* <li>{@link #getName name} : string</li></ul>
* </li>
* <li>Aggregations
* <ul>
* <li>{@link #getAreas areas} : sap.ui.commons.Area[]</li></ul>
* </li>
* <li>Associations
* <ul></ul>
* </li>
* <li>Events
* <ul>
* <li>{@link sap.ui.commons.ImageMap#event:press press} : fnListenerFunction or [fnListenerFunction, oListenerObject] or [oData, fnListenerFunction, oListenerObject]</li></ul>
* </li>
* </ul>
*
* @param {string} [sId] id for the new control, generated automatically if no id is given
* @param {object} [mSettings] initial settings for the new control
*
* @class
* Combination of image areas where at runtime these areas are starting points for hyperlinks or actions
* @extends sap.ui.core.Control
*
* @author SAP AG
* @version 1.20.7
*
* @constructor
* @public
* @name sap.ui.commons.ImageMap
*/
sap.ui.core.Control.extend("sap.ui.commons.ImageMap", { metadata : {
// ---- object ----
publicMethods : [
// methods
"createArea"
],
// ---- control specific ----
library : "sap.ui.commons",
properties : {
"name" : {type : "string", group : "Misc", defaultValue : null}
},
aggregations : {
"areas" : {type : "sap.ui.commons.Area", multiple : true, singularName : "area"}
},
events : {
"press" : {}
}
}});
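/*
 * Illustrative usage sketch (not part of the original library source): constructing the
 * control described by the metadata above. The id, image name and Area settings are
 * made-up example values, and the Area properties (shape/coords/alt) are assumed to
 * follow the usual sap.ui.commons.Area API.
 */
function exampleImageMapUsage() {
	var oImageMap = new sap.ui.commons.ImageMap("exampleMap", {
		name: "exampleImageName", // referenced by the corresponding image's useMap setting
		areas: [
			new sap.ui.commons.Area({ shape: "rect", coords: "1,1,100,100", alt: "Example area" })
		]
	});
	return oImageMap;
}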
/**
* Creates a new subclass of class sap.ui.commons.ImageMap with name <code>sClassName</code>
* and enriches it with the information contained in <code>oClassInfo</code>.
*
 * <code>oClassInfo</code> might contain the same kind of information as described in {@link sap.ui.core.Element.extend Element.extend}.
*
* @param {string} sClassName name of the class to be created
 * @param {object} [oClassInfo] object literal with information about the class
* @param {function} [FNMetaImpl] constructor function for the metadata object. If not given, it defaults to sap.ui.core.ElementMetadata.
* @return {function} the created class / constructor function
* @public
* @static
* @name sap.ui.commons.ImageMap.extend
* @function
*/
sap.ui.commons.ImageMap.M_EVENTS = {'press':'press'};
/**
* Getter for property <code>name</code>.
* Name for the image that serves as reference
*
* Default value is empty/<code>undefined</code>
*
* @return {string} the value of property <code>name</code>
* @public
* @name sap.ui.commons.ImageMap#getName
* @function
*/
/**
* Setter for property <code>name</code>.
*
* Default value is empty/<code>undefined</code>
*
* @param {string} sName new value for property <code>name</code>
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#setName
* @function
*/
/**
* Getter for aggregation <code>areas</code>.<br/>
* Area representing the reference to the target location
*
* @return {sap.ui.commons.Area[]}
* @public
* @name sap.ui.commons.ImageMap#getAreas
* @function
*/
/**
 * Inserts an area into the aggregation named <code>areas</code>.
*
* @param {sap.ui.commons.Area}
* oArea the area to insert; if empty, nothing is inserted
* @param {int}
* iIndex the <code>0</code>-based index the area should be inserted at; for
* a negative value of <code>iIndex</code>, the area is inserted at position 0; for a value
* greater than the current size of the aggregation, the area is inserted at
* the last position
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#insertArea
* @function
*/
/**
* Adds some area <code>oArea</code>
* to the aggregation named <code>areas</code>.
*
* @param {sap.ui.commons.Area}
* oArea the area to add; if empty, nothing is inserted
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#addArea
* @function
*/
/**
* Removes an area from the aggregation named <code>areas</code>.
*
* @param {int | string | sap.ui.commons.Area} vArea the area to remove or its index or id
* @return {sap.ui.commons.Area} the removed area or null
* @public
* @name sap.ui.commons.ImageMap#removeArea
* @function
*/
/**
* Removes all the controls in the aggregation named <code>areas</code>.<br/>
* Additionally unregisters them from the hosting UIArea.
* @return {sap.ui.commons.Area[]} an array of the removed elements (might be empty)
* @public
* @name sap.ui.commons.ImageMap#removeAllAreas
* @function
*/
/**
* Checks for the provided <code>sap.ui.commons.Area</code> in the aggregation named <code>areas</code>
* and returns its index if found or -1 otherwise.
*
* @param {sap.ui.commons.Area}
* oArea the area whose index is looked for.
* @return {int} the index of the provided control in the aggregation if found, or -1 otherwise
* @public
* @name sap.ui.commons.ImageMap#indexOfArea
* @function
*/
/**
* Destroys all the areas in the aggregation
* named <code>areas</code>.
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#destroyAreas
* @function
*/
/**
* Event for the areas that can be clicked in an ImageMap
*
* @name sap.ui.commons.ImageMap#press
* @event
* @param {sap.ui.base.Event} oControlEvent
* @param {sap.ui.base.EventProvider} oControlEvent.getSource
* @param {object} oControlEvent.getParameters
* @param {string} oControlEvent.getParameters.areaId Id of clicked Area.
* @public
*/
/**
* Attach event handler <code>fnFunction</code> to the 'press' event of this <code>sap.ui.commons.ImageMap</code>.<br/>.
 * When called, the context of the event handler (its <code>this</code>) will be bound to <code>oListener</code> if specified
* otherwise to this <code>sap.ui.commons.ImageMap</code>.<br/> itself.
*
* Event for the areas that can be clicked in an ImageMap
*
* @param {object}
* [oData] An application specific payload object, that will be passed to the event handler along with the event object when firing the event.
* @param {function}
* fnFunction The function to call, when the event occurs.
* @param {object}
* [oListener] Context object to call the event handler with. Defaults to this <code>sap.ui.commons.ImageMap</code>.<br/> itself.
*
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#attachPress
* @function
*/
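/*
 * Illustrative sketch (not part of the original generated documentation): attaching a press
 * handler and reading the documented 'areaId' parameter. 'oImageMap' is assumed to be an
 * existing sap.ui.commons.ImageMap instance.
 */
function exampleAttachPress(oImageMap) {
	oImageMap.attachPress(function(oEvent) {
		var sAreaId = oEvent.getParameter("areaId"); // id of the clicked Area, as documented above
		jQuery.sap.log.info("Area pressed: " + sAreaId);
	});
}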
/**
* Detach event handler <code>fnFunction</code> from the 'press' event of this <code>sap.ui.commons.ImageMap</code>.<br/>
*
* The passed function and listener object must match the ones used for event registration.
*
* @param {function}
* fnFunction The function to call, when the event occurs.
* @param {object}
* oListener Context object on which the given function had to be called.
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#detachPress
* @function
*/
/**
* Fire event press to attached listeners.
*
* Expects following event parameters:
* <ul>
* <li>'areaId' of type <code>string</code> Id of clicked Area.</li>
* </ul>
*
* @param {Map} [mArguments] the arguments to pass along with the event.
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @protected
* @name sap.ui.commons.ImageMap#firePress
* @function
*/
/**
* Adds an area to the ImageMap
*
* @name sap.ui.commons.ImageMap.prototype.createArea
* @function
* @param {string[]}
* aArea
*
| * @public
*/
// Start of sap\ui\commons\ImageMap.js
jQuery.sap.require("sap.ui.core.delegate.ItemNavigation");
/**
* Adds areas to the Image Map. Each argument must be either a | * @type void | random_line_split |
ImageMap-dbg.js | param {string} [sId] id for the new control, generated automatically if no id is given
* @param {object} [mSettings] initial settings for the new control
*
* @class
* Combination of image areas where at runtime these areas are starting points for hyperlinks or actions
* @extends sap.ui.core.Control
*
* @author SAP AG
* @version 1.20.7
*
* @constructor
* @public
* @name sap.ui.commons.ImageMap
*/
sap.ui.core.Control.extend("sap.ui.commons.ImageMap", { metadata : {
// ---- object ----
publicMethods : [
// methods
"createArea"
],
// ---- control specific ----
library : "sap.ui.commons",
properties : {
"name" : {type : "string", group : "Misc", defaultValue : null}
},
aggregations : {
"areas" : {type : "sap.ui.commons.Area", multiple : true, singularName : "area"}
},
events : {
"press" : {}
}
}});
/**
* Creates a new subclass of class sap.ui.commons.ImageMap with name <code>sClassName</code>
* and enriches it with the information contained in <code>oClassInfo</code>.
*
 * <code>oClassInfo</code> might contain the same kind of information as described in {@link sap.ui.core.Element.extend Element.extend}.
*
* @param {string} sClassName name of the class to be created
 * @param {object} [oClassInfo] object literal with information about the class
* @param {function} [FNMetaImpl] constructor function for the metadata object. If not given, it defaults to sap.ui.core.ElementMetadata.
* @return {function} the created class / constructor function
* @public
* @static
* @name sap.ui.commons.ImageMap.extend
* @function
*/
sap.ui.commons.ImageMap.M_EVENTS = {'press':'press'};
/**
* Getter for property <code>name</code>.
* Name for the image that serves as reference
*
* Default value is empty/<code>undefined</code>
*
* @return {string} the value of property <code>name</code>
* @public
* @name sap.ui.commons.ImageMap#getName
* @function
*/
/**
* Setter for property <code>name</code>.
*
* Default value is empty/<code>undefined</code>
*
* @param {string} sName new value for property <code>name</code>
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#setName
* @function
*/
/**
* Getter for aggregation <code>areas</code>.<br/>
* Area representing the reference to the target location
*
* @return {sap.ui.commons.Area[]}
* @public
* @name sap.ui.commons.ImageMap#getAreas
* @function
*/
/**
 * Inserts an area into the aggregation named <code>areas</code>.
*
* @param {sap.ui.commons.Area}
* oArea the area to insert; if empty, nothing is inserted
* @param {int}
* iIndex the <code>0</code>-based index the area should be inserted at; for
* a negative value of <code>iIndex</code>, the area is inserted at position 0; for a value
* greater than the current size of the aggregation, the area is inserted at
* the last position
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#insertArea
* @function
*/
/**
* Adds some area <code>oArea</code>
* to the aggregation named <code>areas</code>.
*
* @param {sap.ui.commons.Area}
* oArea the area to add; if empty, nothing is inserted
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#addArea
* @function
*/
/**
* Removes an area from the aggregation named <code>areas</code>.
*
* @param {int | string | sap.ui.commons.Area} vArea the area to remove or its index or id
* @return {sap.ui.commons.Area} the removed area or null
* @public
* @name sap.ui.commons.ImageMap#removeArea
* @function
*/
/**
* Removes all the controls in the aggregation named <code>areas</code>.<br/>
* Additionally unregisters them from the hosting UIArea.
* @return {sap.ui.commons.Area[]} an array of the removed elements (might be empty)
* @public
* @name sap.ui.commons.ImageMap#removeAllAreas
* @function
*/
/**
* Checks for the provided <code>sap.ui.commons.Area</code> in the aggregation named <code>areas</code>
* and returns its index if found or -1 otherwise.
*
* @param {sap.ui.commons.Area}
* oArea the area whose index is looked for.
* @return {int} the index of the provided control in the aggregation if found, or -1 otherwise
* @public
* @name sap.ui.commons.ImageMap#indexOfArea
* @function
*/
/**
* Destroys all the areas in the aggregation
* named <code>areas</code>.
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#destroyAreas
* @function
*/
/**
* Event for the areas that can be clicked in an ImageMap
*
* @name sap.ui.commons.ImageMap#press
* @event
* @param {sap.ui.base.Event} oControlEvent
* @param {sap.ui.base.EventProvider} oControlEvent.getSource
* @param {object} oControlEvent.getParameters
* @param {string} oControlEvent.getParameters.areaId Id of clicked Area.
* @public
*/
/**
* Attach event handler <code>fnFunction</code> to the 'press' event of this <code>sap.ui.commons.ImageMap</code>.<br/>.
 * When called, the context of the event handler (its <code>this</code>) will be bound to <code>oListener</code> if specified
* otherwise to this <code>sap.ui.commons.ImageMap</code>.<br/> itself.
*
* Event for the areas that can be clicked in an ImageMap
*
* @param {object}
* [oData] An application specific payload object, that will be passed to the event handler along with the event object when firing the event.
* @param {function}
* fnFunction The function to call, when the event occurs.
* @param {object}
* [oListener] Context object to call the event handler with. Defaults to this <code>sap.ui.commons.ImageMap</code>.<br/> itself.
*
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#attachPress
* @function
*/
/**
* Detach event handler <code>fnFunction</code> from the 'press' event of this <code>sap.ui.commons.ImageMap</code>.<br/>
*
* The passed function and listener object must match the ones used for event registration.
*
* @param {function}
* fnFunction The function to call, when the event occurs.
* @param {object}
* oListener Context object on which the given function had to be called.
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @public
* @name sap.ui.commons.ImageMap#detachPress
* @function
*/
/**
* Fire event press to attached listeners.
*
* Expects following event parameters:
* <ul>
* <li>'areaId' of type <code>string</code> Id of clicked Area.</li>
* </ul>
*
* @param {Map} [mArguments] the arguments to pass along with the event.
* @return {sap.ui.commons.ImageMap} <code>this</code> to allow method chaining
* @protected
* @name sap.ui.commons.ImageMap#firePress
* @function
*/
/**
* Adds an area to the ImageMap
*
* @name sap.ui.commons.ImageMap.prototype.createArea
* @function
* @param {string[]}
* aArea
*
* @type void
* @public
*/
// Start of sap\ui\commons\ImageMap.js
jQuery.sap.require("sap.ui.core.delegate.ItemNavigation");
/**
 * Adds areas to the Image Map. Each argument must be either a JSON object or a
* list of objects or the area element or elements.
*
* @param {sap.ui.commons.Area|string} Area to add
* @return {sap.ui.commons.Area} <code>this</code> to allow method chaining
* @public
*/
sap.ui.commons.ImageMap.prototype.createArea = function() {
var oArea = new sap.ui.commons.Area();
for ( var i = 0; i < arguments.length; i++) {
var oContent = arguments[i];
var oArea;
if (oContent instanceof sap.ui.commons.Area) {
oArea = oContent;
} else | {
oArea = new sap.ui.commons.Area(oContent);
} | conditional_block |
|
app-engine.js | ': '2950 Camozzi Rd',
'cityState': 'Revelstoke, BC',
'display_phone': '250 814 0087',
'url': 'www.revelstokemountainresort.com/',
'location': {
'coordinate': {
'latitude': 50.9583028,
'longitude': -118.1637752,
},
'display_address': ['2950 Camozzi Rd'],
}
}];
/**
* --------------------------------------------------------------------
* placeCard is a data object used to display results in the view
* --------------------------------------------------------------------
**/
var placeCard = function(data) {
var that = this;
this.name = ko.observable(data.name);
this.id = ko.observable(data.id);
this.idSelector = ko.computed(function() {
return "#" + data.id;
});
this.description = ko.observable(data.snippet_text);
this.imgSrc = ko.computed(function() {
return data.image_url.replace('ms.jpg', 'l.jpg');
});
this.imgAltTag = ko.computed(function() {
return 'Photo of ' + data.name;
});
this.address1 = ko.observable(data.location.display_address[0]);
this.city = ko.observable(data.location.city);
this.state = ko.observable(data.location.state_code);
this.zip = ko.observable(data.location.postal_code);
this.address2 = ko.computed(function() {
return that.city() + ", " + that.state() + " " + that.zip();
});
this.phone = ko.observable(data.display_phone);
this.webURL = ko.observable(data.url);
this.location = {
coordinate: {
latitude: data.location.coordinate.latitude,
longitude: data.location.coordinate.longitude,
},
address: data.location.display_address[0] +
'<br>' + data.location.display_address[data.location.display_address.length - 1]
};
this.review = {
img: data.snippet_image_url,
txt: data.snippet_text
};
this.stars = {
count: ko.observable(data.rating),
standard: ko.observable(data.rating_img_url),
large: ko.observable(data.rating_img_url_large),
small: ko.observable(data.rating_img_url_small)
};
this.marker = {
title: data.name,
phone: data.display_phone,
imgSrc: data.image_url,
description: data.snippet_text,
lat: data.location.coordinate.latitude,
lng: data.location.coordinate.longitude,
idSelector: "#" + data.id,
stars: data.rating_img_url
};
this.googleDirections = ko.computed(function() {
return "//google.com/maps?q=" + data.location.display_address[0] + '+' + data.location.city + '+' + data.location.state_code;
});
this.facebookShare = ko.computed(function() {
return "//www.facebook.com/sharer/sharer.php?u=" + data.url;
});
this.twitterShare = ko.computed(function() {
return "//twitter.com/intent/tweet?text=OMG " + data.name + " is an awesome spot for " + searchFor() + " in " + searchNear() + "&url=" + data.url + ";via=dangerdan";
});
};
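/*
 * Illustrative sketch (not part of the original app): the minimal shape of a Yelp v2
 * business object that placeCard reads. Every value below is a made-up placeholder;
 * real objects arrive in the Yelp response handled by makeYelpList.
 */
function examplePlaceCard() {
    var sampleBusiness = {
        'name': 'Sample Pizzeria',
        'id': 'sample-pizzeria-denver',
        'snippet_text': 'Great slices and a friendly patio.',
        'snippet_image_url': 'http://example.com/review_ms.jpg',
        'image_url': 'http://example.com/photo_ms.jpg',
        'display_phone': '+1-303-555-0100',
        'url': 'http://www.yelp.com/biz/sample-pizzeria-denver',
        'rating': 4.5,
        'rating_img_url': 'http://example.com/stars.png',
        'rating_img_url_large': 'http://example.com/stars_large.png',
        'rating_img_url_small': 'http://example.com/stars_small.png',
        'location': {
            'display_address': ['123 Example St', 'Denver, CO 80210'],
            'city': 'Denver',
            'state_code': 'CO',
            'postal_code': '80210',
            'coordinate': { 'latitude': 39.7392, 'longitude': -104.9903 }
        }
    };
    return new placeCard(sampleBusiness);
}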
/* --- resultList is the placeCards' holder --- */
var resultList = ko.observableArray([]);
var originalList = ko.observableArray([]);
/*
* -----------------------------------------------------------------
* easily recognized function that performs ajax request to yelp
* -----------------------------------------------------------------
*/
function updateYelpResults() {
yelpAjax(searchFor(), searchNear()); // get all the needed info
}
/** Hide search results
*
*/
function hideYelpResults() {
$('.yelp-search-results').toggleClass('hidden');
}
/*
* -------------------------------------------------------------------------
* ViewModel, binding and DOM input elements in the form of observables
* -------------------------------------------------------------------------
*/
var searchFor = ko.observable("Pizza"); // form Yelp Search Form with prepopulated placeholder
var searchNear = ko.observable("80210"); // form Yelp Search Form with prepopulated placeholder
var filterField = ko.observable();
/*
* -------------------------------------------------------------------------
* The filter and functions
* -------------------------------------------------------------------------
*/
function filterInputField() {
// ensure empty lists
nameList = []; // for names
filteredList = []; // for matches
filterField(filterField().toLowerCase()); // force the case on the search
for (var card in resultList()) {
nameList.push({
'index': card, // store index
'name': resultList()[card].name().toLowerCase(), // grab name as string
'description': resultList()[card].description().toLowerCase() // grabs description as string
});
}
for (var name in nameList) {
if (nameList[name].name.includes(filterField()) || nameList[name].description.includes(filterField())) { // if a name or description contains the search variable...
filteredList.push(resultList()[nameList[name].index]); // put it in filtered List
}
}
if (filteredList.length >= 1) { // if something in filtered List
resultList(filteredList); // put that on the board + map
prepMap();
} else { // otherwise
/* ------ Throw error message ------ */
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
clearAllMarkers(); // clears marker array
/* --- Display the error message + Beacon --- */
resultList.push(new placeCard(noMatchFilterResponse[0]));
/* --- clean up the view --- */
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
};
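/*
 * Illustrative sketch (not part of the original app): the filter field is normally bound
 * to a text input via Knockout, but the same observable can be driven programmatically.
 */
function exampleFilterUsage() {
    filterField('pizza');  // set the observable that filterInputField() reads
    filterInputField();    // keep only cards whose name or description contains 'pizza'
}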
var ViewModel = function() {
var self = this;
};
/** ---------- filter functions --------------------- **/
function prepMap() {
clearAllMarkers(); // empty current markers
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
function flipCards() {
resultList(resultList().reverse());
prepMap();
}
function sortABC() {
resultList(resultList().sort(function(left, right) {
return left.name() == right.name() ? 0 : (left.name() < right.name() ? -1 : 1);
}));
prepMap();
}
function sortStars() {
resultList(resultList().sort(function(left, right) {
return left.stars.count() == right.stars.count() ? 0 : (left.stars.count() < right.stars.count() ? -1 : 1);
}));
resultList(resultList().reverse());
prepMap();
}
function resetList() {
resultList(originalList());
prepMap();
}
ko.applyBindings(new ViewModel());
/*
* ----------------------------------------------------
* The following functions handle requests to Yelp
* ----------------------------------------------------
*/
function yelpAjax(searchFor, searchNear) {
/*
* Keys and other tokens needed to access the Yelp API via OAuth
* In a non-Udacious scenario this would have to be moved
* to a server side script and therefore actually be "secret"
*/
var auth = {
consumerKey: "2M-JWI9l8UBCt3vm0R6vZg",
consumerSecret: "2TIm_ve4y6unTQR2D1HGnWTjFOM",
accessToken: "p44DAD9S6MecSv66hmrdR3qdJZhVkg7o",
accessTokenSecret: "rhnGNKjrDKMLZT0aRET8qIA-aWQ",
serviceProvider: {
signatureMethod: "HMAC-SHA1" // found here https://www.yelp.com/developers/documentation/v2/authentication
}
};
/*
* Grab the "secret" part of the auth keys and put them in an object
* that will then be passed on to the coming OAuth.SignatureMethod
*/
var accessor = {
consumerSecret: auth.consumerSecret,
tokenSecret: auth.accessTokenSecret
};
/*
 * Create an array of parameters to hand off to the message object that follows
* This helps keep things more bite-sized...
*/
var parameters = [
['term', searchFor],
['location', searchNear],
['callback', 'cb'],
['sort', 2], // '2' sorts results by rating
['limit', 15], // limits results to top 15
['oauth_consumer_key', auth.consumerKey],
['oauth_consumer_secret', auth.consumerSecret],
['oauth_token', auth.accessToken],
['oauth_signature_method', auth.serviceProvider.signatureMethod]
];
/*
 * This message object is to be fired to Yelp as part of the
* OAuth.setTimestampAndNonce TODO: someday make this server-side
*/
var message = {
'action': 'http://api.yelp.com/v2/search',
'method': 'GET',
'parameters': parameters
};
/*
 * Virtually sign and send things as part of OAuth JS Magic
*/
OAuth.setTimestampAndNonce(message);
OAuth.SignatureMethod.sign(message, accessor);
var parameterMap = OAuth.getParameterMap(message.parameters);
yJax(message.action, parameterMap);
}
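/*
 * Illustrative sketch (not part of the original app): yelpAjax only needs a search term
 * and a location string; both values below are made-up examples.
 */
function exampleYelpSearch() {
    yelpAjax('Sushi', 'Denver, CO'); // signs the request and renders results via makeYelpList
}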
/*
* Ajax OAuth method GETs data from Yelp API
*/
function | yJax | identifier_name |
|
app-engine.js | --- Display the error message + Beacon --- */
errorResponses.forEach(function(place) {
resultList.push(new placeCard(place));
});
/* --- clean up the view --- */
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
/*
* -------------------------
 * Initial Call to Yelp
* -------------------------
*/
yelpAjax(searchFor(), searchNear()); // onload initalize with starting Yelp Results
/*
* -------------------------------------------------------------------------
* This section handles requests to Google Maps and the related markers
* -------------------------------------------------------------------------
*/
/* --- google map keys --- */
var googleMapsAPIKey = 'AIzaSyClMls0bXZ3jgznlsLiP0ZgRrzSgUGFMbU';
var googleMapsgeocodeKey = 'AIzaSyBEXHFmzvonWnDvII96o0Zx8Z--i64lArA';
/* --- var to track data set --- */
var currentMarkers = [];
/* --- clear currentMarkers set --- */
function clearAllMarkers() {
currentMarkers = [];
}
/*
* ---------------------------------------------------------------
* build the map and place markers, listeners and triggers
* ---------------------------------------------------------------
*/
var map;
function initMap() {
// Create a map object and specify the DOM element for display.
map = new google.maps.Map(document.getElementById('map'), {
center: {
lat: response[0].location.coordinate.latitude - mapShift.right,
lng: response[0].location.coordinate.longitude - mapShift.up
},
scrollwheel: false,
zoom: 12,
mapTypeControl: false
});
// set markers with placeholder copy
setMarkers(map, resultList());
infowindow = new google.maps.InfoWindow({
content: "loading..."
});
reformatOnSize(); // for map
$('#map').css('position: absolute');
forceTop();
}
/* --- define icons used for markers --- */
var markerIcon = {
active: {
path: "M10,0.5c2.7-0.1,6.6,1.8,7.1,7c0.4,5.2-7.1,11.6-7.1,11.6l0,0c0,0-7.5-6.4-7.1-11.6C3.4,2.3,7.2,0.5,10,0.5",
fillColor: '#FFF',
fillOpacity: 0.8,
strokeWeight: 4,
strokeColor: '#0BA',
scale: 2.5,
},
resting: {
path: "M10,0.5c2.7-0.1,6.6,1.8,7.1,7c0.4,5.2-7.1,11.6-7.1,11.6l0,0c0,0-7.5-6.4-7.1-11.6C3.4,2.3,7.2,0.5,10,0.5",
fillColor: '#EE8060',
fillOpacity: 0.6,
strokeWeight: 4,
strokeColor: '#fff',
scale: 2.5,
}
};
/* --- define mapShift to ensure the markers are ----
--- staged on the right side of the view window --- */
var mapShift = {
right: 0.08,
up: 0.04
};
/*
* -----------------------------------------------------------------------------
* Loop through the markers, place them on the map with needed functionality
* ------------------------------------------------------------------------------
*/
function setMarkers(map, points) {
/* --- function needed for cleaning up infowindows --- */
function hideInfoWindowCloseControl() {
// $(".gm-style-iw").next("div").css('display', 'none'); // this function gets rid of close btn in infowindows
// udacity doesn't like it for this project so having an x is fine
}
/* --- function gives all markers resting icon and a base layering --- */
function resetMarkerIcons() {
for (var i = 0; i < currentMarkers.length; i++) {
currentMarkers[i].setIcon(markerIcon.resting);
currentMarkers[i].setZIndex(4);
currentMarkers[i].setAnimation(null); // turn BOUNCE Animation off
}
}
/* --- loop through placeCards and extract marker-related pieces --- */
for (var point in points) {
var place = points[point];
var siteLatLng = new google.maps.LatLng(place.location.coordinate.latitude, place.location.coordinate.longitude);
var marker = new google.maps.Marker({
position: siteLatLng,
map: map,
clickable: true,
animation: google.maps.Animation.DROP, // TODO change to something else?
icon: markerIcon.resting,
title: place.marker.title,
phone: place.marker.phone,
imgSrc: place.marker.imgSrc,
description: place.marker.description,
lat: place.location.coordinate.latitude,
lng: place.location.coordinate.longitude,
idSelector: place.marker.idSelector,
stars: place.marker.stars,
windowContent: '<div class="infowindow-title">' + place.marker.title + '</div><br/><img style="max-width: 96px; height: auto" src="' + place.marker.stars + '"></img>',
});
/* --- push marker to currentMarkers --- */
currentMarkers.push(marker);
/*
* -------------------------------------------------------
* event listeners for markers that set up infowindows
* -------------------------------------------------------
*/
/* --- click --- */
google.maps.event.addListener(marker, "click", function(event) {
resetMarkerIcons(); // put other markers back to resting state
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
this.setAnimation(google.maps.Animation.BOUNCE); // bounce on click
hideInfoWindowCloseControl(); // hide infoWindow close control
map.panTo({
lat: (this.lat - mapShift.up),
lng: (this.lng - mapShift.right)
}); // center map to marker with shift for search
$('html, body').animate({
scrollTop: $(this.idSelector).offset().top - (20 + scrollAdjustment)
}, 100); // scroll to active placeCard in the DOM
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
/* --- mouseover --- */
google.maps.event.addListener(marker, "mouseover", function(event) {
resetMarkerIcons(); // put other markers back to resting state
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
hideInfoWindowCloseControl(); // hide infoWindow close control
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
/* --- doubleclick (used for DOM scroll trigger) --- */
google.maps.event.addListener(marker, "dblclick", function(event) {
resetMarkerIcons(); // put other markers back to resting state
map.panTo({
lat: (this.lat - mapShift.up),
lng: (this.lng - mapShift.right)
}); // center map to marker with shift for search
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
hideInfoWindowCloseControl(); // hide infoWindow close control
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
}
forceTop(); // scroll DOM back to top
}
/* ------------------------------------------------------------------------
* Section monitors the DOM scrolling and trigger events when appropriate
* ------------------------------------------------------------------------
*/
/* --- pull trigger for a specific marker --- */
function OpenInfowindowForMarker(index) {
google.maps.event.trigger(currentMarkers[index], 'dblclick');
}
function openMarker(index) {
OpenInfowindowForMarker(index);
}
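/*
 * Illustrative sketch (not part of the original app): infowindows can be opened by result
 * index, e.g. when a card elsewhere in the UI is activated.
 */
function exampleOpenThirdMarker() {
    openMarker(2); // opens the infowindow for the third result card on the map
}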
/* --- compare window scroll count to offsets of each placeCard and ---
--- trigger the appropriate marker as the card passes through view --- */
var scrollAdjustment = 0; // zero on standard desktop view
function scrollingTriggersMarkers() {
$(window).scroll(function() { // as user scrolls
var pixelsScrolled = $(window).scrollTop() + scrollAdjustment; // store distance scrolled
for (var resultCard in resultList()) { // for each placeCard
var resultOffset = $(resultList()[resultCard].idSelector()).offset().top; // store the offset of the card
if (resultOffset - pixelsScrolled < 60 && resultOffset - pixelsScrolled > -60) | { // check if two distances are close
OpenInfowindowForMarker(resultCard); // open Infowindow for placeCard being viewed in DOM
} | conditional_block |
|
app-engine.js | img: data.snippet_image_url,
txt: data.snippet_text
};
this.stars = {
count: ko.observable(data.rating),
standard: ko.observable(data.rating_img_url),
large: ko.observable(data.rating_img_url_large),
small: ko.observable(data.rating_img_url_small)
};
this.marker = {
title: data.name,
phone: data.display_phone,
imgSrc: data.image_url,
description: data.snippet_text,
lat: data.location.coordinate.latitude,
lng: data.location.coordinate.longitude,
idSelector: "#" + data.id,
stars: data.rating_img_url
};
this.googleDirections = ko.computed(function() {
return "//google.com/maps?q=" + data.location.display_address[0] + '+' + data.location.city + '+' + data.location.state_code;
});
this.facebookShare = ko.computed(function() {
return "//www.facebook.com/sharer/sharer.php?u=" + data.url;
});
this.twitterShare = ko.computed(function() {
return "//twitter.com/intent/tweet?text=OMG " + data.name + " is an awesome spot for " + searchFor() + " in " + searchNear() + "&url=" + data.url + ";via=dangerdan";
});
};
/* --- resultList is the placeCards' holder --- */
var resultList = ko.observableArray([]);
var originalList = ko.observableArray([]);
/*
* -----------------------------------------------------------------
* easily recognized function that performs ajax request to yelp
* -----------------------------------------------------------------
*/
function updateYelpResults() {
yelpAjax(searchFor(), searchNear()); // get all the needed info
}
/** Hide search results
*
*/
function hideYelpResults() {
$('.yelp-search-results').toggleClass('hidden');
}
/*
* -------------------------------------------------------------------------
* ViewModel, binding and DOM input elements in the form of observables
* -------------------------------------------------------------------------
*/
var searchFor = ko.observable("Pizza"); // form Yelp Search Form with prepopulated placeholder
var searchNear = ko.observable("80210"); // form Yelp Search Form with prepopulated placeholder
var filterField = ko.observable();
/*
* -------------------------------------------------------------------------
* The filter and functions
* -------------------------------------------------------------------------
*/
function filterInputField() {
// ensure empty lists
nameList = []; // for names
filteredList = []; // for matches
filterField(filterField().toLowerCase()); // force the case on the search
for (var card in resultList()) {
nameList.push({
'index': card, // store index
'name': resultList()[card].name().toLowerCase(), // grab name as string
'description': resultList()[card].description().toLowerCase() // grabs description as string
});
}
for (var name in nameList) {
if (nameList[name].name.includes(filterField()) || nameList[name].description.includes(filterField())) { // if a name or description contains the search variable...
filteredList.push(resultList()[nameList[name].index]); // put it in filtered List
}
}
if (filteredList.length >= 1) { // if something in filtered List
resultList(filteredList); // put that on the board + map
prepMap();
} else { // otherwise
/* ------ Throw error message ------ */
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
clearAllMarkers(); // clears marker array
/* --- Display the error message + Beacon --- */
resultList.push(new placeCard(noMatchFilterResponse[0]));
/* --- clean up the view --- */
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
};
var ViewModel = function() {
var self = this;
};
/** ---------- filter functions --------------------- **/
function prepMap() {
clearAllMarkers(); // empty current markers
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
function flipCards() {
resultList(resultList().reverse());
prepMap();
}
function sortABC() {
resultList(resultList().sort(function(left, right) {
return left.name() == right.name() ? 0 : (left.name() < right.name() ? -1 : 1);
}));
prepMap();
}
function sortStars() {
resultList(resultList().sort(function(left, right) {
return left.stars.count() == right.stars.count() ? 0 : (left.stars.count() < right.stars.count() ? -1 : 1);
}));
resultList(resultList().reverse());
prepMap();
}
function resetList() {
resultList(originalList());
prepMap();
}
ko.applyBindings(new ViewModel());
/*
* ----------------------------------------------------
* The following functions handle requests to Yelp
* ----------------------------------------------------
*/
function yelpAjax(searchFor, searchNear) {
/*
* Keys and other tokens needed to access the Yelp API via OAuth
* In a non-Udacious scenario this would have to be moved
* to a server side script and therefore actually be "secret"
*/
var auth = {
consumerKey: "2M-JWI9l8UBCt3vm0R6vZg",
consumerSecret: "2TIm_ve4y6unTQR2D1HGnWTjFOM",
accessToken: "p44DAD9S6MecSv66hmrdR3qdJZhVkg7o",
accessTokenSecret: "rhnGNKjrDKMLZT0aRET8qIA-aWQ",
serviceProvider: {
signatureMethod: "HMAC-SHA1" // found here https://www.yelp.com/developers/documentation/v2/authentication
}
};
/*
* Grab the "secret" part of the auth keys and put them in an object
* that will then be passed on to the coming OAuth.SignatureMethod
*/
var accessor = {
consumerSecret: auth.consumerSecret,
tokenSecret: auth.accessTokenSecret
};
/*
* Create an array of parameters to handoff to message object that follows
* This helps keep things more bite-sized...
*/
var parameters = [
['term', searchFor],
['location', searchNear],
['callback', 'cb'],
['sort', 2], // '2' sorts results by rating
['limit', 15], // limits results to top 15
['oauth_consumer_key', auth.consumerKey],
['oauth_consumer_secret', auth.consumerSecret],
['oauth_token', auth.accessToken],
['oauth_signature_method', auth.serviceProvider.signatureMethod]
];
/*
* This message object is to be fired to Yelp as part of the
* OAuth.setTimestampAndNonce TODO: someday make this server-side
*/
var message = {
'action': 'http://api.yelp.com/v2/search',
'method': 'GET',
'parameters': parameters
};
/*
* Virtually sign and send things as part of OAuth JS Magic
*/
OAuth.setTimestampAndNonce(message);
OAuth.SignatureMethod.sign(message, accessor);
var parameterMap = OAuth.getParameterMap(message.parameters);
yJax(message.action, parameterMap);
}
/*
* Ajax OAuth method GETs data from Yelp API
*/
function yJax(url, yData) {
$.ajax({
'timeout': 3000,
'type': 'GET',
'url': url,
'data': yData,
'dataType': 'jsonp',
'global': true,
'cache': true,
'jsonpCallback': 'cb',
'success': function(data) {
makeYelpList(data);
},
'error': function() {
makeErrorList();
alert("oh no! the yelp request failed. Please try again later.");
},
});
}
/*
* --------------------------------------------------------
* Changes out the resultList with a new yelp results
* --------------------------------------------------------
*/
function makeYelpList(d) {
response = d.businesses; // push ajax response to the global var 'response'
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
originalList.removeAll();
clearAllMarkers(); // clears marker array
/* --- Display the search results --- */
response.forEach(function(place) { // place cards into observables
resultList.push(new placeCard(place));
originalList.push(new placeCard(place));
});
scrollingTriggersMarkers(); // activate scroll position monitor triggers
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
}
function makeErrorList() | {
/* --- Clean up the old lists --- */
resultList.removeAll(); // empty the resultList
clearAllMarkers(); // clears marker array
/* --- Display the error message + Beacon --- */
errorResponses.forEach(function(place) {
resultList.push(new placeCard(place));
});
/* --- clean up the view --- */
initMap(); // refresh and reconstruct map
OpenInfowindowForMarker(0); // open first infoWindow
forceTop(); // ensure DOM is scrolled to top
} | identifier_body |
|
app-engine.js | mzvonWnDvII96o0Zx8Z--i64lArA';
/* --- var to track data set --- */
var currentMarkers = [];
/* --- clear currentMarkers set --- */
function clearAllMarkers() {
currentMarkers = [];
}
/*
* ---------------------------------------------------------------
* build the map and place markers, listeners and triggers
* ---------------------------------------------------------------
*/
var map;
function initMap() {
// Create a map object and specify the DOM element for display.
map = new google.maps.Map(document.getElementById('map'), {
center: {
lat: response[0].location.coordinate.latitude - mapShift.right,
lng: response[0].location.coordinate.longitude - mapShift.up
},
scrollwheel: false,
zoom: 12,
mapTypeControl: false
});
// set markers with placeholding copy
setMarkers(map, resultList());
infowindow = new google.maps.InfoWindow({
content: "loading..."
});
reformatOnSize(); // for map
$('#map').css('position: absolute');
forceTop();
}
/* --- define icons used for markers --- */
var markerIcon = {
active: {
path: "M10,0.5c2.7-0.1,6.6,1.8,7.1,7c0.4,5.2-7.1,11.6-7.1,11.6l0,0c0,0-7.5-6.4-7.1-11.6C3.4,2.3,7.2,0.5,10,0.5",
fillColor: '#FFF',
fillOpacity: 0.8,
strokeWeight: 4,
strokeColor: '#0BA',
scale: 2.5,
},
resting: {
path: "M10,0.5c2.7-0.1,6.6,1.8,7.1,7c0.4,5.2-7.1,11.6-7.1,11.6l0,0c0,0-7.5-6.4-7.1-11.6C3.4,2.3,7.2,0.5,10,0.5",
fillColor: '#EE8060',
fillOpacity: 0.6,
strokeWeight: 4,
strokeColor: '#fff',
scale: 2.5,
}
};
/* --- define mapShift to ensure the markers are ----
--- staged on the right side of the view window --- */
var mapShift = {
right: 0.08,
up: 0.04
};
/*
* -----------------------------------------------------------------------------
* Loop through the markers, place them on the map with needed functionality
* ------------------------------------------------------------------------------
*/
function setMarkers(map, points) {
/* --- function needed for cleaning up infowindows --- */
function hideInfoWindowCloseControl() {
// $(".gm-style-iw").next("div").css('display', 'none'); // this function gets rid of close btn in infowindows
// udacity doesn't like it for this project so having an x is fine
}
/* --- function gives all markers resting icon and a base layering --- */
function resetMarkerIcons() {
for (var i = 0; i < currentMarkers.length; i++) {
currentMarkers[i].setIcon(markerIcon.resting);
currentMarkers[i].setZIndex(4);
currentMarkers[i].setAnimation(null); // turn BOUNCE Animation off
}
}
/* --- loop through placeCards and extract marker-related pieces --- */
for (var point in points) {
var place = points[point];
var siteLatLng = new google.maps.LatLng(place.location.coordinate.latitude, place.location.coordinate.longitude);
var marker = new google.maps.Marker({
position: siteLatLng,
map: map,
clickable: true,
animation: google.maps.Animation.DROP, // TODO change to something else?
icon: markerIcon.resting,
title: place.marker.title,
phone: place.marker.phone,
imgSrc: place.marker.imgSrc,
description: place.marker.description,
lat: place.location.coordinate.latitude,
lng: place.location.coordinate.longitude,
idSelector: place.marker.idSelector,
stars: place.marker.stars,
windowContent: '<div class="infowindow-title">' + place.marker.title + '</div><br/><img style="max-width: 96px; height: auto" src="' + place.marker.stars + '"></img>',
});
/* --- push marker to currentMarkers --- */
currentMarkers.push(marker);
/*
* -------------------------------------------------------
* event listeners for markers that set up infowindows
* -------------------------------------------------------
*/
/* --- click --- */
google.maps.event.addListener(marker, "click", function(event) {
resetMarkerIcons(); // put other markers back to resting state
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
this.setAnimation(google.maps.Animation.BOUNCE); // bounce on click
hideInfoWindowCloseControl(); // hide infoWindow close control
map.panTo({
lat: (this.lat - mapShift.up),
lng: (this.lng - mapShift.right)
}); // center map to marker with shift for search
$('html, body').animate({
scrollTop: $(this.idSelector).offset().top - (20 + scrollAdjustment)
}, 100); // scroll to active placeCard in the DOM
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
/* --- mouseover --- */
google.maps.event.addListener(marker, "mouseover", function(event) {
resetMarkerIcons(); // put other markers back to resting state
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
hideInfoWindowCloseControl(); // hide infoWindow close control
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
/* --- doubleclick (used for DOM scroll trigger) --- */
google.maps.event.addListener(marker, "dblclick", function(event) {
resetMarkerIcons(); // put other markers back to resting state
map.panTo({
lat: (this.lat - mapShift.up),
lng: (this.lng - mapShift.right)
}); // center map to marker with shift for search
infowindow.setContent(this.windowContent); // set infowindow Content
infowindow.open(map, this); // open the infowindow
hideInfoWindowCloseControl(); // hide infoWindow close control
this.setIcon(markerIcon.active); // change icon to active
this.setZIndex(5); // bring marker to top layer
});
}
forceTop(); // scroll DOM back to top
}
/* ------------------------------------------------------------------------
* Section monitors the DOM scrolling and trigger events when appropriate
* ------------------------------------------------------------------------
*/
/* --- pull trigger for a specific marker --- */
function OpenInfowindowForMarker(index) {
google.maps.event.trigger(currentMarkers[index], 'dblclick');
}
function openMarker(index) {
OpenInfowindowForMarker(index);
}
/* --- compare window scroll count to offsets of each placeCard and ---
--- trigger the appropriate marker as the card passes through view --- */
var scrollAdjustment = 0; // zero on standard desktop view
function scrollingTriggersMarkers() {
$(window).scroll(function() { // as user scrolls
var pixelsScrolled = $(window).scrollTop() + scrollAdjustment; // store distance scrolled
for (var resultCard in resultList()) { // for each placeCard
var resultOffset = $(resultList()[resultCard].idSelector()).offset().top; // store the offset of the card
if (resultOffset - pixelsScrolled < 60 && resultOffset - pixelsScrolled > -60) { // check if two distances are close
OpenInfowindowForMarker(resultCard); // open Infowindow for placeCard being viewed in DOM
}
}
});
}
/** ----------------------------------------------------------------------------
* Handles changing mapShift vars in responsive manner using matchMedia
* ----------------------------------------------------------------------------
*/
function reformatOnSize() {
if (window.matchMedia("(min-width: 680px)").matches) { // for "big" screen
mapShift = {
right: 0.08,
up: 0.04
};
scrollAdjustment = 0;
map.setZoom(12);
$('#map').removeClass("fixed");
} else if (window.matchMedia("(orientation: portrait)").matches) { // small screen portrait
mapShift = {
right: -0.01,
up: 0.01
};
scrollAdjustment = 260;
map.setZoom(11);
}
else { // small screen landscape
mapShift = {
right: 0.09, | up: 0
}; | random_line_split |
|
aml.py |
def fullLink(child, parent):
linkNodes(child, parent)
linkNodes(parent.dual, child.dual)
def deleteNode(node, layers, layer, dlayer):
for parent in node.parents:
parent.removeChild(node)
parent.dual.removeParent(node)
for child in node.children:
child.removeParent(node)
child.dual.removeChild(node)
layers[layer].discard(node)
if dlayer:
layers[dlayer].discard(node.dual)
for child in node.children:
for parent in node.parents:
fullLink(child, parent)
return layers
def getLower(node):
lowerset = set([node])
for child in node.children:
lowerset = lowerset.union(getLower(child))
return lowerset
def getConstrainedLower(node, layer):
return getLower(node).intersection(layer)
def getUpper(node):
upperset = set()
for parent in node.parents:
upperset = upperset.union(getLower(parent))
return upperset
def getConstrainedUpper(node, layer):
return getUpper(node).intersection(layer)
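# Trace of a node: the dual atoms that lie below the dual of every atom under it.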
def getTrace(node, layers):
atoms = getConstrainedLower(node, layers["atoms"])
trace = layers["dualAtoms"]
for atom in atoms:
trace = trace.intersection(getLower(atom.dual))
return trace
def traceConstraint(a, b, layers):
return getTrace(b, layers).issubset(getTrace(a, layers))
def findStronglyDiscriminantCoefficient(a, b, graph):
Omega = set(map(lambda c: c.dual, getConstrainedLower(a, graph["constants"])))
U = getTrace(b, layers)
while U:
zeta = U.pop()
T = Omega.difference(getUpper(zeta))
if T:
c = T.pop()
return c.dual
return None
# Algorithm 1
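# Breaks the containment of each negative term's trace inside the trace of $ by adding discriminating atoms; fresh dual atoms (zeta) are introduced when no discriminant constant can be found.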
def enforceNegativeTraceConstraints(graph):
# Preprocessing Step
count = 1
for neg in graph["nterms"]:
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, neg.dual)
graph["dualAtoms"].add(zeta)
count += 1
# Main Algorithm
for neg in graph["nterms"]:
if not traceConstraint(graph["$"], neg, graph):
continue
c = None
while not c:
c = findStronglyDiscriminantCoefficient(graph["$"], neg, graph)
if not c:
h = getConstrainedLower(neg.dual, graph["terms*"]).difference(getLower(graph["$"].dual)).pop()
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, h.dual)
graph["dualAtoms"].add(zeta)
count += 1
phi = Node(name="phi")
dphi = Node(name="[phi]")
linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
# Algorithm 2
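# Grows the model until every positive term's trace is contained in the trace of $, either by linking the offending dual atom under $ or by adding a new atom under one of the positive term's constants.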
def enforcePositiveTraceContraints(graph):
phiCount = 1
for pos in graph["pterms"]:
while not traceConstraint(graph["$"], pos, graph):
zeta = getTrace(pos, graph).difference(getTrace(graph["$"], graph)).pop()
Gamma = set()
for c in getConstrainedLower(pos, graph["constants"]):
if zeta not in getLower(c.dual):
Gamma.add(c)
if not Gamma:
linkNodes(zeta, graph["$"])
else: | linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
return
# Algorithm 3
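# Crosses the atoms of a (typically $) into those of a positive term b: each atom of a not below b is paired with atoms of b through new shared atoms (psi); atoms left with children are deleted at the end.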
def sparseCrossing(a, b, graph, count):
psiCount = 1
A = getConstrainedLower(a, graph["atoms"]).difference(getLower(b))
U = set()
for phi in A:
U = set()
B = getConstrainedLower(b, graph["atoms"])
Delta = graph["dualAtoms"].difference(getLower(phi.dual))
flag = True
while Delta or flag:
epsilon = B.pop()
DeltaP = Delta.intersection(getLower(epsilon.dual))
if not Delta or (not Delta.issubset(DeltaP) or not DeltaP.issubset(Delta)):
psi = Node(name="psi" + str(psiCount))
dpsi = Node(name="[psi"+ str(psiCount)+"]")
linkDuals(psi, dpsi)
fullLink(psi, phi)
fullLink(psi, epsilon)
graph["atoms"].add(psi)
graph["atoms*"].add(dpsi)
Delta = DeltaP
U.add(epsilon)
psiCount += 1
flag = False
ecount = 1
for epsilon in U:
epsilonp = Node("epsilon'"+str(ecount))
depsilonp = Node("[epsilon'"+str(ecount)+"]")
linkDuals(epsilonp, depsilonp)
fullLink(epsilonp, epsilon)
graph["atoms"].add(epsilonp)
graph["atoms*"].add(depsilonp)
ecount += 1
#for node in A.union(U):
# deleteNode(atom, graph, "atoms", "atoms*")
Joe = set()
for atom in graph["atoms"]:
if atom.children:
Joe.add(atom)
for atom in Joe:
deleteNode(atom, graph, "atoms", "atoms*")
return graph
def cross(graph):
count = 1
for pos in graph["pterms"]:
sparseCrossing(graph["$"], pos, graph, count)
count += 1
return
# Algorithm 4
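# Keeps only a subset Q of atoms sufficient to reproduce every constant's trace (plus the base atom) and deletes the rest.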
def reduceAtoms(graph):
Q = set()
Lambda = graph["constants"]
while Lambda:
c = Lambda.pop()
Sc = Q.intersection(getLower(c))
Wc = graph["dualAtoms"]
if Sc:
for phi in Sc:
Wc = Wc.intersection(getConstrainedLower(phi, graph["dualAtoms"]))
Phic = set([x.dual for x in getConstrainedLower(c, graph["atoms"])])
T = getTrace(c, graph)
count = 0
while (not T.issubset(Wc) or not Wc.issubset(T)) and Wc:
eta = Wc.difference(T).pop()
temp = Phic.difference(getUpper(eta))
phi = list(temp)[count%len(temp)].dual
count += 1
Q.add(phi)
Wc = Wc.intersection(getConstrainedLower(phi.dual, graph["dualAtoms"]))
for atom in graph["atoms"].difference(Q.union([graph["Base"]])):
deleteNode(atom, graph, "atoms", "atoms*")
# Observe Atoms
def obeserveAtoms(graph):
importantAtoms = graph["$"].children
importantAtoms.discard(graph["Base"])
for atom in importantAtoms:
pset = atom.parents
print(" or ".join(map(lambda x: x.name, pset.difference(set([graph["$"]])))))
def verifyTraceConstraints(graph):
for neg in graph["nterms"]:
if traceConstraint(graph["$"], neg, graph):
return False
for pos in graph["pterms"]:
if not traceConstraint(graph["$"], pos, graph):
return False
return True
def readData(file):
with open(file, 'r') as data:
labels = list(map(lambda x: x.strip(), data.readline().split(",")))
records = []
for line in data:
records.append(list(map(lambda x: x.strip(),line.split(","))))
return labels, records
def classify(dataList, labels, graph):
consts = {}
for const in graph["constants"]:
consts[const.name] = const.children
atoms = set()
for i in range(1, len(dataList)):
attr = labels[i] + "_" + dataList[i]
if attr in consts:
atoms = atoms.union(consts[attr])
return graph["$"].children.issubset(atoms)
def createGraph(labels, records):
consts = {}
layers = {"pterms":set(), "nterms":set(), "constants":set(),
"atoms":set(), "terms*":set(), "constants*":set(),
"atoms*":set(), "dualAtoms":set(), "$":None, "Base":None}
consts["$"] = Node(name="$")
cdual = Node(name="[$]")
linkDuals(cdual, consts["$"])
zero = Node(name="0")
zdual = Node(name="[0]")
linkDuals(zero, zdual)
dualBase = Node(name="0*")
for record in records:
term = Node(name=record[0])
dualTerm = Node(name="["+record[0]+"]")
linkDuals(term, dualTerm)
for i in range(1, len(record)-1):
name = labels[i] + "_" + record[i]
if name in consts:
fullLink(consts[name | c = Gamma.pop()
phi = Node(name="phi"+str(phiCount))
dphi = Node(name="[phi"+str(phiCount)+"]")
phiCount += 1 | random_line_split |
aml.py | def fullLink(child, parent):
linkNodes(child, parent)
linkNodes(parent.dual, child.dual)
def deleteNode(node, layers, layer, dlayer):
for parent in node.parents:
parent.removeChild(node)
parent.dual.removeParent(node)
for child in node.children:
child.removeParent(node)
child.dual.removeChild(node)
layers[layer].discard(node)
if dlayer:
layers[dlayer].discard(node.dual)
for child in node.children:
|
return layers
def getLower(node):
lowerset = set([node])
for child in node.children:
lowerset = lowerset.union(getLower(child))
return lowerset
def getConstrainedLower(node, layer):
return getLower(node).intersection(layer)
def getUpper(node):
upperset = set()
for parent in node.parents:
upperset = upperset.union(getLower(parent))
return upperset
def getConstrainedUpper(node, layer):
return getUpper(node).intersection(layer)
def getTrace(node, layers):
atoms = getConstrainedLower(node, layers["atoms"])
trace = layers["dualAtoms"]
for atom in atoms:
trace = trace.intersection(getLower(atom.dual))
return trace
def traceConstraint(a, b, layers):
return getTrace(b, layers).issubset(getTrace(a, layers))
def findStronglyDiscriminantCoefficient(a, b, graph):
Omega = set(map(lambda c: c.dual, getConstrainedLower(a, graph["constants"])))
U = getTrace(b, layers)
while U:
zeta = U.pop()
T = Omega.difference(getUpper(zeta))
if T:
c = T.pop()
return c.dual
return None
# Algorithm 1
def enforceNegativeTraceConstraints(graph):
# Preprocessing Step
count = 1
for neg in graph["nterms"]:
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, neg.dual)
graph["dualAtoms"].add(zeta)
count += 1
# Main Algorithm
for neg in graph["nterms"]:
if not traceConstraint(graph["$"], neg, graph):
continue
c = None
while not c:
c = findStronglyDiscriminantCoefficient(graph["$"], neg, graph)
if not c:
h = getConstrainedLower(neg.dual, graph["terms*"]).difference(getLower(graph["$"].dual)).pop()
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, h.dual)
graph["dualAtoms"].add(zeta)
count += 1
phi = Node(name="phi")
dphi = Node(name="[phi]")
linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
# Algorithm 2
def enforcePositiveTraceContraints(graph):
phiCount = 1
for pos in graph["pterms"]:
while not traceConstraint(graph["$"], pos, graph):
zeta = getTrace(pos, graph).difference(getTrace(graph["$"], graph)).pop()
Gamma = set()
for c in getConstrainedLower(pos, graph["constants"]):
if zeta not in getLower(c.dual):
Gamma.add(c)
if not Gamma:
linkNodes(zeta, graph["$"])
else:
c = Gamma.pop()
phi = Node(name="phi"+str(phiCount))
dphi = Node(name="[phi"+str(phiCount)+"]")
phiCount += 1
linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
return
# Algorithm 3
def sparseCrossing(a, b, graph, count):
psiCount = 1
A = getConstrainedLower(a, graph["atoms"]).difference(getLower(b))
U = set()
for phi in A:
U = set()
B = getConstrainedLower(b, graph["atoms"])
Delta = graph["dualAtoms"].difference(getLower(phi.dual))
flag = True
while Delta or flag:
epsilon = B.pop()
DeltaP = Delta.intersection(getLower(epsilon.dual))
if not Delta or (not Delta.issubset(DeltaP) or not DeltaP.issubset(Delta)):
psi = Node(name="psi" + str(psiCount))
dpsi = Node(name="[psi"+ str(psiCount)+"]")
linkDuals(psi, dpsi)
fullLink(psi, phi)
fullLink(psi, epsilon)
graph["atoms"].add(psi)
graph["atoms*"].add(dpsi)
Delta = DeltaP
U.add(epsilon)
psiCount += 1
flag = False
ecount = 1
for epsilon in U:
epsilonp = Node("epsilon'"+str(ecount))
depsilonp = Node("[epsilon'"+str(ecount)+"]")
linkDuals(epsilonp, depsilonp)
fullLink(epsilonp, epsilon)
graph["atoms"].add(epsilonp)
graph["atoms*"].add(depsilonp)
ecount += 1
#for node in A.union(U):
# deleteNode(atom, graph, "atoms", "atoms*")
Joe = set()
for atom in graph["atoms"]:
if atom.children:
Joe.add(atom)
for atom in Joe:
deleteNode(atom, graph, "atoms", "atoms*")
return graph
def cross(graph):
count = 1
for pos in graph["pterms"]:
sparseCrossing(graph["$"], pos, graph, count)
count += 1
return
# Algorithm 4
def reduceAtoms(graph):
Q = set()
Lambda = graph["constants"]
while Lambda:
c = Lambda.pop()
Sc = Q.intersection(getLower(c))
Wc = graph["dualAtoms"]
if Sc:
for phi in Sc:
Wc = Wc.intersection(getConstrainedLower(phi, graph["dualAtoms"]))
Phic = set([x.dual for x in getConstrainedLower(c, graph["atoms"])])
T = getTrace(c, graph)
count = 0
while (not T.issubset(Wc) or not Wc.issubset(T)) and Wc:
eta = Wc.difference(T).pop()
temp = Phic.difference(getUpper(eta))
phi = list(temp)[count%len(temp)].dual
count += 1
Q.add(phi)
Wc = Wc.intersection(getConstrainedLower(phi.dual, graph["dualAtoms"]))
for atom in graph["atoms"].difference(Q.union([graph["Base"]])):
deleteNode(atom, graph, "atoms", "atoms*")
# Observe Atoms
def obeserveAtoms(graph):
importantAtoms = graph["$"].children
importantAtoms.discard(graph["Base"])
for atom in importantAtoms:
pset = atom.parents
print(" or ".join(map(lambda x: x.name, pset.difference(set([graph["$"]])))))
def verifyTraceConstraints(graph):
for neg in graph["nterms"]:
if traceConstraint(graph["$"], neg, graph):
return False
for pos in graph["pterms"]:
if not traceConstraint(graph["$"], pos, graph):
return False
return True
def readData(file):
with open(file, 'r') as data:
labels = list(map(lambda x: x.strip(), data.readline().split(",")))
records = []
for line in data:
records.append(list(map(lambda x: x.strip(),line.split(","))))
return labels, records
def classify(dataList, labels, graph):
consts = {}
for const in graph["constants"]:
consts[const.name] = const.children
atoms = set()
for i in range(1, len(dataList)):
attr = labels[i] + "_" + dataList[i]
if attr in consts:
atoms = atoms.union(consts[attr])
return graph["$"].children.issubset(atoms)
def createGraph(labels, records):
consts = {}
layers = {"pterms":set(), "nterms":set(), "constants":set(),
"atoms":set(), "terms*":set(), "constants*":set(),
"atoms*":set(), "dualAtoms":set(), "$":None, "Base":None}
consts["$"] = Node(name="$")
cdual = Node(name="[$]")
linkDuals(cdual, consts["$"])
zero = Node(name="0")
zdual = Node(name="[0]")
linkDuals(zero, zdual)
dualBase = Node(name="0*")
for record in records:
term = Node(name=record[0])
dualTerm = Node(name="["+record[0]+"]")
linkDuals(term, dualTerm)
for i in range(1, len(record)-1):
name = labels[i] + "_" + record[i]
if name in consts:
fullLink(consts[name | for parent in node.parents:
fullLink(child, parent) | conditional_block |
aml.py | (node, dual):
node.dual = dual
dual.dual = node
def fullLink(child, parent):
linkNodes(child, parent)
linkNodes(parent.dual, child.dual)
def deleteNode(node, layers, layer, dlayer):
for parent in node.parents:
parent.removeChild(node)
parent.dual.removeParent(node)
for child in node.children:
child.removeParent(node)
child.dual.removeChild(node)
layers[layer].discard(node)
if dlayer:
layers[dlayer].discard(node.dual)
for child in node.children:
for parent in node.parents:
fullLink(child, parent)
return layers
def getLower(node):
lowerset = set([node])
for child in node.children:
lowerset = lowerset.union(getLower(child))
return lowerset
def getConstrainedLower(node, layer):
return getLower(node).intersection(layer)
def getUpper(node):
upperset = set()
for parent in node.parents:
upperset = upperset.union(getLower(parent))
return upperset
def getConstrainedUpper(node, layer):
return getUpper(node).intersection(layer)
def getTrace(node, layers):
atoms = getConstrainedLower(node, layers["atoms"])
trace = layers["dualAtoms"]
for atom in atoms:
trace = trace.intersection(getLower(atom.dual))
return trace
def traceConstraint(a, b, layers):
return getTrace(b, layers).issubset(getTrace(a, layers))
def findStronglyDiscriminantCoefficient(a, b, graph):
Omega = set(map(lambda c: c.dual, getConstrainedLower(a, graph["constants"])))
U = getTrace(b, layers)
while U:
zeta = U.pop()
T = Omega.difference(getUpper(zeta))
if T:
c = T.pop()
return c.dual
return None
# Algorithm 1
def enforceNegativeTraceConstraints(graph):
# Preprocessing Step
count = 1
for neg in graph["nterms"]:
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, neg.dual)
graph["dualAtoms"].add(zeta)
count += 1
# Main Algorithm
for neg in graph["nterms"]:
if not traceConstraint(graph["$"], neg, graph):
continue
c = None
while not c:
c = findStronglyDiscriminantCoefficient(graph["$"], neg, graph)
if not c:
h = getConstrainedLower(neg.dual, graph["terms*"]).difference(getLower(graph["$"].dual)).pop()
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, h.dual)
graph["dualAtoms"].add(zeta)
count += 1
phi = Node(name="phi")
dphi = Node(name="[phi]")
linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
# Algorithm 2
def enforcePositiveTraceContraints(graph):
phiCount = 1
for pos in graph["pterms"]:
while not traceConstraint(graph["$"], pos, graph):
zeta = getTrace(pos, graph).difference(getTrace(graph["$"], graph)).pop()
Gamma = set()
for c in getConstrainedLower(pos, graph["constants"]):
if zeta not in getLower(c.dual):
Gamma.add(c)
if not Gamma:
linkNodes(zeta, graph["$"])
else:
c = Gamma.pop()
phi = Node(name="phi"+str(phiCount))
dphi = Node(name="[phi"+str(phiCount)+"]")
phiCount += 1
linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
return
# Algorithm 3
def sparseCrossing(a, b, graph, count):
psiCount = 1
A = getConstrainedLower(a, graph["atoms"]).difference(getLower(b))
U = set()
for phi in A:
U = set()
B = getConstrainedLower(b, graph["atoms"])
Delta = graph["dualAtoms"].difference(getLower(phi.dual))
flag = True
while Delta or flag:
epsilon = B.pop()
DeltaP = Delta.intersection(getLower(epsilon.dual))
if not Delta or (not Delta.issubset(DeltaP) or not DeltaP.issubset(Delta)):
psi = Node(name="psi" + str(psiCount))
dpsi = Node(name="[psi"+ str(psiCount)+"]")
linkDuals(psi, dpsi)
fullLink(psi, phi)
fullLink(psi, epsilon)
graph["atoms"].add(psi)
graph["atoms*"].add(dpsi)
Delta = DeltaP
U.add(epsilon)
psiCount += 1
flag = False
ecount = 1
for epsilon in U:
epsilonp = Node("epsilon'"+str(ecount))
depsilonp = Node("[epsilon'"+str(ecount)+"]")
linkDuals(epsilonp, depsilonp)
fullLink(epsilonp, epsilon)
graph["atoms"].add(epsilonp)
graph["atoms*"].add(depsilonp)
ecount += 1
#for node in A.union(U):
# deleteNode(atom, graph, "atoms", "atoms*")
Joe = set()
for atom in graph["atoms"]:
if atom.children:
Joe.add(atom)
for atom in Joe:
deleteNode(atom, graph, "atoms", "atoms*")
return graph
def cross(graph):
count = 1
for pos in graph["pterms"]:
sparseCrossing(graph["$"], pos, graph, count)
count += 1
return
# Algorithm 4
def reduceAtoms(graph):
Q = set()
Lambda = graph["constants"]
while Lambda:
c = Lambda.pop()
Sc = Q.intersection(getLower(c))
Wc = graph["dualAtoms"]
if Sc:
for phi in Sc:
Wc = Wc.intersection(getConstrainedLower(phi, graph["dualAtoms"]))
Phic = set([x.dual for x in getConstrainedLower(c, graph["atoms"])])
T = getTrace(c, graph)
count = 0
while (not T.issubset(Wc) or not Wc.issubset(T)) and Wc:
eta = Wc.difference(T).pop()
temp = Phic.difference(getUpper(eta))
phi = list(temp)[count%len(temp)].dual
count += 1
Q.add(phi)
Wc = Wc.intersection(getConstrainedLower(phi.dual, graph["dualAtoms"]))
for atom in graph["atoms"].difference(Q.union([graph["Base"]])):
deleteNode(atom, graph, "atoms", "atoms*")
# Observe Atoms
def obeserveAtoms(graph):
importantAtoms = graph["$"].children
importantAtoms.discard(graph["Base"])
for atom in importantAtoms:
pset = atom.parents
print(" or ".join(map(lambda x: x.name, pset.difference(set([graph["$"]])))))
def verifyTraceConstraints(graph):
for neg in graph["nterms"]:
if traceConstraint(graph["$"], neg, graph):
return False
for pos in graph["pterms"]:
if not traceConstraint(graph["$"], pos, graph):
return False
return True
def readData(file):
with open(file, 'r') as data:
labels = list(map(lambda x: x.strip(), data.readline().split(",")))
records = []
for line in data:
records.append(list(map(lambda x: x.strip(),line.split(","))))
return labels, records
def classify(dataList, labels, graph):
consts = {}
for const in graph["constants"]:
consts[const.name] = const.children
atoms = set()
for i in range(1, len(dataList)):
attr = labels[i] + "_" + dataList[i]
if attr in consts:
atoms = atoms.union(consts[attr])
return graph["$"].children.issubset(atoms)
def createGraph(labels, records):
consts = {}
layers = {"pterms":set(), "nterms":set(), "constants":set(),
"atoms":set(), "terms*":set(), "constants*":set(),
"atoms*":set(), "dualAtoms":set(), "$":None, "Base":None}
consts["$"] = Node(name="$")
cdual = Node(name="[$]")
linkDuals(cdual, consts["$"])
zero = Node(name="0")
zdual = Node(name="[0]")
linkDuals(zero, zdual)
dualBase = Node(name="0*")
for record in records:
term = Node(name=record[0])
dualTerm = Node(name="["+record[0]+"]")
linkDuals(term, dualTerm)
for i in range(1, len(record)-1):
name = | linkDuals | identifier_name |
|
aml.py | def fullLink(child, parent):
linkNodes(child, parent)
linkNodes(parent.dual, child.dual)
def deleteNode(node, layers, layer, dlayer):
for parent in node.parents:
parent.removeChild(node)
parent.dual.removeParent(node)
for child in node.children:
child.removeParent(node)
child.dual.removeChild(node)
layers[layer].discard(node)
if dlayer:
layers[dlayer].discard(node.dual)
for child in node.children:
for parent in node.parents:
fullLink(child, parent)
return layers
def getLower(node):
lowerset = set([node])
for child in node.children:
lowerset = lowerset.union(getLower(child))
return lowerset
def getConstrainedLower(node, layer):
return getLower(node).intersection(layer)
def getUpper(node):
upperset = set()
for parent in node.parents:
upperset = upperset.union(getLower(parent))
return upperset
def getConstrainedUpper(node, layer):
return getUpper(node).intersection(layer)
def getTrace(node, layers):
atoms = getConstrainedLower(node, layers["atoms"])
trace = layers["dualAtoms"]
for atom in atoms:
trace = trace.intersection(getLower(atom.dual))
return trace
def traceConstraint(a, b, layers):
return getTrace(b, layers).issubset(getTrace(a, layers))
def findStronglyDiscriminantCoefficient(a, b, graph):
Omega = set(map(lambda c: c.dual, getConstrainedLower(a, graph["constants"])))
U = getTrace(b, layers)
while U:
zeta = U.pop()
T = Omega.difference(getUpper(zeta))
if T:
c = T.pop()
return c.dual
return None
# Algorithm 1
def enforceNegativeTraceConstraints(graph):
# Preprocessing Step
count = 1
for neg in graph["nterms"]:
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, neg.dual)
graph["dualAtoms"].add(zeta)
count += 1
# Main Algorithm
for neg in graph["nterms"]:
if not traceConstraint(graph["$"], neg, graph):
continue
c = None
while not c:
c = findStronglyDiscriminantCoefficient(graph["$"], neg, graph)
if not c:
h = getConstrainedLower(neg.dual, graph["terms*"]).difference(getLower(graph["$"].dual)).pop()
zeta = Node(name="zeta"+str(count))
linkNodes(zeta, h.dual)
graph["dualAtoms"].add(zeta)
count += 1
phi = Node(name="phi")
dphi = Node(name="[phi]")
linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
# Algorithm 2
def enforcePositiveTraceContraints(graph):
phiCount = 1
for pos in graph["pterms"]:
while not traceConstraint(graph["$"], pos, graph):
zeta = getTrace(pos, graph).difference(getTrace(graph["$"], graph)).pop()
Gamma = set()
for c in getConstrainedLower(pos, graph["constants"]):
if zeta not in getLower(c.dual):
Gamma.add(c)
if not Gamma:
linkNodes(zeta, graph["$"])
else:
c = Gamma.pop()
phi = Node(name="phi"+str(phiCount))
dphi = Node(name="[phi"+str(phiCount)+"]")
phiCount += 1
linkDuals(phi, dphi)
fullLink(phi, c)
graph["atoms"].add(phi)
graph["atoms*"].add(dphi)
return
# Algorithm 3
def sparseCrossing(a, b, graph, count):
psiCount = 1
A = getConstrainedLower(a, graph["atoms"]).difference(getLower(b))
U = set()
for phi in A:
U = set()
B = getConstrainedLower(b, graph["atoms"])
Delta = graph["dualAtoms"].difference(getLower(phi.dual))
flag = True
while Delta or flag:
epsilon = B.pop()
DeltaP = Delta.intersection(getLower(epsilon.dual))
if not Delta or (not Delta.issubset(DeltaP) or not DeltaP.issubset(Delta)):
psi = Node(name="psi" + str(psiCount))
dpsi = Node(name="[psi"+ str(psiCount)+"]")
linkDuals(psi, dpsi)
fullLink(psi, phi)
fullLink(psi, epsilon)
graph["atoms"].add(psi)
graph["atoms*"].add(dpsi)
Delta = DeltaP
U.add(epsilon)
psiCount += 1
flag = False
ecount = 1
for epsilon in U:
epsilonp = Node("epsilon'"+str(ecount))
depsilonp = Node("[epsilon'"+str(ecount)+"]")
linkDuals(epsilonp, depsilonp)
fullLink(epsilonp, epsilon)
graph["atoms"].add(epsilonp)
graph["atoms*"].add(depsilonp)
ecount += 1
#for node in A.union(U):
# deleteNode(atom, graph, "atoms", "atoms*")
Joe = set()
for atom in graph["atoms"]:
if atom.children:
Joe.add(atom)
for atom in Joe:
deleteNode(atom, graph, "atoms", "atoms*")
return graph
def cross(graph):
count = 1
for pos in graph["pterms"]:
sparseCrossing(graph["$"], pos, graph, count)
count += 1
return
# Algorithm 4
def reduceAtoms(graph):
Q = set()
Lambda = graph["constants"]
while Lambda:
c = Lambda.pop()
Sc = Q.intersection(getLower(c))
Wc = graph["dualAtoms"]
if Sc:
for phi in Sc:
Wc = Wc.intersection(getConstrainedLower(phi, graph["dualAtoms"]))
Phic = set([x.dual for x in getConstrainedLower(c, graph["atoms"])])
T = getTrace(c, graph)
count = 0
while (not T.issubset(Wc) or not Wc.issubset(T)) and Wc:
eta = Wc.difference(T).pop()
temp = Phic.difference(getUpper(eta))
phi = list(temp)[count%len(temp)].dual
count += 1
Q.add(phi)
Wc = Wc.intersection(getConstrainedLower(phi.dual, graph["dualAtoms"]))
for atom in graph["atoms"].difference(Q.union([graph["Base"]])):
deleteNode(atom, graph, "atoms", "atoms*")
# Observe Atoms
def obeserveAtoms(graph):
importantAtoms = graph["$"].children
importantAtoms.discard(graph["Base"])
for atom in importantAtoms:
pset = atom.parents
print(" or ".join(map(lambda x: x.name, pset.difference(set([graph["$"]])))))
def verifyTraceConstraints(graph):
for neg in graph["nterms"]:
if traceConstraint(graph["$"], neg, graph):
return False
for pos in graph["pterms"]:
if not traceConstraint(graph["$"], pos, graph):
return False
return True
def readData(file):
|
def classify(dataList, labels, graph):
consts = {}
for const in graph["constants"]:
consts[const.name] = const.children
atoms = set()
for i in range(1, len(dataList)):
attr = labels[i] + "_" + dataList[i]
if attr in consts:
atoms = atoms.union(consts[attr])
return graph["$"].children.issubset(atoms)
def createGraph(labels, records):
consts = {}
layers = {"pterms":set(), "nterms":set(), "constants":set(),
"atoms":set(), "terms*":set(), "constants*":set(),
"atoms*":set(), "dualAtoms":set(), "$":None, "Base":None}
consts["$"] = Node(name="$")
cdual = Node(name="[$]")
linkDuals(cdual, consts["$"])
zero = Node(name="0")
zdual = Node(name="[0]")
linkDuals(zero, zdual)
dualBase = Node(name="0*")
for record in records:
term = Node(name=record[0])
dualTerm = Node(name="["+record[0]+"]")
linkDuals(term, dualTerm)
for i in range(1, len(record)-1):
name = labels[i] + "_" + record[i]
if name in consts:
fullLink(consts[name | with open(file, 'r') as data:
labels = list(map(lambda x: x.strip(), data.readline().split(",")))
records = []
for line in data:
records.append(list(map(lambda x: x.strip(),line.split(","))))
return labels, records | identifier_body |
table.go | .Rank), 4, " "),
pad.Right(coin.Name, 22, " "),
pad.Right(coin.Symbol, 6, " "),
pad.Left(humanize.Commaf(coin.PriceUsd), 12, " "),
pad.Left(humanize.Commaf(coin.MarketCapUsd), 17, " "),
pad.Left(humanize.Commaf(coin.Usd24hVolume), 15, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange1h), 9, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange24h), 9, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange7d), 9, " "),
pad.Left(humanize.Commaf(coin.TotalSupply), 20, " "),
pad.Left(humanize.Commaf(coin.AvailableSupply), 18, " "),
pad.Left(fmt.Sprintf("%s", lastUpdated), 18, " "),
// add %percent of cap
}
var str string
for _, f := range fields {
str = fmt.Sprintf("%s%s", str, f)
}
menuData = append(menuData, str)
}
s.menuData = menuData
headers := []string{
pad.Right("[r]ank", 13, " "),
pad.Right("[n]ame", 13, " "),
pad.Right("[s]ymbol", 8, " "),
pad.Left("[p]rice", 10, " "),
pad.Left("[m]arket cap", 17, " "),
pad.Left("24H [v]olume", 15, " "),
pad.Left("[1]H%", 9, " "),
pad.Left("[2]4H%", 9, " "),
pad.Left("[7]D%", 9, " "),
pad.Left("[t]otal supply", 20, " "),
pad.Left("[a]vailable supply", 19, " "),
pad.Left("[l]ast updated", 17, " "),
}
header := ""
for _, h := range headers {
header = fmt.Sprintf("%s%s", header, h)
}
s.menuHeader = header
}
// SetColorPairs sets color pairs
func (s *Service) setColorPairs() {
switch s.primaryColor {
case "green":
gc.InitPair(1, gc.C_GREEN, gc.C_BLACK)
case "cyan", "blue":
gc.InitPair(1, gc.C_CYAN, gc.C_BLACK)
case "magenta", "pink", "purple":
gc.InitPair(1, gc.C_MAGENTA, gc.C_BLACK)
case "white":
gc.InitPair(1, gc.C_WHITE, gc.C_BLACK)
case "red":
gc.InitPair(1, gc.C_RED, gc.C_BLACK)
case "yellow", "orange":
gc.InitPair(1, gc.C_YELLOW, gc.C_BLACK)
default:
gc.InitPair(1, gc.C_WHITE, gc.C_BLACK)
}
gc.InitPair(2, gc.C_BLACK, gc.C_BLACK)
gc.InitPair(3, gc.C_BLACK, gc.C_GREEN)
gc.InitPair(4, gc.C_BLACK, gc.C_CYAN)
gc.InitPair(5, gc.C_WHITE, gc.C_BLUE)
gc.InitPair(6, gc.C_BLACK, -1)
}
// RenderMainWindow renders main window
func (s *Service) renderMainWindow() error {
if s.mainwin == nil {
var err error
s.mainwin, err = gc.NewWindow(s.screenRows, s.screenCols, 0, 0)
if err != nil {
return err
}
}
s.mainwin.Clear()
s.mainwin.ColorOn(2)
s.mainwin.MoveWindow(0, 0)
s.mainwin.Resize(s.screenRows, s.screenCols)
s.mainwin.Box(0, 0)
s.mainwin.Refresh()
return nil
}
// ResizeWindows resizes windows
func (s *Service) resizeWindows() {
gc.ResizeTerm(s.screenRows, s.screenCols)
//s.log(fmt.Sprintf("%v %v", s.screenCols, s.screenRows))
s.renderMainWindow()
s.renderMenu()
s.renderHelpBar()
s.renderLogWindow()
s.renderHelpWindow()
}
func (s *Service) renderHelpBar() error {
var err error
if s.helpbarwin == nil {
s.helpbarwin, err = gc.NewWindow(1, s.screenCols, s.screenRows-1, 0)
if err != nil {
return err
}
}
s.helpbarwin.Clear()
s.helpbarwin.Resize(1, s.screenCols)
s.helpbarwin.MoveWindow(s.screenRows-1, 0)
s.helpbarwin.ColorOn(2)
s.helpbarwin.Box(0, 0)
s.helpbarwin.ColorOff(2)
s.helpbarwin.ColorOn(1)
s.helpbarwin.MovePrint(0, 0, "[q]uit [h]elp")
s.helpbarwin.ColorOff(1)
s.helpbarwin.Refresh()
return nil
}
func (s *Service) renderLogWindow() error {
var err error
if s.logwin == nil {
s.logwin, err = gc.NewWindow(1, 20, s.screenRows-1, s.screenCols-20)
if err != nil {
return err
}
}
s.logwin.Clear()
s.logwin.Resize(1, 20)
s.logwin.MoveWindow(s.screenRows-1, s.screenCols-20)
s.logwin.ColorOn(2)
s.logwin.Box(0, 0)
s.logwin.ColorOff(2)
s.logwin.ColorOn(1)
s.logwin.MovePrint(0, 0, s.lastLog)
s.logwin.ColorOff(1)
s.logwin.Refresh()
return nil
}
// Log logs debug messages
func (s *Service) log(msg string) {
s.lastLog = msg
s.logwin.Clear()
s.logwin.ColorOn(2)
s.logwin.Box(0, 0)
s.logwin.ColorOff(2)
s.logwin.ColorOn(1)
s.logwin.MovePrint(0, 0, msg)
s.logwin.ColorOff(1)
s.logwin.Refresh()
}
func (s *Service) toggleHelp() {
s.helpVisible = !s.helpVisible
s.renderHelpWindow()
}
func (s *Service) renderHelpWindow() error {
if !s.helpVisible {
if s.helpwin != nil {
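// "hide" the help window by clearing it, shrinking it to zero size and moving it off-screen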
s.helpwin.ClearOk(true)
s.helpwin.Clear()
s.helpwin.SetBackground(gc.ColorPair(6))
s.helpwin.ColorOn(6)
s.helpwin.Resize(0, 0)
s.helpwin.MoveWindow(200, 200)
s.helpwin.Refresh()
s.renderMenu()
}
return nil
}
var err error
if s.helpwin == nil {
s.helpwin, err = gc.NewWindow(21, 40, (s.screenRows/2)-11, (s.screenCols/2)-20)
if err != nil {
return err
}
}
s.helpwin.Keypad(true)
s.helpwin.Clear()
s.helpwin.SetBackground(gc.ColorPair(1))
s.helpwin.ColorOn(1)
s.helpwin.Resize(11, 40)
s.helpwin.MoveWindow((s.screenRows/2)-11, (s.screenCols/2)-20)
s.helpwin.Box(0, 0)
s.helpwin.MovePrint(0, 1, "Help")
s.helpwin.MovePrint(1, 1, "<up> or <k> to navigate up")
s.helpwin.MovePrint(2, 1, "<down> or <j> to navigate down")
s.helpwin.MovePrint(3, 1, "<ctrl-u> to page up")
s.helpwin.MovePrint(4, 1, "<ctrl-d> to page down")
s.helpwin.MovePrint(5, 1, "<enter> or <space> to open coin link")
s.helpwin.MovePrint(6, 1, "<1> to sort by 1 hour change")
s.helpwin.MovePrint(7, 1, "<2> to sort by 24 hour volume")
s.helpwin.MovePrint(8, 1, "<7> to sort by 7 day change")
s.helpwin.MovePrint(9, 1, "<a> to sort by available supply")
s.helpwin.MovePrint(10, 1, "<h> or <?> to toggle help")
s.helpwin.MovePrint(11, 1, "<l> to sort by last updated")
s.helpwin.MovePrint(12, 1, "<m> to sort by market cap")
s.helpwin.MovePrint(13, 1, "<n> to sort by name")
s.helpwin.MovePrint(14, 1, "<r> to sort by rank")
s.helpwin.MovePrint(15, 1, "<s> to sort by symbol")
s.helpwin.MovePrint(16, 1, "<t> to sort by total supply") | s.helpwin.MovePrint(17, 1, "<p> to sort by price") | random_line_split |
|
table.go | ymbol", 8, " "),
pad.Left("[p]rice", 10, " "),
pad.Left("[m]arket cap", 17, " "),
pad.Left("24H [v]olume", 15, " "),
pad.Left("[1]H%", 9, " "),
pad.Left("[2]4H%", 9, " "),
pad.Left("[7]D%", 9, " "),
pad.Left("[t]otal supply", 20, " "),
pad.Left("[a]vailable supply", 19, " "),
pad.Left("[l]ast updated", 17, " "),
}
header := ""
for _, h := range headers {
header = fmt.Sprintf("%s%s", header, h)
}
s.menuHeader = header
}
// SetColorPairs sets color pairs
func (s *Service) setColorPairs() {
switch s.primaryColor {
case "green":
gc.InitPair(1, gc.C_GREEN, gc.C_BLACK)
case "cyan", "blue":
gc.InitPair(1, gc.C_CYAN, gc.C_BLACK)
case "magenta", "pink", "purple":
gc.InitPair(1, gc.C_MAGENTA, gc.C_BLACK)
case "white":
gc.InitPair(1, gc.C_WHITE, gc.C_BLACK)
case "red":
gc.InitPair(1, gc.C_RED, gc.C_BLACK)
case "yellow", "orange":
gc.InitPair(1, gc.C_YELLOW, gc.C_BLACK)
default:
gc.InitPair(1, gc.C_WHITE, gc.C_BLACK)
}
gc.InitPair(2, gc.C_BLACK, gc.C_BLACK)
gc.InitPair(3, gc.C_BLACK, gc.C_GREEN)
gc.InitPair(4, gc.C_BLACK, gc.C_CYAN)
gc.InitPair(5, gc.C_WHITE, gc.C_BLUE)
gc.InitPair(6, gc.C_BLACK, -1)
}
// RenderMainWindow renders main window
func (s *Service) renderMainWindow() error {
if s.mainwin == nil {
var err error
s.mainwin, err = gc.NewWindow(s.screenRows, s.screenCols, 0, 0)
if err != nil {
return err
}
}
s.mainwin.Clear()
s.mainwin.ColorOn(2)
s.mainwin.MoveWindow(0, 0)
s.mainwin.Resize(s.screenRows, s.screenCols)
s.mainwin.Box(0, 0)
s.mainwin.Refresh()
return nil
}
// ResizeWindows resizes windows
func (s *Service) resizeWindows() {
gc.ResizeTerm(s.screenRows, s.screenCols)
//s.log(fmt.Sprintf("%v %v", s.screenCols, s.screenRows))
s.renderMainWindow()
s.renderMenu()
s.renderHelpBar()
s.renderLogWindow()
s.renderHelpWindow()
}
func (s *Service) renderHelpBar() error {
var err error
if s.helpbarwin == nil {
s.helpbarwin, err = gc.NewWindow(1, s.screenCols, s.screenRows-1, 0)
if err != nil {
return err
}
}
s.helpbarwin.Clear()
s.helpbarwin.Resize(1, s.screenCols)
s.helpbarwin.MoveWindow(s.screenRows-1, 0)
s.helpbarwin.ColorOn(2)
s.helpbarwin.Box(0, 0)
s.helpbarwin.ColorOff(2)
s.helpbarwin.ColorOn(1)
s.helpbarwin.MovePrint(0, 0, "[q]uit [h]elp")
s.helpbarwin.ColorOff(1)
s.helpbarwin.Refresh()
return nil
}
func (s *Service) renderLogWindow() error {
var err error
if s.logwin == nil {
s.logwin, err = gc.NewWindow(1, 20, s.screenRows-1, s.screenCols-20)
if err != nil {
return err
}
}
s.logwin.Clear()
s.logwin.Resize(1, 20)
s.logwin.MoveWindow(s.screenRows-1, s.screenCols-20)
s.logwin.ColorOn(2)
s.logwin.Box(0, 0)
s.logwin.ColorOff(2)
s.logwin.ColorOn(1)
s.logwin.MovePrint(0, 0, s.lastLog)
s.logwin.ColorOff(1)
s.logwin.Refresh()
return nil
}
// Log logs debug messages
func (s *Service) log(msg string) {
s.lastLog = msg
s.logwin.Clear()
s.logwin.ColorOn(2)
s.logwin.Box(0, 0)
s.logwin.ColorOff(2)
s.logwin.ColorOn(1)
s.logwin.MovePrint(0, 0, msg)
s.logwin.ColorOff(1)
s.logwin.Refresh()
}
func (s *Service) toggleHelp() {
s.helpVisible = !s.helpVisible
s.renderHelpWindow()
}
func (s *Service) renderHelpWindow() error {
if !s.helpVisible {
if s.helpwin != nil {
s.helpwin.ClearOk(true)
s.helpwin.Clear()
s.helpwin.SetBackground(gc.ColorPair(6))
s.helpwin.ColorOn(6)
s.helpwin.Resize(0, 0)
s.helpwin.MoveWindow(200, 200)
s.helpwin.Refresh()
s.renderMenu()
}
return nil
}
var err error
if s.helpwin == nil {
s.helpwin, err = gc.NewWindow(21, 40, (s.screenRows/2)-11, (s.screenCols/2)-20)
if err != nil {
return err
}
}
s.helpwin.Keypad(true)
s.helpwin.Clear()
s.helpwin.SetBackground(gc.ColorPair(1))
s.helpwin.ColorOn(1)
s.helpwin.Resize(11, 40)
s.helpwin.MoveWindow((s.screenRows/2)-11, (s.screenCols/2)-20)
s.helpwin.Box(0, 0)
s.helpwin.MovePrint(0, 1, "Help")
s.helpwin.MovePrint(1, 1, "<up> or <k> to navigate up")
s.helpwin.MovePrint(2, 1, "<down> or <j> to navigate down")
s.helpwin.MovePrint(3, 1, "<ctrl-u> to page up")
s.helpwin.MovePrint(4, 1, "<ctrl-d> to page down")
s.helpwin.MovePrint(5, 1, "<enter> or <space> to open coin link")
s.helpwin.MovePrint(6, 1, "<1> to sort by 1 hour change")
s.helpwin.MovePrint(7, 1, "<2> to sort by 24 hour volume")
s.helpwin.MovePrint(8, 1, "<7> to sort by 7 day change")
s.helpwin.MovePrint(9, 1, "<a> to sort by available supply")
s.helpwin.MovePrint(10, 1, "<h> or <?> to toggle help")
s.helpwin.MovePrint(11, 1, "<l> to sort by last updated")
s.helpwin.MovePrint(12, 1, "<m> to sort by market cap")
s.helpwin.MovePrint(13, 1, "<n> to sort by name")
s.helpwin.MovePrint(14, 1, "<r> to sort by rank")
s.helpwin.MovePrint(15, 1, "<s> to sort by symbol")
s.helpwin.MovePrint(16, 1, "<t> to sort by total supply")
s.helpwin.MovePrint(17, 1, "<p> to sort by price")
s.helpwin.MovePrint(18, 1, "<v> to sort by 24 hour volume")
s.helpwin.MovePrint(19, 1, "<q> or <esc> to quit application.")
s.helpwin.Refresh()
return nil
}
// OnWindowResize sends event to channel when resize event occurs
func (s *Service) onWindowResize(channel chan os.Signal) {
//stdScr, _ := gc.Init()
//stdScr.ScrollOk(true)
//gc.NewLines(true)
for {
<-channel
//gc.StdScr().Clear()
//rows, cols := gc.StdScr().MaxYX()
cols, rows := GetScreenSize()
s.screenRows = rows
s.screenCols = cols
s.resizeWindows()
//gc.End()
//gc.Update()
//gc.StdScr().Refresh()
}
}
// RenderMenu renders menu
func (s *Service) renderMenu() error | {
s.menuwinWidth = s.screenCols
s.menuwinHeight = s.screenRows - 1
s.menuWidth = s.screenCols
s.menuHeight = s.screenRows - 2
//if len(s.menuItems) == 0 {
items := make([]*gc.MenuItem, len(s.menuData))
var err error
for i, val := range s.menuData {
items[i], err = gc.NewItem(val, "")
if err != nil {
return err
}
//defer items[i].Free()
}
s.menuItems = items
//}
| identifier_body |
|
table.go | str == "106": // "j"
if s.currentItem < len(s.menuItems)-1 {
s.currentItem = s.currentItem + 1
s.menu.Current(s.menuItems[s.currentItem])
}
form.Driver(gc.REQ_NEXT_FIELD)
form.Driver(gc.REQ_END_LINE)
case ch == gc.KEY_UP, chstr == "107": // "k"
if s.currentItem > 0 {
s.currentItem = s.currentItem - 1
s.menu.Current(s.menuItems[s.currentItem])
}
case ch == gc.KEY_RETURN, ch == gc.KEY_ENTER, chstr == "32":
s.menu.Driver(gc.REQ_TOGGLE)
for _, item := range s.menu.Items() {
if item.Value() {
s.handleClick(item.Index())
break
}
}
s.menu.Driver(gc.REQ_TOGGLE)
case chstr == "114": // "r"
s.handleSort("rank", false)
case chstr == "110": // "n"
s.handleSort("name", true)
case chstr == "115": // "s"
s.handleSort("symbol", false)
case chstr == "112": // "p
s.handleSort("price", true)
case chstr == "109": // "m
s.handleSort("marketcap", true)
case chstr == "118": // "v
s.handleSort("24hvolume", true)
case chstr == "49": // "1"
s.handleSort("1hchange", true)
case chstr == "50": // "2"
s.handleSort("24hchange", true)
case chstr == "55": // "7"
s.handleSort("7dchange", true)
case chstr == "116": // "t"
s.handleSort("totalsupply", true)
case chstr == "97": // "a"
s.handleSort("availablesupply", true)
case chstr == "108": // "l"
s.handleSort("lastupdated", true)
case chstr == "21": // ctrl-u
s.currentItem = s.currentItem - s.menuHeight
if s.currentItem < 0 {
s.currentItem = 0
}
if s.currentItem >= len(s.menuItems) {
s.currentItem = len(s.menuItems) - 1
}
//s.log(fmt.Sprintf("%v %v", s.currentItem, s.screenRows))
s.menu.Current(s.menuItems[s.currentItem])
case fmt.Sprint(ch) == "4": // ctrl-d
s.currentItem = s.currentItem + s.menuHeight
if s.currentItem < 0 {
s.currentItem = 0
}
if s.currentItem >= len(s.menuItems) {
s.currentItem = len(s.menuItems) - 1
}
//s.log(fmt.Sprintf("%v %v", s.currentItem, s.screenRows))
s.menu.Current(s.menuItems[s.currentItem])
case chstr == "104", chstr == "63": // "h", "?"
s.toggleHelp()
case chstr == "3", chstr == "113", chstr == "27": // ctrl-c, "q", esc
if s.helpVisible && chstr == "27" {
s.toggleHelp()
} else {
// quit
return nil
}
default:
s.menu.Driver(gc.DriverActions[ch])
}
}
}
func (s *Service) fetchData() error {
coins, err := cmc.GetAllCoinData(int(s.limit))
if err != nil {
return err
}
s.coins = []*cmc.Coin{}
for i := range coins {
coin := coins[i]
s.coins = append(s.coins, &coin)
}
return nil
}
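// handleClick opens the selected coin's CoinMarketCap page via the macOS `open` command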
func (s *Service) handleClick(idx int) {
slug := strings.ToLower(strings.Replace(s.coins[idx].Name, " ", "-", -1))
exec.Command("open", fmt.Sprintf("https://coinmarketcap.com/currencies/%s", slug)).Output()
}
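// handleSort sorts the coin list by the given field; choosing the same field again toggles between ascending and descending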
func (s *Service) handleSort(name string, desc bool) {
if s.sortBy == name | else {
s.sortBy = name
s.sortDesc = desc
}
s.setMenuData()
err := s.renderMenu()
if err != nil {
panic(err)
}
}
func (s *Service) setMenuData() {
slice.Sort(s.coins[:], func(i, j int) bool {
if s.sortDesc == true {
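// swapping the indices reverses the comparison, producing descending order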
i, j = j, i
}
switch s.sortBy {
case "rank":
return s.coins[i].Rank < s.coins[j].Rank
case "name":
return s.coins[i].Name < s.coins[j].Name
case "symbol":
return s.coins[i].Symbol < s.coins[j].Symbol
case "price":
return s.coins[i].PriceUsd < s.coins[j].PriceUsd
case "marketcap":
return s.coins[i].MarketCapUsd < s.coins[j].MarketCapUsd
case "24hvolume":
return s.coins[i].Usd24hVolume < s.coins[j].Usd24hVolume
case "1hchange":
return s.coins[i].PercentChange1h < s.coins[j].PercentChange1h
case "24hchange":
return s.coins[i].PercentChange24h < s.coins[j].PercentChange24h
case "7dchange":
return s.coins[i].PercentChange7d < s.coins[j].PercentChange7d
case "totalsupply":
return s.coins[i].TotalSupply < s.coins[j].TotalSupply
case "availablesupply":
return s.coins[i].AvailableSupply < s.coins[j].AvailableSupply
case "lastupdated":
return s.coins[i].LastUpdated < s.coins[j].LastUpdated
default:
return s.coins[i].Rank < s.coins[j].Rank
}
})
var menuData []string
for _, coin := range s.coins {
unix, _ := strconv.ParseInt(coin.LastUpdated, 10, 64)
lastUpdated := time.Unix(unix, 0).Format("15:04:05 Jan 02")
fields := []string{
pad.Right(fmt.Sprint(coin.Rank), 4, " "),
pad.Right(coin.Name, 22, " "),
pad.Right(coin.Symbol, 6, " "),
pad.Left(humanize.Commaf(coin.PriceUsd), 12, " "),
pad.Left(humanize.Commaf(coin.MarketCapUsd), 17, " "),
pad.Left(humanize.Commaf(coin.Usd24hVolume), 15, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange1h), 9, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange24h), 9, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange7d), 9, " "),
pad.Left(humanize.Commaf(coin.TotalSupply), 20, " "),
pad.Left(humanize.Commaf(coin.AvailableSupply), 18, " "),
pad.Left(fmt.Sprintf("%s", lastUpdated), 18, " "),
// add %percent of cap
}
var str string
for _, f := range fields {
str = fmt.Sprintf("%s%s", str, f)
}
menuData = append(menuData, str)
}
s.menuData = menuData
headers := []string{
pad.Right("[r]ank", 13, " "),
pad.Right("[n]ame", 13, " "),
pad.Right("[s]ymbol", 8, " "),
pad.Left("[p]rice", 10, " "),
pad.Left("[m]arket cap", 17, " "),
pad.Left("24H [v]olume", 15, " "),
pad.Left("[1]H%", 9, " "),
pad.Left("[2]4H%", 9, " "),
pad.Left("[7]D%", 9, " "),
pad.Left("[t]otal supply", 20, " "),
pad.Left("[a]vailable supply", 19, " "),
pad.Left("[l]ast updated", 17, " "),
}
header := ""
for _, h := range headers {
header = fmt.Sprintf("%s%s", header, h)
}
s.menuHeader = header
}
// SetColorPairs sets color pairs
func (s *Service) setColorPairs() {
switch s.primaryColor {
case "green":
gc.InitPair(1, gc.C_GREEN, gc.C_BLACK)
case "cyan", "blue | {
s.sortDesc = !s.sortDesc
} | conditional_block |
table.go | = append(s.coins, &coin)
}
return nil
}
func (s *Service) handleClick(idx int) {
slug := strings.ToLower(strings.Replace(s.coins[idx].Name, " ", "-", -1))
exec.Command("open", fmt.Sprintf("https://coinmarketcap.com/currencies/%s", slug)).Output()
}
func (s *Service) handleSort(name string, desc bool) {
if s.sortBy == name {
s.sortDesc = !s.sortDesc
} else {
s.sortBy = name
s.sortDesc = desc
}
s.setMenuData()
err := s.renderMenu()
if err != nil {
panic(err)
}
}
func (s *Service) setMenuData() {
slice.Sort(s.coins[:], func(i, j int) bool {
if s.sortDesc == true {
i, j = j, i
}
switch s.sortBy {
case "rank":
return s.coins[i].Rank < s.coins[j].Rank
case "name":
return s.coins[i].Name < s.coins[j].Name
case "symbol":
return s.coins[i].Symbol < s.coins[j].Symbol
case "price":
return s.coins[i].PriceUsd < s.coins[j].PriceUsd
case "marketcap":
return s.coins[i].MarketCapUsd < s.coins[j].MarketCapUsd
case "24hvolume":
return s.coins[i].Usd24hVolume < s.coins[j].Usd24hVolume
case "1hchange":
return s.coins[i].PercentChange1h < s.coins[j].PercentChange1h
case "24hchange":
return s.coins[i].PercentChange24h < s.coins[j].PercentChange24h
case "7dchange":
return s.coins[i].PercentChange7d < s.coins[j].PercentChange7d
case "totalsupply":
return s.coins[i].TotalSupply < s.coins[j].TotalSupply
case "availablesupply":
return s.coins[i].AvailableSupply < s.coins[j].AvailableSupply
case "lastupdated":
return s.coins[i].LastUpdated < s.coins[j].LastUpdated
default:
return s.coins[i].Rank < s.coins[j].Rank
}
})
var menuData []string
for _, coin := range s.coins {
unix, _ := strconv.ParseInt(coin.LastUpdated, 10, 64)
lastUpdated := time.Unix(unix, 0).Format("15:04:05 Jan 02")
fields := []string{
pad.Right(fmt.Sprint(coin.Rank), 4, " "),
pad.Right(coin.Name, 22, " "),
pad.Right(coin.Symbol, 6, " "),
pad.Left(humanize.Commaf(coin.PriceUsd), 12, " "),
pad.Left(humanize.Commaf(coin.MarketCapUsd), 17, " "),
pad.Left(humanize.Commaf(coin.Usd24hVolume), 15, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange1h), 9, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange24h), 9, " "),
pad.Left(fmt.Sprintf("%.2f%%", coin.PercentChange7d), 9, " "),
pad.Left(humanize.Commaf(coin.TotalSupply), 20, " "),
pad.Left(humanize.Commaf(coin.AvailableSupply), 18, " "),
pad.Left(fmt.Sprintf("%s", lastUpdated), 18, " "),
// add %percent of cap
}
var str string
for _, f := range fields {
str = fmt.Sprintf("%s%s", str, f)
}
menuData = append(menuData, str)
}
s.menuData = menuData
headers := []string{
pad.Right("[r]ank", 13, " "),
pad.Right("[n]ame", 13, " "),
pad.Right("[s]ymbol", 8, " "),
pad.Left("[p]rice", 10, " "),
pad.Left("[m]arket cap", 17, " "),
pad.Left("24H [v]olume", 15, " "),
pad.Left("[1]H%", 9, " "),
pad.Left("[2]4H%", 9, " "),
pad.Left("[7]D%", 9, " "),
pad.Left("[t]otal supply", 20, " "),
pad.Left("[a]vailable supply", 19, " "),
pad.Left("[l]ast updated", 17, " "),
}
header := ""
for _, h := range headers {
header = fmt.Sprintf("%s%s", header, h)
}
s.menuHeader = header
}
// SetColorPairs sets color pairs
func (s *Service) setColorPairs() {
switch s.primaryColor {
case "green":
gc.InitPair(1, gc.C_GREEN, gc.C_BLACK)
case "cyan", "blue":
gc.InitPair(1, gc.C_CYAN, gc.C_BLACK)
case "magenta", "pink", "purple":
gc.InitPair(1, gc.C_MAGENTA, gc.C_BLACK)
case "white":
gc.InitPair(1, gc.C_WHITE, gc.C_BLACK)
case "red":
gc.InitPair(1, gc.C_RED, gc.C_BLACK)
case "yellow", "orange":
gc.InitPair(1, gc.C_YELLOW, gc.C_BLACK)
default:
gc.InitPair(1, gc.C_WHITE, gc.C_BLACK)
}
gc.InitPair(2, gc.C_BLACK, gc.C_BLACK)
gc.InitPair(3, gc.C_BLACK, gc.C_GREEN)
gc.InitPair(4, gc.C_BLACK, gc.C_CYAN)
gc.InitPair(5, gc.C_WHITE, gc.C_BLUE)
gc.InitPair(6, gc.C_BLACK, -1)
}
// RenderMainWindow renders main window
func (s *Service) renderMainWindow() error {
if s.mainwin == nil {
var err error
s.mainwin, err = gc.NewWindow(s.screenRows, s.screenCols, 0, 0)
if err != nil {
return err
}
}
s.mainwin.Clear()
s.mainwin.ColorOn(2)
s.mainwin.MoveWindow(0, 0)
s.mainwin.Resize(s.screenRows, s.screenCols)
s.mainwin.Box(0, 0)
s.mainwin.Refresh()
return nil
}
// ResizeWindows resizes windows
func (s *Service) resizeWindows() {
gc.ResizeTerm(s.screenRows, s.screenCols)
//s.log(fmt.Sprintf("%v %v", s.screenCols, s.screenRows))
s.renderMainWindow()
s.renderMenu()
s.renderHelpBar()
s.renderLogWindow()
s.renderHelpWindow()
}
func (s *Service) renderHelpBar() error {
var err error
if s.helpbarwin == nil {
s.helpbarwin, err = gc.NewWindow(1, s.screenCols, s.screenRows-1, 0)
if err != nil {
return err
}
}
s.helpbarwin.Clear()
s.helpbarwin.Resize(1, s.screenCols)
s.helpbarwin.MoveWindow(s.screenRows-1, 0)
s.helpbarwin.ColorOn(2)
s.helpbarwin.Box(0, 0)
s.helpbarwin.ColorOff(2)
s.helpbarwin.ColorOn(1)
s.helpbarwin.MovePrint(0, 0, "[q]uit [h]elp")
s.helpbarwin.ColorOff(1)
s.helpbarwin.Refresh()
return nil
}
func (s *Service) renderLogWindow() error {
var err error
if s.logwin == nil {
s.logwin, err = gc.NewWindow(1, 20, s.screenRows-1, s.screenCols-20)
if err != nil {
return err
}
}
s.logwin.Clear()
s.logwin.Resize(1, 20)
s.logwin.MoveWindow(s.screenRows-1, s.screenCols-20)
s.logwin.ColorOn(2)
s.logwin.Box(0, 0)
s.logwin.ColorOff(2)
s.logwin.ColorOn(1)
s.logwin.MovePrint(0, 0, s.lastLog)
s.logwin.ColorOff(1)
s.logwin.Refresh()
return nil
}
// Log logs debug messages
func (s *Service) log(msg string) {
s.lastLog = msg
s.logwin.Clear()
s.logwin.ColorOn(2)
s.logwin.Box(0, 0)
s.logwin.ColorOff(2)
s.logwin.ColorOn(1)
s.logwin.MovePrint(0, 0, msg)
s.logwin.ColorOff(1)
s.logwin.Refresh()
}
func (s *Service) toggleHelp() {
s.helpVisible = !s.helpVisible
s.renderHelpWindow()
}
func (s *Service) renderHelpWindow
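setMenuData above sorts the coin slice with a single ascending comparator and gets descending order by swapping the indices i and j inside the comparison. A minimal, self-contained sketch of that index-swap trick, using the standard library's sort.Slice instead of the bradfitz/slice package in the snippet; the coin type and field names are invented for illustration:

```go
package main

import (
	"fmt"
	"sort"
)

type coin struct {
	Rank     int
	PriceUsd float64
}

// sortCoins sorts ascending by default; when desc is true it swaps the
// indices inside the comparator, which inverts every comparison below.
func sortCoins(coins []coin, sortBy string, desc bool) {
	sort.Slice(coins, func(i, j int) bool {
		if desc {
			i, j = j, i
		}
		switch sortBy {
		case "price":
			return coins[i].PriceUsd < coins[j].PriceUsd
		default:
			return coins[i].Rank < coins[j].Rank
		}
	})
}

func main() {
	coins := []coin{{Rank: 2, PriceUsd: 300}, {Rank: 1, PriceUsd: 9500}, {Rank: 3, PriceUsd: 0.5}}
	sortCoins(coins, "price", true)
	fmt.Println(coins) // highest price first
}
```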
|
// cli.rs
use std::path::Path;
use std::process;
use ansi_term::Colour::{Cyan, Green, White};
use hcore::crypto::SigKeyPair;
use hcore::env;
use analytics;
use command;
use config;
use error::Result;
pub fn start(cache_path: &Path, analytics_path: &Path) -> Result<()> {
let mut generated_origin = false;
println!("");
title("Habitat CLI Setup");
para("Welcome to hab setup. Let's get started.");
heading("Set up a default origin");
para("Every package in Habitat belongs to an origin, which indicates the person or \
organization responsible for maintaining that package. Each origin also has \
a key used to cryptographically sign packages in that origin.");
para("Selecting a default origin tells package building operations such as 'hab pkg \
build' what key should be used to sign the packages produced. If you do not \
set a default origin now, you will have to tell package building commands each \
time what origin to use.");
para("For more information on origins and how they are used in building packages, \
please consult the docs at https://www.habitat.sh/docs/create-packages-overview/");
if try!(ask_default_origin()) {
println!("");
para("Enter the name of your origin. If you plan to publish your packages publicly, \
we recommend that you select one that is not already in use on the Habitat \
build service found at https://app.habitat.sh/.");
let origin = try!(prompt_origin());
try!(write_cli_config_origin(&origin));
println!("");
if is_origin_in_cache(&origin, cache_path) {
para(&format!("You already have an origin key for {} created and installed. \
Great work!",
&origin));
} else {
heading("Create origin key pair");
para(&format!("It doesn't look like you have a signing key for the origin `{}'. \
Without it, you won't be able to build new packages successfully.",
&origin));
para("You can either create a new signing key now, or, if you are building \
packages for an origin that already exists, ask the owner to give you the \
signing key.");
para("For more information on the use of origin keys, please consult the \
documentation at https://www.habitat.sh/docs/concepts-keys/#origin-keys");
if try!(ask_create_origin(&origin)) | else {
para(&format!("You might want to create an origin key later with: `hab \
origin key generate {}'",
&origin));
}
}
} else {
para("Okay, maybe another time.");
}
heading("GitHub Access Token");
para("While you can build and run Habitat packages without sharing them on the public \
depot, doing so allows you to collaborate with the Habitat community. In addition, \
it is how you can perform continuous deployment with Habitat.");
para("The depot uses GitHub authentication with an access token \
(https://help.github.com/articles/creating-an-access-token-for-command-line-use/).");
para("If you would like to share your packages on the depot, please enter your GitHub \
access token. Otherwise, just enter No.");
para("For more information on sharing packages on the depot, please read the \
documentation at https://www.habitat.sh/docs/share-packages-overview/");
if try!(ask_default_auth_token()) {
println!("");
para("Enter your GitHub access token.");
let auth_token = try!(prompt_auth_token());
try!(write_cli_config_auth_token(&auth_token));
} else {
para("Okay, maybe another time.");
}
heading("Analytics");
para("The `hab` command-line tool will optionally send anonymous usage data to Habitat's \
Google Analytics account. This is a strictly opt-in activity and no tracking will \
occur unless you respond affirmatively to the question below.");
para("We collect this data to help improve Habitat's user experience. For example, we \
would like to know the category of tasks users are performing, and which ones they \
are having trouble with (e.g. mistyping command line arguments).");
para("To see what kinds of data are sent and how they are anonymized, please read more \
about our analytics here: https://www.habitat.sh/docs/about-analytics/");
if try!(ask_enable_analytics(analytics_path)) {
try!(opt_in_analytics(analytics_path, generated_origin));
} else {
try!(opt_out_analytics(analytics_path));
}
heading("CLI Setup Complete");
para("That's all for now. Thanks for using Habitat!");
Ok(())
}
fn ask_default_origin() -> Result<bool> {
prompt_yes_no("Set up a default origin?", Some(true))
}
fn ask_create_origin(origin: &str) -> Result<bool> {
prompt_yes_no(&format!("Create an origin key for `{}'?", origin),
Some(true))
}
fn write_cli_config_origin(origin: &str) -> Result<()> {
let mut config = try!(config::load());
config.origin = Some(origin.to_string());
config::save(&config)
}
fn write_cli_config_auth_token(auth_token: &str) -> Result<()> {
let mut config = try!(config::load());
config.auth_token = Some(auth_token.to_string());
config::save(&config)
}
fn is_origin_in_cache(origin: &str, cache_path: &Path) -> bool {
match SigKeyPair::get_latest_pair_for(origin, cache_path) {
Ok(pair) => {
match pair.secret() {
Ok(_) => true,
_ => false,
}
}
_ => false,
}
}
fn create_origin(origin: &str, cache_path: &Path) -> Result<()> {
let result = command::origin::key::generate::start(&origin, cache_path);
println!("");
result
}
fn prompt_origin() -> Result<String> {
let config = try!(config::load());
let default = match config.origin {
Some(o) => {
para(&format!("You already have a default origin set up as `{}', but feel free \
to change it if you wish.",
&o));
Some(o)
}
None => env::var("USER").ok(),
};
prompt_ask("Default origin name", default.as_ref().map(|x| &**x))
}
fn ask_default_auth_token() -> Result<bool> {
prompt_yes_no("Set up a default GitHub access token?", Some(true))
}
fn prompt_auth_token() -> Result<String> {
let config = try!(config::load());
let default = match config.auth_token {
Some(o) => {
para("You already have a default auth token set up, but feel free to change it \
if you wish.");
Some(o)
}
None => None,
};
prompt_ask("GitHub access token", default.as_ref().map(|x| &**x))
}
fn ask_enable_analytics(analytics_path: &Path) -> Result<bool> {
let default = match analytics::is_opted_in(analytics_path) {
Some(val) => Some(val),
None => Some(true),
};
prompt_yes_no("Enable analytics?", default)
}
fn opt_in_analytics(analytics_path: &Path, generated_origin: bool) -> Result<()> {
let result = analytics::opt_in(analytics_path, generated_origin);
println!("");
result
}
fn opt_out_analytics(analytics_path: &Path) -> Result<()> {
let result = analytics::opt_out(analytics_path);
println!("");
result
}
fn title(text: &str) {
println!("{}", Green.bold().paint(text));
println!("{}\n",
Green.bold().paint(format!("{:=<width$}", "", width = text.chars().count())));
}
fn heading(text: &str) {
println!("{}\n", Green.bold().paint(text));
}
fn para(text: &str) {
print_wrapped(text, 75, 2)
}
fn print_wrapped(text: &str, wrap_width: usize, left_indent: usize) {
for line in text.split("\n\n") {
let mut buffer = String::new();
let mut width = 0;
for word in line.split_whitespace() {
let wl = word.chars().count();
if (width + wl + 1) > (wrap_width - left_indent) {
println!("{:<width$}{}", " ", buffer, width = left_indent);
buffer.clear();
width = 0;
}
width = width + wl + 1;
buffer.push_str(word);
buffer.push(' ');
}
if !buffer.is_empty() {
println!("{:<width$}{}", " ", buffer, width = left_indent);
}
println!("");
}
}
fn prompt_yes_no(question: &str, default: Option<bool>) -> Result<bool> {
let choice = match default {
Some(yes) => {
if yes {
format!("{}{}{}",
White.paint("["),
White.bold().paint("Yes"),
White.paint("/no/quit]"))
} else {
format!("{}{}{}",
White.paint("[yes/"),
White.bold().paint("No"),
White.paint("/quit]"))
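prompt_yes_no above prints the question with a highlighted default and re-asks until it reads an answer it recognizes; the fragment is cut off by the extraction before the read loop. A rough Go sketch of the same prompt-with-default idea (Go is used for consistency with the other examples here; the helper name and details are assumptions, not part of the Rust source):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// promptYesNo keeps asking until the reply is empty (take the default),
// yes, or no. Hypothetical helper for illustration only.
func promptYesNo(question string, def bool) bool {
	reader := bufio.NewReader(os.Stdin)
	for {
		choice := "[yes/No]"
		if def {
			choice = "[Yes/no]"
		}
		fmt.Printf("%s %s ", question, choice)
		line, err := reader.ReadString('\n')
		if err != nil {
			return def // EOF or read error: fall back to the default
		}
		switch strings.ToLower(strings.TrimSpace(line)) {
		case "":
			return def
		case "y", "yes":
			return true
		case "n", "no":
			return false
		}
	}
}

func main() {
	if promptYesNo("Set up a default origin?", true) {
		fmt.Println("ok")
	}
}
```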
// cli.rs
use std::path::Path;
use std::process;
use ansi_term::Colour::{Cyan, Green, White};
use hcore::crypto::SigKeyPair;
use hcore::env;
use analytics;
use command;
use config;
use error::Result;
pub fn start(cache_path: &Path, analytics_path: &Path) -> Result<()> {
let mut generated_origin = false;
println!("");
title("Habitat CLI Setup");
para("Welcome to hab setup. Let's get started.");
heading("Set up a default origin");
para("Every package in Habitat belongs to an origin, which indicates the person or \
organization responsible for maintaining that package. Each origin also has \
a key used to cryptographically sign packages in that origin.");
para("Selecting a default origin tells package building operations such as 'hab pkg \
build' what key should be used to sign the packages produced. If you do not \
set a default origin now, you will have to tell package building commands each \
time what origin to use.");
para("For more information on origins and how they are used in building packages, \
please consult the docs at https://www.habitat.sh/docs/create-packages-overview/");
if try!(ask_default_origin()) {
println!("");
para("Enter the name of your origin. If you plan to publish your packages publicly, \
we recommend that you select one that is not already in use on the Habitat \
build service found at https://app.habitat.sh/.");
let origin = try!(prompt_origin());
try!(write_cli_config_origin(&origin));
println!("");
if is_origin_in_cache(&origin, cache_path) {
para(&format!("You already have an origin key for {} created and installed. \
Great work!",
&origin));
} else {
heading("Create origin key pair");
para(&format!("It doesn't look like you have a signing key for the origin `{}'. \
Without it, you won't be able to build new packages successfully.",
&origin));
para("You can either create a new signing key now, or, if you are building \
packages for an origin that already exists, ask the owner to give you the \
signing key.");
para("For more information on the use of origin keys, please consult the \
documentation at https://www.habitat.sh/docs/concepts-keys/#origin-keys");
if try!(ask_create_origin(&origin)) {
try!(create_origin(&origin, cache_path));
generated_origin = true;
} else {
para(&format!("You might want to create an origin key later with: `hab \
origin key generate {}'",
&origin));
}
}
} else {
para("Okay, maybe another time.");
}
heading("GitHub Access Token");
para("While you can build and run Habitat packages without sharing them on the public \
depot, doing so allows you to collaborate with the Habitat community. In addition, \
it is how you can perform continuous deployment with Habitat.");
para("The depot uses GitHub authentication with an access token \
(https://help.github.com/articles/creating-an-access-token-for-command-line-use/).");
para("If you would like to share your packages on the depot, please enter your GitHub \
access token. Otherwise, just enter No.");
para("For more information on sharing packages on the depot, please read the \
documentation at https://www.habitat.sh/docs/share-packages-overview/");
if try!(ask_default_auth_token()) {
println!("");
para("Enter your GitHub access token.");
let auth_token = try!(prompt_auth_token());
try!(write_cli_config_auth_token(&auth_token));
} else {
para("Okay, maybe another time.");
}
heading("Analytics");
para("The `hab` command-line tool will optionally send anonymous usage data to Habitat's \
Google Analytics account. This is a strictly opt-in activity and no tracking will \
occur unless you respond affirmatively to the question below.");
para("We collect this data to help improve Habitat's user experience. For example, we \
would like to know the category of tasks users are performing, and which ones they \
are having trouble with (e.g. mistyping command line arguments).");
para("To see what kinds of data are sent and how they are anonymized, please read more \
about our analytics here: https://www.habitat.sh/docs/about-analytics/");
if try!(ask_enable_analytics(analytics_path)) {
try!(opt_in_analytics(analytics_path, generated_origin));
} else {
try!(opt_out_analytics(analytics_path));
}
heading("CLI Setup Complete");
para("That's all for now. Thanks for using Habitat!");
Ok(())
}
fn ask_default_origin() -> Result<bool> {
prompt_yes_no("Set up a default origin?", Some(true))
}
fn ask_create_origin(origin: &str) -> Result<bool> {
prompt_yes_no(&format!("Create an origin key for `{}'?", origin),
Some(true))
}
fn write_cli_config_origin(origin: &str) -> Result<()> {
let mut config = try!(config::load());
config.origin = Some(origin.to_string());
config::save(&config)
}
fn write_cli_config_auth_token(auth_token: &str) -> Result<()> {
let mut config = try!(config::load());
config.auth_token = Some(auth_token.to_string());
config::save(&config)
}
fn is_origin_in_cache(origin: &str, cache_path: &Path) -> bool {
match SigKeyPair::get_latest_pair_for(origin, cache_path) {
Ok(pair) => {
match pair.secret() {
Ok(_) => true,
_ => false,
}
}
_ => false,
}
}
fn create_origin(origin: &str, cache_path: &Path) -> Result<()> {
let result = command::origin::key::generate::start(&origin, cache_path);
println!("");
result
}
fn prompt_origin() -> Result<String> {
let config = try!(config::load());
let default = match config.origin {
Some(o) => {
para(&format!("You already have a default origin set up as `{}', but feel free \
to change it if you wish.",
&o));
Some(o)
}
None => env::var("USER").ok(),
};
prompt_ask("Default origin name", default.as_ref().map(|x| &**x))
}
fn ask_default_auth_token() -> Result<bool> {
prompt_yes_no("Set up a default GitHub access token?", Some(true))
}
fn prompt_auth_token() -> Result<String> {
let config = try!(config::load());
let default = match config.auth_token {
Some(o) => {
para("You already have a default auth token set up, but feel free to change it \
if you wish.");
Some(o)
}
None => None,
};
prompt_ask("GitHub access token", default.as_ref().map(|x| &**x))
}
fn ask_enable_analytics(analytics_path: &Path) -> Result<bool> {
let default = match analytics::is_opted_in(analytics_path) {
Some(val) => Some(val),
None => Some(true),
};
prompt_yes_no("Enable analytics?", default)
}
fn opt_in_analytics(analytics_path: &Path, generated_origin: bool) -> Result<()> {
let result = analytics::opt_in(analytics_path, generated_origin);
println!("");
result
}
fn opt_out_analytics(analytics_path: &Path) -> Result<()> {
let result = analytics::opt_out(analytics_path);
println!("");
result
}
fn title(text: &str) {
println!("{}", Green.bold().paint(text));
println!("{}\n",
Green.bold().paint(format!("{:=<width$}", "", width = text.chars().count())));
}
fn heading(text: &str) {
println!("{}\n", Green.bold().paint(text));
}
fn para(text: &str) {
print_wrapped(text, 75, 2)
}
fn print_wrapped(text: &str, wrap_width: usize, left_indent: usize) {
for line in text.split("\n\n") {
let mut buffer = String::new();
let mut width = 0;
for word in line.split_whitespace() {
let wl = word.chars().count();
if (width + wl + 1) > (wrap_width - left_indent) {
println!("{:<width$}{}", " ", buffer, width = left_indent);
buffer.clear();
width = 0;
}
width = width + wl + 1;
buffer.push_str(word);
buffer.push(' ');
}
if !buffer.is_empty() {
println!("{:<width$}{}", " ", buffer, width = left_indent);
}
println!("");
}
}
fn prompt_yes_no(question: &str, default: Option<bool>) -> Result<bool> {
let choice = match default {
Some(yes) => {
if yes {
format!("{}{}{}",
White.paint("["),
White.bold().paint("Yes"),
White.paint("/no/quit]"))
} else {
format!("{}{}{}",
White.paint("[yes/"),
White.bold().paint("No"),
White.paint("/quit]"))
// cli.rs
pub fn start(cache_path: &Path, analytics_path: &Path) -> Result<()> {
let mut generated_origin = false;
println!("");
title("Habitat CLI Setup");
para("Welcome to hab setup. Let's get started.");
heading("Set up a default origin");
para("Every package in Habitat belongs to an origin, which indicates the person or \
organization responsible for maintaining that package. Each origin also has \
a key used to cryptographically sign packages in that origin.");
para("Selecting a default origin tells package building operations such as 'hab pkg \
build' what key should be used to sign the packages produced. If you do not \
set a default origin now, you will have to tell package building commands each \
time what origin to use.");
para("For more information on origins and how they are used in building packages, \
please consult the docs at https://www.habitat.sh/docs/create-packages-overview/");
if try!(ask_default_origin()) {
println!("");
para("Enter the name of your origin. If you plan to publish your packages publicly, \
we recommend that you select one that is not already in use on the Habitat \
build service found at https://app.habitat.sh/.");
let origin = try!(prompt_origin());
try!(write_cli_config_origin(&origin));
println!("");
if is_origin_in_cache(&origin, cache_path) {
para(&format!("You already have an origin key for {} created and installed. \
Great work!",
&origin));
} else {
heading("Create origin key pair");
para(&format!("It doesn't look like you have a signing key for the origin `{}'. \
Without it, you won't be able to build new packages successfully.",
&origin));
para("You can either create a new signing key now, or, if you are building \
packages for an origin that already exists, ask the owner to give you the \
signing key.");
para("For more information on the use of origin keys, please consult the \
documentation at https://www.habitat.sh/docs/concepts-keys/#origin-keys");
if try!(ask_create_origin(&origin)) {
try!(create_origin(&origin, cache_path));
generated_origin = true;
} else {
para(&format!("You might want to create an origin key later with: `hab \
origin key generate {}'",
&origin));
}
}
} else {
para("Okay, maybe another time.");
}
heading("GitHub Access Token");
para("While you can build and run Habitat packages without sharing them on the public \
depot, doing so allows you to collaborate with the Habitat community. In addition, \
it is how you can perform continuous deployment with Habitat.");
para("The depot uses GitHub authentication with an access token \
(https://help.github.com/articles/creating-an-access-token-for-command-line-use/).");
para("If you would like to share your packages on the depot, please enter your GitHub \
access token. Otherwise, just enter No.");
para("For more information on sharing packages on the depot, please read the \
documentation at https://www.habitat.sh/docs/share-packages-overview/");
if try!(ask_default_auth_token()) {
println!("");
para("Enter your GitHub access token.");
let auth_token = try!(prompt_auth_token());
try!(write_cli_config_auth_token(&auth_token));
} else {
para("Okay, maybe another time.");
}
heading("Analytics");
para("The `hab` command-line tool will optionally send anonymous usage data to Habitat's \
Google Analytics account. This is a strictly opt-in activity and no tracking will \
occur unless you respond affirmatively to the question below.");
para("We collect this data to help improve Habitat's user experience. For example, we \
would like to know the category of tasks users are performing, and which ones they \
are having trouble with (e.g. mistyping command line arguments).");
para("To see what kinds of data are sent and how they are anonymized, please read more \
about our analytics here: https://www.habitat.sh/docs/about-analytics/");
if try!(ask_enable_analytics(analytics_path)) {
try!(opt_in_analytics(analytics_path, generated_origin));
} else {
try!(opt_out_analytics(analytics_path));
}
heading("CLI Setup Complete");
para("That's all for now. Thanks for using Habitat!");
Ok(())
}
fn ask_default_origin() -> Result<bool> {
prompt_yes_no("Set up a default origin?", Some(true))
}
fn ask_create_origin(origin: &str) -> Result<bool> {
prompt_yes_no(&format!("Create an origin key for `{}'?", origin),
Some(true))
}
fn write_cli_config_origin(origin: &str) -> Result<()> {
let mut config = try!(config::load());
config.origin = Some(origin.to_string());
config::save(&config)
}
fn write_cli_config_auth_token(auth_token: &str) -> Result<()> {
let mut config = try!(config::load());
config.auth_token = Some(auth_token.to_string());
config::save(&config)
}
fn is_origin_in_cache(origin: &str, cache_path: &Path) -> bool {
match SigKeyPair::get_latest_pair_for(origin, cache_path) {
Ok(pair) => {
match pair.secret() {
Ok(_) => true,
_ => false,
}
}
_ => false,
}
}
fn create_origin(origin: &str, cache_path: &Path) -> Result<()> {
let result = command::origin::key::generate::start(&origin, cache_path);
println!("");
result
}
fn prompt_origin() -> Result<String> {
let config = try!(config::load());
let default = match config.origin {
Some(o) => {
para(&format!("You already have a default origin set up as `{}', but feel free \
to change it if you wish.",
&o));
Some(o)
}
None => env::var("USER").ok(),
};
prompt_ask("Default origin name", default.as_ref().map(|x| &**x))
}
fn ask_default_auth_token() -> Result<bool> {
prompt_yes_no("Set up a default GitHub access token?", Some(true))
}
fn prompt_auth_token() -> Result<String> {
let config = try!(config::load());
let default = match config.auth_token {
Some(o) => {
para("You already have a default auth token set up, but feel free to change it \
if you wish.");
Some(o)
}
None => None,
};
prompt_ask("GitHub access token", default.as_ref().map(|x| &**x))
}
fn ask_enable_analytics(analytics_path: &Path) -> Result<bool> {
let default = match analytics::is_opted_in(analytics_path) {
Some(val) => Some(val),
None => Some(true),
};
prompt_yes_no("Enable analytics?", default)
}
fn opt_in_analytics(analytics_path: &Path, generated_origin: bool) -> Result<()> {
let result = analytics::opt_in(analytics_path, generated_origin);
println!("");
result
}
fn opt_out_analytics(analytics_path: &Path) -> Result<()> {
let result = analytics::opt_out(analytics_path);
println!("");
result
}
fn title(text: &str) {
println!("{}", Green.bold().paint(text));
println!("{}\n",
Green.bold().paint(format!("{:=<width$}", "", width = text.chars().count())));
}
fn heading(text: &str) {
println!("{}\n", Green.bold().paint(text));
}
fn para(text: &str) {
print_wrapped(text, 75, 2)
}
fn print_wrapped(text: &str, wrap_width: usize, left_indent: usize) {
for line in text.split("\n\n") {
let mut buffer = String::new();
let mut width = 0;
for word in line.split_whitespace() {
let wl = word.chars().count();
if (width + wl + 1) > (wrap_width - left_indent) {
println!("{:<width$}{}", " ", buffer, width = left_indent);
buffer.clear();
width = 0;
}
width = width + wl + 1;
buffer.push_str(word);
buffer.push(' ');
}
if !buffer.is_empty() {
println!("{:<width$}{}", " ", buffer, width = left_indent);
}
println!("");
}
}
fn prompt_yes_no(question: &str, default: Option<bool>) -> Result<bool> {
let choice = match default {
Some(yes) => {
if yes {
format!("{}{}{}",
White.paint("["),
White.bold().paint("Yes"),
White.paint("/no/quit]"))
} else {
format!("{}{}{}",
White.paint("[yes/"),
White.bold().paint("No"),
White.paint("/quit]"))
}
}
None => format!("{}", White.paint("[yes/no/quit]")),
};
loop {
try!(io::stdout().flush());
print!("{} {} ", Cyan.paint(question), choice);
try!(io::stdout().flush());
let mut response = String::new();
|
// cli.rs
use std::path::Path;
use std::process;
use ansi_term::Colour::{Cyan, Green, White};
use hcore::crypto::SigKeyPair;
use hcore::env;
use analytics;
use command;
use config;
use error::Result;
pub fn start(cache_path: &Path, analytics_path: &Path) -> Result<()> {
let mut generated_origin = false;
println!("");
title("Habitat CLI Setup");
para("Welcome to hab setup. Let's get started.");
heading("Set up a default origin");
para("Every package in Habitat belongs to an origin, which indicates the person or \
organization responsible for maintaining that package. Each origin also has \
a key used to cryptographically sign packages in that origin.");
para("Selecting a default origin tells package building operations such as 'hab pkg \
build' what key should be used to sign the packages produced. If you do not \
set a default origin now, you will have to tell package building commands each \
time what origin to use.");
para("For more information on origins and how they are used in building packages, \
please consult the docs at https://www.habitat.sh/docs/create-packages-overview/");
if try!(ask_default_origin()) {
println!("");
para("Enter the name of your origin. If you plan to publish your packages publicly, \
we recommend that you select one that is not already in use on the Habitat \
build service found at https://app.habitat.sh/.");
let origin = try!(prompt_origin());
try!(write_cli_config_origin(&origin));
println!("");
if is_origin_in_cache(&origin, cache_path) {
para(&format!("You already have an origin key for {} created and installed. \
Great work!",
&origin));
} else {
heading("Create origin key pair");
para(&format!("It doesn't look like you have a signing key for the origin `{}'. \
Without it, you won't be able to build new packages successfully.",
&origin));
para("You can either create a new signing key now, or, if you are building \
packages for an origin that already exists, ask the owner to give you the \
signing key.");
para("For more information on the use of origin keys, please consult the \
documentation at https://www.habitat.sh/docs/concepts-keys/#origin-keys");
if try!(ask_create_origin(&origin)) {
try!(create_origin(&origin, cache_path));
generated_origin = true;
} else {
para(&format!("You might want to create an origin key later with: `hab \
origin key generate {}'",
&origin));
}
}
} else {
para("Okay, maybe another time.");
}
heading("GitHub Access Token");
para("While you can build and run Habitat packages without sharing them on the public \
depot, doing so allows you to collaborate with the Habitat community. In addition, \
it is how you can perform continuous deployment with Habitat.");
para("The depot uses GitHub authentication with an access token \
(https://help.github.com/articles/creating-an-access-token-for-command-line-use/).");
para("If you would like to share your packages on the depot, please enter your GitHub \
access token. Otherwise, just enter No.");
para("For more information on sharing packages on the depot, please read the \
documentation at https://www.habitat.sh/docs/share-packages-overview/");
if try!(ask_default_auth_token()) {
println!("");
para("Enter your GitHub access token.");
let auth_token = try!(prompt_auth_token());
try!(write_cli_config_auth_token(&auth_token));
} else {
para("Okay, maybe another time.");
}
heading("Analytics");
para("The `hab` command-line tool will optionally send anonymous usage data to Habitat's \
Google Analytics account. This is a strictly opt-in activity and no tracking will \
occur unless you respond affirmatively to the question below.");
para("We collect this data to help improve Habitat's user experience. For example, we \
would like to know the category of tasks users are performing, and which ones they \
are having trouble with (e.g. mistyping command line arguments).");
para("To see what kinds of data are sent and how they are anonymized, please read more \
about our analytics here: https://www.habitat.sh/docs/about-analytics/");
if try!(ask_enable_analytics(analytics_path)) {
try!(opt_in_analytics(analytics_path, generated_origin));
} else {
try!(opt_out_analytics(analytics_path));
}
heading("CLI Setup Complete");
para("That's all for now. Thanks for using Habitat!");
Ok(())
}
fn ask_default_origin() -> Result<bool> {
prompt_yes_no("Set up a default origin?", Some(true))
}
fn ask_create_origin(origin: &str) -> Result<bool> {
prompt_yes_no(&format!("Create an origin key for `{}'?", origin),
Some(true))
}
fn write_cli_config_origin(origin: &str) -> Result<()> {
let mut config = try!(config::load());
config.origin = Some(origin.to_string());
config::save(&config)
}
fn write_cli_config_auth_token(auth_token: &str) -> Result<()> {
let mut config = try!(config::load());
config.auth_token = Some(auth_token.to_string());
config::save(&config)
}
fn is_origin_in_cache(origin: &str, cache_path: &Path) -> bool {
match SigKeyPair::get_latest_pair_for(origin, cache_path) {
Ok(pair) => {
match pair.secret() {
Ok(_) => true,
_ => false,
}
}
_ => false,
}
}
fn create_origin(origin: &str, cache_path: &Path) -> Result<()> {
let result = command::origin::key::generate::start(&origin, cache_path);
println!("");
result
}
fn prompt_origin() -> Result<String> {
let config = try!(config::load());
let default = match config.origin {
Some(o) => {
para(&format!("You already have a default origin set up as `{}', but feel free \
to change it if you wish.",
&o));
Some(o)
}
None => env::var("USER").ok(),
};
prompt_ask("Default origin name", default.as_ref().map(|x| &**x))
}
fn ask_default_auth_token() -> Result<bool> {
prompt_yes_no("Set up a default GitHub access token?", Some(true))
}
fn prompt_auth_token() -> Result<String> {
let config = try!(config::load());
let default = match config.auth_token {
Some(o) => {
para("You already have a default auth token set up, but feel free to change it \
if you wish.");
Some(o)
}
None => None,
};
prompt_ask("GitHub access token", default.as_ref().map(|x| &**x))
}
fn ask_enable_analytics(analytics_path: &Path) -> Result<bool> {
let default = match analytics::is_opted_in(analytics_path) {
Some(val) => Some(val),
None => Some(true),
};
prompt_yes_no("Enable analytics?", default)
}
fn opt_in_analytics(analytics_path: &Path, generated_origin: bool) -> Result<()> {
let result = analytics::opt_in(analytics_path, generated_origin);
println!("");
result
}
fn opt_out_analytics(analytics_path: &Path) -> Result<()> {
let result = analytics::opt_out(analytics_path);
println!("");
result
}
fn title(text: &str) {
println!("{}", Green.bold().paint(text));
println!("{}\n",
Green.bold().paint(format!("{:=<width$}", "", width = text.chars().count())));
}
fn heading(text: &str) {
println!("{}\n", Green.bold().paint(text));
}
fn para(text: &str) {
print_wrapped(text, 75, 2)
}
fn print_wrapped(text: &str, wrap_width: usize, left_indent: usize) {
for line in text.split("\n\n") {
let mut buffer = String::new();
let mut width = 0;
for word in line.split_whitespace() {
let wl = word.chars().count();
if (width + wl + 1) > (wrap_width - left_indent) {
println!("{:<width$}{}", " ", buffer, width = left_indent);
buffer.clear();
width = 0;
}
width = width + wl + 1;
buffer.push_str(word);
buffer.push(' ');
}
if !buffer.is_empty() {
println!("{:<width$}{}", " ", buffer, width = left_indent);
}
println!("");
}
}
fn prompt_yes_no(question: &str, default: Option<bool>) -> Result<bool> {
let choice = match default {
Some(yes) => {
if yes {
format!("{}{}{}",
White.paint("["),
White.bold().paint("Yes"),
White.paint("/no/quit]"))
} else {
format!("{}{}{}",
White.paint("[yes/"),
White.bold().paint("No"),
White.paint("/quit]"))
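print_wrapped above is a greedy word wrapper: it accumulates words until the next one would push the line past wrap_width minus the indent, then flushes the buffer as one indented line. A small Go illustration of the same algorithm, not a literal port of the Rust function:

```go
package main

import (
	"fmt"
	"strings"
)

// wrap greedily packs words into lines of at most width-indent characters
// and prefixes each line with indent spaces.
func wrap(text string, width, indent int) []string {
	var out []string
	var buf []string
	used := 0
	for _, word := range strings.Fields(text) {
		wl := len([]rune(word))
		if used+wl+1 > width-indent && len(buf) > 0 {
			out = append(out, strings.Repeat(" ", indent)+strings.Join(buf, " "))
			buf, used = nil, 0
		}
		buf = append(buf, word)
		used += wl + 1
	}
	if len(buf) > 0 {
		out = append(out, strings.Repeat(" ", indent)+strings.Join(buf, " "))
	}
	return out
}

func main() {
	for _, line := range wrap("Welcome to hab setup. Let's get started.", 20, 2) {
		fmt.Println(line)
	}
}
```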
// main.go
case syscall.SIGTERM, syscall.SIGSTOP, syscall.SIGINT:
shutdownServer()
case syscall.SIGHUP:
// TODO reload
default:
return
}
}
}
func initConfig(configFile, entrypoint, starthash string) error {
conf, err := globalconf.NewWithOptions(&globalconf.Options{
Filename: configFile,
})
if err != nil {
return err
}
basicFlagSet := flag.NewFlagSet("basic", flag.PanicOnError)
basicFlagSet.String("bind_ip", "127.0.0.1", "server bind ip")
basicFlagSet.String("workdir", ".", "server working dir")
basicFlagSet.String("log_level", "DEBUG", "log level")
basicFlagSet.String("log_file", "./datanode.log", "log file path")
basicFlagSet.String("pid_file", "./datanode_pid", "pid file")
basicFlagSet.Int("tcp_recvbuf_size", 2048, "tcp receive buffer size")
basicFlagSet.Int("tcp_sendbuf_size", 2048, "tcp send buffer size")
basicFlagSet.Int("tcp_bufio_num", 64, "bufio num for each cache instance")
serfFlagSet := flag.NewFlagSet("serf", flag.PanicOnError)
serfFlagSet.String("bin_path", "/usr/local/bin/serf", "serf bin path")
serfFlagSet.String("node_name", "serf0101", "serf node name")
serfFlagSet.Int("bind_port", 7946, "serf bind port")
serfFlagSet.String("rpc_addr", "127.0.0.1:7373", "serf rpc addr")
serfFlagSet.String("ev_handler", "./serfev_handler.py", "serf event handler")
serfFlagSet.String("log_file", "./serf.log", "serf log file")
chordFlagSet := flag.NewFlagSet("chord", flag.PanicOnError)
chordFlagSet.String("hostname", "chod0101", "chord hostname")
chordFlagSet.Int("bind_port", 5000, "chord bind port")
chordFlagSet.Int("rpc_port", 5500, "chord rpc port")
chordFlagSet.Int("num_vnodes", 16, "chord virtual node numbers")
chordFlagSet.Int("num_successors", 3, "chord successor node numbers")
chordFlagSet.Int("hash_bits", 160, "chord hash bits")
etcdFlagSet := flag.NewFlagSet("etcd", flag.PanicOnError)
etcdFlagSet.String("machines", "http://localhost:4001", "etcd machines")
etcdFlagSet.Int("pool_size", 4, "initial etcd client pool size")
etcdFlagSet.Int("max_pool_size", 64, "max etcd client pool size")
redisFlagSet := flag.NewFlagSet("redis", flag.PanicOnError)
redisFlagSet.String("meta_redis_addr", "tcp@localhost:6379", "meta redis address")
redisFlagSet.String("attr_redis_addr", "tcp@localhost:6479",
"attr redis address list. different group is filtered by ';'")
redisFlagSet.String("max_idle", "50", "redis pool max idle clients")
redisFlagSet.String("max_active", "100", "redis pool max active clients")
redisFlagSet.String("timeout", "3600", "close idle redis client after timeout")
globalconf.Register("basic", basicFlagSet)
globalconf.Register("serf", serfFlagSet)
globalconf.Register("chord", chordFlagSet)
globalconf.Register("etcd", etcdFlagSet)
globalconf.Register("redis", redisFlagSet)
conf.ParseAll()
Config = &SrvConfig{}
Config.BindIP = basicFlagSet.Lookup("bind_ip").Value.String()
Config.Workdir = basicFlagSet.Lookup("workdir").Value.String()
Config.LogLevel = basicFlagSet.Lookup("log_level").Value.String()
Config.LogFile = basicFlagSet.Lookup("log_file").Value.String()
Config.PidFile = basicFlagSet.Lookup("pid_file").Value.String()
Config.TCPRecvBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_recvbuf_size").Value.String())
Config.TCPSendBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_sendbuf_size").Value.String())
Config.TCPBufioNum, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_bufio_num").Value.String())
Config.TCPBufInsNum = runtime.NumCPU()
Config.HashFunc = sha1.New
Config.SerfBinPath = serfFlagSet.Lookup("bin_path").Value.String()
Config.SerfNodeName = serfFlagSet.Lookup("node_name").Value.String()
Config.SerfBindPort, err =
strconv.Atoi(serfFlagSet.Lookup("bind_port").Value.String())
Config.SerfBindAddr = fmt.Sprintf("0.0.0.0:%d", Config.SerfBindPort)
Config.SerfRPCAddr = serfFlagSet.Lookup("rpc_addr").Value.String()
Config.SerfEvHandler = serfFlagSet.Lookup("ev_handler").Value.String()
Config.SerfLogFile = serfFlagSet.Lookup("log_file").Value.String()
Config.Hostname = chordFlagSet.Lookup("hostname").Value.String()
Config.BindPort, err =
strconv.Atoi(chordFlagSet.Lookup("bind_port").Value.String())
Config.BindAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.BindPort)
Config.RPCPort, err =
strconv.Atoi(chordFlagSet.Lookup("rpc_port").Value.String())
Config.RPCAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.RPCPort)
Config.NumVnodes, err =
strconv.Atoi(chordFlagSet.Lookup("num_vnodes").Value.String())
Config.NumSuccessors, err =
strconv.Atoi(chordFlagSet.Lookup("num_successors").Value.String())
Config.HashBits, err =
strconv.Atoi(chordFlagSet.Lookup("hash_bits").Value.String())
machines := etcdFlagSet.Lookup("machines").Value.String()
Config.EtcdMachines = strings.Split(machines, ",")
Config.EtcdPoolSize, err =
strconv.Atoi(etcdFlagSet.Lookup("pool_size").Value.String())
Config.EtcdPoolMaxSize, err =
strconv.Atoi(etcdFlagSet.Lookup("max_pool_size").Value.String())
Config.MetaRedisAddr = redisFlagSet.Lookup("meta_redis_addr").Value.String()
attrAddrs := redisFlagSet.Lookup("attr_redis_addr").Value.String()
Config.AttrRedisAddrs = strings.Split(attrAddrs, ";")
Config.RedisMaxIdle, err =
strconv.Atoi(redisFlagSet.Lookup("max_idle").Value.String())
Config.RedisMaxActive, err =
strconv.Atoi(redisFlagSet.Lookup("max_active").Value.String())
Config.RedisIdleTimeout, err =
strconv.Atoi(redisFlagSet.Lookup("timeout").Value.String())
Config.Entrypoint = entrypoint
if len(starthash)%2 == 1 {
starthash = "0" + starthash
}
if sh, err := hex.DecodeString(starthash); err != nil {
return err
} else if len(sh) != Config.HashBits/8 {
return fmt.Errorf("error starthash hex string length %d, should be %d",
len(starthash), Config.HashBits/8*2)
} else {
Config.StartHash = sh[:]
}
return nil
}
func initLog(logFile, serfLogFile, logLevel string) error {
var format = logging.MustStringFormatter(
"%{time:2006-01-02 15:04:05.000} [%{level:.4s}] %{id:03x} [%{shortfunc}] %{message}",
)
f, err := os.OpenFile(logFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
backend1 := logging.NewLogBackend(f, "", 0)
backend1Formatter := logging.NewBackendFormatter(backend1, format)
backend1Leveled := logging.AddModuleLevel(backend1Formatter)
backend1Leveled.SetLevel(dmq.LogLevelMap[logLevel], "")
logging.SetBackend(backend1Leveled)
serfLog, err = os.OpenFile(serfLogFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
return nil
}
func initServer() error {
log.Info("Datanode server is starting...")
EtcdCliPool = dmq.NewEtcdClientPool(
Config.EtcdMachines, Config.EtcdPoolSize, Config.EtcdPoolMaxSize)
DispConns = make(map[string]*DispConn)
var err error
CurDispNode, err = allocateDispNode(EtcdCliPool)
if err != nil {
return err
}
rcfg := dmq.NewRedisConfig(Config.MetaRedisAddr, Config.RedisMaxIdle,
Config.RedisMaxActive, Config.RedisIdleTimeout)
MetaRCPool, err = dmq.NewRedisCliPool(rcfg)
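initConfig above pads an odd-length starthash hex string with a leading zero, decodes it, and requires the decoded length to equal hash_bits/8 bytes. A compact Go sketch of just that validation; the function name is made up for the example:

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// decodeStartHash pads odd-length hex input, decodes it, and checks the
// decoded byte length against hashBits/8, mirroring the checks above.
func decodeStartHash(starthash string, hashBits int) ([]byte, error) {
	if len(starthash)%2 == 1 {
		starthash = "0" + starthash
	}
	sh, err := hex.DecodeString(starthash)
	if err != nil {
		return nil, err
	}
	if len(sh) != hashBits/8 {
		return nil, fmt.Errorf("starthash is %d hex chars, want %d", len(starthash), hashBits/8*2)
	}
	return sh, nil
}

func main() {
	b, err := decodeStartHash("ab1", 160) // too short for 160-bit hashes: returns an error
	fmt.Println(b, err)
}
```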
// main.go
func initConfig(configFile, entrypoint, starthash string) error {
conf, err := globalconf.NewWithOptions(&globalconf.Options{
Filename: configFile,
})
if err != nil {
return err
}
basicFlagSet := flag.NewFlagSet("basic", flag.PanicOnError)
basicFlagSet.String("bind_ip", "127.0.0.1", "server bind ip")
basicFlagSet.String("workdir", ".", "server working dir")
basicFlagSet.String("log_level", "DEBUG", "log level")
basicFlagSet.String("log_file", "./datanode.log", "log file path")
basicFlagSet.String("pid_file", "./datanode_pid", "pid file")
basicFlagSet.Int("tcp_recvbuf_size", 2048, "tcp receive buffer size")
basicFlagSet.Int("tcp_sendbuf_size", 2048, "tcp send buffer size")
basicFlagSet.Int("tcp_bufio_num", 64, "bufio num for each cache instance")
serfFlagSet := flag.NewFlagSet("serf", flag.PanicOnError)
serfFlagSet.String("bin_path", "/usr/local/bin/serf", "serf bin path")
serfFlagSet.String("node_name", "serf0101", "serf node name")
serfFlagSet.Int("bind_port", 7946, "serf bind port")
serfFlagSet.String("rpc_addr", "127.0.0.1:7373", "serf rpc addr")
serfFlagSet.String("ev_handler", "./serfev_handler.py", "serf event handler")
serfFlagSet.String("log_file", "./serf.log", "serf log file")
chordFlagSet := flag.NewFlagSet("chord", flag.PanicOnError)
chordFlagSet.String("hostname", "chod0101", "chord hostname")
chordFlagSet.Int("bind_port", 5000, "chord bind port")
chordFlagSet.Int("rpc_port", 5500, "chord rpc port")
chordFlagSet.Int("num_vnodes", 16, "chord virtual node numbers")
chordFlagSet.Int("num_successors", 3, "chord successor node numbers")
chordFlagSet.Int("hash_bits", 160, "chord hash bits")
etcdFlagSet := flag.NewFlagSet("etcd", flag.PanicOnError)
etcdFlagSet.String("machines", "http://localhost:4001", "etcd machines")
etcdFlagSet.Int("pool_size", 4, "initial etcd client pool size")
etcdFlagSet.Int("max_pool_size", 64, "max etcd client pool size")
redisFlagSet := flag.NewFlagSet("redis", flag.PanicOnError)
redisFlagSet.String("meta_redis_addr", "tcp@localhost:6379", "meta redis address")
redisFlagSet.String("attr_redis_addr", "tcp@localhost:6479",
"attr redis address list. different group is filtered by ';'")
redisFlagSet.String("max_idle", "50", "redis pool max idle clients")
redisFlagSet.String("max_active", "100", "redis pool max active clients")
redisFlagSet.String("timeout", "3600", "close idle redis client after timeout")
globalconf.Register("basic", basicFlagSet)
globalconf.Register("serf", serfFlagSet)
globalconf.Register("chord", chordFlagSet)
globalconf.Register("etcd", etcdFlagSet)
globalconf.Register("redis", redisFlagSet)
conf.ParseAll()
Config = &SrvConfig{}
Config.BindIP = basicFlagSet.Lookup("bind_ip").Value.String()
Config.Workdir = basicFlagSet.Lookup("workdir").Value.String()
Config.LogLevel = basicFlagSet.Lookup("log_level").Value.String()
Config.LogFile = basicFlagSet.Lookup("log_file").Value.String()
Config.PidFile = basicFlagSet.Lookup("pid_file").Value.String()
Config.TCPRecvBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_recvbuf_size").Value.String())
Config.TCPSendBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_sendbuf_size").Value.String())
Config.TCPBufioNum, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_bufio_num").Value.String())
Config.TCPBufInsNum = runtime.NumCPU()
Config.HashFunc = sha1.New
Config.SerfBinPath = serfFlagSet.Lookup("bin_path").Value.String()
Config.SerfNodeName = serfFlagSet.Lookup("node_name").Value.String()
Config.SerfBindPort, err =
strconv.Atoi(serfFlagSet.Lookup("bind_port").Value.String())
Config.SerfBindAddr = fmt.Sprintf("0.0.0.0:%d", Config.SerfBindPort)
Config.SerfRPCAddr = serfFlagSet.Lookup("rpc_addr").Value.String()
Config.SerfEvHandler = serfFlagSet.Lookup("ev_handler").Value.String()
Config.SerfLogFile = serfFlagSet.Lookup("log_file").Value.String()
Config.Hostname = chordFlagSet.Lookup("hostname").Value.String()
Config.BindPort, err =
strconv.Atoi(chordFlagSet.Lookup("bind_port").Value.String())
Config.BindAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.BindPort)
Config.RPCPort, err =
strconv.Atoi(chordFlagSet.Lookup("rpc_port").Value.String())
Config.RPCAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.RPCPort)
Config.NumVnodes, err =
strconv.Atoi(chordFlagSet.Lookup("num_vnodes").Value.String())
Config.NumSuccessors, err =
strconv.Atoi(chordFlagSet.Lookup("num_successors").Value.String())
Config.HashBits, err =
strconv.Atoi(chordFlagSet.Lookup("hash_bits").Value.String())
machines := etcdFlagSet.Lookup("machines").Value.String()
Config.EtcdMachines = strings.Split(machines, ",")
Config.EtcdPoolSize, err =
strconv.Atoi(etcdFlagSet.Lookup("pool_size").Value.String())
Config.EtcdPoolMaxSize, err =
strconv.Atoi(etcdFlagSet.Lookup("max_pool_size").Value.String())
Config.MetaRedisAddr = redisFlagSet.Lookup("meta_redis_addr").Value.String()
attrAddrs := redisFlagSet.Lookup("attr_redis_addr").Value.String()
Config.AttrRedisAddrs = strings.Split(attrAddrs, ";")
Config.RedisMaxIdle, err =
strconv.Atoi(redisFlagSet.Lookup("max_idle").Value.String())
Config.RedisMaxActive, err =
strconv.Atoi(redisFlagSet.Lookup("max_active").Value.String())
Config.RedisIdleTimeout, err =
strconv.Atoi(redisFlagSet.Lookup("timeout").Value.String())
Config.Entrypoint = entrypoint
if len(starthash)%2 == 1 {
starthash = "0" + starthash
}
if sh, err := hex.DecodeString(starthash); err != nil {
return err
} else if len(sh) != Config.HashBits/8 {
return fmt.Errorf("error starthash hex string length %d, should be %d",
len(starthash), Config.HashBits/8*2)
} else {
Config.StartHash = sh[:]
}
return nil
}
func initLog(logFile, serfLogFile, logLevel string) error {
var format = logging.MustStringFormatter(
"%{time:2006-01-02 15:04:05.000} [%{level:.4s}] %{id:03x} [%{shortfunc}] %{message}",
)
f, err := os.OpenFile(logFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
backend1 := logging.NewLogBackend(f, "", 0)
backend1Formatter := logging.NewBackendFormatter(backend1, format)
backend1Leveled := logging.AddModuleLevel(backend1Formatter)
backend1Leveled.SetLevel(dmq.LogLevelMap[logLevel], "")
logging.SetBackend(backend1Leveled)
serfLog, err = os.OpenFile(serfLogFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
return nil
}
func initServer() error {
log.Info("Datanode server is starting...")
EtcdCliPool = dmq.NewEtcdClientPool(
Config.EtcdMachines, Config.EtcdPoolSize, Config.EtcdPoolMaxSize)
DispConns = make(map[string]*DispConn)
var err error
CurDispNode, err = allocateDispNode(EtcdCliPool)
if err != nil {
return err
}
rcfg := dmq.NewRedisConfig(Config.MetaRedisAddr, Config.RedisMaxIdle,
Config.RedisMaxActive, Config.RedisIdleTimeout)
MetaRCPool, err = dmq.NewRedisCliPool(rcfg)
if err != nil {
return err
}
return nil
}
func shutdownServer() {
log.Info("Datanode stop...")
unRegisterDN(Config, ChordNode, EtcdCliPool)
os.Exit(0)
}
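The case labels at the top of these main.go fragments dispatch on process signals: shut the server down on TERM/STOP/INT, treat HUP as a reload request, and return otherwise. The surrounding loop is outside the excerpt, so the sketch below assumes a conventional signal-channel loop; SIGSTOP cannot actually be trapped, so it is left out here:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// handleSignals blocks on a signal channel and dispatches on the value,
// in the spirit of the switch shown in the fragments above.
func handleSignals(shutdown func()) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGTERM, syscall.SIGINT, syscall.SIGHUP)
	for s := range c {
		switch s {
		case syscall.SIGTERM, syscall.SIGINT:
			shutdown()
			return
		case syscall.SIGHUP:
			fmt.Println("reload requested") // TODO reload, as in the original
		default:
			return
		}
	}
}

func main() {
	// Blocks until the process receives TERM, INT, or HUP.
	handleSignals(func() { fmt.Println("stopping") })
}
```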
|
// main.go
case syscall.SIGTERM, syscall.SIGSTOP, syscall.SIGINT:
shutdownServer()
case syscall.SIGHUP:
// TODO reload
default:
return
}
}
}
func initConfig(configFile, entrypoint, starthash string) error {
conf, err := globalconf.NewWithOptions(&globalconf.Options{
Filename: configFile,
})
if err != nil {
return err
}
basicFlagSet := flag.NewFlagSet("basic", flag.PanicOnError)
basicFlagSet.String("bind_ip", "127.0.0.1", "server bind ip")
basicFlagSet.String("workdir", ".", "server working dir")
basicFlagSet.String("log_level", "DEBUG", "log level")
basicFlagSet.String("log_file", "./datanode.log", "log file path")
basicFlagSet.String("pid_file", "./datanode_pid", "pid file")
basicFlagSet.Int("tcp_recvbuf_size", 2048, "tcp receive buffer size")
basicFlagSet.Int("tcp_sendbuf_size", 2048, "tcp send buffer size")
basicFlagSet.Int("tcp_bufio_num", 64, "bufio num for each cache instance")
serfFlagSet := flag.NewFlagSet("serf", flag.PanicOnError)
serfFlagSet.String("bin_path", "/usr/local/bin/serf", "serf bin path")
serfFlagSet.String("node_name", "serf0101", "serf node name")
serfFlagSet.Int("bind_port", 7946, "serf bind port")
serfFlagSet.String("rpc_addr", "127.0.0.1:7373", "serf rpc addr")
serfFlagSet.String("ev_handler", "./serfev_handler.py", "serf event handler")
serfFlagSet.String("log_file", "./serf.log", "serf log file")
chordFlagSet := flag.NewFlagSet("chord", flag.PanicOnError)
chordFlagSet.String("hostname", "chod0101", "chord hostname")
chordFlagSet.Int("bind_port", 5000, "chord bind port")
chordFlagSet.Int("rpc_port", 5500, "chord rpc port")
chordFlagSet.Int("num_vnodes", 16, "chord virtual node numbers")
chordFlagSet.Int("num_successors", 3, "chord successor node numbers")
chordFlagSet.Int("hash_bits", 160, "chord hash bits")
etcdFlagSet := flag.NewFlagSet("etcd", flag.PanicOnError)
etcdFlagSet.String("machines", "http://localhost:4001", "etcd machines")
etcdFlagSet.Int("pool_size", 4, "initial etcd client pool size")
etcdFlagSet.Int("max_pool_size", 64, "max etcd client pool size")
redisFlagSet := flag.NewFlagSet("redis", flag.PanicOnError)
redisFlagSet.String("meta_redis_addr", "tcp@localhost:6379", "meta redis address")
redisFlagSet.String("attr_redis_addr", "tcp@localhost:6479",
"attr redis address list. different group is filtered by ';'")
redisFlagSet.String("max_idle", "50", "redis pool max idle clients")
redisFlagSet.String("max_active", "100", "redis pool max active clients")
redisFlagSet.String("timeout", "3600", "close idle redis client after timeout")
globalconf.Register("basic", basicFlagSet)
globalconf.Register("serf", serfFlagSet)
globalconf.Register("chord", chordFlagSet)
globalconf.Register("etcd", etcdFlagSet)
globalconf.Register("redis", redisFlagSet)
conf.ParseAll()
Config = &SrvConfig{}
Config.BindIP = basicFlagSet.Lookup("bind_ip").Value.String()
Config.Workdir = basicFlagSet.Lookup("workdir").Value.String()
Config.LogLevel = basicFlagSet.Lookup("log_level").Value.String()
Config.LogFile = basicFlagSet.Lookup("log_file").Value.String()
Config.PidFile = basicFlagSet.Lookup("pid_file").Value.String()
Config.TCPRecvBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_recvbuf_size").Value.String())
Config.TCPSendBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_sendbuf_size").Value.String())
Config.TCPBufioNum, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_bufio_num").Value.String())
Config.TCPBufInsNum = runtime.NumCPU()
Config.HashFunc = sha1.New
Config.SerfBinPath = serfFlagSet.Lookup("bin_path").Value.String()
Config.SerfNodeName = serfFlagSet.Lookup("node_name").Value.String()
Config.SerfBindPort, err =
strconv.Atoi(serfFlagSet.Lookup("bind_port").Value.String())
Config.SerfBindAddr = fmt.Sprintf("0.0.0.0:%d", Config.SerfBindPort)
Config.SerfRPCAddr = serfFlagSet.Lookup("rpc_addr").Value.String()
Config.SerfEvHandler = serfFlagSet.Lookup("ev_handler").Value.String()
Config.SerfLogFile = serfFlagSet.Lookup("log_file").Value.String()
Config.Hostname = chordFlagSet.Lookup("hostname").Value.String()
Config.BindPort, err =
strconv.Atoi(chordFlagSet.Lookup("bind_port").Value.String())
Config.BindAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.BindPort)
Config.RPCPort, err =
strconv.Atoi(chordFlagSet.Lookup("rpc_port").Value.String())
Config.RPCAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.RPCPort)
Config.NumVnodes, err =
strconv.Atoi(chordFlagSet.Lookup("num_vnodes").Value.String())
Config.NumSuccessors, err =
strconv.Atoi(chordFlagSet.Lookup("num_successors").Value.String())
Config.HashBits, err =
strconv.Atoi(chordFlagSet.Lookup("hash_bits").Value.String())
machines := etcdFlagSet.Lookup("machines").Value.String()
Config.EtcdMachines = strings.Split(machines, ",")
Config.EtcdPoolSize, err =
strconv.Atoi(etcdFlagSet.Lookup("pool_size").Value.String())
Config.EtcdPoolMaxSize, err =
strconv.Atoi(etcdFlagSet.Lookup("max_pool_size").Value.String())
Config.MetaRedisAddr = redisFlagSet.Lookup("meta_redis_addr").Value.String()
attrAddrs := redisFlagSet.Lookup("attr_redis_addr").Value.String()
Config.AttrRedisAddrs = strings.Split(attrAddrs, ";")
Config.RedisMaxIdle, err =
strconv.Atoi(redisFlagSet.Lookup("max_idle").Value.String())
Config.RedisMaxActive, err =
strconv.Atoi(redisFlagSet.Lookup("max_active").Value.String())
Config.RedisIdleTimeout, err =
strconv.Atoi(redisFlagSet.Lookup("timeout").Value.String())
Config.Entrypoint = entrypoint
if len(starthash)%2 == 1 {
starthash = "0" + starthash
}
if sh, err := hex.DecodeString(starthash); err != nil {
return err
} else if len(sh) != Config.HashBits/8 {
return fmt.Errorf("error starthash hex string length %d, should be %d",
len(starthash), Config.HashBits/8*2)
} else {
Config.StartHash = sh[:]
}
return nil
}
func initLog(logFile, serfLogFile, logLevel string) error {
var format = logging.MustStringFormatter(
"%{time:2006-01-02 15:04:05.000} [%{level:.4s}] %{id:03x} [%{shortfunc}] %{message}",
)
f, err := os.OpenFile(logFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
backend1 := logging.NewLogBackend(f, "", 0)
backend1Formatter := logging.NewBackendFormatter(backend1, format)
backend1Leveled := logging.AddModuleLevel(backend1Formatter)
backend1Leveled.SetLevel(dmq.LogLevelMap[logLevel], "")
logging.SetBackend(backend1Leveled)
serfLog, err = os.OpenFile(serfLogFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
return nil
}
func initServer() error {
log.Info("Datanode server is starting...")
EtcdCliPool = dmq.NewEtcdClientPool(
Config.EtcdMachines, Config.EtcdPoolSize, Config.EtcdPoolMaxSize)
DispConns = make(map[string]*DispConn)
var err error
CurDispNode, err = allocateDispNode(EtcdCliPool)
if err != nil {
return err
}
rcfg := dmq.NewRedisConfig(Config.MetaRedisAddr, Config.RedisMaxIdle,
Config.RedisMaxActive, Config.RedisIdleTimeout)
MetaRCPool, err = dmq.NewRedisCliPool(rcfg)
// main.go
case syscall.SIGTERM, syscall.SIGSTOP, syscall.SIGINT:
shutdownServer()
case syscall.SIGHUP:
// TODO reload
default:
return
}
}
}
func initConfig(configFile, entrypoint, starthash string) error {
conf, err := globalconf.NewWithOptions(&globalconf.Options{
Filename: configFile,
})
if err != nil {
return err
}
basicFlagSet := flag.NewFlagSet("basic", flag.PanicOnError)
basicFlagSet.String("bind_ip", "127.0.0.1", "server bind ip")
basicFlagSet.String("workdir", ".", "server working dir")
basicFlagSet.String("log_level", "DEBUG", "log level")
basicFlagSet.String("log_file", "./datanode.log", "log file path")
basicFlagSet.String("pid_file", "./datanode_pid", "pid file")
basicFlagSet.Int("tcp_recvbuf_size", 2048, "tcp receive buffer size")
basicFlagSet.Int("tcp_sendbuf_size", 2048, "tcp send buffer size")
basicFlagSet.Int("tcp_bufio_num", 64, "bufio num for each cache instance")
serfFlagSet := flag.NewFlagSet("serf", flag.PanicOnError)
serfFlagSet.String("bin_path", "/usr/local/bin/serf", "serf bin path")
serfFlagSet.String("node_name", "serf0101", "serf node name")
serfFlagSet.Int("bind_port", 7946, "serf bind port")
serfFlagSet.String("rpc_addr", "127.0.0.1:7373", "serf rpc addr")
serfFlagSet.String("ev_handler", "./serfev_handler.py", "serf event handler")
serfFlagSet.String("log_file", "./serf.log", "serf log file")
chordFlagSet := flag.NewFlagSet("chord", flag.PanicOnError)
chordFlagSet.String("hostname", "chod0101", "chord hostname")
chordFlagSet.Int("bind_port", 5000, "chord bind port")
chordFlagSet.Int("rpc_port", 5500, "chord rpc port")
chordFlagSet.Int("num_vnodes", 16, "chord virtual node numbers")
chordFlagSet.Int("num_successors", 3, "chord successor node numbers")
chordFlagSet.Int("hash_bits", 160, "chord hash bits")
etcdFlagSet := flag.NewFlagSet("etcd", flag.PanicOnError)
etcdFlagSet.String("machines", "http://localhost:4001", "etcd machines")
etcdFlagSet.Int("pool_size", 4, "initial etcd client pool size")
etcdFlagSet.Int("max_pool_size", 64, "max etcd client pool size")
redisFlagSet := flag.NewFlagSet("redis", flag.PanicOnError)
redisFlagSet.String("meta_redis_addr", "tcp@localhost:6379", "meta redis address")
redisFlagSet.String("attr_redis_addr", "tcp@localhost:6479",
"attr redis address list. different group is filtered by ';'")
redisFlagSet.String("max_idle", "50", "redis pool max idle clients")
redisFlagSet.String("max_active", "100", "redis pool max active clients")
redisFlagSet.String("timeout", "3600", "close idle redis client after timeout")
globalconf.Register("basic", basicFlagSet)
globalconf.Register("serf", serfFlagSet)
globalconf.Register("chord", chordFlagSet)
globalconf.Register("etcd", etcdFlagSet)
globalconf.Register("redis", redisFlagSet)
conf.ParseAll()
Config = &SrvConfig{}
Config.BindIP = basicFlagSet.Lookup("bind_ip").Value.String()
Config.Workdir = basicFlagSet.Lookup("workdir").Value.String()
Config.LogLevel = basicFlagSet.Lookup("log_level").Value.String()
Config.LogFile = basicFlagSet.Lookup("log_file").Value.String()
Config.PidFile = basicFlagSet.Lookup("pid_file").Value.String()
Config.TCPRecvBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_recvbuf_size").Value.String())
Config.TCPSendBufSize, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_sendbuf_size").Value.String())
Config.TCPBufioNum, err =
strconv.Atoi(basicFlagSet.Lookup("tcp_bufio_num").Value.String())
Config.TCPBufInsNum = runtime.NumCPU()
Config.HashFunc = sha1.New
Config.SerfBinPath = serfFlagSet.Lookup("bin_path").Value.String()
Config.SerfNodeName = serfFlagSet.Lookup("node_name").Value.String()
Config.SerfBindPort, err =
strconv.Atoi(serfFlagSet.Lookup("bind_port").Value.String())
Config.SerfBindAddr = fmt.Sprintf("0.0.0.0:%d", Config.SerfBindPort)
Config.SerfRPCAddr = serfFlagSet.Lookup("rpc_addr").Value.String()
Config.SerfEvHandler = serfFlagSet.Lookup("ev_handler").Value.String()
Config.SerfLogFile = serfFlagSet.Lookup("log_file").Value.String()
Config.Hostname = chordFlagSet.Lookup("hostname").Value.String()
Config.BindPort, err =
strconv.Atoi(chordFlagSet.Lookup("bind_port").Value.String())
Config.BindAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.BindPort)
Config.RPCPort, err =
strconv.Atoi(chordFlagSet.Lookup("rpc_port").Value.String())
Config.RPCAddr = fmt.Sprintf("%s:%d", Config.BindIP, Config.RPCPort)
Config.NumVnodes, err =
strconv.Atoi(chordFlagSet.Lookup("num_vnodes").Value.String())
Config.NumSuccessors, err =
strconv.Atoi(chordFlagSet.Lookup("num_successors").Value.String())
Config.HashBits, err =
strconv.Atoi(chordFlagSet.Lookup("hash_bits").Value.String())
machines := etcdFlagSet.Lookup("machines").Value.String()
Config.EtcdMachines = strings.Split(machines, ",")
Config.EtcdPoolSize, err =
strconv.Atoi(etcdFlagSet.Lookup("pool_size").Value.String())
Config.EtcdPoolMaxSize, err =
strconv.Atoi(etcdFlagSet.Lookup("max_pool_size").Value.String()) | Config.RedisMaxIdle, err =
strconv.Atoi(redisFlagSet.Lookup("max_idle").Value.String())
Config.RedisMaxActive, err =
strconv.Atoi(redisFlagSet.Lookup("max_active").Value.String())
Config.RedisIdleTimeout, err =
strconv.Atoi(redisFlagSet.Lookup("timeout").Value.String())
Config.Entrypoint = entrypoint
if len(starthash)%2 == 1 {
starthash = "0" + starthash
}
if sh, err := hex.DecodeString(starthash); err != nil {
return err
} else if len(sh) != Config.HashBits/8 {
return fmt.Errorf("error starthash hex string length %d, should be %d",
len(starthash), Config.HashBits/8*2)
} else {
Config.StartHash = sh[:]
}
return nil
}
func initLog(logFile, serfLogFile, logLevel string) error {
var format = logging.MustStringFormatter(
"%{time:2006-01-02 15:04:05.000} [%{level:.4s}] %{id:03x} [%{shortfunc}] %{message}",
)
f, err := os.OpenFile(logFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
backend1 := logging.NewLogBackend(f, "", 0)
backend1Formatter := logging.NewBackendFormatter(backend1, format)
backend1Leveled := logging.AddModuleLevel(backend1Formatter)
backend1Leveled.SetLevel(dmq.LogLevelMap[logLevel], "")
logging.SetBackend(backend1Leveled)
serfLog, err = os.OpenFile(serfLogFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
return err
}
return nil
}
func initServer() error {
log.Info("Datanode server is starting...")
EtcdCliPool = dmq.NewEtcdClientPool(
Config.EtcdMachines, Config.EtcdPoolSize, Config.EtcdPoolMaxSize)
DispConns = make(map[string]*DispConn)
var err error
CurDispNode, err = allocateDispNode(EtcdCliPool)
if err != nil {
return err
}
rcfg := dmq.NewRedisConfig(Config.MetaRedisAddr, Config.RedisMaxIdle,
Config.RedisMaxActive, Config.RedisIdleTimeout)
MetaRCPool, err = dmq |
Config.MetaRedisAddr = redisFlagSet.Lookup("meta_redis_addr").Value.String()
attrAddrs := redisFlagSet.Lookup("attr_redis_addr").Value.String()
Config.AttrRedisAddrs = strings.Split(attrAddrs, ";") | random_line_split |
hypsometry_plots.py | # sum areas
elev_range_area_sum = elev_range_area.sum()
return elev_range_area_sum
# function from pybob
def bin_data(bins, data2bin, bindata, mode='mean', nbinned=False):
| if nbinned:
numbinned = np.zeros(len(bins))
if mode == 'mean':
for i, _ in enumerate(bins):
binned[i] = np.nanmean(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'median':
for i, _ in enumerate(bins):
binned[i] = np.nanmedian(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'std':
for i, _ in enumerate(bins):
binned[i] = np.nanstd(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'max':
for i, _ in enumerate(bins):
binned[i] = np.nanmax(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'min':
for i, _ in enumerate(bins):
binned[i] = np.nanmin(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
else:
raise ValueError('mode must be mean, median, std, max, or min')
if nbinned:
return np.array(binned), np.array(numbinned)
else:
return np.array(binned)
# function to extract data by mask
def glacierMask(fp_raster, features):
with rasterio.open(fp_raster) as src:
glac_mask, glac_out_transform = rasterio.mask.mask(src, shapes, crop=False)
glac_nodata = src.nodata
masked = glac_mask[0]
masked[(masked == glac_nodata)] = np.nan
return masked
# function to get elevation bins
def getBins(array, bwidth):
# get elev min and max
elev_min = np.nanmin(array)
elev_max = np.nanmax(array)
# define elevation range
erange = elev_max - elev_min
min_el = elev_min - (elev_min % bwidth)
max_el = elev_max + (bwidth - (elev_max % bwidth))
bins = np.arange(min_el, max_el+1, bwidth)
return bins
def excludeByID(excluded_list, in_gdf, id_column):
# select all but ones in the list
selection = in_gdf[~in_gdf[id_column].isin(excluded_list)]
return selection
# read files
gdf = gpd.read_file(fp_diss_outline) # glacier outlines, the ones passing the filter in void fill
checkGeom(gdf)
contour = gpd.read_file(fp_c) # filled contours
#exclude = pd.read_csv(fp_exclude) # glaciers to exclude
# rename columns
#exclude = exclude.rename(columns={'Unnamed: 0': 'index', '0': 'ID'})
#remoce duplicates and drop extra columns
#exclude = exclude.drop_duplicates(subset='ID')
#exclude = exclude.drop(columns=['index'])
# to list
#excludelist = exclude['ID'].tolist()
# glaciers to select
#sel = ['RGI60-05.02328_1']
sel = ['RGI60-05.02281', 'RGI60-05.02087', 'RGI60-05.02297', 'RGI60-05.02280_1', 'RGI60-05.02309', 'RGI60-05.02328_1', 'RGI60-05.01987_1', 'RGI60-05.02303', 'RGI60-05.02126'] # observed surges
#sel = ['RGI60-05.02281', 'RGI60-05.02087', 'RGI60-05.02297', 'RGI60-05.02280_1', 'RGI60-05.02309', 'RGI60-05.02328_1', 'RGI60-05.02108', 'RGI60-05.02303', 'RGI60-05.01920', 'RGI60-05.01987_1', 'RGI60-05.02213', 'RGI60-05.02126'] # observed and probable surge
# exclude and select certain glaciers
#gdf = excludeByID(excludelist, gdf, 'RGIId')
# get id's to list, so same selection can be made for edited outlines
gdf_idlist = list(gdf['RGIId'])
#gdf.to_file(fp_diss_outline_exc, driver='ESRI Shapefile')
gdf = gdf[gdf.RGIId.isin(sel)] # subset
#gdf.to_file(fp_surging_outline, driver='ESRI Shapefile')
# read outline dataframes and assign them to dictionary where basename is the key to each dataframe
outlinedict = {} # empty dictionary
for f in glob.glob(fp_outlines):
# get basename
bname = os.path.basename(f)
ol = gpd.read_file(f)
#ol = excludeByID(excludelist, ol, 'RGIId') # exclude certain glaciers
ol = ol[ol.RGIId.isin(gdf_idlist)] # selection based on the id's from the dissolved outlines
ol = ol[ol.RGIId.isin(sel)] # additional dataframe subsetting, ie. for surging glaciers only
# check geometry validity before creating dictionary
for geom in ol.geometry:
if explain_validity(geom) != 'Valid Geometry':
print(bname + ' Geometry has invalid parts')
print(explain_validity(geom))
# add dataframe to dictionary with basename as key
outlinedict[bname] = ol
# test if non-matching id's found
#keyslist = list(outlinedict)
#test = outlinedict.get(keyslist[2])
#testlist = list(test['RGIId'])
#for i in gdf_idlist:
# if i not in testlist:
# print(i)
# check contour columns and elevation ranges
contour.columns
contour['range_cont'].unique()
# exclude NoData from contour ranges
contsel = contour[contour['range_cont'] != '<NoData>']
# dissolve by contour range
contdis = contsel.dissolve(by='range_cont')
contdis = contdis.reset_index()
# read dem
with rio.open(fp_dem) as src:
dem = src.read(1)
dem_nodata = src.nodata
dem[dem == dem_nodata] = np.nan
# get geometries from selected polygons
shapes = gdf.geometry
# bins
bins = getBins(dem, 100)
# dataframe to store results
result = pd | """
Place data into bins based on a secondary dataset, and calculate statistics on them.
:param bins: array-like structure indicating the bins into which data should be placed.
:param data2bin: data that should be binned.
:param bindata: secondary dataset that decides how data2bin should be binned. Should have same size/shape
as data2bin.
:param mode: How to calculate statistics of binned data. One of 'mean', 'median', 'std', 'max', or 'min'.
:param nbinned: Return a second array, nbinned, with number of data points that fit into each bin.
Default is False.
:type bins: array-like
:type data2bin: array-like
:type bindata: array-like
:type mode: str
:type nbinned: bool
:returns binned, nbinned: calculated, binned data with same size as bins input. If nbinned is True, returns a second
array with the number of inputs for each bin.
"""
assert mode in ['mean', 'median', 'std', 'max', 'min'], "mode not recognized: {}".format(mode)
digitized = np.digitize(bindata, bins)
binned = np.zeros(len(bins)) * np.nan | identifier_body |
hypsometry_plots.py | GI60-05.02087', 'RGI60-05.02297', 'RGI60-05.02280_1', 'RGI60-05.02309', 'RGI60-05.02328_1', 'RGI60-05.02108', 'RGI60-05.02303', 'RGI60-05.01920', 'RGI60-05.01987_1', 'RGI60-05.02213', 'RGI60-05.02126'] # observed and probable surge
# exclude and select certain glaciers
#gdf = excludeByID(excludelist, gdf, 'RGIId')
# get id's to list, so same selection can be made for edited outlines
gdf_idlist = list(gdf['RGIId'])
#gdf.to_file(fp_diss_outline_exc, driver='ESRI Shapefile')
gdf = gdf[gdf.RGIId.isin(sel)] # subset
#gdf.to_file(fp_surging_outline, driver='ESRI Shapefile')
# read outline dataframes and assign them to dictionary where basename is the key to each dataframe
outlinedict = {} # empty dictionary
for f in glob.glob(fp_outlines):
# get basename
bname = os.path.basename(f)
ol = gpd.read_file(f)
#ol = excludeByID(excludelist, ol, 'RGIId') # exclude certain glaciers
ol = ol[ol.RGIId.isin(gdf_idlist)] # selection based on the id's from the dissolved outlines
ol = ol[ol.RGIId.isin(sel)] # additional dataframe subsetting, ie. for surging glaciers only
# check geometry validity before creating dictionary
for geom in ol.geometry:
if explain_validity(geom) != 'Valid Geometry':
print(bname + ' Geometry has invalid parts')
print(explain_validity(geom))
# add dataframe to dictionary with basename as key
outlinedict[bname] = ol
# test if non-matching id's found
#keyslist = list(outlinedict)
#test = outlinedict.get(keyslist[2])
#testlist = list(test['RGIId'])
#for i in gdf_idlist:
# if i not in testlist:
# print(i)
# check contour columns and elevation ranges
contour.columns
contour['range_cont'].unique()
# exclude NoData from contour ranges
contsel = contour[contour['range_cont'] != '<NoData>']
# dissolve by contour range
contdis = contsel.dissolve(by='range_cont')
contdis = contdis.reset_index()
# read dem
with rio.open(fp_dem) as src:
dem = src.read(1)
dem_nodata = src.nodata
dem[dem == dem_nodata] = np.nan
# get geometries from selected polygons
shapes = gdf.geometry
# bins
bins = getBins(dem, 100)
# dataframe to store results
result = pd.DataFrame(bins, columns=['bins'])
# read tiff files to list
tifflist = []
for t in glob.glob(fp_tiff):
tifflist.append(t)
# get elevation differences for each elevation bin
for tif in tifflist:
bname = os.path.basename(tif)
# read ddem and mask with selected glaciers
ddem = glacierMask(tif, shapes)
# classify dem to bins
digitized = np.digitize(dem, bins)
# calculate average elevation difference per bin
bin_means = bin_data(bins, ddem, dem, mode='mean', nbinned=False)
# parse column name
colname = 'mu_dh_' + bname[0:12]
# update results
for i, _ in enumerate(bins):
result.loc[result['bins'] == bins[i], colname] = bin_means[i]
# update bins column to integer
result['bins'] = result['bins'].astype(int)
# list for area sum strings
asumstr = []
# add area change to new columns
# loop through dictionary keys and values
for x, y in outlinedict.items():
# store the first four characters (the year) from the filename to variable
year = x[:4]
# check total glacierized area
g_area = sum(y.area) / 1000000
unc_a = g_area * 0.1
print('Area in ' + str(year) + ': {:.2f} ± {:.2f} km2'.format(g_area, unc_a)) #str(round(g_area / 1000000, 3)) + ' km2')
areasum_str = str(year) + ': {:.2f} ± {:.2f} $km^2$'.format(g_area, unc_a)
asumstr.append(areasum_str)
# add column for results
result[str(x[:4])+'Akm2'] = ""
# loop through elevation bins and calculate area altitude difference for each bin
for i in bins:
i = i.astype(int)
# selection by contour range before applying functions
elev_bin = contdis[contdis['low_cont'] == i.astype(str)]
# use function
out = areaDiff(y, elev_bin)
if out is None:
out = 0
# store result to dataframe
result.loc[result['bins'] == i, str(x[:4])+'Akm2'] = out
# calculate area differences (e.g.2016 - 1953 so positive values show area increase and negative decrease)
result['dA53t85'] = result['1985Akm2'] - result['1953Akm2']
result['dA53t16'] = result['2016Akm2'] - result['1953Akm2']
result['dA85t16'] = result['2016Akm2'] - result['1985Akm2']
result = result.dropna(axis=0, how='any')
# figure output
fig_out = r'/Users/apj/Documents/_HY/Greenland/contour/figures/vgridshift/hypsometry_active_surging_glaciers_filled_global_mean.png'
# create hypsometry and area altitude plot
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10,10))
# hypsometry plot
line5385 = axes[0].plot(result['mu_dh_1953_to_1985'], result['bins'], marker='p', color='k', linewidth= 0.9, label='dh 1953 to 1985')
line532016 = axes[0].plot(result['mu_dh_1953_to_2016'], result['bins'], marker='v', color='b', linewidth=0.9, label='dh 1953 to 2016')
line852016 = axes[0].plot(result['mu_dh_1985_to_2016'], result['bins'], marker='s', color='g', linewidth=0.9, label='dh 1985 to 2016')
axes[0].set_ylim(min(result['bins'])-100, max(result['bins'])+100)
axes[0].set_yticks(np.arange(min(result['bins'])-100, max(result['bins'])+100, 100))
axes[0].axvline(0, color='grey', ls='--')
axes[0].set_ylabel('Elevation bin (m)')
axes[0].set_xlabel('Average elevation difference (m)')
axes[0].legend(loc=2)
axes[0].grid()
# area-altitude plot
area1953 = axes[1].plot(result['1953Akm2'], result['bins'], marker='s', color='k', linewidth= 0.9, label='1953')
area1985 = axes[1].plot(result['1985Akm2'], result['bins'], marker='^', color='#994C00', linewidth=0.9, label='1985')
area2016 = axes[1].plot(result['2016Akm2'], result['bins'], marker='o', color='#006633', linewidth=0.9, label='2016')
axes[1].set_ylim(min(result['bins'])-100, max(result['bins'])+100)
axes[1].set_yticks(np.arange(min(result['bins'])-100, max(result['bins'])+100, 100))
axes[1].axvline(0, color='grey', ls='--')
axes[1].set_ylabel('Elevation bin (m)')
axes[1].set_xlabel('Area altitude distribution ($km^2$)') | axes[1].legend(loc=1) | random_line_split |
|
hypsometry_plots.py | bindata, mode='mean', nbinned=False):
"""
Place data into bins based on a secondary dataset, and calculate statistics on them.
:param bins: array-like structure indicating the bins into which data should be placed.
:param data2bin: data that should be binned.
:param bindata: secondary dataset that decides how data2bin should be binned. Should have same size/shape
as data2bin.
:param mode: How to calculate statistics of binned data. One of 'mean', 'median', 'std', 'max', or 'min'.
:param nbinned: Return a second array, nbinned, with number of data points that fit into each bin.
Default is False.
:type bins: array-like
:type data2bin: array-like
:type bindata: array-like
:type mode: str
:type nbinned: bool
:returns binned, nbinned: calculated, binned data with same size as bins input. If nbinned is True, returns a second
array with the number of inputs for each bin.
"""
assert mode in ['mean', 'median', 'std', 'max', 'min'], "mode not recognized: {}".format(mode)
digitized = np.digitize(bindata, bins)
binned = np.zeros(len(bins)) * np.nan
if nbinned:
numbinned = np.zeros(len(bins))
if mode == 'mean':
for i, _ in enumerate(bins):
binned[i] = np.nanmean(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'median':
for i, _ in enumerate(bins):
binned[i] = np.nanmedian(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'std':
for i, _ in enumerate(bins):
binned[i] = np.nanstd(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'max':
for i, _ in enumerate(bins):
binned[i] = np.nanmax(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'min':
for i, _ in enumerate(bins):
binned[i] = np.nanmin(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
else:
raise ValueError('mode must be mean, median, std, max, or min')
if nbinned:
return np.array(binned), np.array(numbinned)
else:
return np.array(binned)
# function to extract data by mask
def glacierMask(fp_raster, features):
with rasterio.open(fp_raster) as src:
glac_mask, glac_out_transform = rasterio.mask.mask(src, shapes, crop=False)
glac_nodata = src.nodata
masked = glac_mask[0]
masked[(masked == glac_nodata)] = np.nan
return masked
# function to get elevation bins
def getBins(array, bwidth):
# get elev min and max
elev_min = np.nanmin(array)
elev_max = np.nanmax(array)
# define elevation range
erange = elev_max - elev_min
min_el = elev_min - (elev_min % bwidth)
max_el = elev_max + (bwidth - (elev_max % bwidth))
bins = np.arange(min_el, max_el+1, bwidth)
return bins
def excludeByID(excluded_list, in_gdf, id_column):
# select all but ones in the list
selection = in_gdf[~in_gdf[id_column].isin(excluded_list)]
return selection
# read files
gdf = gpd.read_file(fp_diss_outline) # glacier outlines, the ones passing the filter in void fill
checkGeom(gdf)
contour = gpd.read_file(fp_c) # filled contours
#exclude = pd.read_csv(fp_exclude) # glaciers to exclude
# rename columns
#exclude = exclude.rename(columns={'Unnamed: 0': 'index', '0': 'ID'})
#remoce duplicates and drop extra columns
#exclude = exclude.drop_duplicates(subset='ID')
#exclude = exclude.drop(columns=['index'])
# to list
#excludelist = exclude['ID'].tolist()
# glaciers to select
#sel = ['RGI60-05.02328_1']
sel = ['RGI60-05.02281', 'RGI60-05.02087', 'RGI60-05.02297', 'RGI60-05.02280_1', 'RGI60-05.02309', 'RGI60-05.02328_1', 'RGI60-05.01987_1', 'RGI60-05.02303', 'RGI60-05.02126'] # observed surges
#sel = ['RGI60-05.02281', 'RGI60-05.02087', 'RGI60-05.02297', 'RGI60-05.02280_1', 'RGI60-05.02309', 'RGI60-05.02328_1', 'RGI60-05.02108', 'RGI60-05.02303', 'RGI60-05.01920', 'RGI60-05.01987_1', 'RGI60-05.02213', 'RGI60-05.02126'] # observed and probable surge
# exclude and select certain glaciers
#gdf = excludeByID(excludelist, gdf, 'RGIId')
# get id's to list, so same selection can be made for edited outlines
gdf_idlist = list(gdf['RGIId'])
#gdf.to_file(fp_diss_outline_exc, driver='ESRI Shapefile')
gdf = gdf[gdf.RGIId.isin(sel)] # subset
#gdf.to_file(fp_surging_outline, driver='ESRI Shapefile')
# read outline dataframes and assign them to dictionary where basename is the key to each dataframe
outlinedict = {} # empty dictionary
for f in glob.glob(fp_outlines):
# get basename
bname = os.path.basename(f)
ol = gpd.read_file(f)
#ol = excludeByID(excludelist, ol, 'RGIId') # exclude certain glaciers
ol = ol[ol.RGIId.isin(gdf_idlist)] # selection based on the id's from the dissolved outlines
ol = ol[ol.RGIId.isin(sel)] # additional dataframe subsetting, ie. for surging glaciers only
# check geometry validity before creating dictionary
for geom in ol.geometry:
if explain_validity(geom) != 'Valid Geometry':
print(bname + ' Geometry has invalid parts')
print(explain_validity(geom))
# add dataframe to dictionary with basename as key
outlinedict[bname] = ol
# test if non-matching id's found
#keyslist = list(outlinedict)
#test = outlinedict.get(keyslist[2])
#testlist = list(test['RGIId'])
#for i in gdf_idlist:
# if i not in testlist:
# print(i)
# check contour columns and elevation ranges
contour.columns
contour['range_cont'].unique()
# exclude NoData from contour ranges
contsel = contour[contour['range_cont'] != '<NoData>']
# dissolve by contour range
contdis = contsel.dissolve(by='range_cont')
contdis = contdis.reset_index()
# read dem
with rio.open(fp_dem) as src:
dem = src.read(1)
dem_nodata = src.nodata
dem[dem == dem_nodata] = np.nan
# get geometries from selected polygons
shapes = gdf.geometry
# bins
bins = getBins(dem, 100)
# dataframe to store results
result = pd.DataFrame(bins, columns=['bins'])
# read tiff files to list
tifflist = []
for t in glob.glob(fp_tiff):
| tifflist.append(t) | conditional_block |
|
hypsometry_plots.py | (outline, elevation_bin):
"""
Function to calculate area in an elevation bin
Parameters
----------
outline : Polygon
Polygon containing outlines.
elevation_bin : Polygon
Polygon containing elevation ranges
contour_range : String
Elevation range to be selected
Returns
-------
elev_range_area_sum : float
Sum of areas from outline polygon inside the elevation bin
"""
# clip outlines by selected elevation range
outline_elev_range = gpd.clip(outline, elevation_bin, keep_geom_type=(True))
# check that clipped dataframe is not empty
if outline_elev_range.empty == True:
return
# compute area in km2
elev_range_area = outline_elev_range.geometry.area / 1000000
# sum areas
elev_range_area_sum = elev_range_area.sum()
return elev_range_area_sum
# function from pybob
def bin_data(bins, data2bin, bindata, mode='mean', nbinned=False):
"""
Place data into bins based on a secondary dataset, and calculate statistics on them.
:param bins: array-like structure indicating the bins into which data should be placed.
:param data2bin: data that should be binned.
:param bindata: secondary dataset that decides how data2bin should be binned. Should have same size/shape
as data2bin.
:param mode: How to calculate statistics of binned data. One of 'mean', 'median', 'std', 'max', or 'min'.
:param nbinned: Return a second array, nbinned, with number of data points that fit into each bin.
Default is False.
:type bins: array-like
:type data2bin: array-like
:type bindata: array-like
:type mode: str
:type nbinned: bool
:returns binned, nbinned: calculated, binned data with same size as bins input. If nbinned is True, returns a second
array with the number of inputs for each bin.
"""
assert mode in ['mean', 'median', 'std', 'max', 'min'], "mode not recognized: {}".format(mode)
digitized = np.digitize(bindata, bins)
binned = np.zeros(len(bins)) * np.nan
if nbinned:
numbinned = np.zeros(len(bins))
if mode == 'mean':
for i, _ in enumerate(bins):
binned[i] = np.nanmean(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'median':
for i, _ in enumerate(bins):
binned[i] = np.nanmedian(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'std':
for i, _ in enumerate(bins):
binned[i] = np.nanstd(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'max':
for i, _ in enumerate(bins):
binned[i] = np.nanmax(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
elif mode == 'min':
for i, _ in enumerate(bins):
binned[i] = np.nanmin(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])
if nbinned:
numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))
else:
raise ValueError('mode must be mean, median, std, max, or min')
if nbinned:
return np.array(binned), np.array(numbinned)
else:
return np.array(binned)
# function to extract data by mask
def glacierMask(fp_raster, features):
with rasterio.open(fp_raster) as src:
glac_mask, glac_out_transform = rasterio.mask.mask(src, shapes, crop=False)
glac_nodata = src.nodata
masked = glac_mask[0]
masked[(masked == glac_nodata)] = np.nan
return masked
# function to get elevation bins
def getBins(array, bwidth):
# get elev min and max
elev_min = np.nanmin(array)
elev_max = np.nanmax(array)
# define elevation range
erange = elev_max - elev_min
min_el = elev_min - (elev_min % bwidth)
max_el = elev_max + (bwidth - (elev_max % bwidth))
bins = np.arange(min_el, max_el+1, bwidth)
return bins
def excludeByID(excluded_list, in_gdf, id_column):
# select all but ones in the list
selection = in_gdf[~in_gdf[id_column].isin(excluded_list)]
return selection
# read files
gdf = gpd.read_file(fp_diss_outline) # glacier outlines, the ones passing the filter in void fill
checkGeom(gdf)
contour = gpd.read_file(fp_c) # filled contours
#exclude = pd.read_csv(fp_exclude) # glaciers to exclude
# rename columns
#exclude = exclude.rename(columns={'Unnamed: 0': 'index', '0': 'ID'})
#remoce duplicates and drop extra columns
#exclude = exclude.drop_duplicates(subset='ID')
#exclude = exclude.drop(columns=['index'])
# to list
#excludelist = exclude['ID'].tolist()
# glaciers to select
#sel = ['RGI60-05.02328_1']
sel = ['RGI60-05.02281', 'RGI60-05.02087', 'RGI60-05.02297', 'RGI60-05.02280_1', 'RGI60-05.02309', 'RGI60-05.02328_1', 'RGI60-05.01987_1', 'RGI60-05.02303', 'RGI60-05.02126'] # observed surges
#sel = ['RGI60-05.02281', 'RGI60-05.02087', 'RGI60-05.02297', 'RGI60-05.02280_1', 'RGI60-05.02309', 'RGI60-05.02328_1', 'RGI60-05.02108', 'RGI60-05.02303', 'RGI60-05.01920', 'RGI60-05.01987_1', 'RGI60-05.02213', 'RGI60-05.02126'] # observed and probable surge
# exclude and select certain glaciers
#gdf = excludeByID(excludelist, gdf, 'RGIId')
# get id's to list, so same selection can be made for edited outlines
gdf_idlist = list(gdf['RGIId'])
#gdf.to_file(fp_diss_outline_exc, driver='ESRI Shapefile')
gdf = gdf[gdf.RGIId.isin(sel)] # subset
#gdf.to_file(fp_surging_outline, driver='ESRI Shapefile')
# read outline dataframes and assign them to dictionary where basename is the key to each dataframe
outlinedict = {} # empty dictionary
for f in glob.glob(fp_outlines):
# get basename
bname = os.path.basename(f)
ol = gpd.read_file(f)
#ol = excludeByID(excludelist, ol, 'RGIId') # exclude certain glaciers
ol = ol[ol.RGIId.isin(gdf_idlist)] # selection based on the id's from the dissolved outlines
ol = ol[ol.RGIId.isin(sel)] # additional dataframe subsetting, ie. for surging glaciers only
# check geometry validity before creating dictionary
for geom in ol.geometry:
if explain_validity(geom) != 'Valid Geometry':
print(bname + ' Geometry has invalid parts')
print(explain_validity(geom))
# add dataframe to dictionary with basename as key
outlinedict[bname] = ol
# test if non-matching id's found
#keyslist = list(outlinedict)
#test = outlinedict.get(keyslist[2])
#testlist = list(test['RGIId'])
#for i | areaDiff | identifier_name |
|
keras_spark_rossmann_estimator.py |
# %% [markdown]
# ## Downloading the Data
#
# We define a task to download the data into a `FlyteDirectory`.
# %%
@task(
cache=True,
cache_version="0.1",
)
def download_data(dataset: str) -> FlyteDirectory:
# create a directory named 'data'
print("==============")
print("Downloading data")
print("==============")
working_dir = flytekit.current_context().working_directory
data_dir = pathlib.Path(os.path.join(working_dir, "data"))
data_dir.mkdir(exist_ok=True)
# download the dataset
download_subprocess = subprocess.run(
[
"curl",
dataset,
],
check=True,
capture_output=True,
)
# untar the data
subprocess.run(
[
"tar",
"-xz",
"-C",
data_dir,
],
input=download_subprocess.stdout,
)
# return the directory populated with Rossmann data files
return FlyteDirectory(path=str(data_dir))
# %% [markdown]
# ## Data Preprocessing
#
# 1. Let's start with cleaning and preparing the Google trend data. We create new 'Date' and 'State' columns using PySpark's `withColumn`. These columns, in addition to other features, will contribute to the prediction of sales.
# %%
def prepare_google_trend(
google_trend_csv: pyspark.sql.DataFrame,
) -> pyspark.sql.DataFrame:
google_trend_all = google_trend_csv.withColumn(
"Date", F.regexp_extract(google_trend_csv.week, "(.*?) -", 1)
).withColumn("State", F.regexp_extract(google_trend_csv.file, "Rossmann_DE_(.*)", 1))
# map state NI -> HB,NI to align with other data sources
google_trend_all = google_trend_all.withColumn(
"State",
F.when(google_trend_all.State == "NI", "HB,NI").otherwise(google_trend_all.State),
)
# expand dates
return expand_date(google_trend_all)
# %% [markdown]
# 2. Next, we set a few date-specific values in the DataFrame to analyze the seasonal effects on sales.
# %%
def expand_date(df: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:
df = df.withColumn("Date", df.Date.cast(T.DateType()))
return (
df.withColumn("Year", F.year(df.Date))
.withColumn("Month", F.month(df.Date))
.withColumn("Week", F.weekofyear(df.Date))
.withColumn("Day", F.dayofmonth(df.Date))
)
# %% [markdown]
# 3. We retrieve the number of days before/after a special event (such as a promo or holiday). This data helps analyze how the sales may vary before/after a special event.
# %%
def add_elapsed(df: pyspark.sql.DataFrame, cols: List[str]) -> pyspark.sql.DataFrame:
def add_elapsed_column(col, asc):
def fn(rows):
last_store, last_date = None, None
for r in rows:
if last_store != r.Store:
last_store = r.Store
last_date = r.Date
if r[col]:
last_date = r.Date
fields = r.asDict().copy()
fields[("After" if asc else "Before") + col] = (r.Date - last_date).days
yield Row(**fields)
return fn
# repartition: rearrange the rows in the DataFrame based on the partitioning expression
# sortWithinPartitions: sort every partition in the DataFrame based on specific columns
# mapPartitions: apply the 'add_elapsed_column' method to each partition in the dataset, and convert the partitions into a DataFrame
df = df.repartition(df.Store)
for asc in [False, True]:
sort_col = df.Date.asc() if asc else df.Date.desc()
rdd = df.sortWithinPartitions(df.Store.asc(), sort_col).rdd
for col in cols:
rdd = rdd.mapPartitions(add_elapsed_column(col, asc))
df = rdd.toDF()
return df
# %% [markdown]
# 4. We define a function to merge several Spark DataFrames into a single DataFrame to create training and test data.
# %%
def prepare_df(
df: pyspark.sql.DataFrame,
store_csv: pyspark.sql.DataFrame,
store_states_csv: pyspark.sql.DataFrame,
state_names_csv: pyspark.sql.DataFrame,
google_trend_csv: pyspark.sql.DataFrame,
weather_csv: pyspark.sql.DataFrame,
) -> pyspark.sql.DataFrame:
num_rows = df.count()
# expand dates
df = expand_date(df)
# create new columns in the DataFrame by filtering out special events(promo/holiday where sales was zero or store was closed).
df = (
df.withColumn("Open", df.Open != "0")
.withColumn("Promo", df.Promo != "0")
.withColumn("StateHoliday", df.StateHoliday != "0")
.withColumn("SchoolHoliday", df.SchoolHoliday != "0")
)
# merge store information
store = store_csv.join(store_states_csv, "Store")
df = df.join(store, "Store")
# merge Google Trend information
google_trend_all = prepare_google_trend(google_trend_csv)
df = df.join(google_trend_all, ["State", "Year", "Week"]).select(df["*"], google_trend_all.trend)
# merge in Google Trend for whole Germany
google_trend_de = google_trend_all[google_trend_all.file == "Rossmann_DE"].withColumnRenamed("trend", "trend_de")
df = df.join(google_trend_de, ["Year", "Week"]).select(df["*"], google_trend_de.trend_de)
# merge weather
weather = weather_csv.join(state_names_csv, weather_csv.file == state_names_csv.StateName)
df = df.join(weather, ["State", "Date"])
# fix null values
df = (
df.withColumn(
"CompetitionOpenSinceYear",
F.coalesce(df.CompetitionOpenSinceYear, F.lit(1900)),
)
.withColumn(
"CompetitionOpenSinceMonth",
F.coalesce(df.CompetitionOpenSinceMonth, F.lit(1)),
)
.withColumn("Promo2SinceYear", F.coalesce(df.Promo2SinceYear, F.lit(1900)))
.withColumn("Promo2SinceWeek", F.coalesce(df.Promo2SinceWeek, F.lit(1)))
)
# days and months since the competition has been open, cap it to 2 years
df = df.withColumn(
"CompetitionOpenSince",
F.to_date(F.format_string("%s-%s-15", df.CompetitionOpenSinceYear, df.CompetitionOpenSinceMonth)),
)
df = df.withColumn(
"CompetitionDaysOpen",
F.when(
df.CompetitionOpenSinceYear > 1900,
F.greatest(
F.lit(0),
F.least(F.lit(360 * 2), F.datediff(df.Date, df.CompetitionOpenSince)),
),
).otherwise(0),
)
df = df.withColumn("CompetitionMonthsOpen", (df.CompetitionDaysOpen / 30).cast(T.IntegerType()))
# days and weeks of promotion, cap it to 25 weeks
df = df.withColumn(
"Promo2Since",
F.expr('date_add(format_string("%s-01-01", Promo2SinceYear), (cast(Promo2SinceWeek as int) - 1) * 7)'),
)
df = df.withColumn(
"Promo2Days",
F.when(
df.Promo2SinceYear > 1900,
F.greatest(F.lit(0), F.least(F.lit(25 * 7), F.datediff(df.Date, df.Promo2Since))),
).otherwise(0),
)
df = df.withColumn("Promo2Weeks", (df.Promo2Days / 7).cast(T.IntegerType()))
# ensure that no row was lost through inner joins
assert num_rows == df.count(), "lost rows in joins"
return df
# %% [markdown]
# 5. We build a dictionary of sorted, distinct categorical variables to create an embedding layer in our Keras model.
# %%
def build_vocabulary(df: pyspark.sql.DataFrame) -> Dict[str, List[Any]]:
vocab = {}
for col in CATEGORICAL_COLS:
values = [r[0] for r in df.select(col).distinct().collect()]
col_type = type([x for x in values if x is not None][0])
default_value = col_type()
vocab[col] = sorted(values, key=lambda x: x or default_value)
return vocab
# %% [markdown]
# 6. Next, we cast | batch_size: int = 100
sample_rate: float = 0.01
learning_rate: float = 0.0001
num_proc: int = 2
epochs: int = 100
local_checkpoint_file: str = "checkpoint.h5"
local_submission_csv: str = "submission.csv" | identifier_body |
|
keras_spark_rossmann_estimator.py | an embedding layer in our Keras model.
# %%
def build_vocabulary(df: pyspark.sql.DataFrame) -> Dict[str, List[Any]]:
vocab = {}
for col in CATEGORICAL_COLS:
values = [r[0] for r in df.select(col).distinct().collect()]
col_type = type([x for x in values if x is not None][0])
default_value = col_type()
vocab[col] = sorted(values, key=lambda x: x or default_value)
return vocab
# %% [markdown]
# 6. Next, we cast continuous columns to float as part of data preprocessing.
# %%
def cast_columns(df: pyspark.sql.DataFrame, cols: List[str]) -> pyspark.sql.DataFrame:
for col in cols:
df = df.withColumn(col, F.coalesce(df[col].cast(T.FloatType()), F.lit(0.0)))
return df
# %% [markdown]
# 7. Lastly, define a function that returns a list of values based on a key.
# %%
def lookup_columns(df: pyspark.sql.DataFrame, vocab: Dict[str, List[Any]]) -> pyspark.sql.DataFrame:
def lookup(mapping):
def fn(v):
return mapping.index(v)
return F.udf(fn, returnType=T.IntegerType())
for col, mapping in vocab.items():
df = df.withColumn(col, lookup(mapping)(df[col]))
return df
# %% [markdown]
# The `data_preparation` function consolidates all the aforementioned data processing functions.
# %%
def data_preparation(
data_dir: FlyteDirectory, hp: Hyperparameters
) -> Tuple[float, Dict[str, List[Any]], pyspark.sql.DataFrame, pyspark.sql.DataFrame]:
print("================")
print("Data preparation")
print("================")
# 'current_context' gives the handle of specific parameters in ``data_preparation`` task
spark = flytekit.current_context().spark_session
data_dir_path = data_dir.remote_source
# read the CSV data into Spark DataFrame
train_csv = spark.read.csv("%s/train.csv" % data_dir_path, header=True)
test_csv = spark.read.csv("%s/test.csv" % data_dir_path, header=True)
store_csv = spark.read.csv("%s/store.csv" % data_dir_path, header=True)
store_states_csv = spark.read.csv("%s/store_states.csv" % data_dir_path, header=True)
state_names_csv = spark.read.csv("%s/state_names.csv" % data_dir_path, header=True)
google_trend_csv = spark.read.csv("%s/googletrend.csv" % data_dir_path, header=True)
weather_csv = spark.read.csv("%s/weather.csv" % data_dir_path, header=True)
# retrieve a sampled subset of the train and test data
if hp.sample_rate:
train_csv = train_csv.sample(withReplacement=False, fraction=hp.sample_rate)
test_csv = test_csv.sample(withReplacement=False, fraction=hp.sample_rate)
# prepare the DataFrames from the CSV files
train_df = prepare_df(
train_csv,
store_csv,
store_states_csv,
state_names_csv,
google_trend_csv,
weather_csv,
).cache()
test_df = prepare_df(
test_csv,
store_csv,
store_states_csv,
state_names_csv,
google_trend_csv,
weather_csv,
).cache()
# add elapsed times from the data spanning training & test datasets
elapsed_cols = ["Promo", "StateHoliday", "SchoolHoliday"]
elapsed = add_elapsed(
train_df.select("Date", "Store", *elapsed_cols).unionAll(test_df.select("Date", "Store", *elapsed_cols)),
elapsed_cols,
)
# join with the elapsed times
train_df = train_df.join(elapsed, ["Date", "Store"]).select(
train_df["*"],
*[prefix + col for prefix in ["Before", "After"] for col in elapsed_cols],
)
test_df = test_df.join(elapsed, ["Date", "Store"]).select(
test_df["*"],
*[prefix + col for prefix in ["Before", "After"] for col in elapsed_cols],
)
# filter out zero sales
train_df = train_df.filter(train_df.Sales > 0)
print("===================")
print("Prepared data frame")
print("===================")
train_df.show()
all_cols = CATEGORICAL_COLS + CONTINUOUS_COLS
# select features
train_df = train_df.select(*(all_cols + ["Sales", "Date"])).cache()
test_df = test_df.select(*(all_cols + ["Id", "Date"])).cache()
# build a vocabulary of categorical columns
vocab = build_vocabulary(
train_df.select(*CATEGORICAL_COLS).unionAll(test_df.select(*CATEGORICAL_COLS)).cache(),
)
# cast continuous columns to float
train_df = cast_columns(train_df, CONTINUOUS_COLS + ["Sales"])
# search for a key and return a list of values based on a key
train_df = lookup_columns(train_df, vocab)
test_df = cast_columns(test_df, CONTINUOUS_COLS)
test_df = lookup_columns(test_df, vocab)
# split into training & validation
# test set is in 2015, use the same period in 2014 from the training set as a validation set
test_min_date = test_df.agg(F.min(test_df.Date)).collect()[0][0]
test_max_date = test_df.agg(F.max(test_df.Date)).collect()[0][0]
one_year = datetime.timedelta(365)
train_df = train_df.withColumn(
"Validation",
(train_df.Date > test_min_date - one_year) & (train_df.Date <= test_max_date - one_year),
)
# determine max Sales number
max_sales = train_df.agg(F.max(train_df.Sales)).collect()[0][0]
# convert Sales to log domain
train_df = train_df.withColumn("Sales", F.log(train_df.Sales))
print("===================================")
print("Data frame with transformed columns")
print("===================================")
train_df.show()
print("================")
print("Data frame sizes")
print("================")
# filter out column validation from the DataFrame, and get the count
train_rows = train_df.filter(~train_df.Validation).count()
val_rows = train_df.filter(train_df.Validation).count()
test_rows = test_df.count()
# print the number of rows in training, validation and test data
print("Training: %d" % train_rows)
print("Validation: %d" % val_rows)
print("Test: %d" % test_rows)
return max_sales, vocab, train_df, test_df
# %% [markdown]
# ## Training
#
# We use `KerasEstimator` in Horovod to train our Keras model on an existing pre-processed Spark DataFrame.
# The Estimator leverages Horovod's ability to scale across multiple workers, thereby eliminating any specialized code to perform distributed training.
# %%
def train(
max_sales: float,
vocab: Dict[str, List[Any]],
hp: Hyperparameters,
work_dir: FlyteDirectory,
train_df: pyspark.sql.DataFrame,
working_dir: FlyteDirectory,
):
print("==============")
print("Model training")
print("==============")
# a method to determine root mean square percentage error of exponential of predictions
def exp_rmspe(y_true, y_pred):
"""Competition evaluation metric, expects logarmithic inputs."""
pct = tf.square((tf.exp(y_true) - tf.exp(y_pred)) / tf.exp(y_true))
# compute mean excluding stores with zero denominator
x = tf.reduce_sum(tf.where(y_true > 0.001, pct, tf.zeros_like(pct)))
y = tf.reduce_sum(tf.where(y_true > 0.001, tf.ones_like(pct), tf.zeros_like(pct)))
return tf.sqrt(x / y)
def act_sigmoid_scaled(x):
"""Sigmoid scaled to logarithm of maximum sales scaled by 20%."""
return tf.nn.sigmoid(x) * tf.math.log(max_sales) * 1.2
# NOTE: exp_rmse and act_sigmoid_scaled functions are not placed at the module level
# this is because we cannot explicitly send max_sales as an argument to act_sigmoid_scaled since it is an activation function
# two of them are custom objects, and placing one at the module level and the other within the function doesn't really add up
all_cols = CATEGORICAL_COLS + CONTINUOUS_COLS
CUSTOM_OBJECTS = {"exp_rmspe": exp_rmspe, "act_sigmoid_scaled": act_sigmoid_scaled}
# disable GPUs when building the model to prevent memory leaks
if LooseVersion(tf.__version__) >= LooseVersion("2.0.0"):
# See https://github.com/tensorflow/tensorflow/issues/33168
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
else:
K.set_session(tf.Session(config=tf.ConfigProto(device_count={"GPU": 0})))
# build the Keras model | inputs = {col: Input(shape=(1,), name=col) for col in all_cols}
embeddings = [
Embedding(len(vocab[col]), 10, input_length=1, name="emb_" + col)(inputs[col]) for col in CATEGORICAL_COLS
] | random_line_split |
|
keras_spark_rossmann_estimator.py | google_trend_all = google_trend_all.withColumn(
"State",
F.when(google_trend_all.State == "NI", "HB,NI").otherwise(google_trend_all.State),
)
# expand dates
return expand_date(google_trend_all)
# %% [markdown]
# 2. Next, we set a few date-specific values in the DataFrame to analyze the seasonal effects on sales.
# %%
def expand_date(df: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:
df = df.withColumn("Date", df.Date.cast(T.DateType()))
return (
df.withColumn("Year", F.year(df.Date))
.withColumn("Month", F.month(df.Date))
.withColumn("Week", F.weekofyear(df.Date))
.withColumn("Day", F.dayofmonth(df.Date))
)
# %% [markdown]
# 3. We retrieve the number of days before/after a special event (such as a promo or holiday). This data helps analyze how the sales may vary before/after a special event.
# %%
def add_elapsed(df: pyspark.sql.DataFrame, cols: List[str]) -> pyspark.sql.DataFrame:
def add_elapsed_column(col, asc):
def fn(rows):
last_store, last_date = None, None
for r in rows:
if last_store != r.Store:
last_store = r.Store
last_date = r.Date
if r[col]:
last_date = r.Date
fields = r.asDict().copy()
fields[("After" if asc else "Before") + col] = (r.Date - last_date).days
yield Row(**fields)
return fn
# repartition: rearrange the rows in the DataFrame based on the partitioning expression
# sortWithinPartitions: sort every partition in the DataFrame based on specific columns
# mapPartitions: apply the 'add_elapsed_column' method to each partition in the dataset, and convert the partitions into a DataFrame
df = df.repartition(df.Store)
for asc in [False, True]:
sort_col = df.Date.asc() if asc else df.Date.desc()
rdd = df.sortWithinPartitions(df.Store.asc(), sort_col).rdd
for col in cols:
rdd = rdd.mapPartitions(add_elapsed_column(col, asc))
df = rdd.toDF()
return df
# %% [markdown]
# 4. We define a function to merge several Spark DataFrames into a single DataFrame to create training and test data.
# %%
def prepare_df(
df: pyspark.sql.DataFrame,
store_csv: pyspark.sql.DataFrame,
store_states_csv: pyspark.sql.DataFrame,
state_names_csv: pyspark.sql.DataFrame,
google_trend_csv: pyspark.sql.DataFrame,
weather_csv: pyspark.sql.DataFrame,
) -> pyspark.sql.DataFrame:
num_rows = df.count()
# expand dates
df = expand_date(df)
# create new columns in the DataFrame by filtering out special events(promo/holiday where sales was zero or store was closed).
df = (
df.withColumn("Open", df.Open != "0")
.withColumn("Promo", df.Promo != "0")
.withColumn("StateHoliday", df.StateHoliday != "0")
.withColumn("SchoolHoliday", df.SchoolHoliday != "0")
)
# merge store information
store = store_csv.join(store_states_csv, "Store")
df = df.join(store, "Store")
# merge Google Trend information
google_trend_all = prepare_google_trend(google_trend_csv)
df = df.join(google_trend_all, ["State", "Year", "Week"]).select(df["*"], google_trend_all.trend)
# merge in Google Trend for whole Germany
google_trend_de = google_trend_all[google_trend_all.file == "Rossmann_DE"].withColumnRenamed("trend", "trend_de")
df = df.join(google_trend_de, ["Year", "Week"]).select(df["*"], google_trend_de.trend_de)
# merge weather
weather = weather_csv.join(state_names_csv, weather_csv.file == state_names_csv.StateName)
df = df.join(weather, ["State", "Date"])
# fix null values
df = (
df.withColumn(
"CompetitionOpenSinceYear",
F.coalesce(df.CompetitionOpenSinceYear, F.lit(1900)),
)
.withColumn(
"CompetitionOpenSinceMonth",
F.coalesce(df.CompetitionOpenSinceMonth, F.lit(1)),
)
.withColumn("Promo2SinceYear", F.coalesce(df.Promo2SinceYear, F.lit(1900)))
.withColumn("Promo2SinceWeek", F.coalesce(df.Promo2SinceWeek, F.lit(1)))
)
# days and months since the competition has been open, cap it to 2 years
df = df.withColumn(
"CompetitionOpenSince",
F.to_date(F.format_string("%s-%s-15", df.CompetitionOpenSinceYear, df.CompetitionOpenSinceMonth)),
)
df = df.withColumn(
"CompetitionDaysOpen",
F.when(
df.CompetitionOpenSinceYear > 1900,
F.greatest(
F.lit(0),
F.least(F.lit(360 * 2), F.datediff(df.Date, df.CompetitionOpenSince)),
),
).otherwise(0),
)
df = df.withColumn("CompetitionMonthsOpen", (df.CompetitionDaysOpen / 30).cast(T.IntegerType()))
# days and weeks of promotion, cap it to 25 weeks
df = df.withColumn(
"Promo2Since",
F.expr('date_add(format_string("%s-01-01", Promo2SinceYear), (cast(Promo2SinceWeek as int) - 1) * 7)'),
)
df = df.withColumn(
"Promo2Days",
F.when(
df.Promo2SinceYear > 1900,
F.greatest(F.lit(0), F.least(F.lit(25 * 7), F.datediff(df.Date, df.Promo2Since))),
).otherwise(0),
)
df = df.withColumn("Promo2Weeks", (df.Promo2Days / 7).cast(T.IntegerType()))
# ensure that no row was lost through inner joins
assert num_rows == df.count(), "lost rows in joins"
return df
# %% [markdown]
# 5. We build a dictionary of sorted, distinct categorical variables to create an embedding layer in our Keras model.
# %%
def build_vocabulary(df: pyspark.sql.DataFrame) -> Dict[str, List[Any]]:
vocab = {}
for col in CATEGORICAL_COLS:
values = [r[0] for r in df.select(col).distinct().collect()]
col_type = type([x for x in values if x is not None][0])
default_value = col_type()
vocab[col] = sorted(values, key=lambda x: x or default_value)
return vocab
# %% [markdown]
# 6. Next, we cast continuous columns to float as part of data preprocessing.
# %%
def cast_columns(df: pyspark.sql.DataFrame, cols: List[str]) -> pyspark.sql.DataFrame:
for col in cols:
df = df.withColumn(col, F.coalesce(df[col].cast(T.FloatType()), F.lit(0.0)))
return df
# %% [markdown]
# 7. Lastly, define a function that returns a list of values based on a key.
# %%
def lookup_columns(df: pyspark.sql.DataFrame, vocab: Dict[str, List[Any]]) -> pyspark.sql.DataFrame:
def lookup(mapping):
def fn(v):
return mapping.index(v)
return F.udf(fn, returnType=T.IntegerType())
for col, mapping in vocab.items():
|
return df
# %% [markdown]
# The `data_preparation` function consolidates all the aforementioned data processing functions.
# %%
def data_preparation(
data_dir: FlyteDirectory, hp: Hyperparameters
) -> Tuple[float, Dict[str, List[Any]], pyspark.sql.DataFrame, pyspark.sql.DataFrame]:
print("================")
print("Data preparation")
print("================")
# 'current_context' gives the handle of specific parameters in ``data_preparation`` task
spark = flytekit.current_context().spark_session
data_dir_path = data_dir.remote_source
# read the CSV data into Spark DataFrame
train_csv = spark.read.csv("%s/train.csv" % data_dir_path, header=True)
test_csv = spark.read.csv("%s/test.csv" % data_dir_path, header=True)
store_csv = spark.read.csv("%s/store.csv" % data_dir_path, header=True)
store_states_csv = spark.read.csv("%s/store_states.csv" % data_dir_path, header=True)
state_names_csv = spark.read.csv("%s/state_names.csv" % data_dir_path, header=True)
google_trend_csv = spark.read.csv("%s/googletrend.csv" % data_dir_path, header=True)
weather_csv = spark.read.csv("%s/weather.csv" % data_dir_path, header=True)
# retrieve a sampled subset of the train and test data
if hp.sample | df = df.withColumn(col, lookup(mapping)(df[col])) | conditional_block |
keras_spark_rossmann_estimator.py | google_trend_all = google_trend_all.withColumn(
"State",
F.when(google_trend_all.State == "NI", "HB,NI").otherwise(google_trend_all.State),
)
# expand dates
return expand_date(google_trend_all)
# %% [markdown]
# 2. Next, we set a few date-specific values in the DataFrame to analyze the seasonal effects on sales.
# %%
def expand_date(df: pyspark.sql.DataFrame) -> pyspark.sql.DataFrame:
df = df.withColumn("Date", df.Date.cast(T.DateType()))
return (
df.withColumn("Year", F.year(df.Date))
.withColumn("Month", F.month(df.Date))
.withColumn("Week", F.weekofyear(df.Date))
.withColumn("Day", F.dayofmonth(df.Date))
)
# %% [markdown]
# 3. We retrieve the number of days before/after a special event (such as a promo or holiday). This data helps analyze how the sales may vary before/after a special event.
# %%
def add_elapsed(df: pyspark.sql.DataFrame, cols: List[str]) -> pyspark.sql.DataFrame:
def add_elapsed_column(col, asc):
def | (rows):
last_store, last_date = None, None
for r in rows:
if last_store != r.Store:
last_store = r.Store
last_date = r.Date
if r[col]:
last_date = r.Date
fields = r.asDict().copy()
fields[("After" if asc else "Before") + col] = (r.Date - last_date).days
yield Row(**fields)
return fn
# repartition: rearrange the rows in the DataFrame based on the partitioning expression
# sortWithinPartitions: sort every partition in the DataFrame based on specific columns
# mapPartitions: apply the 'add_elapsed_column' method to each partition in the dataset, and convert the partitions into a DataFrame
df = df.repartition(df.Store)
for asc in [False, True]:
sort_col = df.Date.asc() if asc else df.Date.desc()
rdd = df.sortWithinPartitions(df.Store.asc(), sort_col).rdd
for col in cols:
rdd = rdd.mapPartitions(add_elapsed_column(col, asc))
df = rdd.toDF()
return df
# %% [markdown]
# 4. We define a function to merge several Spark DataFrames into a single DataFrame to create training and test data.
# %%
def prepare_df(
df: pyspark.sql.DataFrame,
store_csv: pyspark.sql.DataFrame,
store_states_csv: pyspark.sql.DataFrame,
state_names_csv: pyspark.sql.DataFrame,
google_trend_csv: pyspark.sql.DataFrame,
weather_csv: pyspark.sql.DataFrame,
) -> pyspark.sql.DataFrame:
num_rows = df.count()
# expand dates
df = expand_date(df)
# create new columns in the DataFrame by filtering out special events(promo/holiday where sales was zero or store was closed).
df = (
df.withColumn("Open", df.Open != "0")
.withColumn("Promo", df.Promo != "0")
.withColumn("StateHoliday", df.StateHoliday != "0")
.withColumn("SchoolHoliday", df.SchoolHoliday != "0")
)
# merge store information
store = store_csv.join(store_states_csv, "Store")
df = df.join(store, "Store")
# merge Google Trend information
google_trend_all = prepare_google_trend(google_trend_csv)
df = df.join(google_trend_all, ["State", "Year", "Week"]).select(df["*"], google_trend_all.trend)
# merge in Google Trend for whole Germany
google_trend_de = google_trend_all[google_trend_all.file == "Rossmann_DE"].withColumnRenamed("trend", "trend_de")
df = df.join(google_trend_de, ["Year", "Week"]).select(df["*"], google_trend_de.trend_de)
# merge weather
weather = weather_csv.join(state_names_csv, weather_csv.file == state_names_csv.StateName)
df = df.join(weather, ["State", "Date"])
# fix null values
df = (
df.withColumn(
"CompetitionOpenSinceYear",
F.coalesce(df.CompetitionOpenSinceYear, F.lit(1900)),
)
.withColumn(
"CompetitionOpenSinceMonth",
F.coalesce(df.CompetitionOpenSinceMonth, F.lit(1)),
)
.withColumn("Promo2SinceYear", F.coalesce(df.Promo2SinceYear, F.lit(1900)))
.withColumn("Promo2SinceWeek", F.coalesce(df.Promo2SinceWeek, F.lit(1)))
)
# days and months since the competition has been open, cap it to 2 years
df = df.withColumn(
"CompetitionOpenSince",
F.to_date(F.format_string("%s-%s-15", df.CompetitionOpenSinceYear, df.CompetitionOpenSinceMonth)),
)
df = df.withColumn(
"CompetitionDaysOpen",
F.when(
df.CompetitionOpenSinceYear > 1900,
F.greatest(
F.lit(0),
F.least(F.lit(360 * 2), F.datediff(df.Date, df.CompetitionOpenSince)),
),
).otherwise(0),
)
df = df.withColumn("CompetitionMonthsOpen", (df.CompetitionDaysOpen / 30).cast(T.IntegerType()))
# days and weeks of promotion, cap it to 25 weeks
df = df.withColumn(
"Promo2Since",
F.expr('date_add(format_string("%s-01-01", Promo2SinceYear), (cast(Promo2SinceWeek as int) - 1) * 7)'),
)
df = df.withColumn(
"Promo2Days",
F.when(
df.Promo2SinceYear > 1900,
F.greatest(F.lit(0), F.least(F.lit(25 * 7), F.datediff(df.Date, df.Promo2Since))),
).otherwise(0),
)
df = df.withColumn("Promo2Weeks", (df.Promo2Days / 7).cast(T.IntegerType()))
# ensure that no row was lost through inner joins
assert num_rows == df.count(), "lost rows in joins"
return df
# %% [markdown]
# 5. We build a dictionary of sorted, distinct categorical variables to create an embedding layer in our Keras model.
# %%
def build_vocabulary(df: pyspark.sql.DataFrame) -> Dict[str, List[Any]]:
vocab = {}
for col in CATEGORICAL_COLS:
values = [r[0] for r in df.select(col).distinct().collect()]
col_type = type([x for x in values if x is not None][0])
default_value = col_type()
vocab[col] = sorted(values, key=lambda x: x or default_value)
return vocab
# %% [markdown]
# 6. Next, we cast continuous columns to float as part of data preprocessing.
# %%
def cast_columns(df: pyspark.sql.DataFrame, cols: List[str]) -> pyspark.sql.DataFrame:
for col in cols:
df = df.withColumn(col, F.coalesce(df[col].cast(T.FloatType()), F.lit(0.0)))
return df
# %% [markdown]
# 7. Lastly, define a function that returns a list of values based on a key.
# %%
def lookup_columns(df: pyspark.sql.DataFrame, vocab: Dict[str, List[Any]]) -> pyspark.sql.DataFrame:
def lookup(mapping):
def fn(v):
return mapping.index(v)
return F.udf(fn, returnType=T.IntegerType())
for col, mapping in vocab.items():
df = df.withColumn(col, lookup(mapping)(df[col]))
return df
# %% [markdown]
# The `data_preparation` function consolidates all the aforementioned data processing functions.
# %%
def data_preparation(
data_dir: FlyteDirectory, hp: Hyperparameters
) -> Tuple[float, Dict[str, List[Any]], pyspark.sql.DataFrame, pyspark.sql.DataFrame]:
print("================")
print("Data preparation")
print("================")
# 'current_context' gives the handle of specific parameters in ``data_preparation`` task
spark = flytekit.current_context().spark_session
data_dir_path = data_dir.remote_source
# read the CSV data into Spark DataFrame
train_csv = spark.read.csv("%s/train.csv" % data_dir_path, header=True)
test_csv = spark.read.csv("%s/test.csv" % data_dir_path, header=True)
store_csv = spark.read.csv("%s/store.csv" % data_dir_path, header=True)
store_states_csv = spark.read.csv("%s/store_states.csv" % data_dir_path, header=True)
state_names_csv = spark.read.csv("%s/state_names.csv" % data_dir_path, header=True)
google_trend_csv = spark.read.csv("%s/googletrend.csv" % data_dir_path, header=True)
weather_csv = spark.read.csv("%s/weather.csv" % data_dir_path, header=True)
# retrieve a sampled subset of the train and test data
if hp.sample | fn | identifier_name |
container.go | ()
}
// Stop stops a container. If kill is true, the container's task is killed
// ungracefully; otherwise it is signalled to terminate gracefully.
//
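// A caller-side sketch (assumed usage, not part of this method's contract):
// attempt a graceful stop first and fall back to an ungraceful kill. The
// fallback policy shown here is an illustration only.
//
//    if err := container.Stop(false); err != nil {
//        err = container.Stop(true)
//    }
//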
func (c *Container) Stop(kill bool) error {
ctx := context.Background()
task, err := c.container.Task(ctx, cio.Load)
if err != nil {
return fmt.Errorf("task lookup: %w", err)
}
behaviour := KillGracefully
if kill {
behaviour = KillUngracefully
}
err = c.killer.Kill(ctx, task, behaviour)
if err != nil {
return fmt.Errorf("kill: %w", err)
}
return nil
}
// Run a process inside the container.
//
func (c *Container) Run(
spec garden.ProcessSpec,
processIO garden.ProcessIO,
) (garden.Process, error) {
ctx := context.Background()
containerSpec, err := c.container.Spec(ctx)
if err != nil {
return nil, fmt.Errorf("container spec: %w", err)
}
procSpec, err := c.setupContainerdProcSpec(spec, *containerSpec)
if err != nil {
return nil, err
}
err = c.rootfsManager.SetupCwd(containerSpec.Root.Path, procSpec.Cwd)
if err != nil {
return nil, fmt.Errorf("setup cwd: %w", err)
}
task, err := c.container.Task(ctx, nil)
if err != nil {
return nil, fmt.Errorf("task retrieval: %w", err)
}
id := procID(spec)
cioOpts := containerdCIO(processIO, spec.TTY != nil)
proc, err := task.Exec(ctx, id, &procSpec, cio.NewCreator(cioOpts...))
if err != nil {
return nil, fmt.Errorf("task exec: %w", err)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
err = proc.Start(ctx)
if err != nil {
if isNoSuchExecutable(err) {
return nil, garden.ExecutableNotFoundError{Message: err.Error()}
}
return nil, fmt.Errorf("proc start: %w", err)
}
// If there is no TTY allocated for the process, we can call CloseIO right
// away. The reason we don't do this when there is a TTY is that runc
// signals such processes with SIGHUP when stdin is closed and we have
// called CloseIO (which doesn't actually close the stdin stream for the
// container - it just marks the stream as "closable").
//
// If we were to call CloseIO immediately on processes with a TTY, if the
// Stdin stream ever receives an error (e.g. an io.EOF due to worker
// rebalancing, or the worker restarting gracefully), runc will kill the
// process with SIGHUP (because we would have marked the stream as
// closable).
//
// Note: resource containers are the only ones without a TTY - task and
// hijack processes have a TTY enabled.
if spec.TTY == nil {
err = proc.CloseIO(ctx, containerd.WithStdinCloser)
if err != nil {
return nil, fmt.Errorf("proc closeio: %w", err)
}
}
return NewProcess(proc, exitStatusC), nil
}
// Attach starts streaming the output back to the client from a specified process.
//
func (c *Container) Attach(pid string, processIO garden.ProcessIO) (process garden.Process, err error) {
ctx := context.Background()
if pid == "" {
return nil, ErrInvalidInput("empty pid")
}
task, err := c.container.Task(ctx, cio.Load)
if err != nil {
return nil, fmt.Errorf("task: %w", err)
}
cioOpts := containerdCIO(processIO, false)
proc, err := task.LoadProcess(ctx, pid, cio.NewAttach(cioOpts...))
if err != nil {
return nil, fmt.Errorf("load proc: %w", err)
}
status, err := proc.Status(ctx)
if err != nil {
return nil, fmt.Errorf("proc status: %w", err)
}
if status.Status != containerd.Running {
return nil, fmt.Errorf("proc not running: status = %s", status.Status)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
return NewProcess(proc, exitStatusC), nil
}
// Properties returns the current set of properties
//
func (c *Container) Properties() (garden.Properties, error) {
ctx := context.Background()
labels, err := c.container.Labels(ctx)
if err != nil {
return garden.Properties{}, fmt.Errorf("labels retrieval: %w", err)
}
return labelsToProperties(labels), nil
}
// Property returns the value of the property with the specified name.
//
func (c *Container) Property(name string) (string, error) {
properties, err := c.Properties()
if err != nil {
return "", err
}
v, found := properties[name]
if !found {
return "", ErrNotFound(name)
}
return v, nil
}
// Set a named property on a container to a specified value.
//
func (c *Container) SetProperty(name string, value string) error {
labelSet, err := propertiesToLabels(garden.Properties{name: value})
if err != nil {
return err
}
_, err = c.container.SetLabels(context.Background(), labelSet)
if err != nil |
return nil
}
// RemoveProperty - Not Implemented
func (c *Container) RemoveProperty(name string) (err error) {
err = ErrNotImplemented
return
}
// Info - Not Implemented
func (c *Container) Info() (info garden.ContainerInfo, err error) {
err = ErrNotImplemented
return
}
// Metrics - Not Implemented
func (c *Container) Metrics() (metrics garden.Metrics, err error) {
err = ErrNotImplemented
return
}
// StreamIn - Not Implemented
func (c *Container) StreamIn(spec garden.StreamInSpec) (err error) {
err = ErrNotImplemented
return
}
// StreamOut - Not Implemented
func (c *Container) StreamOut(spec garden.StreamOutSpec) (readCloser io.ReadCloser, err error) {
err = ErrNotImplemented
return
}
// SetGraceTime stores the grace time as a containerd label with key "garden.grace-time"
//
func (c *Container) SetGraceTime(graceTime time.Duration) error {
err := c.SetProperty(GraceTimeKey, fmt.Sprintf("%d", graceTime))
if err != nil {
return fmt.Errorf("set grace time: %w", err)
}
return nil
}
// CurrentBandwidthLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentBandwidthLimits() (garden.BandwidthLimits, error) {
return garden.BandwidthLimits{}, nil
}
// CurrentCPULimits returns the CPU shares allocated to the container
func (c *Container) CurrentCPULimits() (garden.CPULimits, error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.CPULimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.CPU == nil ||
spec.Linux.Resources.CPU.Shares == nil {
return garden.CPULimits{}, nil
}
return garden.CPULimits{
Weight: *spec.Linux.Resources.CPU.Shares,
}, nil
}
// CurrentDiskLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentDiskLimits() (garden.DiskLimits, error) {
return garden.DiskLimits{}, nil
}
// CurrentMemoryLimits returns the memory limit in bytes allocated to the container
func (c *Container) CurrentMemoryLimits() (limits garden.MemoryLimits, err error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.MemoryLimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.Memory == nil ||
spec.Linux.Resources.Memory.Limit == nil {
return garden.MemoryLimits{}, nil
}
return garden.MemoryLimits{
LimitInBytes: uint64(*spec.Linux.Resources.Memory.Limit),
}, nil
}
// NetIn - Not Implemented
func (c *Container) NetIn(hostPort, containerPort uint32) (a, b uint32, err error) {
err = ErrNotImplemented
return
}
// NetOut - Not Implemented
func (c *Container) NetOut(netOutRule garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
// BulkNetOut - Not Implemented
func (c *Container) BulkNetOut(netOutRules []garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
func procID(gdnProcSpec garden.ProcessSpec) string {
id := gdnProcSpec.ID
if id == "" {
uuid, err := uuid.NewV4()
if err != nil {
panic(fmt.Errorf("uuid gen: %w", err))
}
id = uuid.String()
}
return id
}
func (c *Container) | {
return fmt.Errorf("set label: %w", err)
} | conditional_block |
container.go | ()
}
// Stop stops a container.
//
func (c *Container) Stop(kill bool) error {
ctx := context.Background()
task, err := c.container.Task(ctx, cio.Load)
if err != nil {
return fmt.Errorf("task lookup: %w", err)
}
behaviour := KillGracefully
if kill {
behaviour = KillUngracefully
}
err = c.killer.Kill(ctx, task, behaviour)
if err != nil {
return fmt.Errorf("kill: %w", err)
}
return nil
}
// Run a process inside the container.
//
func (c *Container) Run(
spec garden.ProcessSpec,
processIO garden.ProcessIO,
) (garden.Process, error) {
ctx := context.Background()
containerSpec, err := c.container.Spec(ctx)
if err != nil {
return nil, fmt.Errorf("container spec: %w", err)
}
procSpec, err := c.setupContainerdProcSpec(spec, *containerSpec)
if err != nil {
return nil, err
}
err = c.rootfsManager.SetupCwd(containerSpec.Root.Path, procSpec.Cwd)
if err != nil {
return nil, fmt.Errorf("setup cwd: %w", err)
}
task, err := c.container.Task(ctx, nil)
if err != nil {
return nil, fmt.Errorf("task retrieval: %w", err)
}
id := procID(spec)
cioOpts := containerdCIO(processIO, spec.TTY != nil)
proc, err := task.Exec(ctx, id, &procSpec, cio.NewCreator(cioOpts...))
if err != nil {
return nil, fmt.Errorf("task exec: %w", err)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
err = proc.Start(ctx)
if err != nil {
if isNoSuchExecutable(err) {
return nil, garden.ExecutableNotFoundError{Message: err.Error()}
}
return nil, fmt.Errorf("proc start: %w", err)
}
// If there is no TTY allocated for the process, we can call CloseIO right
// away. The reason we don't do this when there is a TTY is that runc
// signals such processes with SIGHUP when stdin is closed and we have
// called CloseIO (which doesn't actually close the stdin stream for the
// container - it just marks the stream as "closable").
//
// If we were to call CloseIO immediately on processes with a TTY, if the
// Stdin stream ever receives an error (e.g. an io.EOF due to worker
// rebalancing, or the worker restarting gracefully), runc will kill the
// process with SIGHUP (because we would have marked the stream as
// closable).
//
// Note: resource containers are the only ones without a TTY - task and
// hijack processes have a TTY enabled.
if spec.TTY == nil {
err = proc.CloseIO(ctx, containerd.WithStdinCloser)
if err != nil {
return nil, fmt.Errorf("proc closeio: %w", err)
}
}
return NewProcess(proc, exitStatusC), nil
}
// Attach starts streaming the output back to the client from a specified process.
//
func (c *Container) Attach(pid string, processIO garden.ProcessIO) (process garden.Process, err error) {
ctx := context.Background()
if pid == "" {
return nil, ErrInvalidInput("empty pid")
}
task, err := c.container.Task(ctx, cio.Load)
if err != nil { |
proc, err := task.LoadProcess(ctx, pid, cio.NewAttach(cioOpts...))
if err != nil {
return nil, fmt.Errorf("load proc: %w", err)
}
status, err := proc.Status(ctx)
if err != nil {
return nil, fmt.Errorf("proc status: %w", err)
}
if status.Status != containerd.Running {
return nil, fmt.Errorf("proc not running: status = %s", status.Status)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
return NewProcess(proc, exitStatusC), nil
}
// Properties returns the current set of properties
//
func (c *Container) Properties() (garden.Properties, error) {
ctx := context.Background()
labels, err := c.container.Labels(ctx)
if err != nil {
return garden.Properties{}, fmt.Errorf("labels retrieval: %w", err)
}
return labelsToProperties(labels), nil
}
// Property returns the value of the property with the specified name.
//
func (c *Container) Property(name string) (string, error) {
properties, err := c.Properties()
if err != nil {
return "", err
}
v, found := properties[name]
if !found {
return "", ErrNotFound(name)
}
return v, nil
}
// Set a named property on a container to a specified value.
//
func (c *Container) SetProperty(name string, value string) error {
labelSet, err := propertiesToLabels(garden.Properties{name: value})
if err != nil {
return err
}
_, err = c.container.SetLabels(context.Background(), labelSet)
if err != nil {
return fmt.Errorf("set label: %w", err)
}
return nil
}
// RemoveProperty - Not Implemented
func (c *Container) RemoveProperty(name string) (err error) {
err = ErrNotImplemented
return
}
// Info - Not Implemented
func (c *Container) Info() (info garden.ContainerInfo, err error) {
err = ErrNotImplemented
return
}
// Metrics - Not Implemented
func (c *Container) Metrics() (metrics garden.Metrics, err error) {
err = ErrNotImplemented
return
}
// StreamIn - Not Implemented
func (c *Container) StreamIn(spec garden.StreamInSpec) (err error) {
err = ErrNotImplemented
return
}
// StreamOut - Not Implemented
func (c *Container) StreamOut(spec garden.StreamOutSpec) (readCloser io.ReadCloser, err error) {
err = ErrNotImplemented
return
}
// SetGraceTime stores the grace time as a containerd label with key "garden.grace-time"
//
func (c *Container) SetGraceTime(graceTime time.Duration) error {
err := c.SetProperty(GraceTimeKey, fmt.Sprintf("%d", graceTime))
if err != nil {
return fmt.Errorf("set grace time: %w", err)
}
return nil
}
// CurrentBandwidthLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentBandwidthLimits() (garden.BandwidthLimits, error) {
return garden.BandwidthLimits{}, nil
}
// CurrentCPULimits returns the CPU shares allocated to the container
func (c *Container) CurrentCPULimits() (garden.CPULimits, error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.CPULimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.CPU == nil ||
spec.Linux.Resources.CPU.Shares == nil {
return garden.CPULimits{}, nil
}
return garden.CPULimits{
Weight: *spec.Linux.Resources.CPU.Shares,
}, nil
}
// CurrentDiskLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentDiskLimits() (garden.DiskLimits, error) {
return garden.DiskLimits{}, nil
}
// CurrentMemoryLimits returns the memory limit in bytes allocated to the container
func (c *Container) CurrentMemoryLimits() (limits garden.MemoryLimits, err error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.MemoryLimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.Memory == nil ||
spec.Linux.Resources.Memory.Limit == nil {
return garden.MemoryLimits{}, nil
}
return garden.MemoryLimits{
LimitInBytes: uint64(*spec.Linux.Resources.Memory.Limit),
}, nil
}
// NetIn - Not Implemented
func (c *Container) NetIn(hostPort, containerPort uint32) (a, b uint32, err error) {
err = ErrNotImplemented
return
}
// NetOut - Not Implemented
func (c *Container) NetOut(netOutRule garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
// BulkNetOut - Not Implemented
func (c *Container) BulkNetOut(netOutRules []garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
func procID(gdnProcSpec garden.ProcessSpec) string {
id := gdnProcSpec.ID
if id == "" {
uuid, err := uuid.NewV4()
if err != nil {
panic(fmt.Errorf("uuid gen: %w", err))
}
id = uuid.String()
}
return id
}
func (c *Container) setup | return nil, fmt.Errorf("task: %w", err)
}
cioOpts := containerdCIO(processIO, false) | random_line_split |
container.go | ()
}
// Stop stops a container.
//
func (c *Container) Stop(kill bool) error {
ctx := context.Background()
task, err := c.container.Task(ctx, cio.Load)
if err != nil {
return fmt.Errorf("task lookup: %w", err)
}
behaviour := KillGracefully
if kill {
behaviour = KillUngracefully
}
err = c.killer.Kill(ctx, task, behaviour)
if err != nil {
return fmt.Errorf("kill: %w", err)
}
return nil
}
// Run a process inside the container.
//
func (c *Container) Run(
spec garden.ProcessSpec,
processIO garden.ProcessIO,
) (garden.Process, error) {
ctx := context.Background()
containerSpec, err := c.container.Spec(ctx)
if err != nil {
return nil, fmt.Errorf("container spec: %w", err)
}
procSpec, err := c.setupContainerdProcSpec(spec, *containerSpec)
if err != nil {
return nil, err
}
err = c.rootfsManager.SetupCwd(containerSpec.Root.Path, procSpec.Cwd)
if err != nil {
return nil, fmt.Errorf("setup cwd: %w", err)
}
task, err := c.container.Task(ctx, nil)
if err != nil {
return nil, fmt.Errorf("task retrieval: %w", err)
}
id := procID(spec)
cioOpts := containerdCIO(processIO, spec.TTY != nil)
proc, err := task.Exec(ctx, id, &procSpec, cio.NewCreator(cioOpts...))
if err != nil {
return nil, fmt.Errorf("task exec: %w", err)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
err = proc.Start(ctx)
if err != nil {
if isNoSuchExecutable(err) {
return nil, garden.ExecutableNotFoundError{Message: err.Error()}
}
return nil, fmt.Errorf("proc start: %w", err)
}
// If there is no TTY allocated for the process, we can call CloseIO right
// away. The reason we don't do this when there is a TTY is that runc
// signals such processes with SIGHUP when stdin is closed and we have
// called CloseIO (which doesn't actually close the stdin stream for the
// container - it just marks the stream as "closable").
//
// If we were to call CloseIO immediately on processes with a TTY, if the
// Stdin stream ever receives an error (e.g. an io.EOF due to worker
// rebalancing, or the worker restarting gracefully), runc will kill the
// process with SIGHUP (because we would have marked the stream as
// closable).
//
// Note: resource containers are the only ones without a TTY - task and
// hijack processes have a TTY enabled.
if spec.TTY == nil {
err = proc.CloseIO(ctx, containerd.WithStdinCloser)
if err != nil {
return nil, fmt.Errorf("proc closeio: %w", err)
}
}
return NewProcess(proc, exitStatusC), nil
}
// Attach starts streaming the output back to the client from a specified process.
//
func (c *Container) Attach(pid string, processIO garden.ProcessIO) (process garden.Process, err error) {
ctx := context.Background()
if pid == "" {
return nil, ErrInvalidInput("empty pid")
}
task, err := c.container.Task(ctx, cio.Load)
if err != nil {
return nil, fmt.Errorf("task: %w", err)
}
cioOpts := containerdCIO(processIO, false)
proc, err := task.LoadProcess(ctx, pid, cio.NewAttach(cioOpts...))
if err != nil {
return nil, fmt.Errorf("load proc: %w", err)
}
status, err := proc.Status(ctx)
if err != nil {
return nil, fmt.Errorf("proc status: %w", err)
}
if status.Status != containerd.Running {
return nil, fmt.Errorf("proc not running: status = %s", status.Status)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
return NewProcess(proc, exitStatusC), nil
}
// Properties returns the current set of properties
//
func (c *Container) Properties() (garden.Properties, error) {
ctx := context.Background()
labels, err := c.container.Labels(ctx)
if err != nil {
return garden.Properties{}, fmt.Errorf("labels retrieval: %w", err)
}
return labelsToProperties(labels), nil
}
// Property returns the value of the property with the specified name.
//
func (c *Container) Property(name string) (string, error) {
properties, err := c.Properties()
if err != nil {
return "", err
}
v, found := properties[name]
if !found {
return "", ErrNotFound(name)
}
return v, nil
}
// Set a named property on a container to a specified value.
//
func (c *Container) SetProperty(name string, value string) error {
labelSet, err := propertiesToLabels(garden.Properties{name: value})
if err != nil {
return err
}
_, err = c.container.SetLabels(context.Background(), labelSet)
if err != nil {
return fmt.Errorf("set label: %w", err)
}
return nil
}
// RemoveProperty - Not Implemented
func (c *Container) RemoveProperty(name string) (err error) {
err = ErrNotImplemented
return
}
// Info - Not Implemented
func (c *Container) Info() (info garden.ContainerInfo, err error) |
// Metrics - Not Implemented
func (c *Container) Metrics() (metrics garden.Metrics, err error) {
err = ErrNotImplemented
return
}
// StreamIn - Not Implemented
func (c *Container) StreamIn(spec garden.StreamInSpec) (err error) {
err = ErrNotImplemented
return
}
// StreamOut - Not Implemented
func (c *Container) StreamOut(spec garden.StreamOutSpec) (readCloser io.ReadCloser, err error) {
err = ErrNotImplemented
return
}
// SetGraceTime stores the grace time as a containerd label with key "garden.grace-time"
//
func (c *Container) SetGraceTime(graceTime time.Duration) error {
err := c.SetProperty(GraceTimeKey, fmt.Sprintf("%d", graceTime))
if err != nil {
return fmt.Errorf("set grace time: %w", err)
}
return nil
}
// CurrentBandwidthLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentBandwidthLimits() (garden.BandwidthLimits, error) {
return garden.BandwidthLimits{}, nil
}
// CurrentCPULimits returns the CPU shares allocated to the container
func (c *Container) CurrentCPULimits() (garden.CPULimits, error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.CPULimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.CPU == nil ||
spec.Linux.Resources.CPU.Shares == nil {
return garden.CPULimits{}, nil
}
return garden.CPULimits{
Weight: *spec.Linux.Resources.CPU.Shares,
}, nil
}
// CurrentDiskLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentDiskLimits() (garden.DiskLimits, error) {
return garden.DiskLimits{}, nil
}
// CurrentMemoryLimits returns the memory limit in bytes allocated to the container
func (c *Container) CurrentMemoryLimits() (limits garden.MemoryLimits, err error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.MemoryLimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.Memory == nil ||
spec.Linux.Resources.Memory.Limit == nil {
return garden.MemoryLimits{}, nil
}
return garden.MemoryLimits{
LimitInBytes: uint64(*spec.Linux.Resources.Memory.Limit),
}, nil
}
// NetIn - Not Implemented
func (c *Container) NetIn(hostPort, containerPort uint32) (a, b uint32, err error) {
err = ErrNotImplemented
return
}
// NetOut - Not Implemented
func (c *Container) NetOut(netOutRule garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
// BulkNetOut - Not Implemented
func (c *Container) BulkNetOut(netOutRules []garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
func procID(gdnProcSpec garden.ProcessSpec) string {
id := gdnProcSpec.ID
if id == "" {
uuid, err := uuid.NewV4()
if err != nil {
panic(fmt.Errorf("uuid gen: %w", err))
}
id = uuid.String()
}
return id
}
func (c *Container) | {
err = ErrNotImplemented
return
} | identifier_body |
container.go | ()
}
// Stop stops a container.
//
func (c *Container) Stop(kill bool) error {
ctx := context.Background()
task, err := c.container.Task(ctx, cio.Load)
if err != nil {
return fmt.Errorf("task lookup: %w", err)
}
behaviour := KillGracefully
if kill {
behaviour = KillUngracefully
}
err = c.killer.Kill(ctx, task, behaviour)
if err != nil {
return fmt.Errorf("kill: %w", err)
}
return nil
}
// Run a process inside the container.
//
func (c *Container) Run(
spec garden.ProcessSpec,
processIO garden.ProcessIO,
) (garden.Process, error) {
ctx := context.Background()
containerSpec, err := c.container.Spec(ctx)
if err != nil {
return nil, fmt.Errorf("container spec: %w", err)
}
procSpec, err := c.setupContainerdProcSpec(spec, *containerSpec)
if err != nil {
return nil, err
}
err = c.rootfsManager.SetupCwd(containerSpec.Root.Path, procSpec.Cwd)
if err != nil {
return nil, fmt.Errorf("setup cwd: %w", err)
}
task, err := c.container.Task(ctx, nil)
if err != nil {
return nil, fmt.Errorf("task retrieval: %w", err)
}
id := procID(spec)
cioOpts := containerdCIO(processIO, spec.TTY != nil)
proc, err := task.Exec(ctx, id, &procSpec, cio.NewCreator(cioOpts...))
if err != nil {
return nil, fmt.Errorf("task exec: %w", err)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
err = proc.Start(ctx)
if err != nil {
if isNoSuchExecutable(err) {
return nil, garden.ExecutableNotFoundError{Message: err.Error()}
}
return nil, fmt.Errorf("proc start: %w", err)
}
// If there is no TTY allocated for the process, we can call CloseIO right
// away. The reason we don't do this when there is a TTY is that runc
// signals such processes with SIGHUP when stdin is closed and we have
// called CloseIO (which doesn't actually close the stdin stream for the
// container - it just marks the stream as "closable").
//
// If we were to call CloseIO immediately on processes with a TTY, if the
// Stdin stream ever receives an error (e.g. an io.EOF due to worker
// rebalancing, or the worker restarting gracefully), runc will kill the
// process with SIGHUP (because we would have marked the stream as
// closable).
//
// Note: resource containers are the only ones without a TTY - task and
// hijack processes have a TTY enabled.
if spec.TTY == nil {
err = proc.CloseIO(ctx, containerd.WithStdinCloser)
if err != nil {
return nil, fmt.Errorf("proc closeio: %w", err)
}
}
return NewProcess(proc, exitStatusC), nil
}
// Attach starts streaming the output back to the client from a specified process.
//
func (c *Container) Attach(pid string, processIO garden.ProcessIO) (process garden.Process, err error) {
ctx := context.Background()
if pid == "" {
return nil, ErrInvalidInput("empty pid")
}
task, err := c.container.Task(ctx, cio.Load)
if err != nil {
return nil, fmt.Errorf("task: %w", err)
}
cioOpts := containerdCIO(processIO, false)
proc, err := task.LoadProcess(ctx, pid, cio.NewAttach(cioOpts...))
if err != nil {
return nil, fmt.Errorf("load proc: %w", err)
}
status, err := proc.Status(ctx)
if err != nil {
return nil, fmt.Errorf("proc status: %w", err)
}
if status.Status != containerd.Running {
return nil, fmt.Errorf("proc not running: status = %s", status.Status)
}
exitStatusC, err := proc.Wait(ctx)
if err != nil {
return nil, fmt.Errorf("proc wait: %w", err)
}
return NewProcess(proc, exitStatusC), nil
}
// Properties returns the current set of properties
//
func (c *Container) Properties() (garden.Properties, error) {
ctx := context.Background()
labels, err := c.container.Labels(ctx)
if err != nil {
return garden.Properties{}, fmt.Errorf("labels retrieval: %w", err)
}
return labelsToProperties(labels), nil
}
// Property returns the value of the property with the specified name.
//
func (c *Container) | (name string) (string, error) {
properties, err := c.Properties()
if err != nil {
return "", err
}
v, found := properties[name]
if !found {
return "", ErrNotFound(name)
}
return v, nil
}
// Set a named property on a container to a specified value.
//
func (c *Container) SetProperty(name string, value string) error {
labelSet, err := propertiesToLabels(garden.Properties{name: value})
if err != nil {
return err
}
_, err = c.container.SetLabels(context.Background(), labelSet)
if err != nil {
return fmt.Errorf("set label: %w", err)
}
return nil
}
// RemoveProperty - Not Implemented
func (c *Container) RemoveProperty(name string) (err error) {
err = ErrNotImplemented
return
}
// Info - Not Implemented
func (c *Container) Info() (info garden.ContainerInfo, err error) {
err = ErrNotImplemented
return
}
// Metrics - Not Implemented
func (c *Container) Metrics() (metrics garden.Metrics, err error) {
err = ErrNotImplemented
return
}
// StreamIn - Not Implemented
func (c *Container) StreamIn(spec garden.StreamInSpec) (err error) {
err = ErrNotImplemented
return
}
// StreamOut - Not Implemented
func (c *Container) StreamOut(spec garden.StreamOutSpec) (readCloser io.ReadCloser, err error) {
err = ErrNotImplemented
return
}
// SetGraceTime stores the grace time as a containerd label with key "garden.grace-time"
//
func (c *Container) SetGraceTime(graceTime time.Duration) error {
err := c.SetProperty(GraceTimeKey, fmt.Sprintf("%d", graceTime))
if err != nil {
return fmt.Errorf("set grace time: %w", err)
}
return nil
}
// CurrentBandwidthLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentBandwidthLimits() (garden.BandwidthLimits, error) {
return garden.BandwidthLimits{}, nil
}
// CurrentCPULimits returns the CPU shares allocated to the container
func (c *Container) CurrentCPULimits() (garden.CPULimits, error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.CPULimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.CPU == nil ||
spec.Linux.Resources.CPU.Shares == nil {
return garden.CPULimits{}, nil
}
return garden.CPULimits{
Weight: *spec.Linux.Resources.CPU.Shares,
}, nil
}
// CurrentDiskLimits returns no limits (achieves parity with Guardian)
func (c *Container) CurrentDiskLimits() (garden.DiskLimits, error) {
return garden.DiskLimits{}, nil
}
// CurrentMemoryLimits returns the memory limit in bytes allocated to the container
func (c *Container) CurrentMemoryLimits() (limits garden.MemoryLimits, err error) {
spec, err := c.container.Spec(context.Background())
if err != nil {
return garden.MemoryLimits{}, err
}
if spec == nil ||
spec.Linux == nil ||
spec.Linux.Resources == nil ||
spec.Linux.Resources.Memory == nil ||
spec.Linux.Resources.Memory.Limit == nil {
return garden.MemoryLimits{}, nil
}
return garden.MemoryLimits{
LimitInBytes: uint64(*spec.Linux.Resources.Memory.Limit),
}, nil
}
// NetIn - Not Implemented
func (c *Container) NetIn(hostPort, containerPort uint32) (a, b uint32, err error) {
err = ErrNotImplemented
return
}
// NetOut - Not Implemented
func (c *Container) NetOut(netOutRule garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
// BulkNetOut - Not Implemented
func (c *Container) BulkNetOut(netOutRules []garden.NetOutRule) (err error) {
err = ErrNotImplemented
return
}
func procID(gdnProcSpec garden.ProcessSpec) string {
id := gdnProcSpec.ID
if id == "" {
uuid, err := uuid.NewV4()
if err != nil {
panic(fmt.Errorf("uuid gen: %w", err))
}
id = uuid.String()
}
return id
}
func (c *Container) setup | Property | identifier_name |
k8s.go | }
if opts.K8SNamespace == "" {
opts.K8SNamespace = apiv1.NamespaceDefault
}
cloud := &K8SCloud{
name: opts.Name,
host: opts.Host,
bearerToken: opts.K8SBearerToken,
namespace: opts.K8SNamespace,
insecure: opts.Insecure,
}
config := &rest.Config{
Host: opts.Host,
BearerToken: opts.K8SBearerToken,
TLSClientConfig: rest.TLSClientConfig{
Insecure: opts.Insecure,
},
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
cloud.client = clientset
return cloud, nil
}
// NewK8SCloudInCluster returns a cloud object which uses the service account
// kubernetes gives to pods
func NewK8SCloudInCluster(opts Options) (CloudProvider, error) {
config, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
namespace, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + apiv1.ServiceAccountNamespaceKey)
if err != nil {
return nil, err
}
cloud := &K8SCloud{
name: opts.Name,
host: config.Host,
bearerToken: config.BearerToken,
namespace: string(namespace),
insecure: opts.Insecure,
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
cloud.client = clientset
return cloud, nil
}
// Client returns k8s clientset
func (cloud *K8SCloud) Client() *kubernetes.Clientset {
return cloud.client
}
// Name returns k8s cloud name
func (cloud *K8SCloud) Name() string {
return cloud.name
}
// Kind returns cloud type.
func (cloud *K8SCloud) Kind() string {
return KindK8SCloud
}
// Ping returns nil if cloud is accessible
func (cloud *K8SCloud) Ping() error {
_, err := cloud.client.CoreV1().Pods(cloud.namespace).List(meta_v1.ListOptions{})
return err
}
// Resource returns the limit and used quotas of the cloud
func (cloud *K8SCloud) | () (*Resource, error) {
quotas, err := cloud.client.CoreV1().ResourceQuotas(cloud.namespace).List(meta_v1.ListOptions{})
if err != nil {
return nil, err
}
resource := &Resource{
Limit: ZeroQuota.DeepCopy(),
Used: ZeroQuota.DeepCopy(),
}
if len(quotas.Items) == 0 {
// TODO get quota from metrics
return resource, nil
}
quota := quotas.Items[0]
for k, v := range quota.Status.Hard {
resource.Limit[string(k)] = NewQuantityFor(v)
}
for k, v := range quota.Status.Used {
resource.Used[string(k)] = NewQuantityFor(v)
}
return resource, nil
}
// CanProvision returns true if the cloud can provision a worker meetting the quota
func (cloud *K8SCloud) CanProvision(quota Quota) (bool, error) {
resource, err := cloud.Resource()
if err != nil {
return false, err
}
if resource.Limit.IsZero() {
return true, nil
}
if resource.Limit.Enough(resource.Used, quota) {
return true, nil
}
return false, nil
}
// Provision returns a worker if the cloud can provison
func (cloud *K8SCloud) Provision(id string, wopts WorkerOptions) (Worker, error) {
logdog.Infof("Create worker %s with options %v", id, wopts)
var cp *K8SCloud
// If specify the namespace for worker in worker options, new a cloud pointer and set its namespace.
if len(wopts.Namespace) != 0 {
nc := *cloud
cp = &nc
cp.namespace = wopts.Namespace
} else {
cp = cloud
}
can, err := cp.CanProvision(wopts.Quota)
if err != nil {
return nil, err
}
if !can {
// wait
return nil, ErrNoEnoughResource
}
name := "cyclone-worker-" + id
Privileged := true
pod := &apiv1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Namespace: cp.namespace,
Name: name,
Labels: map[string]string{
"cyclone": "worker",
"cyclone/id": id,
},
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{
Name: "cyclone-worker",
Image: wopts.WorkerEnvs.WorkerImage,
Env: buildK8SEnv(id, wopts),
WorkingDir: WorkingDir,
Resources: wopts.Quota.ToK8SQuota(),
SecurityContext: &apiv1.SecurityContext{Privileged: &Privileged},
ImagePullPolicy: apiv1.PullAlways,
},
},
RestartPolicy: apiv1.RestartPolicyNever,
},
}
// Mount the cache volume to the worker.
cacheVolume := wopts.CacheVolume
mountPath := wopts.MountPath
if len(cacheVolume) != 0 && len(mountPath) != 0 {
// Check the existence and status of cache volume.
if pvc, err := cloud.client.CoreV1().PersistentVolumeClaims(cp.namespace).Get(cacheVolume, meta_v1.GetOptions{}); err == nil {
if pvc.Status.Phase == apiv1.ClaimBound {
volumeName := "cache-dependency"
pod.Spec.Containers[0].VolumeMounts = []apiv1.VolumeMount{
apiv1.VolumeMount{
Name: volumeName,
MountPath: mountPath,
},
}
pod.Spec.Volumes = []apiv1.Volume{
apiv1.Volume{
Name: volumeName,
},
}
pod.Spec.Volumes[0].PersistentVolumeClaim = &apiv1.PersistentVolumeClaimVolumeSource{
ClaimName: cacheVolume,
}
} else {
// Just log error and let the pipeline to run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as its status is %v", cacheVolume, pvc.Status.Phase)
}
} else {
// Just log error and let the pipeline to run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as fail to get it: %v", cacheVolume, err)
}
}
// pod, err = cloud.Client.CoreV1().Pods(cloud.namespace).Create(pod)
// if err != nil {
// return nil, err
// }
worker := &K8SPodWorker{
K8SCloud: cp,
pod: pod,
}
return worker, nil
}
// LoadWorker rebuilds a worker from worker info
func (cloud *K8SCloud) LoadWorker(info WorkerInfo) (Worker, error) {
if cloud.Kind() != info.CloudKind {
return nil, fmt.Errorf("K8SCloud: can not load worker with another cloud kind %s", info.CloudKind)
}
pod, err := cloud.client.CoreV1().Pods(info.Namespace).Get(info.PodName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
worker := &K8SPodWorker{
K8SCloud: cloud,
createTime: info.CreateTime,
dueTime: info.DueTime,
pod: pod,
}
return worker, nil
}
// GetCloud ...
func (cloud *K8SCloud) GetCloud() Cloud {
return Cloud{
Name: cloud.name,
Type: CloudTypeKubernetes,
Insecure: cloud.insecure,
Kubernetes: &CloudKubernetes{
Host: cloud.host,
BearerToken: cloud.bearerToken,
InCluster: cloud.inCluster,
},
}
}
// ---------------------------------------------------------------------------------
// K8SPodWorker ...
type K8SPodWorker struct {
*K8SCloud
createTime time.Time
dueTime time.Time
pod *apiv1.Pod
}
// Do starts the worker and do the work
func (worker *K8SPodWorker) Do() error {
var pod *apiv1.Pod
var err error
check := func() (bool, error) {
// change pod here
pod, err = worker.Client().CoreV1().Pods(worker.namespace).Get(worker.pod.Name, meta_v1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case apiv1.PodPending:
return false, nil
case apiv1.PodRunning:
return true, nil
default:
return false, fmt.Errorf("K8SCloud: get an error pod status phase[%s]", pod.Status | Resource | identifier_name |
k8s.go | if opts.K8SNamespace == "" {
opts.K8SNamespace = apiv1.NamespaceDefault
}
cloud := &K8SCloud{
name: opts.Name,
host: opts.Host,
bearerToken: opts.K8SBearerToken,
namespace: opts.K8SNamespace,
insecure: opts.Insecure,
}
config := &rest.Config{
Host: opts.Host,
BearerToken: opts.K8SBearerToken,
TLSClientConfig: rest.TLSClientConfig{
Insecure: opts.Insecure,
},
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
cloud.client = clientset
return cloud, nil
}
// NewK8SCloudInCluster returns a cloud object which uses the service account
// kubernetes gives to pods
func NewK8SCloudInCluster(opts Options) (CloudProvider, error) {
config, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
namespace, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + apiv1.ServiceAccountNamespaceKey)
if err != nil {
return nil, err
}
cloud := &K8SCloud{
name: opts.Name,
host: config.Host,
bearerToken: config.BearerToken,
namespace: string(namespace),
insecure: opts.Insecure,
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
cloud.client = clientset
return cloud, nil
}
// Client returns k8s clientset
func (cloud *K8SCloud) Client() *kubernetes.Clientset {
return cloud.client
}
// Name returns k8s cloud name
func (cloud *K8SCloud) Name() string {
return cloud.name
}
// Kind returns cloud type.
func (cloud *K8SCloud) Kind() string {
return KindK8SCloud
}
// Ping returns nil if cloud is accessible
func (cloud *K8SCloud) Ping() error {
_, err := cloud.client.CoreV1().Pods(cloud.namespace).List(meta_v1.ListOptions{})
return err
}
// Resource returns the limit and used quotas of the cloud
func (cloud *K8SCloud) Resource() (*Resource, error) {
quotas, err := cloud.client.CoreV1().ResourceQuotas(cloud.namespace).List(meta_v1.ListOptions{})
if err != nil {
return nil, err
}
resource := &Resource{
Limit: ZeroQuota.DeepCopy(),
Used: ZeroQuota.DeepCopy(),
}
if len(quotas.Items) == 0 {
// TODO get quota from metrics
return resource, nil
}
quota := quotas.Items[0]
for k, v := range quota.Status.Hard {
resource.Limit[string(k)] = NewQuantityFor(v)
}
for k, v := range quota.Status.Used |
return resource, nil
}
// CanProvision returns true if the cloud can provision a worker meetting the quota
func (cloud *K8SCloud) CanProvision(quota Quota) (bool, error) {
resource, err := cloud.Resource()
if err != nil {
return false, err
}
if resource.Limit.IsZero() {
return true, nil
}
if resource.Limit.Enough(resource.Used, quota) {
return true, nil
}
return false, nil
}
// Provision returns a worker if the cloud can provison
func (cloud *K8SCloud) Provision(id string, wopts WorkerOptions) (Worker, error) {
logdog.Infof("Create worker %s with options %v", id, wopts)
var cp *K8SCloud
// If specify the namespace for worker in worker options, new a cloud pointer and set its namespace.
if len(wopts.Namespace) != 0 {
nc := *cloud
cp = &nc
cp.namespace = wopts.Namespace
} else {
cp = cloud
}
can, err := cp.CanProvision(wopts.Quota)
if err != nil {
return nil, err
}
if !can {
// wait
return nil, ErrNoEnoughResource
}
name := "cyclone-worker-" + id
Privileged := true
pod := &apiv1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Namespace: cp.namespace,
Name: name,
Labels: map[string]string{
"cyclone": "worker",
"cyclone/id": id,
},
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{
Name: "cyclone-worker",
Image: wopts.WorkerEnvs.WorkerImage,
Env: buildK8SEnv(id, wopts),
WorkingDir: WorkingDir,
Resources: wopts.Quota.ToK8SQuota(),
SecurityContext: &apiv1.SecurityContext{Privileged: &Privileged},
ImagePullPolicy: apiv1.PullAlways,
},
},
RestartPolicy: apiv1.RestartPolicyNever,
},
}
// Mount the cache volume to the worker.
cacheVolume := wopts.CacheVolume
mountPath := wopts.MountPath
if len(cacheVolume) != 0 && len(mountPath) != 0 {
// Check the existence and status of cache volume.
if pvc, err := cloud.client.CoreV1().PersistentVolumeClaims(cp.namespace).Get(cacheVolume, meta_v1.GetOptions{}); err == nil {
if pvc.Status.Phase == apiv1.ClaimBound {
volumeName := "cache-dependency"
pod.Spec.Containers[0].VolumeMounts = []apiv1.VolumeMount{
apiv1.VolumeMount{
Name: volumeName,
MountPath: mountPath,
},
}
pod.Spec.Volumes = []apiv1.Volume{
apiv1.Volume{
Name: volumeName,
},
}
pod.Spec.Volumes[0].PersistentVolumeClaim = &apiv1.PersistentVolumeClaimVolumeSource{
ClaimName: cacheVolume,
}
} else {
// Just log error and let the pipeline to run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as its status is %v", cacheVolume, pvc.Status.Phase)
}
} else {
// Just log error and let the pipeline to run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as fail to get it: %v", cacheVolume, err)
}
}
// pod, err = cloud.Client.CoreV1().Pods(cloud.namespace).Create(pod)
// if err != nil {
// return nil, err
// }
worker := &K8SPodWorker{
K8SCloud: cp,
pod: pod,
}
return worker, nil
}
// LoadWorker rebuilds a worker from worker info
func (cloud *K8SCloud) LoadWorker(info WorkerInfo) (Worker, error) {
if cloud.Kind() != info.CloudKind {
return nil, fmt.Errorf("K8SCloud: can not load worker with another cloud kind %s", info.CloudKind)
}
pod, err := cloud.client.CoreV1().Pods(info.Namespace).Get(info.PodName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
worker := &K8SPodWorker{
K8SCloud: cloud,
createTime: info.CreateTime,
dueTime: info.DueTime,
pod: pod,
}
return worker, nil
}
// GetCloud ...
func (cloud *K8SCloud) GetCloud() Cloud {
return Cloud{
Name: cloud.name,
Type: CloudTypeKubernetes,
Insecure: cloud.insecure,
Kubernetes: &CloudKubernetes{
Host: cloud.host,
BearerToken: cloud.bearerToken,
InCluster: cloud.inCluster,
},
}
}
// ---------------------------------------------------------------------------------
// K8SPodWorker ...
type K8SPodWorker struct {
*K8SCloud
createTime time.Time
dueTime time.Time
pod *apiv1.Pod
}
// Do starts the worker and do the work
func (worker *K8SPodWorker) Do() error {
var pod *apiv1.Pod
var err error
check := func() (bool, error) {
// change pod here
pod, err = worker.Client().CoreV1().Pods(worker.namespace).Get(worker.pod.Name, meta_v1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case apiv1.PodPending:
return false, nil
case apiv1.PodRunning:
return true, nil
default:
return false, fmt.Errorf("K8SCloud: get an error pod status phase[%s]", pod.Status | {
resource.Used[string(k)] = NewQuantityFor(v)
} | conditional_block |
k8s.go |
}
// CanProvision returns true if the cloud can provision a worker meetting the quota
func (cloud *K8SCloud) CanProvision(quota Quota) (bool, error) {
resource, err := cloud.Resource()
if err != nil {
return false, err
}
if resource.Limit.IsZero() {
return true, nil
}
if resource.Limit.Enough(resource.Used, quota) {
return true, nil
}
return false, nil
}
// Provision returns a worker if the cloud can provison
func (cloud *K8SCloud) Provision(id string, wopts WorkerOptions) (Worker, error) {
logdog.Infof("Create worker %s with options %v", id, wopts)
var cp *K8SCloud
// If specify the namespace for worker in worker options, new a cloud pointer and set its namespace.
if len(wopts.Namespace) != 0 {
nc := *cloud
cp = &nc
cp.namespace = wopts.Namespace
} else {
cp = cloud
}
can, err := cp.CanProvision(wopts.Quota)
if err != nil {
return nil, err
}
if !can {
// wait
return nil, ErrNoEnoughResource
}
name := "cyclone-worker-" + id
Privileged := true
pod := &apiv1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Namespace: cp.namespace,
Name: name,
Labels: map[string]string{
"cyclone": "worker",
"cyclone/id": id,
},
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{
Name: "cyclone-worker",
Image: wopts.WorkerEnvs.WorkerImage,
Env: buildK8SEnv(id, wopts),
WorkingDir: WorkingDir,
Resources: wopts.Quota.ToK8SQuota(),
SecurityContext: &apiv1.SecurityContext{Privileged: &Privileged},
ImagePullPolicy: apiv1.PullAlways,
},
},
RestartPolicy: apiv1.RestartPolicyNever,
},
}
// Mount the cache volume to the worker.
cacheVolume := wopts.CacheVolume
mountPath := wopts.MountPath
if len(cacheVolume) != 0 && len(mountPath) != 0 {
// Check the existence and status of cache volume.
if pvc, err := cloud.client.CoreV1().PersistentVolumeClaims(cp.namespace).Get(cacheVolume, meta_v1.GetOptions{}); err == nil {
if pvc.Status.Phase == apiv1.ClaimBound {
volumeName := "cache-dependency"
pod.Spec.Containers[0].VolumeMounts = []apiv1.VolumeMount{
apiv1.VolumeMount{
Name: volumeName,
MountPath: mountPath,
},
}
pod.Spec.Volumes = []apiv1.Volume{
apiv1.Volume{
Name: volumeName,
},
}
pod.Spec.Volumes[0].PersistentVolumeClaim = &apiv1.PersistentVolumeClaimVolumeSource{
ClaimName: cacheVolume,
}
} else {
// Just log error and let the pipeline to run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as its status is %v", cacheVolume, pvc.Status.Phase)
}
} else {
// Just log error and let the pipeline to run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as fail to get it: %v", cacheVolume, err)
}
}
// pod, err = cloud.Client.CoreV1().Pods(cloud.namespace).Create(pod)
// if err != nil {
// return nil, err
// }
worker := &K8SPodWorker{
K8SCloud: cp,
pod: pod,
}
return worker, nil
}
// LoadWorker rebuilds a worker from worker info
func (cloud *K8SCloud) LoadWorker(info WorkerInfo) (Worker, error) {
if cloud.Kind() != info.CloudKind {
return nil, fmt.Errorf("K8SCloud: can not load worker with another cloud kind %s", info.CloudKind)
}
pod, err := cloud.client.CoreV1().Pods(info.Namespace).Get(info.PodName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
worker := &K8SPodWorker{
K8SCloud: cloud,
createTime: info.CreateTime,
dueTime: info.DueTime,
pod: pod,
}
return worker, nil
}
// GetCloud ...
func (cloud *K8SCloud) GetCloud() Cloud {
return Cloud{
Name: cloud.name,
Type: CloudTypeKubernetes,
Insecure: cloud.insecure,
Kubernetes: &CloudKubernetes{
Host: cloud.host,
BearerToken: cloud.bearerToken,
InCluster: cloud.inCluster,
},
}
}
// ---------------------------------------------------------------------------------
// K8SPodWorker ...
type K8SPodWorker struct {
*K8SCloud
createTime time.Time
dueTime time.Time
pod *apiv1.Pod
}
// Do starts the worker and do the work
func (worker *K8SPodWorker) Do() error {
var pod *apiv1.Pod
var err error
check := func() (bool, error) {
// change pod here
pod, err = worker.Client().CoreV1().Pods(worker.namespace).Get(worker.pod.Name, meta_v1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case apiv1.PodPending:
return false, nil
case apiv1.PodRunning:
return true, nil
default:
return false, fmt.Errorf("K8SCloud: get an error pod status phase[%s]", pod.Status.Phase)
}
}
pod, err = worker.Client().CoreV1().Pods(worker.namespace).Create(worker.pod)
if err != nil {
return err
}
// wait until pod is running
err = wait.Poll(7*time.Second, 2*time.Minute, check)
if err != nil {
logdog.Error("K8SPodWorker: do worker error", logdog.Fields{"err": err})
return err
}
// add time
worker.createTime = time.Now()
worker.dueTime = worker.createTime.Add(time.Duration(WorkerTimeout))
worker.pod = pod
return nil
}
// GetWorkerInfo returns worker's infomation
func (worker *K8SPodWorker) GetWorkerInfo() WorkerInfo {
return WorkerInfo{
CloudName: worker.Name(),
CloudKind: worker.Kind(),
CreateTime: worker.createTime,
DueTime: worker.dueTime,
PodName: worker.pod.Name,
Namespace: worker.namespace,
}
}
// IsTimeout returns true if worker is timeout
// and returns the time left until it is due
func (worker *K8SPodWorker) IsTimeout() (bool, time.Duration) {
now := time.Now()
if now.After(worker.dueTime) {
return true, time.Duration(0)
}
return false, worker.dueTime.Sub(now)
}
// Terminate terminates the worker and destroy it
func (worker *K8SPodWorker) Terminate() error {
client := worker.Client().CoreV1().Pods(worker.namespace)
GracePeriodSeconds := int64(0)
logdog.Debug("worker terminating...", logdog.Fields{"cloud": worker.Name(), "kind": worker.Kind(), "podName": worker.pod.Name})
if Debug {
req := client.GetLogs(worker.pod.Name, &apiv1.PodLogOptions{})
readCloser, err := req.Stream()
if err != nil {
logdog.Error("Can not read log from pod", logdog.Fields{
"cloud": worker.Name(),
"kind": worker.Kind(),
"podName": worker.pod.Name,
"err": err,
})
} else {
defer readCloser.Close()
content, _ := ioutil.ReadAll(readCloser)
logdog.Debug(string(content))
}
}
err := client.Delete(
worker.pod.Name,
&meta_v1.DeleteOptions{
GracePeriodSeconds: &GracePeriodSeconds,
})
return err
}
func buildK8SEnv(id string, opts WorkerOptions) []apiv1.EnvVar | {
env := []apiv1.EnvVar{
{
Name: WorkerEventID,
Value: id,
},
{
Name: CycloneServer,
Value: opts.WorkerEnvs.CycloneServer,
},
{
Name: ConsoleWebEndpoint,
Value: opts.WorkerEnvs.ConsoleWebEndpoint,
},
{
Name: RegistryLocation,
Value: opts.WorkerEnvs.RegistryLocation,
},
{ | identifier_body |
|
k8s.go | }
if opts.K8SNamespace == "" {
opts.K8SNamespace = apiv1.NamespaceDefault
}
cloud := &K8SCloud{
name: opts.Name,
host: opts.Host,
bearerToken: opts.K8SBearerToken,
namespace: opts.K8SNamespace,
insecure: opts.Insecure,
}
config := &rest.Config{
Host: opts.Host,
BearerToken: opts.K8SBearerToken,
TLSClientConfig: rest.TLSClientConfig{
Insecure: opts.Insecure,
},
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
cloud.client = clientset
return cloud, nil
}
// NewK8SCloudInCluster returns a cloud object which uses the service account
// kubernetes gives to pods
func NewK8SCloudInCluster(opts Options) (CloudProvider, error) {
config, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
namespace, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + apiv1.ServiceAccountNamespaceKey)
if err != nil {
return nil, err
}
cloud := &K8SCloud{
name: opts.Name,
host: config.Host,
bearerToken: config.BearerToken,
namespace: string(namespace),
insecure: opts.Insecure,
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
cloud.client = clientset
return cloud, nil
}
// Client returns k8s clientset
func (cloud *K8SCloud) Client() *kubernetes.Clientset {
return cloud.client
}
// Name returns k8s cloud name | return cloud.name
}
// Kind returns cloud type.
func (cloud *K8SCloud) Kind() string {
return KindK8SCloud
}
// Ping returns nil if cloud is accessible
func (cloud *K8SCloud) Ping() error {
_, err := cloud.client.CoreV1().Pods(cloud.namespace).List(meta_v1.ListOptions{})
return err
}
// Resource returns the limit and used quotas of the cloud
func (cloud *K8SCloud) Resource() (*Resource, error) {
quotas, err := cloud.client.CoreV1().ResourceQuotas(cloud.namespace).List(meta_v1.ListOptions{})
if err != nil {
return nil, err
}
resource := &Resource{
Limit: ZeroQuota.DeepCopy(),
Used: ZeroQuota.DeepCopy(),
}
if len(quotas.Items) == 0 {
// TODO get quota from metrics
return resource, nil
}
quota := quotas.Items[0]
for k, v := range quota.Status.Hard {
resource.Limit[string(k)] = NewQuantityFor(v)
}
for k, v := range quota.Status.Used {
resource.Used[string(k)] = NewQuantityFor(v)
}
return resource, nil
}
// CanProvision returns true if the cloud can provision a worker meetting the quota
func (cloud *K8SCloud) CanProvision(quota Quota) (bool, error) {
resource, err := cloud.Resource()
if err != nil {
return false, err
}
if resource.Limit.IsZero() {
return true, nil
}
if resource.Limit.Enough(resource.Used, quota) {
return true, nil
}
return false, nil
}
// Provision returns a worker if the cloud can provison
func (cloud *K8SCloud) Provision(id string, wopts WorkerOptions) (Worker, error) {
logdog.Infof("Create worker %s with options %v", id, wopts)
var cp *K8SCloud
// If specify the namespace for worker in worker options, new a cloud pointer and set its namespace.
if len(wopts.Namespace) != 0 {
nc := *cloud
cp = &nc
cp.namespace = wopts.Namespace
} else {
cp = cloud
}
can, err := cp.CanProvision(wopts.Quota)
if err != nil {
return nil, err
}
if !can {
// wait
return nil, ErrNoEnoughResource
}
name := "cyclone-worker-" + id
Privileged := true
pod := &apiv1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Namespace: cp.namespace,
Name: name,
Labels: map[string]string{
"cyclone": "worker",
"cyclone/id": id,
},
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{
Name: "cyclone-worker",
Image: wopts.WorkerEnvs.WorkerImage,
Env: buildK8SEnv(id, wopts),
WorkingDir: WorkingDir,
Resources: wopts.Quota.ToK8SQuota(),
SecurityContext: &apiv1.SecurityContext{Privileged: &Privileged},
ImagePullPolicy: apiv1.PullAlways,
},
},
RestartPolicy: apiv1.RestartPolicyNever,
},
}
// Mount the cache volume to the worker.
cacheVolume := wopts.CacheVolume
mountPath := wopts.MountPath
if len(cacheVolume) != 0 && len(mountPath) != 0 {
// Check the existence and status of cache volume.
if pvc, err := cloud.client.CoreV1().PersistentVolumeClaims(cp.namespace).Get(cacheVolume, meta_v1.GetOptions{}); err == nil {
if pvc.Status.Phase == apiv1.ClaimBound {
volumeName := "cache-dependency"
pod.Spec.Containers[0].VolumeMounts = []apiv1.VolumeMount{
apiv1.VolumeMount{
Name: volumeName,
MountPath: mountPath,
},
}
pod.Spec.Volumes = []apiv1.Volume{
apiv1.Volume{
Name: volumeName,
},
}
pod.Spec.Volumes[0].PersistentVolumeClaim = &apiv1.PersistentVolumeClaimVolumeSource{
ClaimName: cacheVolume,
}
} else {
// Just log error and let the pipeline to run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as its status is %v", cacheVolume, pvc.Status.Phase)
}
} else {
// Just log error and let the pipeline to run in non-cache mode.
logdog.Errorf("Can not use cache volume %s as fail to get it: %v", cacheVolume, err)
}
}
// pod, err = cloud.Client.CoreV1().Pods(cloud.namespace).Create(pod)
// if err != nil {
// return nil, err
// }
worker := &K8SPodWorker{
K8SCloud: cp,
pod: pod,
}
return worker, nil
}
// LoadWorker rebuilds a worker from worker info
func (cloud *K8SCloud) LoadWorker(info WorkerInfo) (Worker, error) {
if cloud.Kind() != info.CloudKind {
return nil, fmt.Errorf("K8SCloud: can not load worker with another cloud kind %s", info.CloudKind)
}
pod, err := cloud.client.CoreV1().Pods(info.Namespace).Get(info.PodName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
worker := &K8SPodWorker{
K8SCloud: cloud,
createTime: info.CreateTime,
dueTime: info.DueTime,
pod: pod,
}
return worker, nil
}
// GetCloud ...
func (cloud *K8SCloud) GetCloud() Cloud {
return Cloud{
Name: cloud.name,
Type: CloudTypeKubernetes,
Insecure: cloud.insecure,
Kubernetes: &CloudKubernetes{
Host: cloud.host,
BearerToken: cloud.bearerToken,
InCluster: cloud.inCluster,
},
}
}
// ---------------------------------------------------------------------------------
// K8SPodWorker ...
type K8SPodWorker struct {
*K8SCloud
createTime time.Time
dueTime time.Time
pod *apiv1.Pod
}
// Do starts the worker and do the work
func (worker *K8SPodWorker) Do() error {
var pod *apiv1.Pod
var err error
check := func() (bool, error) {
// change pod here
pod, err = worker.Client().CoreV1().Pods(worker.namespace).Get(worker.pod.Name, meta_v1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case apiv1.PodPending:
return false, nil
case apiv1.PodRunning:
return true, nil
default:
return false, fmt.Errorf("K8SCloud: get an error pod status phase[%s]", pod.Status.Phase | func (cloud *K8SCloud) Name() string { | random_line_split |
solve_kami.go | ]bool
}
func newTileSet(id, color int) *TileSet {
return &TileSet{
id: id,
color: color,
tiles: make(map[Tile]bool),
}
}
func (ts *TileSet) add(t Tile) { ts.tiles[t] = true }
func (ts *TileSet) remove(t Tile) { delete(ts.tiles, t) }
func (ts *TileSet) pop() Tile {
var t Tile
for t = range ts.tiles {
break
}
ts.remove(t)
return t
}
func (ts *TileSet) contains(t Tile) bool { return ts.tiles[t] }
func (ts *TileSet) empty() bool { return len(ts.tiles) == 0 }
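// topLeft returns the topmost, then leftmost tile in the set, used as the
// region's representative tile when reporting moves.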
func (ts *TileSet) topLeft() Tile {
t0 := Tile{row: ROWS, col: COLS}
for t1 := range ts.tiles {
if t1.row < t0.row || (t1.row == t0.row && t1.col < t0.col) {
t0 = t1
}
}
return t0
}
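// findRegions partitions the grid into maximal connected regions of a single
// color via flood fill, assigns each region a unique id and a representative
// tile, and records which regions border one another.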
func findRegions(grid [][]int) map[int]*Region {
tilesLeft := newTileSet(-1, -1)
for r := 0; r < ROWS; r++ {
for c := 0; c < COLS; c++ {
tilesLeft.add(Tile{r, c})
}
}
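// findReachable claims every not-yet-visited tile of the given color that is
// connected to from, moving it from tilesLeft into tiles.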
var findReachable func(tiles *TileSet, color int, from Tile)
findReachable = func(tiles *TileSet, color int, from Tile) {
for _, tile := range adjacentTiles(from) {
if tilesLeft.contains(tile) && grid[tile.row][tile.col] == color {
tilesLeft.remove(tile)
tiles.add(tile)
findReachable(tiles, color, tile)
}
}
}
var tileSets []*TileSet
idSeq := 0
for !tilesLeft.empty() {
tile := tilesLeft.pop()
color := grid[tile.row][tile.col]
tiles := newTileSet(idSeq, color)
tiles.add(tile)
idSeq++
findReachable(tiles, color, tile)
tileSets = append(tileSets, tiles)
}
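// adjacent reports whether some tile of ts1 borders a tile of ts2.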
adjacent := func(ts1, ts2 *TileSet) bool {
for a := range ts1.tiles {
for _, b := range adjacentTiles(a) {
if ts2.contains(b) {
return true
}
}
}
return false
}
regions := make(map[int]*Region)
for i, ts := range tileSets {
regions[i] = &Region{
id: ts.id,
color: ts.color,
tile: ts.topLeft(),
}
}
for i, ts1 := range tileSets {
r1 := regions[ts1.id]
for _, ts2 := range tileSets[i+1:] {
if adjacent(ts1, ts2) {
r2 := regions[ts2.id]
r1.neighbors.add(r2.id)
r2.neighbors.add(r1.id)
}
}
}
return regions
}
type Region struct {
id, color int
tile Tile
neighbors intSet
}
func (r *Region) Copy() *Region {
r1 := *r
r1.neighbors = r.neighbors.Copy()
return &r1
}
type Board struct {
regions map[int]*Region
}
func newBoard(grid [][]int) *Board { return &Board{regions: findRegions(grid)} }
func (b *Board) solved() bool {
return len(b.regions) == 1
}
func (b *Board) numColors() int {
seen := make([]bool, MAX_COLOR)
for _, r := range b.regions {
seen[r.color] = true
}
n := 0
for _, b := range seen {
if b {
n++
}
}
return n
}
func (b *Board) colorsAdjacentToRegion(region *Region) intSet {
colors := newIntSet(MAX_COLOR)
for _, neighborID := range region.neighbors {
neighbor := b.regions[neighborID]
colors.add(neighbor.color)
}
return colors
}
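// recolor returns a new board in which region regionID has been repainted to
// color and merged with any neighbors that already have that color. Regions
// that are not affected are shared with the old board (copy-on-write).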
func (b *Board) recolor(regionID, color int) *Board {
recolored := b.regions[regionID].Copy()
recolored.color = color
removed := newIntSet(len(recolored.neighbors))
for _, id := range recolored.neighbors {
neighbor := b.regions[id]
if neighbor.color == color {
removed.add(id)
}
}
regions := make(map[int]*Region, len(b.regions)-len(removed))
for _, r := range b.regions {
if !removed.contains(r.id) {
regions[r.id] = r
}
}
regions[recolored.id] = recolored
copyOnWrite := func(r *Region) *Region {
if regions[r.id] != b.regions[r.id] {
return r
}
c := r.Copy()
regions[c.id] = c
return c
}
for _, removedID := range removed {
for _, neighborID := range b.regions[removedID].neighbors {
if neighborID == recolored.id {
continue
}
if nn, ok := regions[neighborID]; ok {
nn = copyOnWrite(nn)
nn.neighbors.remove(removedID)
nn.neighbors.add(recolored.id)
recolored.neighbors.add(nn.id)
}
}
recolored.neighbors.remove(removedID)
}
return &Board{regions: regions}
}
type intSet []int
func newIntSet(cap int) intSet { return make([]int, 0, cap) }
func (s intSet) Copy() intSet {
a := make([]int, len(s))
copy(a, s)
return a
}
func (s intSet) contains(x int) bool {
for _, y := range s {
if x == y {
return true
}
}
return false
}
func (s *intSet) add(x int) {
if !s.contains(x) {
*s = append(*s, x)
}
}
func (s *intSet) remove(x int) {
for i, y := range *s {
if x == y {
(*s)[i] = (*s)[len(*s)-1]
*s = (*s)[:len(*s)-1]
return
}
}
}
func getRegions(b *Board) []*Region {
a := make([]*Region, 0, len(b.regions))
for _, r := range b.regions {
a = append(a, r)
}
sort.Sort(byNumNeighbors(a))
return a
}
type byNumNeighbors []*Region
func (a byNumNeighbors) Len() int { return len(a) }
func (a byNumNeighbors) Less(i, j int) bool { return len(a[i].neighbors) > len(a[j].neighbors) }
func (a byNumNeighbors) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type Move struct {
tile Tile
color int
}
type work struct {
board *Board
regionID int
color int
movesLeft int
}
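// search runs a depth-first search for a move sequence of length at most
// movesLeft that leaves a single region, pruning any branch whose board still
// has more than movesLeft+1 colors and therefore cannot be finished in time.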
func search(b *Board, regionID int, movesLeft int) []Move {
switch {
case b.numColors() > movesLeft+1:
return nil
case b.solved():
return []Move{}
case movesLeft <= 0:
return nil
}
r := b.regions[regionID]
for _, color := range b.colorsAdjacentToRegion(r) {
moves := search(b.recolor(r.id, color), r.id, movesLeft-1)
if moves != nil {
return append(moves, Move{r.tile, color})
}
}
return nil
}
func workerProcess(c1 <-chan work, c2 chan<- []Move) {
for work := range c1 {
newBoard := work.board.recolor(work.regionID, work.color)
moves := search(newBoard, work.regionID, work.movesLeft-1)
if moves != nil {
r := newBoard.regions[work.regionID]
c2 <- append(moves, Move{r.tile, work.color})
return
}
}
c2 <- nil
}
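// solve parallelizes only the first move: the producer enqueues one work item
// per (region, adjacent color) pair and each worker continues the search
// sequentially from the resulting board.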
func solve(b *Board, maxMoves int, numWorkers int) []Move {
workChan := make(chan work)
solutionChan := make(chan []Move)
// Launch consumers
for i := 0; i < numWorkers; i++ {
go workerProcess(workChan, solutionChan)
}
// Launch producer
go func() {
for _, region := range getRegions(b) {
colors := b.colorsAdjacentToRegion(region)
for _, color := range colors {
fmt.Printf("go region %3d: color %d -> %d\n", region.id, region.color, color)
workChan <- work{
board: b,
regionID: region.id,
color: color,
movesLeft: maxMoves,
}
}
}
close(workChan) // no more work
}() |
// Wait for a solution
for i := 0; i < numWorkers; i++ { | random_line_split |
|
solve_kami.go | func extractSwatches(src *image.RGBA, numColors int) []colorful.Color {
const (
W = 400
H = 75
)
var swatches []colorful.Color
b := src.Bounds()
sw := W / numColors
for i := 0; i < numColors; i++ {
m := src.SubImage(trimRect(image.Rect(b.Min.X+i*sw, b.Max.Y-H, b.Min.X+(i+1)*sw, b.Max.Y), 10))
swatches = append(swatches, toColorful(averageColor(m)))
savePNG(strconv.Itoa(i), m) // for debugging
}
const dim = 50
m := image.NewRGBA(image.Rect(0, 0, dim*len(swatches), dim))
for i, c := range swatches {
r := image.Rect(i*dim, 0, (i+1)*dim, dim)
draw.Draw(m, r, &image.Uniform{fromColorful(c)}, image.ZP, draw.Src)
}
savePNG("swatches", m) // for debugging
return swatches
}
// Reports which of the swatches is most similar to c.
func nearestSwatch(swatches []colorful.Color, c color.Color) (color.Color, int) {
c0 := toColorful(c)
minDist := math.MaxFloat64
var best colorful.Color
bestIndex := -1
for i, s := range swatches {
if d := c0.DistanceCIE94(s); d < minDist {
minDist, best, bestIndex = d, s, i
}
}
return fromColorful(best), bestIndex
}
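// Typical use (illustrative): classify a sampled pixel against the palette,
// e.g. _, colorIndex := nearestSwatch(swatches, img.At(x, y)).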
const outputPrefix = "/tmp/kami_" // see savePNG
// Encodes the image as PNG with the filename outputPrefix+name+".png".
func savePNG(name string, img image.Image) {
filename := outputPrefix + name + ".png"
fp, err := os.Create(filename)
if err != nil {
log.Fatal(err)
}
err = png.Encode(fp, img)
if err != nil {
log.Fatal(err)
}
}
// Decodes the specified file as a PNG image.
func loadPNG(filename string) image.Image {
fp, err := os.Open(filename)
if err != nil {
log.Fatal(err)
}
img, err := png.Decode(fp)
if err != nil {
log.Fatal(err)
}
return img
}
func toColorful(c color.Color) colorful.Color {
c0 := color.RGBAModel.Convert(c).(color.RGBA)
return colorful.Color{
R: float64(c0.R) / float64(0xFF),
G: float64(c0.G) / float64(0xFF),
B: float64(c0.B) / float64(0xFF),
}
}
func fromColorful(c colorful.Color) color.Color {
r, g, b, a := c.RGBA()
return color.RGBA{R: uint8(r >> 8), G: uint8(g >> 8), B: uint8(b >> 8), A: uint8(a >> 8)}
}
// Reports the average color of an image.
func averageColor(src image.Image) color.Color {
b := src.Bounds()
var sum struct{ c, y, m, k float64 }
n := 0
for y := b.Min.Y; y < b.Max.Y; y++ {
for x := b.Min.X; x < b.Max.X; x++ {
c := color.CMYKModel.Convert(src.At(x, y)).(color.CMYK)
sum.c += float64(c.C)
sum.m += float64(c.M)
sum.y += float64(c.Y)
sum.k += float64(c.K)
n++
}
}
d := float64(n)
return color.CMYK{
C: uint8(sum.c / d),
M: uint8(sum.m / d),
Y: uint8(sum.y / d),
K: uint8(sum.k / d),
}
}
// Converts an image to the RGBA type.
func convertToRGBA(src image.Image) *image.RGBA {
b := src.Bounds()
m := image.NewRGBA(image.Rect(0, 0, b.Dx(), b.Dy()))
draw.Draw(m, m.Bounds(), src, b.Min, draw.Src)
return m
}
func parseInt(s string) int {
n, err := strconv.Atoi(s)
if err != nil {
log.Fatal(err)
}
return n
}
type Tile struct{ row, col int }
func (t Tile) add(v Tile) Tile {
return Tile{row: t.row + v.row, col: t.col + v.col}
}
var dirs = []Tile{{-1, 0}, {+1, 0}, {0, -1}, {0, +1}}
func adjacentTiles(t Tile) [4]Tile {
var a [4]Tile
for i, d := range dirs {
a[i] = t.add(d)
}
return a
}
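// TileSet is a mutable set of tiles used while discovering one connected
// region; id and color identify the region being built.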
type TileSet struct {
id int
color int
tiles map[Tile]bool
}
func newTileSet(id, color int) *TileSet {
return &TileSet{
id: id,
color: color,
tiles: make(map[Tile]bool),
}
}
func (ts *TileSet) add(t Tile) { ts.tiles[t] = true }
func (ts *TileSet) remove(t Tile) { delete(ts.tiles, t) }
func (ts *TileSet) pop() Tile {
var t Tile
for t = range ts.tiles {
break
}
ts.remove(t)
return t
}
func (ts *TileSet) contains(t Tile) bool { return ts.tiles[t] }
func (ts *TileSet) empty() bool { return len(ts.tiles) == 0 }
func (ts *TileSet) topLeft() Tile {
t0 := Tile{row: ROWS, col: COLS}
for t1 := range ts.tiles {
if t1.row < t0.row || (t1.row == t0.row && t1.col < t0.col) {
t0 = t1
}
}
return t0
}
func findRegions(grid [][]int) map[int]*Region {
tilesLeft := newTileSet(-1, -1)
for r := 0; r < ROWS; r++ {
for c := 0; c < COLS; c++ {
tilesLeft.add(Tile{r, c})
}
}
var findReachable func(tiles *TileSet, color int, from Tile)
findReachable = func(tiles *TileSet, color int, from Tile) {
for _, tile := range adjacentTiles(from) {
if tilesLeft.contains(tile) && grid[tile.row][tile.col] == color {
tilesLeft.remove(tile)
tiles.add(tile)
findReachable(tiles, color, tile)
}
}
}
var tileSets []*TileSet
idSeq := 0
for !tilesLeft.empty() {
tile := tilesLeft.pop()
color := grid[tile.row][tile.col]
tiles := newTileSet(idSeq, color)
tiles.add(tile)
idSeq++
findReachable(tiles, color, tile)
tileSets = append(tileSets, tiles)
}
adjacent := func(ts1, ts2 *TileSet) bool {
for a := range ts1.tiles {
for _, b := range adjacentTiles(a) {
if ts2.contains(b) {
return true
}
}
}
return false
}
regions := make(map[int]*Region)
for i, ts := range tileSets {
regions[i] = &Region{
id: ts.id,
color: ts.color,
tile: ts.topLeft(),
}
}
for i, ts1 := range tileSets {
r1 := regions[ts1.id]
for _, ts2 := range tileSets[i+1:] |
}
return regions
}
type Region struct {
id, color int
tile Tile
neighbors intSet
}
func (r *Region) Copy() *Region {
r1 := *r
r1.neighbors = r.neighbors.Copy()
return &r1
}
type Board struct {
regions map[int]*Region
}
func newBoard(grid [][]int) *Board { return &Board{regions: findRegions(grid)} }
func (b *Board) solved() bool {
return len(b.regions) == 1
}
func (b *Board) numColors() int {
seen := make([]bool, MAX_COLOR)
for _, r := range b.regions {
seen[r.color] = true
}
n := 0
for _, b := range seen {
if b {
n++
}
}
return n
}
func (b *Board) colorsAdjacentToRegion(region *Region) intSet {
colors := newIntSet(MAX_COLOR)
for _, neighborID := range region.neighbors {
neighbor := b.regions[neighborID]
colors.add(neighbor.color)
}
return colors
}
func (b *Board) recolor(regionID, color int) *Board {
recolored := b.regions[regionID].Copy()
recolored | {
if adjacent(ts1, ts2) {
r2 := regions[ts2.id]
r1.neighbors.add(r2.id)
r2.neighbors.add(r1.id)
}
} | conditional_block |
solve_kami.go | bestIndex = d, s, i
}
}
return fromColorful(best), bestIndex
}
const outputPrefix = "/tmp/kami_" // see savePNG
// Encodes the image as PNG with the filename outputPrefix+name+".png".
func savePNG(name string, img image.Image) {
filename := outputPrefix + name + ".png"
fp, err := os.Create(filename)
if err != nil {
log.Fatal(err)
}
err = png.Encode(fp, img)
if err != nil {
log.Fatal(err)
}
}
// Decodes the specified file as a PNG image.
func loadPNG(filename string) image.Image {
fp, err := os.Open(filename)
if err != nil {
log.Fatal(err)
}
img, err := png.Decode(fp)
if err != nil {
log.Fatal(err)
}
return img
}
func toColorful(c color.Color) colorful.Color {
c0 := color.RGBAModel.Convert(c).(color.RGBA)
return colorful.Color{
R: float64(c0.R) / float64(0xFF),
G: float64(c0.G) / float64(0xFF),
B: float64(c0.B) / float64(0xFF),
}
}
func fromColorful(c colorful.Color) color.Color {
r, g, b, a := c.RGBA()
return color.RGBA{R: uint8(r >> 8), G: uint8(g >> 8), B: uint8(b >> 8), A: uint8(a >> 8)}
}
// Reports the average color of an image.
func averageColor(src image.Image) color.Color {
b := src.Bounds()
var sum struct{ c, y, m, k float64 }
n := 0
for y := b.Min.Y; y < b.Max.Y; y++ {
for x := b.Min.X; x < b.Max.X; x++ {
c := color.CMYKModel.Convert(src.At(x, y)).(color.CMYK)
sum.c += float64(c.C)
sum.m += float64(c.M)
sum.y += float64(c.Y)
sum.k += float64(c.K)
n++
}
}
d := float64(n)
return color.CMYK{
C: uint8(sum.c / d),
M: uint8(sum.m / d),
Y: uint8(sum.y / d),
K: uint8(sum.k / d),
}
}
// Converts an image to the RGBA type.
func convertToRGBA(src image.Image) *image.RGBA {
b := src.Bounds()
m := image.NewRGBA(image.Rect(0, 0, b.Dx(), b.Dy()))
draw.Draw(m, m.Bounds(), src, b.Min, draw.Src)
return m
}
func parseInt(s string) int {
n, err := strconv.Atoi(s)
if err != nil {
log.Fatal(err)
}
return n
}
type Tile struct{ row, col int }
func (t Tile) add(v Tile) Tile {
return Tile{row: t.row + v.row, col: t.col + v.col}
}
var dirs = []Tile{{-1, 0}, {+1, 0}, {0, -1}, {0, +1}}
func adjacentTiles(t Tile) [4]Tile {
var a [4]Tile
for i, d := range dirs {
a[i] = t.add(d)
}
return a
}
type TileSet struct {
id int
color int
tiles map[Tile]bool
}
func newTileSet(id, color int) *TileSet {
return &TileSet{
id: id,
color: color,
tiles: make(map[Tile]bool),
}
}
func (ts *TileSet) add(t Tile) { ts.tiles[t] = true }
func (ts *TileSet) remove(t Tile) { delete(ts.tiles, t) }
func (ts *TileSet) pop() Tile {
var t Tile
for t = range ts.tiles {
break
}
ts.remove(t)
return t
}
func (ts *TileSet) contains(t Tile) bool { return ts.tiles[t] }
func (ts *TileSet) empty() bool { return len(ts.tiles) == 0 }
func (ts *TileSet) topLeft() Tile {
t0 := Tile{row: ROWS, col: COLS}
for t1 := range ts.tiles {
if t1.row < t0.row || (t1.row == t0.row && t1.col < t0.col) {
t0 = t1
}
}
return t0
}
func findRegions(grid [][]int) map[int]*Region {
tilesLeft := newTileSet(-1, -1)
for r := 0; r < ROWS; r++ {
for c := 0; c < COLS; c++ {
tilesLeft.add(Tile{r, c})
}
}
var findReachable func(tiles *TileSet, color int, from Tile)
findReachable = func(tiles *TileSet, color int, from Tile) {
for _, tile := range adjacentTiles(from) {
if tilesLeft.contains(tile) && grid[tile.row][tile.col] == color {
tilesLeft.remove(tile)
tiles.add(tile)
findReachable(tiles, color, tile)
}
}
}
var tileSets []*TileSet
idSeq := 0
for !tilesLeft.empty() {
tile := tilesLeft.pop()
color := grid[tile.row][tile.col]
tiles := newTileSet(idSeq, color)
tiles.add(tile)
idSeq++
findReachable(tiles, color, tile)
tileSets = append(tileSets, tiles)
}
adjacent := func(ts1, ts2 *TileSet) bool {
for a := range ts1.tiles {
for _, b := range adjacentTiles(a) {
if ts2.contains(b) {
return true
}
}
}
return false
}
regions := make(map[int]*Region)
for i, ts := range tileSets {
regions[i] = &Region{
id: ts.id,
color: ts.color,
tile: ts.topLeft(),
}
}
for i, ts1 := range tileSets {
r1 := regions[ts1.id]
for _, ts2 := range tileSets[i+1:] {
if adjacent(ts1, ts2) {
r2 := regions[ts2.id]
r1.neighbors.add(r2.id)
r2.neighbors.add(r1.id)
}
}
}
return regions
}
type Region struct {
id, color int
tile Tile
neighbors intSet
}
func (r *Region) Copy() *Region {
r1 := *r
r1.neighbors = r.neighbors.Copy()
return &r1
}
type Board struct {
regions map[int]*Region
}
func newBoard(grid [][]int) *Board { return &Board{regions: findRegions(grid)} }
func (b *Board) solved() bool {
return len(b.regions) == 1
}
func (b *Board) numColors() int {
seen := make([]bool, MAX_COLOR)
for _, r := range b.regions {
seen[r.color] = true
}
n := 0
for _, b := range seen {
if b {
n++
}
}
return n
}
func (b *Board) colorsAdjacentToRegion(region *Region) intSet {
colors := newIntSet(MAX_COLOR)
for _, neighborID := range region.neighbors {
neighbor := b.regions[neighborID]
colors.add(neighbor.color)
}
return colors
}
func (b *Board) recolor(regionID, color int) *Board {
recolored := b.regions[regionID].Copy()
recolored.color = color
removed := newIntSet(len(recolored.neighbors))
for _, id := range recolored.neighbors {
neighbor := b.regions[id]
if neighbor.color == color {
removed.add(id)
}
}
regions := make(map[int]*Region, len(b.regions)-len(removed))
for _, r := range b.regions {
if !removed.contains(r.id) {
regions[r.id] = r
}
}
regions[recolored.id] = recolored
copyOnWrite := func(r *Region) *Region {
if regions[r.id] != b.regions[r.id] {
return r
}
c := r.Copy()
regions[c.id] = c
return c
}
for _, removedID := range removed {
for _, neighborID := range b.regions[removedID].neighbors {
if neighborID == recolored.id {
continue
}
if nn, ok := regions[neighborID]; ok {
nn = copyOnWrite(nn)
nn.neighbors.remove(removedID)
nn.neighbors.add(recolored.id)
recolored.neighbors.add(nn.id)
}
}
recolored.neighbors.remove(removedID)
}
return &Board{regions: regions}
}
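// intSet is a small integer set backed by a slice; operations are linear, which
// is cheap for the handful of neighbor ids a region keeps.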
type intSet []int
func newIntSet(cap int) intSet { return make([]int, 0, cap) }
func (s intSet) Copy() intSet | {
a := make([]int, len(s))
copy(a, s)
return a
} | identifier_body |
|
solve_kami.go | func extractSwatches(src *image.RGBA, numColors int) []colorful.Color {
const (
W = 400
H = 75
)
var swatches []colorful.Color
b := src.Bounds()
sw := W / numColors
for i := 0; i < numColors; i++ {
m := src.SubImage(trimRect(image.Rect(b.Min.X+i*sw, b.Max.Y-H, b.Min.X+(i+1)*sw, b.Max.Y), 10))
swatches = append(swatches, toColorful(averageColor(m)))
savePNG(strconv.Itoa(i), m) // for debugging
}
const dim = 50
m := image.NewRGBA(image.Rect(0, 0, dim*len(swatches), dim))
for i, c := range swatches {
r := image.Rect(i*dim, 0, (i+1)*dim, dim)
draw.Draw(m, r, &image.Uniform{fromColorful(c)}, image.ZP, draw.Src)
}
savePNG("swatches", m) // for debugging
return swatches
}
// Reports which of the swatches is most similar to c.
func nearestSwatch(swatches []colorful.Color, c color.Color) (color.Color, int) {
c0 := toColorful(c)
minDist := math.MaxFloat64
var best colorful.Color
bestIndex := -1
for i, s := range swatches {
if d := c0.DistanceCIE94(s); d < minDist {
minDist, best, bestIndex = d, s, i
}
}
return fromColorful(best), bestIndex
}
const outputPrefix = "/tmp/kami_" // see savePNG
// Encodes the image as PNG with the filename outputPrefix+name+".png".
func | (name string, img image.Image) {
filename := outputPrefix + name + ".png"
fp, err := os.Create(filename)
if err != nil {
log.Fatal(err)
}
err = png.Encode(fp, img)
if err != nil {
log.Fatal(err)
}
}
// Decodes the specified file as a PNG image.
func loadPNG(filename string) image.Image {
fp, err := os.Open(filename)
if err != nil {
log.Fatal(err)
}
img, err := png.Decode(fp)
if err != nil {
log.Fatal(err)
}
return img
}
func toColorful(c color.Color) colorful.Color {
c0 := color.RGBAModel.Convert(c).(color.RGBA)
return colorful.Color{
R: float64(c0.R) / float64(0xFF),
G: float64(c0.G) / float64(0xFF),
B: float64(c0.B) / float64(0xFF),
}
}
func fromColorful(c colorful.Color) color.Color {
r, g, b, a := c.RGBA()
return color.RGBA{R: uint8(r >> 8), G: uint8(g >> 8), B: uint8(b >> 8), A: uint8(a >> 8)}
}
// Reports the average color of an image.
func averageColor(src image.Image) color.Color {
b := src.Bounds()
var sum struct{ c, y, m, k float64 }
n := 0
for y := b.Min.Y; y < b.Max.Y; y++ {
for x := b.Min.X; x < b.Max.X; x++ {
c := color.CMYKModel.Convert(src.At(x, y)).(color.CMYK)
sum.c += float64(c.C)
sum.m += float64(c.M)
sum.y += float64(c.Y)
sum.k += float64(c.K)
n++
}
}
d := float64(n)
return color.CMYK{
C: uint8(sum.c / d),
M: uint8(sum.m / d),
Y: uint8(sum.y / d),
K: uint8(sum.k / d),
}
}
// Converts an image to the RGBA type.
func convertToRGBA(src image.Image) *image.RGBA {
b := src.Bounds()
m := image.NewRGBA(image.Rect(0, 0, b.Dx(), b.Dy()))
draw.Draw(m, m.Bounds(), src, b.Min, draw.Src)
return m
}
func parseInt(s string) int {
n, err := strconv.Atoi(s)
if err != nil {
log.Fatal(err)
}
return n
}
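// Tile addresses a single cell of the puzzle grid by row and column.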
type Tile struct{ row, col int }
func (t Tile) add(v Tile) Tile {
return Tile{row: t.row + v.row, col: t.col + v.col}
}
var dirs = []Tile{{-1, 0}, {+1, 0}, {0, -1}, {0, +1}}
func adjacentTiles(t Tile) [4]Tile {
var a [4]Tile
for i, d := range dirs {
a[i] = t.add(d)
}
return a
}
type TileSet struct {
id int
color int
tiles map[Tile]bool
}
func newTileSet(id, color int) *TileSet {
return &TileSet{
id: id,
color: color,
tiles: make(map[Tile]bool),
}
}
func (ts *TileSet) add(t Tile) { ts.tiles[t] = true }
func (ts *TileSet) remove(t Tile) { delete(ts.tiles, t) }
func (ts *TileSet) pop() Tile {
var t Tile
for t = range ts.tiles {
break
}
ts.remove(t)
return t
}
func (ts *TileSet) contains(t Tile) bool { return ts.tiles[t] }
func (ts *TileSet) empty() bool { return len(ts.tiles) == 0 }
func (ts *TileSet) topLeft() Tile {
t0 := Tile{row: ROWS, col: COLS}
for t1 := range ts.tiles {
if t1.row < t0.row || (t1.row == t0.row && t1.col < t0.col) {
t0 = t1
}
}
return t0
}
func findRegions(grid [][]int) map[int]*Region {
tilesLeft := newTileSet(-1, -1)
for r := 0; r < ROWS; r++ {
for c := 0; c < COLS; c++ {
tilesLeft.add(Tile{r, c})
}
}
var findReachable func(tiles *TileSet, color int, from Tile)
findReachable = func(tiles *TileSet, color int, from Tile) {
for _, tile := range adjacentTiles(from) {
if tilesLeft.contains(tile) && grid[tile.row][tile.col] == color {
tilesLeft.remove(tile)
tiles.add(tile)
findReachable(tiles, color, tile)
}
}
}
var tileSets []*TileSet
idSeq := 0
for !tilesLeft.empty() {
tile := tilesLeft.pop()
color := grid[tile.row][tile.col]
tiles := newTileSet(idSeq, color)
tiles.add(tile)
idSeq++
findReachable(tiles, color, tile)
tileSets = append(tileSets, tiles)
}
adjacent := func(ts1, ts2 *TileSet) bool {
for a := range ts1.tiles {
for _, b := range adjacentTiles(a) {
if ts2.contains(b) {
return true
}
}
}
return false
}
regions := make(map[int]*Region)
for i, ts := range tileSets {
regions[i] = &Region{
id: ts.id,
color: ts.color,
tile: ts.topLeft(),
}
}
for i, ts1 := range tileSets {
r1 := regions[ts1.id]
for _, ts2 := range tileSets[i+1:] {
if adjacent(ts1, ts2) {
r2 := regions[ts2.id]
r1.neighbors.add(r2.id)
r2.neighbors.add(r1.id)
}
}
}
return regions
}
type Region struct {
id, color int
tile Tile
neighbors intSet
}
func (r *Region) Copy() *Region {
r1 := *r
r1.neighbors = r.neighbors.Copy()
return &r1
}
type Board struct {
regions map[int]*Region
}
func newBoard(grid [][]int) *Board { return &Board{regions: findRegions(grid)} }
func (b *Board) solved() bool {
return len(b.regions) == 1
}
func (b *Board) numColors() int {
seen := make([]bool, MAX_COLOR)
for _, r := range b.regions {
seen[r.color] = true
}
n := 0
for _, b := range seen {
if b {
n++
}
}
return n
}
func (b *Board) colorsAdjacentToRegion(region *Region) intSet {
colors := newIntSet(MAX_COLOR)
for _, neighborID := range region.neighbors {
neighbor := b.regions[neighborID]
colors.add(neighbor.color)
}
return colors
}
func (b *Board) recolor(regionID, color int) *Board {
recolored := b.regions[regionID].Copy()
recolored.color | savePNG | identifier_name |
sexprs.go | receiver and argument are
// identical.
Equal(b Sexp) bool
}
type List []Sexp
type Atom struct {
DisplayHint []byte
Value []byte
}
func (a Atom) Pack() []byte {
buf := bytes.NewBuffer(nil)
a.pack(buf)
return buf.Bytes()
}
func (a Atom) pack(buf *bytes.Buffer) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
buf.WriteString("[" + strconv.Itoa(len(a.DisplayHint)) + ":")
buf.Write(a.DisplayHint)
buf.WriteString("]")
}
buf.WriteString(strconv.Itoa(len(a.Value)) + ":")
buf.Write(a.Value)
}
func (a Atom) PackedLen() (size int) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
size += 3 // [:]
size += len(strconv.Itoa(len(a.DisplayHint))) // decimal length
size += len(a.DisplayHint)
}
size += len(strconv.Itoa(len(a.DisplayHint)))
size++ // :
return size + len(a.Value)
}
func (a Atom) String() string {
buf := bytes.NewBuffer(nil)
a.string(buf)
return buf.String()
}
const (
tokenEnc = iota
quotedEnc
base64Enc
)
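// The encodings above are tried in order of legibility: a bare token when every
// byte is a token character, a quoted string when the remaining bytes only need
// simple escapes, and base64 otherwise.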
// write a string in a legible encoding to buf
func writeString(buf *bytes.Buffer, a []byte) {
// test to see what sort of encoding is best to use
encoding := tokenEnc
acc := make([]byte, len(a), len(a))
for i, c := range a {
acc[i] = c
switch {
case bytes.IndexByte(tokenChar, c) > -1:
continue
case (encoding == tokenEnc) && bytes.IndexByte(stringEncChar, c) > -1:
encoding = quotedEnc
strAcc := make([]byte, i, len(a))
copy(strAcc, acc)
for j := i; j < len(a); j++ {
c := a[j]
if bytes.IndexByte(stringEncChar, c) < 0 {
encoding = base64Enc
break
}
switch c {
case '\b':
strAcc = append(strAcc, []byte("\\b")...)
case '\t':
strAcc = append(strAcc, []byte("\\t")...)
case '\v':
strAcc = append(strAcc, []byte("\\v")...)
case '\n':
strAcc = append(strAcc, []byte("\\n")...)
case '\f':
strAcc = append(strAcc, []byte("\\f")...)
case '"':
strAcc = append(strAcc, []byte("\\\"")...)
case '\'':
strAcc = append(strAcc, []byte("'")...)
case '\\':
strAcc = append(strAcc, []byte("\\\\")...)
case '\r':
strAcc = append(strAcc, []byte("\\r")...)
default:
strAcc = append(strAcc, c)
}
}
if encoding == quotedEnc {
buf.WriteString("\"")
buf.Write(strAcc)
buf.WriteString("\"")
return
}
default:
encoding = base64Enc
break
}
}
switch encoding {
case base64Enc:
buf.WriteString("|" + base64Encoding.EncodeToString(acc) + "|")
case tokenEnc:
buf.Write(acc)
default:
panic("Encoding is neither base64 nor token")
}
}
func (a Atom) string(buf *bytes.Buffer) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
buf.WriteString("[")
writeString(buf, a.DisplayHint)
buf.WriteString("]")
}
if len(a.Value) == 0 {
buf.WriteString("")
} else {
writeString(buf, a.Value)
}
return
}
func (a Atom) Base64String() (s string) {
return "{" + base64Encoding.EncodeToString(a.Pack()) + "}"
}
func (a Atom) Equal(b Sexp) bool {
switch b := b.(type) {
case Atom:
return bytes.Equal(a.DisplayHint, b.DisplayHint) && bytes.Equal(a.Value, b.Value)
default:
return false
}
return false
}
func (l List) Pack() []byte {
buf := bytes.NewBuffer(nil)
l.pack(buf)
return buf.Bytes()
}
func (l List) pack(buf *bytes.Buffer) {
buf.WriteString("(")
for _, datum := range l {
datum.pack(buf)
}
buf.WriteString(")")
}
func (l List) Base64String() string {
return "{" + base64Encoding.EncodeToString(l.Pack()) + "}"
}
func (l List) String() string {
buf := bytes.NewBuffer(nil)
l.string(buf)
return buf.String()
}
func (l List) string(buf *bytes.Buffer) {
buf.WriteString("(")
for i, datum := range l |
buf.WriteString(")")
}
func (a List) Equal(b Sexp) bool {
switch b := b.(type) {
case List:
if len(a) != len(b) {
return false
} else {
for i := range a {
if !a[i].Equal(b[i]) {
return false
}
}
return true
}
default:
return false
}
return false
}
func (l List) PackedLen() (size int) {
size = 2 // ()
for _, element := range l {
size += element.PackedLen()
}
return size
}
// Parse returns the first S-expression in byte string s, the unparsed
// rest of s and any error encountered
func Parse(s []byte) (sexpr Sexp, rest []byte, err error) {
//return parseSexp(bytes)
r := bufio.NewReader(bytes.NewReader(s))
sexpr, err = Read(r)
if err != nil && err != io.EOF {
return nil, nil, err
}
rest, err = ioutil.ReadAll(r)
// don't confuse calling code with EOFs
if err == io.EOF {
err = nil
}
return sexpr, rest, err
}
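// Example (illustrative):
//
//	sx, rest, err := Parse([]byte(`(3:abc "def")`))
//	// sx is a List of two Atoms; rest holds any bytes after the expression.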
func IsList(s Sexp) bool {
s, ok := s.(List)
return ok
}
// Read a single S-expression from buffered IO r, returning any error
// encountered. May return io.EOF if at end of r; may return a valid
// S-expression and io.EOF if the EOF was encountered at the end of
// parsing.
func Read(r *bufio.Reader) (s Sexp, err error) {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
switch c {
case '{':
enc, err := r.ReadBytes('}')
acc := make([]byte, 0, len(enc)-1)
for _, c := range enc[:len(enc)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
str := make([]byte, base64.StdEncoding.DecodedLen(len(acc)))
n, err := base64.StdEncoding.Decode(str, acc)
if err != nil {
return nil, err
}
s, err = Read(bufio.NewReader(bytes.NewReader(str[:n])))
if err == nil || err == io.EOF {
return s, nil
} else {
return nil, err
}
case '(':
l := List{}
// skip whitespace
for {
c, err := r.ReadByte()
switch {
case c == ')':
return l, err
case bytes.IndexByte(whitespaceChar, c) == -1:
r.UnreadByte()
element, err := Read(r)
if err != nil {
return nil, err
}
l = append(l, element)
}
if err != nil {
return nil, err
}
}
default:
return readString(r, c)
}
if err != nil {
return s, err
}
panic("Can't reach here")
}
func readString(r *bufio.Reader, first byte) (s Sexp, err error) {
var displayHint []byte
if first == '[' {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
displayHint, err = readSimpleString(r, c)
if err != nil {
return nil, err
}
c, err = r.ReadByte()
if err != nil {
return nil, err
}
if c != ']' {
return nil, fmt.Errorf("']' expected to end display hint; %c found", c)
}
first, _ = r.ReadByte() // let error be caught by readSimpleString
}
str, err := readSimpleString(r, first)
return Atom{Value: str, DisplayHint: displayHint}, err
}
func readSimpleString(r *bufio.Reader, first byte) (s []byte, err error) {
switch {
case bytes.IndexByte(decimalDigit, first) > -1:
return readLengthDelimited(r, first)
case first == '#':
s, err := readHex(r)
if err != nil {
return nil, err
}
return s, | {
datum.string(buf)
if i < len(l)-1 {
buf.WriteString(" ")
}
} | conditional_block |
sexprs.go | .WriteString("\"")
return
}
default:
encoding = base64Enc
break
}
}
switch encoding {
case base64Enc:
buf.WriteString("|" + base64Encoding.EncodeToString(acc) + "|")
case tokenEnc:
buf.Write(acc)
default:
panic("Encoding is neither base64 nor token")
}
}
func (a Atom) string(buf *bytes.Buffer) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
buf.WriteString("[")
writeString(buf, a.DisplayHint)
buf.WriteString("]")
}
if len(a.Value) == 0 {
buf.WriteString("")
} else {
writeString(buf, a.Value)
}
return
}
func (a Atom) Base64String() (s string) {
return "{" + base64Encoding.EncodeToString(a.Pack()) + "}"
}
func (a Atom) Equal(b Sexp) bool {
switch b := b.(type) {
case Atom:
return bytes.Equal(a.DisplayHint, b.DisplayHint) && bytes.Equal(a.Value, b.Value)
default:
return false
}
return false
}
func (l List) Pack() []byte {
buf := bytes.NewBuffer(nil)
l.pack(buf)
return buf.Bytes()
}
func (l List) pack(buf *bytes.Buffer) {
buf.WriteString("(")
for _, datum := range l {
datum.pack(buf)
}
buf.WriteString(")")
}
func (l List) Base64String() string {
return "{" + base64Encoding.EncodeToString(l.Pack()) + "}"
}
func (l List) String() string {
buf := bytes.NewBuffer(nil)
l.string(buf)
return buf.String()
}
func (l List) string(buf *bytes.Buffer) {
buf.WriteString("(")
for i, datum := range l {
datum.string(buf)
if i < len(l)-1 {
buf.WriteString(" ")
}
}
buf.WriteString(")")
}
func (a List) Equal(b Sexp) bool {
switch b := b.(type) {
case List:
if len(a) != len(b) {
return false
} else {
for i := range a {
if !a[i].Equal(b[i]) {
return false
}
}
return true
}
default:
return false
}
return false
}
func (l List) PackedLen() (size int) {
size = 2 // ()
for _, element := range l {
size += element.PackedLen()
}
return size
}
// Parse returns the first S-expression in byte string s, the unparsed
// rest of s and any error encountered
func Parse(s []byte) (sexpr Sexp, rest []byte, err error) {
//return parseSexp(bytes)
r := bufio.NewReader(bytes.NewReader(s))
sexpr, err = Read(r)
if err != nil && err != io.EOF {
return nil, nil, err
}
rest, err = ioutil.ReadAll(r)
// don't confuse calling code with EOFs
if err == io.EOF {
err = nil
}
return sexpr, rest, err
}
func IsList(s Sexp) bool {
s, ok := s.(List)
return ok
}
// Read a single S-expression from buffered IO r, returning any error
// encountered. May return io.EOF if at end of r; may return a valid
// S-expression and io.EOF if the EOF was encountered at the end of
// parsing.
func Read(r *bufio.Reader) (s Sexp, err error) {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
switch c {
case '{':
enc, err := r.ReadBytes('}')
acc := make([]byte, 0, len(enc)-1)
for _, c := range enc[:len(enc)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
str := make([]byte, base64.StdEncoding.DecodedLen(len(acc)))
n, err := base64.StdEncoding.Decode(str, acc)
if err != nil {
return nil, err
}
s, err = Read(bufio.NewReader(bytes.NewReader(str[:n])))
if err == nil || err == io.EOF {
return s, nil
} else {
return nil, err
}
case '(':
l := List{}
// skip whitespace
for {
c, err := r.ReadByte()
switch {
case c == ')':
return l, err
case bytes.IndexByte(whitespaceChar, c) == -1:
r.UnreadByte()
element, err := Read(r)
if err != nil {
return nil, err
}
l = append(l, element)
}
if err != nil {
return nil, err
}
}
default:
return readString(r, c)
}
if err != nil {
return s, err
}
panic("Can't reach here")
}
func readString(r *bufio.Reader, first byte) (s Sexp, err error) {
var displayHint []byte
if first == '[' {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
displayHint, err = readSimpleString(r, c)
if err != nil {
return nil, err
}
c, err = r.ReadByte()
if err != nil {
return nil, err
}
if c != ']' {
return nil, fmt.Errorf("']' expected to end display hint; %c found", c)
}
first, _ = r.ReadByte() // let error be caught by readSimpleString
}
str, err := readSimpleString(r, first)
return Atom{Value: str, DisplayHint: displayHint}, err
}
func readSimpleString(r *bufio.Reader, first byte) (s []byte, err error) {
switch {
case bytes.IndexByte(decimalDigit, first) > -1:
return readLengthDelimited(r, first)
case first == '#':
s, err := readHex(r)
if err != nil {
return nil, err
}
return s, nil
case first == '|':
s, err := readBase64(r)
if err != nil {
return nil, err
}
return s, nil
case first == '"':
s, err := readQuotedString(r, -1)
if err != nil {
return nil, err
}
return s, nil
case bytes.IndexByte(tokenChar, first) > -1:
s = append(s, first)
for {
c, err := r.ReadByte()
if bytes.IndexByte(tokenChar, c) == -1 {
r.UnreadByte()
return s, err
}
s = append(s, c)
if err != nil {
return nil, err
}
}
}
panic("can't get here")
}
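// readLengthDelimited handles the length-prefixed forms "N:raw", "N#hex#" and
// "N|base64|"; for the encoded forms the decoded length must equal N.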
func readLengthDelimited(r *bufio.Reader, first byte) (s []byte, err error) {
acc := make([]byte, 1)
acc[0] = first
for {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
switch {
case bytes.IndexByte(decimalDigit, c) > -1:
acc = append(acc, c)
case c == ':':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
acc = make([]byte, 0, length)
buf := make([]byte, length)
for n, err := r.Read(buf); int64(len(acc)) < length; n, err = r.Read(buf[:length-int64(len(acc))]) {
acc = append(acc, buf[:n]...)
if err != nil {
return acc, err
}
}
return acc, nil
case c == '#':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
s, err := readHex(r)
switch {
case len(s) != int(length):
return nil, fmt.Errorf("Expected %d bytes; got %d", length, len(s))
default:
return s, err
}
case c == '|':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
s, err := readBase64(r)
switch {
case len(s) != int(length):
return nil, fmt.Errorf("Expected %d bytes; got %d", length, len(s))
default:
return s, err
}
default:
return nil, fmt.Errorf("Expected integer; found %c", c)
}
}
panic("Can't get here")
}
func readHex(r *bufio.Reader) (s []byte, err error) {
raw, err := r.ReadBytes('#')
acc := make([]byte, 0, len(raw)-1)
for _, c := range raw[:len(raw)-1] { | random_line_split |
||
sexprs.go | }
return false
}
func (l List) Pack() []byte {
buf := bytes.NewBuffer(nil)
l.pack(buf)
return buf.Bytes()
}
func (l List) pack(buf *bytes.Buffer) {
buf.WriteString("(")
for _, datum := range l {
datum.pack(buf)
}
buf.WriteString(")")
}
func (l List) Base64String() string {
return "{" + base64Encoding.EncodeToString(l.Pack()) + "}"
}
func (l List) String() string {
buf := bytes.NewBuffer(nil)
l.string(buf)
return buf.String()
}
func (l List) string(buf *bytes.Buffer) {
buf.WriteString("(")
for i, datum := range l {
datum.string(buf)
if i < len(l)-1 {
buf.WriteString(" ")
}
}
buf.WriteString(")")
}
func (a List) Equal(b Sexp) bool {
switch b := b.(type) {
case List:
if len(a) != len(b) {
return false
} else {
for i := range a {
if !a[i].Equal(b[i]) {
return false
}
}
return true
}
default:
return false
}
return false
}
func (l List) PackedLen() (size int) {
size = 2 // ()
for _, element := range l {
size += element.PackedLen()
}
return size
}
// Parse returns the first S-expression in byte string s, the unparsed
// rest of s and any error encountered
func Parse(s []byte) (sexpr Sexp, rest []byte, err error) {
//return parseSexp(bytes)
r := bufio.NewReader(bytes.NewReader(s))
sexpr, err = Read(r)
if err != nil && err != io.EOF {
return nil, nil, err
}
rest, err = ioutil.ReadAll(r)
// don't confuse calling code with EOFs
if err == io.EOF {
err = nil
}
return sexpr, rest, err
}
func IsList(s Sexp) bool {
s, ok := s.(List)
return ok
}
// Read a single S-expression from buffered IO r, returning any error
// encountered. May return io.EOF if at end of r; may return a valid
// S-expression and io.EOF if the EOF was encountered at the end of
// parsing.
func Read(r *bufio.Reader) (s Sexp, err error) {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
switch c {
case '{':
enc, err := r.ReadBytes('}')
acc := make([]byte, 0, len(enc)-1)
for _, c := range enc[:len(enc)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
str := make([]byte, base64.StdEncoding.DecodedLen(len(acc)))
n, err := base64.StdEncoding.Decode(str, acc)
if err != nil {
return nil, err
}
s, err = Read(bufio.NewReader(bytes.NewReader(str[:n])))
if err == nil || err == io.EOF {
return s, nil
} else {
return nil, err
}
case '(':
l := List{}
// skip whitespace
for {
c, err := r.ReadByte()
switch {
case c == ')':
return l, err
case bytes.IndexByte(whitespaceChar, c) == -1:
r.UnreadByte()
element, err := Read(r)
if err != nil {
return nil, err
}
l = append(l, element)
}
if err != nil {
return nil, err
}
}
default:
return readString(r, c)
}
if err != nil {
return s, err
}
panic("Can't reach here")
}
func readString(r *bufio.Reader, first byte) (s Sexp, err error) {
var displayHint []byte
if first == '[' {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
displayHint, err = readSimpleString(r, c)
if err != nil {
return nil, err
}
c, err = r.ReadByte()
if err != nil {
return nil, err
}
if c != ']' {
return nil, fmt.Errorf("']' expected to end display hint; %c found", c)
}
first, _ = r.ReadByte() // let error be caught by readSimpleString
}
str, err := readSimpleString(r, first)
return Atom{Value: str, DisplayHint: displayHint}, err
}
func readSimpleString(r *bufio.Reader, first byte) (s []byte, err error) {
switch {
case bytes.IndexByte(decimalDigit, first) > -1:
return readLengthDelimited(r, first)
case first == '#':
s, err := readHex(r)
if err != nil {
return nil, err
}
return s, nil
case first == '|':
s, err := readBase64(r)
if err != nil {
return nil, err
}
return s, nil
case first == '"':
s, err := readQuotedString(r, -1)
if err != nil {
return nil, err
}
return s, nil
case bytes.IndexByte(tokenChar, first) > -1:
s = append(s, first)
for {
c, err := r.ReadByte()
if bytes.IndexByte(tokenChar, c) == -1 {
r.UnreadByte()
return s, err
}
s = append(s, c)
if err != nil {
return nil, err
}
}
}
panic("can't get here")
}
func readLengthDelimited(r *bufio.Reader, first byte) (s []byte, err error) {
acc := make([]byte, 1)
acc[0] = first
for {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
switch {
case bytes.IndexByte(decimalDigit, c) > -1:
acc = append(acc, c)
case c == ':':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
acc = make([]byte, 0, length)
buf := make([]byte, length)
for n, err := r.Read(buf); int64(len(acc)) < length; n, err = r.Read(buf[:length-int64(len(acc))]) {
acc = append(acc, buf[:n]...)
if err != nil {
return acc, err
}
}
return acc, nil
case c == '#':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
s, err := readHex(r)
switch {
case len(s) != int(length):
return nil, fmt.Errorf("Expected %d bytes; got %d", length, len(s))
default:
return s, err
}
case c == '|':
length, err := strconv.ParseInt(string(acc), 10, 32)
if err != nil {
return nil, err
}
s, err := readBase64(r)
switch {
case len(s) != int(length):
return nil, fmt.Errorf("Expected %d bytes; got %d", length, len(s))
default:
return s, err
}
default:
return nil, fmt.Errorf("Expected integer; found %c", c)
}
}
panic("Can't get here")
}
func readHex(r *bufio.Reader) (s []byte, err error) {
raw, err := r.ReadBytes('#')
acc := make([]byte, 0, len(raw)-1)
for _, c := range raw[:len(raw)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
s = make([]byte, hex.DecodedLen(len(acc)))
n, err := hex.Decode(s, acc)
return s[:n], err
}
func readBase64(r *bufio.Reader) (s []byte, err error) {
raw, err := r.ReadBytes('|')
acc := make([]byte, 0, len(raw)-1)
for _, c := range raw[:len(raw)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
s = make([]byte, base64.StdEncoding.DecodedLen(len(acc)))
n, err := base64.StdEncoding.Decode(s, acc)
return s[:n], err
}
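// quoteState tracks where the quoted-string reader is within an escape
// sequence: plain text, simple backslash escapes, line continuations, and the
// \xHH and \OOO numeric forms.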
type quoteState int
const (
inQuote quoteState = iota
inEscape
inNewlineEscape
inReturnEscape
inHex1
inHex2
inOctal1
inOctal2
inOctal3
)
func | readQuotedString | identifier_name |
|
sexprs.go | receiver and argument are
// identical.
Equal(b Sexp) bool
}
type List []Sexp
type Atom struct {
DisplayHint []byte
Value []byte
}
func (a Atom) Pack() []byte {
buf := bytes.NewBuffer(nil)
a.pack(buf)
return buf.Bytes()
}
func (a Atom) pack(buf *bytes.Buffer) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
buf.WriteString("[" + strconv.Itoa(len(a.DisplayHint)) + ":")
buf.Write(a.DisplayHint)
buf.WriteString("]")
}
buf.WriteString(strconv.Itoa(len(a.Value)) + ":")
buf.Write(a.Value)
}
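// For example, Atom{DisplayHint: []byte("text/plain"), Value: []byte("hi")}
// packs to the canonical form "[10:text/plain]2:hi".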
func (a Atom) PackedLen() (size int) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
size += 3 // [:]
size += len(strconv.Itoa(len(a.DisplayHint))) // decimal length
size += len(a.DisplayHint)
}
size += len(strconv.Itoa(len(a.DisplayHint)))
size++ // :
return size + len(a.Value)
}
func (a Atom) String() string |
const (
tokenEnc = iota
quotedEnc
base64Enc
)
// write a string in a legible encoding to buf
func writeString(buf *bytes.Buffer, a []byte) {
// test to see what sort of encoding is best to use
encoding := tokenEnc
acc := make([]byte, len(a), len(a))
for i, c := range a {
acc[i] = c
switch {
case bytes.IndexByte(tokenChar, c) > -1:
continue
case (encoding == tokenEnc) && bytes.IndexByte(stringEncChar, c) > -1:
encoding = quotedEnc
strAcc := make([]byte, i, len(a))
copy(strAcc, acc)
for j := i; j < len(a); j++ {
c := a[j]
if bytes.IndexByte(stringEncChar, c) < 0 {
encoding = base64Enc
break
}
switch c {
case '\b':
strAcc = append(strAcc, []byte("\\b")...)
case '\t':
strAcc = append(strAcc, []byte("\\t")...)
case '\v':
strAcc = append(strAcc, []byte("\\v")...)
case '\n':
strAcc = append(strAcc, []byte("\\n")...)
case '\f':
strAcc = append(strAcc, []byte("\\f")...)
case '"':
strAcc = append(strAcc, []byte("\\\"")...)
case '\'':
strAcc = append(strAcc, []byte("'")...)
case '\\':
strAcc = append(strAcc, []byte("\\\\")...)
case '\r':
strAcc = append(strAcc, []byte("\\r")...)
default:
strAcc = append(strAcc, c)
}
}
if encoding == quotedEnc {
buf.WriteString("\"")
buf.Write(strAcc)
buf.WriteString("\"")
return
}
default:
encoding = base64Enc
break
}
}
switch encoding {
case base64Enc:
buf.WriteString("|" + base64Encoding.EncodeToString(acc) + "|")
case tokenEnc:
buf.Write(acc)
default:
panic("Encoding is neither base64 nor token")
}
}
func (a Atom) string(buf *bytes.Buffer) {
if a.DisplayHint != nil && len(a.DisplayHint) > 0 {
buf.WriteString("[")
writeString(buf, a.DisplayHint)
buf.WriteString("]")
}
if len(a.Value) == 0 {
buf.WriteString("")
} else {
writeString(buf, a.Value)
}
return
}
func (a Atom) Base64String() (s string) {
return "{" + base64Encoding.EncodeToString(a.Pack()) + "}"
}
func (a Atom) Equal(b Sexp) bool {
switch b := b.(type) {
case Atom:
return bytes.Equal(a.DisplayHint, b.DisplayHint) && bytes.Equal(a.Value, b.Value)
default:
return false
}
return false
}
func (l List) Pack() []byte {
buf := bytes.NewBuffer(nil)
l.pack(buf)
return buf.Bytes()
}
func (l List) pack(buf *bytes.Buffer) {
buf.WriteString("(")
for _, datum := range l {
datum.pack(buf)
}
buf.WriteString(")")
}
func (l List) Base64String() string {
return "{" + base64Encoding.EncodeToString(l.Pack()) + "}"
}
func (l List) String() string {
buf := bytes.NewBuffer(nil)
l.string(buf)
return buf.String()
}
func (l List) string(buf *bytes.Buffer) {
buf.WriteString("(")
for i, datum := range l {
datum.string(buf)
if i < len(l)-1 {
buf.WriteString(" ")
}
}
buf.WriteString(")")
}
func (a List) Equal(b Sexp) bool {
switch b := b.(type) {
case List:
if len(a) != len(b) {
return false
} else {
for i := range a {
if !a[i].Equal(b[i]) {
return false
}
}
return true
}
default:
return false
}
return false
}
func (l List) PackedLen() (size int) {
size = 2 // ()
for _, element := range l {
size += element.PackedLen()
}
return size
}
// Parse returns the first S-expression in byte string s, the unparsed
// rest of s and any error encountered
func Parse(s []byte) (sexpr Sexp, rest []byte, err error) {
//return parseSexp(bytes)
r := bufio.NewReader(bytes.NewReader(s))
sexpr, err = Read(r)
if err != nil && err != io.EOF {
return nil, nil, err
}
rest, err = ioutil.ReadAll(r)
// don't confuse calling code with EOFs
if err == io.EOF {
err = nil
}
return sexpr, rest, err
}
func IsList(s Sexp) bool {
s, ok := s.(List)
return ok
}
// Read a single S-expression from buffered IO r, returning any error
// encountered. May return io.EOF if at end of r; may return a valid
// S-expression and io.EOF if the EOF was encountered at the end of
// parsing.
func Read(r *bufio.Reader) (s Sexp, err error) {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
switch c {
case '{':
enc, err := r.ReadBytes('}')
acc := make([]byte, 0, len(enc)-1)
for _, c := range enc[:len(enc)-1] {
if bytes.IndexByte(whitespaceChar, c) == -1 {
acc = append(acc, c)
}
}
str := make([]byte, base64.StdEncoding.DecodedLen(len(acc)))
n, err := base64.StdEncoding.Decode(str, acc)
if err != nil {
return nil, err
}
s, err = Read(bufio.NewReader(bytes.NewReader(str[:n])))
if err == nil || err == io.EOF {
return s, nil
} else {
return nil, err
}
case '(':
l := List{}
// skip whitespace
for {
c, err := r.ReadByte()
switch {
case c == ')':
return l, err
case bytes.IndexByte(whitespaceChar, c) == -1:
r.UnreadByte()
element, err := Read(r)
if err != nil {
return nil, err
}
l = append(l, element)
}
if err != nil {
return nil, err
}
}
default:
return readString(r, c)
}
if err != nil {
return s, err
}
panic("Can't reach here")
}
func readString(r *bufio.Reader, first byte) (s Sexp, err error) {
var displayHint []byte
if first == '[' {
c, err := r.ReadByte()
if err != nil {
return nil, err
}
displayHint, err = readSimpleString(r, c)
if err != nil {
return nil, err
}
c, err = r.ReadByte()
if err != nil {
return nil, err
}
if c != ']' {
return nil, fmt.Errorf("']' expected to end display hint; %c found", c)
}
first, _ = r.ReadByte() // let error be caught by readSimpleString
}
str, err := readSimpleString(r, first)
return Atom{Value: str, DisplayHint: displayHint}, err
}
func readSimpleString(r *bufio.Reader, first byte) (s []byte, err error) {
switch {
case bytes.IndexByte(decimalDigit, first) > -1:
return readLengthDelimited(r, first)
case first == '#':
s, err := readHex(r)
if err != nil {
return nil, err
}
return s, | {
buf := bytes.NewBuffer(nil)
a.string(buf)
return buf.String()
} | identifier_body |
dpfile.go | () {} // turn XML into diffs until you run out
dpw.Close() // write the end marker and flush output
dpr := dpfile.NewReader(in, workingDir, streaming) // streaming=true when reading from stdin
for dpr.ReadSegment() {} // turn diffs into XML as long as you can
dpr.Close() // wrap up
The writer implementation does some paperwork to run multiple DiffTasks at once.
There are some vestiges of support for diffing successive revs of a page
against each other (e.g., in an incremental file). This would be nice to revive
but isn't that close now.
Some potential format changes, some breaking, some not:
- append an "index" after the end marker mapping either SegmentKeys or byte ranges
to where their diffs start in the file (non-breaking)
- replace the placeholder source URL with the URL of a "manifest" file listing
everything available from the source
- replace the source names with URLs, and add (likely tab-separated) other data like
size/timestamp/crypto and non-crypto checksums
- any crypto checksum might be a tweaked mode we can run in parallel, e.g., break
input into 64kb pages then hash the hashes
- breaking, but could edit format now to make it a TSV table so it'd be sort of
nonbreaking
*/
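// Illustrative writer-side flow for the usage sketched above (error handling
// omitted; argument values are whatever the caller needs):
//
//	dpw := dpfile.NewWriter(zOut, workingDir, sourceNames, lastRevOnly, limitToNS, ns, cutMeta)
//	for dpw.WriteSegment() {
//	}
//	dpw.Close()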
// 386: individual values (segment lengths) need to be <2GB because of the ints
// here
func writeVarint(w io.Writer, val int) {
var encBuf [10]byte
i := binary.PutVarint(encBuf[:], int64(val))
_, err := w.Write(encBuf[:i])
if err != nil {
panic("failed to write number (varint)")
}
}
func writeUvarint(w io.Writer, val int) {
var encBuf [10]byte
i := binary.PutUvarint(encBuf[:], uint64(val))
_, err := w.Write(encBuf[:i])
if err != nil {
panic("failed to write number (uvarint)")
}
}
type checksum uint32
func dpchecksum(text []byte) checksum {
h := fnv.New32a()
h.Write(text)
return checksum(h.Sum32())
}
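// A dpchecksum of the source text is written before each diff and one of the
// target text after it (see DiffTask.Diff), so a reader can verify both the
// base it starts from and the segment it reconstructs.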
type DiffTask struct {
s diff.MatchState
source sref.SourceRef
resultBuf []byte
done chan int
}
type DPBlock struct {
Key mwxmlchunk.SegmentKey
Offs int64
}
type DPBlocks []DPBlock
type DPWriter struct {
out *bufio.Writer
zOut io.WriteCloser
sources []*mwxmlchunk.SegmentReader
lastSeg []byte
tasks []DiffTask
blocks DPBlocks
taskCh chan *DiffTask
slots int
winner int
}
type DPReader struct {
in *bufio.Reader
out *bufio.Writer
sources []io.ReaderAt
lastSeg []byte
ChangeDump bool
}
var MaxSourceLength = uint64(1e8)
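// Segments longer than MaxSourceLength are never used as diff bases;
// WriteSegment falls back to sref.SourceNotFound so the new text is effectively
// encoded against an empty base.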
func NewWriter(zOut io.WriteCloser, workingDir *os.File, sourceNames []string, lastRevOnly bool, limitToNS bool, ns int, cutMeta bool) (dpw DPWriter) {
for i, name := range sourceNames {
r, err := zip.Open(name, workingDir)
if err != nil {
panic("cannot open source: " + err.Error())
}
f := stream.NewReaderAt(r)
dpw.sources = append(
dpw.sources,
mwxmlchunk.NewSegmentReader(f, int64(i), lastRevOnly, limitToNS, ns, cutMeta),
)
// only use snipping options when reading first source
lastRevOnly = false
limitToNS = false
cutMeta = false
}
dpw.zOut = zOut
dpw.out = bufio.NewWriter(zOut)
_, err := dpw.out.WriteString("DeltaPacker\nno format URL yet\nno source URL\n\n")
if err != nil {
panic(err)
}
for _, name := range sourceNames {
// baseName is right for both URLs + Windows file paths
baseName := path.Base(filepath.Base(name))
niceOutName := zip.UnzippedName(baseName)
fmt.Fprintln(dpw.out, niceOutName)
}
err = dpw.out.WriteByte('\n')
if err != nil {
panic(err)
}
dpw.out.Flush()
dpw.slots = 100 // really a queue len, not thread count
dpw.taskCh = make(chan *DiffTask, dpw.slots)
for workerNum := 0; workerNum < runtime.NumCPU(); workerNum++ {
go doDiffTasks(dpw.taskCh)
}
dpw.tasks = make([]DiffTask, dpw.slots)
for i := range dpw.tasks {
t := &dpw.tasks[i]
t.s.Out = &bytes.Buffer{}
t.done = make(chan int, 1)
t.done <- 1
}
return
}
// a DiffTask wraps a MatchState with channel bookkeeping
func (t *DiffTask) Diff() { // really SegmentTask but arh
bOrig := t.s.B // is truncated by Diff
t.source.Write(t.s.Out)
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(t.s.A))
t.s.Diff()
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(bOrig))
select {
case t.done <- 1:
return
default:
panic("same difftask being used twice!")
}
}
func doDiffTasks(tc chan *DiffTask) {
for t := range tc {
t.Diff()
}
}
func (dpw *DPWriter) WriteSegment() bool {
// find the matching texts
b := dpw.sources[0]
a := dpw.sources[1:]
source := sref.SourceNotFound
aText := []byte(nil)
bText, key, _, revFetchErr := b.ReadNext()
if revFetchErr != nil && revFetchErr != io.EOF {
panic(revFetchErr)
}
for _, src := range a {
err := error(nil)
aText, _, source, err = src.ReadTo(key)
if err != nil && err != io.EOF {
panic(err)
}
if len(aText) > 0 {
break
}
}
// write something out
if source.Length > MaxSourceLength {
source = sref.SourceNotFound
aText = nil
}
t := &dpw.tasks[dpw.winner%dpw.slots]
<-t.done
_, err := t.s.Out.WriteTo(dpw.out)
if err != nil {
panic("failed to write output: " + err.Error())
}
t.source = source
t.s.A = append(t.s.A[:0], aText...)
t.s.B = append(t.s.B[:0], bText...)
t.s.Out.Reset()
dpw.taskCh <- t
dpw.winner++
if revFetchErr == io.EOF {
return false
}
return true
}
func (dpw *DPWriter) Close() {
for i := range dpw.tasks { // heh, we have to use i
t := &dpw.tasks[(dpw.winner+i)%dpw.slots]
<-t.done
t.s.Out.WriteTo(dpw.out)
}
close(dpw.taskCh)
sref.EOFMarker.Write(dpw.out)
dpw.out.Flush()
if dpw.zOut != nil {
dpw.zOut.Close()
}
for _, sr := range dpw.sources {
sr.Close()
}
//fmt.Println("Packed successfully")
}
func readLineOrPanic(in *bufio.Reader) string |
var safeFilenamePat *regexp.Regexp
const safeFilenameStr = "^[-a-zA-Z0-9_.]*$"
func panicOnUnsafeName(filename string) string {
if safeFilenamePat == nil {
safeFilenamePat = regexp.MustCompile(safeFilenameStr)
}
if !safeFilenamePat.MatchString(filename) {
panic(fmt.Sprint("unsafe filename: ", filename))
}
return filename
}
func NewReader(in io.Reader, workingDir *os.File, streaming bool) (dpr DPReader) {
dpr.in = bufio.NewReader(in)
formatName := readLineOrPanic(dpr.in)
expectedFormatName := "DeltaPacker"
badFormat := false
if formatName != expectedFormatName {
badFormat = true
}
formatUrl := readLineOrPanic(dpr.in)
if formatUrl != "no format URL yet" {
if formatUrl[:4] == "http" {
panic("Format has been updated. Go to " + formatUrl + " for an updated version of this utility.")
}
badFormat = true
}
if badFormat {
panic("Didn't see the expected format name in the header. Either the input isn't actually a dltp file or the format has changed you need to download a newer version of this tool.")
}
sourceUrl := readLineOrPanic | {
line, err := in.ReadString('\n')
if err != nil {
if err == io.EOF {
panic("Premature EOF reading line")
} else {
panic(err)
}
}
if len(line) > 0 {
return line[:len(line)-1] // chop off \n
}
return line
} | identifier_body |
dpfile.go | () {} // turn XML into diffs until you run out
dpw.Close() // write the end marker and flush output
dpr := dpfile.NewReader(in, workingDir, streaming) // streaming=true when reading from stdin
for dpr.ReadSegment() {} // turn diffs into XML as long as you can
dpr.Close() // wrap up
The writer implementation does some paperwork to run multiple DiffTasks at once.
There are some vestiges of support for diffing successive revs of a page
against each other (e.g., in an incremental file). This would be nice to revive
but isn't that close now.
Some potential format changes, some breaking, some not:
- append an "index" after the end marker mapping either SegmentKeys or byte ranges
to where their diffs start in the file (non-breaking)
- replace the placeholder source URL with the URL of a "manifest" file listing
everything available from the source
- replace the source names with URLs, and add (likely tab-separated) other data like
size/timestamp/crypto and non-crypto checksums
- any crypto checksum might be a tweaked mode we can run in parallel, e.g., break
input into 64kb pages then hash the hashes
- breaking, but could edit format now to make it a TSV table so it'd be sort of
nonbreaking
*/
// 386: individual values (segment lengths) need to be <2GB because of the ints
// here
func writeVarint(w io.Writer, val int) {
var encBuf [10]byte
i := binary.PutVarint(encBuf[:], int64(val))
_, err := w.Write(encBuf[:i])
if err != nil {
panic("failed to write number (varint)")
}
}
func writeUvarint(w io.Writer, val int) {
var encBuf [10]byte
i := binary.PutUvarint(encBuf[:], uint64(val))
_, err := w.Write(encBuf[:i])
if err != nil {
panic("failed to write number (uvarint)")
}
}
type checksum uint32
func dpchecksum(text []byte) checksum {
h := fnv.New32a()
h.Write(text)
return checksum(h.Sum32())
}
type DiffTask struct {
s diff.MatchState
source sref.SourceRef
resultBuf []byte
done chan int
}
type DPBlock struct {
Key mwxmlchunk.SegmentKey
Offs int64
}
type DPBlocks []DPBlock
type DPWriter struct {
out *bufio.Writer
zOut io.WriteCloser
sources []*mwxmlchunk.SegmentReader
lastSeg []byte
tasks []DiffTask
blocks DPBlocks
taskCh chan *DiffTask
slots int
winner int
}
type DPReader struct {
in *bufio.Reader
out *bufio.Writer
sources []io.ReaderAt
lastSeg []byte
ChangeDump bool
}
var MaxSourceLength = uint64(1e8)
func NewWriter(zOut io.WriteCloser, workingDir *os.File, sourceNames []string, lastRevOnly bool, limitToNS bool, ns int, cutMeta bool) (dpw DPWriter) {
for i, name := range sourceNames {
r, err := zip.Open(name, workingDir)
if err != nil {
panic("cannot open source: " + err.Error())
}
f := stream.NewReaderAt(r)
dpw.sources = append(
dpw.sources,
mwxmlchunk.NewSegmentReader(f, int64(i), lastRevOnly, limitToNS, ns, cutMeta),
)
// only use snipping options when reading first source
lastRevOnly = false
limitToNS = false
cutMeta = false
}
dpw.zOut = zOut
dpw.out = bufio.NewWriter(zOut)
_, err := dpw.out.WriteString("DeltaPacker\nno format URL yet\nno source URL\n\n")
if err != nil {
panic(err)
}
for _, name := range sourceNames {
// baseName is right for both URLs + Windows file paths
baseName := path.Base(filepath.Base(name))
niceOutName := zip.UnzippedName(baseName)
fmt.Fprintln(dpw.out, niceOutName)
}
err = dpw.out.WriteByte('\n')
if err != nil {
panic(err)
}
dpw.out.Flush()
dpw.slots = 100 // really a queue len, not thread count
dpw.taskCh = make(chan *DiffTask, dpw.slots)
for workerNum := 0; workerNum < runtime.NumCPU(); workerNum++ {
go doDiffTasks(dpw.taskCh)
}
dpw.tasks = make([]DiffTask, dpw.slots)
for i := range dpw.tasks {
t := &dpw.tasks[i]
t.s.Out = &bytes.Buffer{}
t.done = make(chan int, 1)
t.done <- 1
}
return
}
// a DiffTask wraps a MatchState with channel bookkeeping
func (t *DiffTask) Diff() { // really a SegmentTask, but argh
bOrig := t.s.B // is truncated by Diff
t.source.Write(t.s.Out)
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(t.s.A))
t.s.Diff()
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(bOrig))
select {
case t.done <- 1:
return
default:
panic("same difftask being used twice!")
}
}
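// For each segment, Diff leaves one self-contained record in t.s.Out: the
// SourceRef, an FNV-1a checksum of the source text (A), the binary diff
// itself, and an FNV-1a checksum of the target text (B). The reader side
// uses the two checksums to tell a wrong source file apart from a patching
// bug.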
func doDiffTasks(tc chan *DiffTask) {
for t := range tc {
t.Diff()
}
}
func (dpw *DPWriter) WriteSegment() bool {
// find the matching texts
b := dpw.sources[0]
a := dpw.sources[1:]
source := sref.SourceNotFound
aText := []byte(nil)
bText, key, _, revFetchErr := b.ReadNext()
if revFetchErr != nil && revFetchErr != io.EOF {
panic(revFetchErr)
}
for _, src := range a {
err := error(nil)
aText, _, source, err = src.ReadTo(key)
if err != nil && err != io.EOF {
panic(err)
}
if len(aText) > 0 {
break
}
}
// write something out
if source.Length > MaxSourceLength {
source = sref.SourceNotFound
aText = nil
}
t := &dpw.tasks[dpw.winner%dpw.slots]
<-t.done
_, err := t.s.Out.WriteTo(dpw.out)
if err != nil {
panic("failed to write output: " + err.Error())
}
t.source = source
t.s.A = append(t.s.A[:0], aText...)
t.s.B = append(t.s.B[:0], bText...)
t.s.Out.Reset()
dpw.taskCh <- t
dpw.winner++
if revFetchErr == io.EOF {
return false
}
return true
}
func (dpw *DPWriter) Close() {
for i := range dpw.tasks { // the index is needed: drain slots in order, starting from the current winner
t := &dpw.tasks[(dpw.winner+i)%dpw.slots]
<-t.done
t.s.Out.WriteTo(dpw.out)
}
close(dpw.taskCh)
sref.EOFMarker.Write(dpw.out)
dpw.out.Flush()
if dpw.zOut != nil {
dpw.zOut.Close()
}
for _, sr := range dpw.sources {
sr.Close()
}
//fmt.Println("Packed successfully")
}
func readLineOrPanic(in *bufio.Reader) string {
line, err := in.ReadString('\n')
if err != nil {
if err == io.EOF {
panic("Premature EOF reading line")
} else {
panic(err)
}
}
if len(line) > 0 {
return line[:len(line)-1] // chop off \n
}
return line
}
var safeFilenamePat *regexp.Regexp
const safeFilenameStr = "^[-a-zA-Z0-9_.]*$"
func panicOnUnsafeName(filename string) string {
if safeFilenamePat == nil {
safeFilenamePat = regexp.MustCompile(safeFilenameStr)
}
if !safeFilenamePat.MatchString(filename) {
panic(fmt.Sprint("unsafe filename: ", filename))
}
return filename
}
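// Illustrative examples (not exhaustive): "enwiki-latest.xml.bz2" passes the
// pattern above, while "../secret" or "dir/file" panic because '/' is not in
// the allowed character set; this keeps file names read from the header from
// escaping the working directory.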
func NewReader(in io.Reader, workingDir *os.File, streaming bool) (dpr DPReader) {
dpr.in = bufio.NewReader(in)
formatName := readLineOrPanic(dpr.in)
expectedFormatName := "DeltaPacker"
badFormat := false
if formatName != expectedFormatName {
badFormat = true
}
formatUrl := readLineOrPanic(dpr.in)
if formatUrl != "no format URL yet" {
if formatUrl[:4] == "http" {
panic("Format has been updated. Go to " + formatUrl + " for an updated version of this utility.")
} |
if badFormat {
panic("Didn't see the expected format name in the header. Either the input isn't actually a dltp file or the format has changed you need to download a newer version of this tool.")
}
sourceUrl := readLineOrPanic(d | badFormat = true
} | random_line_split |
dpfile.go | Packer\nno format URL yet\nno source URL\n\n")
if err != nil {
panic(err)
}
for _, name := range sourceNames {
// baseName is right for both URLs + Windows file paths
baseName := path.Base(filepath.Base(name))
niceOutName := zip.UnzippedName(baseName)
fmt.Fprintln(dpw.out, niceOutName)
}
err = dpw.out.WriteByte('\n')
if err != nil {
panic(err)
}
dpw.out.Flush()
dpw.slots = 100 // really a queue len, not thread count
dpw.taskCh = make(chan *DiffTask, dpw.slots)
for workerNum := 0; workerNum < runtime.NumCPU(); workerNum++ {
go doDiffTasks(dpw.taskCh)
}
dpw.tasks = make([]DiffTask, dpw.slots)
for i := range dpw.tasks {
t := &dpw.tasks[i]
t.s.Out = &bytes.Buffer{}
t.done = make(chan int, 1)
t.done <- 1
}
return
}
// a DiffTask wraps a MatchState with channel bookkeeping
func (t *DiffTask) Diff() { // really a SegmentTask, but argh
bOrig := t.s.B // is truncated by Diff
t.source.Write(t.s.Out)
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(t.s.A))
t.s.Diff()
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(bOrig))
select {
case t.done <- 1:
return
default:
panic("same difftask being used twice!")
}
}
func doDiffTasks(tc chan *DiffTask) {
for t := range tc {
t.Diff()
}
}
func (dpw *DPWriter) WriteSegment() bool {
// find the matching texts
b := dpw.sources[0]
a := dpw.sources[1:]
source := sref.SourceNotFound
aText := []byte(nil)
bText, key, _, revFetchErr := b.ReadNext()
if revFetchErr != nil && revFetchErr != io.EOF {
panic(revFetchErr)
}
for _, src := range a {
err := error(nil)
aText, _, source, err = src.ReadTo(key)
if err != nil && err != io.EOF {
panic(err)
}
if len(aText) > 0 {
break
}
}
// write something out
if source.Length > MaxSourceLength {
source = sref.SourceNotFound
aText = nil
}
t := &dpw.tasks[dpw.winner%dpw.slots]
<-t.done
_, err := t.s.Out.WriteTo(dpw.out)
if err != nil {
panic("failed to write output: " + err.Error())
}
t.source = source
t.s.A = append(t.s.A[:0], aText...)
t.s.B = append(t.s.B[:0], bText...)
t.s.Out.Reset()
dpw.taskCh <- t
dpw.winner++
if revFetchErr == io.EOF {
return false
}
return true
}
func (dpw *DPWriter) Close() {
for i := range dpw.tasks { // the index is needed: drain slots in order, starting from the current winner
t := &dpw.tasks[(dpw.winner+i)%dpw.slots]
<-t.done
t.s.Out.WriteTo(dpw.out)
}
close(dpw.taskCh)
sref.EOFMarker.Write(dpw.out)
dpw.out.Flush()
if dpw.zOut != nil {
dpw.zOut.Close()
}
for _, sr := range dpw.sources {
sr.Close()
}
//fmt.Println("Packed successfully")
}
func readLineOrPanic(in *bufio.Reader) string {
line, err := in.ReadString('\n')
if err != nil {
if err == io.EOF {
panic("Premature EOF reading line")
} else {
panic(err)
}
}
if len(line) > 0 {
return line[:len(line)-1] // chop off \n
}
return line
}
var safeFilenamePat *regexp.Regexp
const safeFilenameStr = "^[-a-zA-Z0-9_.]*$"
func panicOnUnsafeName(filename string) string {
if safeFilenamePat == nil {
safeFilenamePat = regexp.MustCompile(safeFilenameStr)
}
if !safeFilenamePat.MatchString(filename) {
panic(fmt.Sprint("unsafe filename: ", filename))
}
return filename
}
func NewReader(in io.Reader, workingDir *os.File, streaming bool) (dpr DPReader) {
dpr.in = bufio.NewReader(in)
formatName := readLineOrPanic(dpr.in)
expectedFormatName := "DeltaPacker"
badFormat := false
if formatName != expectedFormatName {
badFormat = true
}
formatUrl := readLineOrPanic(dpr.in)
if formatUrl != "no format URL yet" {
if formatUrl[:4] == "http" {
panic("Format has been updated. Go to " + formatUrl + " for an updated version of this utility.")
}
badFormat = true
}
if badFormat {
panic("Didn't see the expected format name in the header. Either the input isn't actually a dltp file or the format has changed you need to download a newer version of this tool.")
}
sourceUrl := readLineOrPanic(dpr.in) // discard source URL
if sourceUrl == "" {
panic("Expected a non-blank source URL line")
}
expectedBlank := readLineOrPanic(dpr.in)
if expectedBlank != "" {
panic("Expected a blank line after source URL")
}
// open the first source, a.k.a. the output, for writing:
dirName := workingDir.Name()
outputName := panicOnUnsafeName(readLineOrPanic(dpr.in))
outputPath := path.Join(dirName, outputName)
var outFile *os.File
var err error
if streaming {
outFile = os.Stdout
} else {
outFile, err = os.Create(outputPath)
if err != nil {
panic("cannot create output")
}
}
dpr.out = bufio.NewWriter(outFile)
// open all sources for reading, including the output
for sourceName := outputName; sourceName != ""; sourceName = panicOnUnsafeName(readLineOrPanic(dpr.in)) {
if streaming && sourceName == outputName {
dpr.sources = append(dpr.sources, nil) // don't read from me!
continue
}
sourcePath := path.Join(dirName, sourceName)
zipReader, err := zip.Open(sourcePath, workingDir)
if err != nil {
panic("could not open source " + sourceName + ": " + err.Error())
}
dpr.sources = append(dpr.sources, zipReader)
}
if len(dpr.sources) < 2 {
panic("Need at least one source besides the output")
}
// we've read the blank line so we're ready for business
return
}
var readBuf []byte // not parallel-safe, but reading isn't threaded
func (dpr *DPReader) ReadSegment() bool { // writes to self.out
source := sref.ReadSource(dpr.in)
if source == sref.EOFMarker {
if dpr.ChangeDump {
_, err := dpr.out.Write(dpr.lastSeg)
if err != nil {
panic("couldn't write expanded file")
}
}
return false
}
if source.Length > MaxSourceLength {
//fmt.Println("Max source len set to", MaxSourceLength)
panic("input file (segment) using too large a source")
}
readBuf = alloc.Bytes(readBuf, int(source.Length))
orig := readBuf
// TODO: validate source number, start, and length here
if source == sref.PreviousSegment {
panic("segment chaining not implemented")
} else if source != sref.SourceNotFound {
if int(source.SourceNumber) >= len(dpr.sources) {
panic("too-high source number provided")
}
srcFile := dpr.sources[source.SourceNumber]
_, err := srcFile.ReadAt(orig, int64(source.Start))
if err != nil {
//fmt.Println("error reading from source", source)
panic(err)
}
}
var sourceCksum checksum
err := binary.Read(dpr.in, binary.BigEndian, &sourceCksum)
if err != nil {
panic("couldn't read expected checksum")
}
text := diff.Patch(orig, dpr.in)
cksum := dpchecksum(text)
var fileCksum checksum
err = binary.Read(dpr.in, binary.BigEndian, &fileCksum)
if err != nil {
panic("couldn't read expected checksum")
}
if cksum != fileCksum {
origCksum := dpchecksum(orig)
panicMsg := ""
if origCksum == sourceCksum | {
if sourceCksum == 0 { // no source checksum
panicMsg = "checksum mismatch. it's possible you don't have the original file this diff was created against, or it could be a bug in dltp."
} else {
panicMsg = "sorry; it looks like source file you have isn't original file this diff was created against."
}
} | conditional_block |
|
dpfile.go | .B // is truncated by Diff
t.source.Write(t.s.Out)
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(t.s.A))
t.s.Diff()
binary.Write(t.s.Out, binary.BigEndian, dpchecksum(bOrig))
select {
case t.done <- 1:
return
default:
panic("same difftask being used twice!")
}
}
func doDiffTasks(tc chan *DiffTask) {
for t := range tc {
t.Diff()
}
}
func (dpw *DPWriter) WriteSegment() bool {
// find the matching texts
b := dpw.sources[0]
a := dpw.sources[1:]
source := sref.SourceNotFound
aText := []byte(nil)
bText, key, _, revFetchErr := b.ReadNext()
if revFetchErr != nil && revFetchErr != io.EOF {
panic(revFetchErr)
}
for _, src := range a {
err := error(nil)
aText, _, source, err = src.ReadTo(key)
if err != nil && err != io.EOF {
panic(err)
}
if len(aText) > 0 {
break
}
}
// write something out
if source.Length > MaxSourceLength {
source = sref.SourceNotFound
aText = nil
}
t := &dpw.tasks[dpw.winner%dpw.slots]
<-t.done
_, err := t.s.Out.WriteTo(dpw.out)
if err != nil {
panic("failed to write output: " + err.Error())
}
t.source = source
t.s.A = append(t.s.A[:0], aText...)
t.s.B = append(t.s.B[:0], bText...)
t.s.Out.Reset()
dpw.taskCh <- t
dpw.winner++
if revFetchErr == io.EOF {
return false
}
return true
}
func (dpw *DPWriter) Close() {
for i := range dpw.tasks { // the index is needed: drain slots in order, starting from the current winner
t := &dpw.tasks[(dpw.winner+i)%dpw.slots]
<-t.done
t.s.Out.WriteTo(dpw.out)
}
close(dpw.taskCh)
sref.EOFMarker.Write(dpw.out)
dpw.out.Flush()
if dpw.zOut != nil {
dpw.zOut.Close()
}
for _, sr := range dpw.sources {
sr.Close()
}
//fmt.Println("Packed successfully")
}
func readLineOrPanic(in *bufio.Reader) string {
line, err := in.ReadString('\n')
if err != nil {
if err == io.EOF {
panic("Premature EOF reading line")
} else {
panic(err)
}
}
if len(line) > 0 {
return line[:len(line)-1] // chop off \n
}
return line
}
var safeFilenamePat *regexp.Regexp
const safeFilenameStr = "^[-a-zA-Z0-9_.]*$"
func panicOnUnsafeName(filename string) string {
if safeFilenamePat == nil {
safeFilenamePat = regexp.MustCompile(safeFilenameStr)
}
if !safeFilenamePat.MatchString(filename) {
panic(fmt.Sprint("unsafe filename: ", filename))
}
return filename
}
func NewReader(in io.Reader, workingDir *os.File, streaming bool) (dpr DPReader) {
dpr.in = bufio.NewReader(in)
formatName := readLineOrPanic(dpr.in)
expectedFormatName := "DeltaPacker"
badFormat := false
if formatName != expectedFormatName {
badFormat = true
}
formatUrl := readLineOrPanic(dpr.in)
if formatUrl != "no format URL yet" {
if formatUrl[:4] == "http" {
panic("Format has been updated. Go to " + formatUrl + " for an updated version of this utility.")
}
badFormat = true
}
if badFormat {
panic("Didn't see the expected format name in the header. Either the input isn't actually a dltp file or the format has changed you need to download a newer version of this tool.")
}
sourceUrl := readLineOrPanic(dpr.in) // discard source URL
if sourceUrl == "" {
panic("Expected a non-blank source URL line")
}
expectedBlank := readLineOrPanic(dpr.in)
if expectedBlank != "" {
panic("Expected a blank line after source URL")
}
// open the first source, a.k.a. the output, for writing:
dirName := workingDir.Name()
outputName := panicOnUnsafeName(readLineOrPanic(dpr.in))
outputPath := path.Join(dirName, outputName)
var outFile *os.File
var err error
if streaming {
outFile = os.Stdout
} else {
outFile, err = os.Create(outputPath)
if err != nil {
panic("cannot create output")
}
}
dpr.out = bufio.NewWriter(outFile)
// open all sources for reading, including the output
for sourceName := outputName; sourceName != ""; sourceName = panicOnUnsafeName(readLineOrPanic(dpr.in)) {
if streaming && sourceName == outputName {
dpr.sources = append(dpr.sources, nil) // don't read from me!
continue
}
sourcePath := path.Join(dirName, sourceName)
zipReader, err := zip.Open(sourcePath, workingDir)
if err != nil {
panic("could not open source " + sourceName + ": " + err.Error())
}
dpr.sources = append(dpr.sources, zipReader)
}
if len(dpr.sources) < 2 {
panic("Need at least one source besides the output")
}
// we've read the blank line so we're ready for business
return
}
var readBuf []byte // not parallel-safe, but reading isn't threaded
func (dpr *DPReader) ReadSegment() bool { // writes to self.out
source := sref.ReadSource(dpr.in)
if source == sref.EOFMarker {
if dpr.ChangeDump {
_, err := dpr.out.Write(dpr.lastSeg)
if err != nil {
panic("couldn't write expanded file")
}
}
return false
}
if source.Length > MaxSourceLength {
//fmt.Println("Max source len set to", MaxSourceLength)
panic("input file (segment) using too large a source")
}
readBuf = alloc.Bytes(readBuf, int(source.Length))
orig := readBuf
// TODO: validate source number, start, and length here
if source == sref.PreviousSegment {
panic("segment chaining not implemented")
} else if source != sref.SourceNotFound {
if int(source.SourceNumber) >= len(dpr.sources) {
panic("too-high source number provided")
}
srcFile := dpr.sources[source.SourceNumber]
_, err := srcFile.ReadAt(orig, int64(source.Start))
if err != nil {
//fmt.Println("error reading from source", source)
panic(err)
}
}
var sourceCksum checksum
err := binary.Read(dpr.in, binary.BigEndian, &sourceCksum)
if err != nil {
panic("couldn't read expected checksum")
}
text := diff.Patch(orig, dpr.in)
cksum := dpchecksum(text)
var fileCksum checksum
err = binary.Read(dpr.in, binary.BigEndian, &fileCksum)
if err != nil {
panic("couldn't read expected checksum")
}
if cksum != fileCksum {
origCksum := dpchecksum(orig)
panicMsg := ""
if origCksum == sourceCksum {
if sourceCksum == 0 { // no source checksum
panicMsg = "checksum mismatch. it's possible you don't have the original file this diff was created against, or it could be a bug in dltp."
} else {
panicMsg = "sorry; it looks like source file you have isn't original file this diff was created against."
}
} else {
panicMsg = "checksum mismatch. this looks likely to be a bug in dltp."
}
os.Remove("dltp-error-report.txt")
crashReport, err := os.Create("dltp-error-report.txt")
if err == nil {
// wish filenames etc were available here
fmt.Fprintln(crashReport, panicMsg)
fmt.Fprintln(crashReport, "SourceRef:", source)
crashReport.WriteString("Original text:\n\n")
crashReport.Write(orig)
crashReport.WriteString("\n\nPatched output:\n\n")
crashReport.Write(text)
crashReport.Close()
panicMsg += " wrote additional information to dltp-error-report.txt"
} else {
panicMsg += " couldn't write additional information (" + err.Error() + ")"
}
panic(panicMsg)
}
// write if not ChangeDump or if changed or if this is preamble
if !dpr.ChangeDump || !bytes.Equal(text, orig) || dpr.lastSeg == nil {
_, err := dpr.out.Write(text)
if err != nil {
panic("couldn't write expanded file")
}
}
dpr.lastSeg = text
return true
}
func (dpr *DPReader) | Close | identifier_name |
|
main.rs | struct Args {
toml_config: String,
print_queue: bool,
print_total_cov: bool,
skip_deterministic: bool,
skip_non_deterministic: bool,
random: bool,
input_directory: Option<String>,
output_directory: String,
test_mode: bool,
jqf: analysis::JQFLevel,
fuzz_server_id: String,
seed_cycles: usize,
}
fn main() {
let matches = App::new(APP_NAME).version(VERSION).author(AUTHOR)
.about("AFL-style fuzzer specialized for fuzzing RTL circuits.")
.version_short("v")
.arg(Arg::with_name("TOML")
.help("TOML file describing the circuit being fuzzed")
.required(true).index(1))
.arg(Arg::with_name("print_queue")
.long("print-queue").short("q")
.help("Prints queue content at the end of a fuzzing run."))
.arg(Arg::with_name("print_total_cov")
.long("print-total-cov").short("c")
.help("Prints the union coverage at the end of a fuzzing run."))
.arg(Arg::with_name("skip_deterministic")
.long("skip-deterministic").short("d")
.help("Skip all deterministic mutation strategies."))
.arg(Arg::with_name("skip_non_deterministic")
.long("skip-non-deterministic").short("n")
.help("Skip all non-deterministic mutation strategies."))
.arg(Arg::with_name("random")
.long("random").short("r")
.help("Generate independent random inputs instead of using the fuzzing algorithm."))
.arg(Arg::with_name("jqf_level")
.long("jqf-level").short("j")
.help("Select which level of JQF to apply.")
.takes_value(true).possible_values(&["0", "1", "2"])
.default_value("2"))
.arg(Arg::with_name("seed_cycles")
.long("seed-cycles")
.help("The starting seed consits of all zeros for N cycles.")
.takes_value(true)
.default_value("5"))
.arg(Arg::with_name("input_directory")
.long("input-directory").short("i").value_name("DIR")
.takes_value(true)
.help("The output directory of a previous run from which to resume."))
.arg(Arg::with_name("output_directory")
.long("output-directory").short("o").value_name("DIR")
.takes_value(true)
.help("Used to log this session. Must be empty!")
.required(true))
.arg(Arg::with_name("test_mode")
.long("test-mode").short("t")
.help("Test the fuzz server with known input/coverage pairs."))
.arg(Arg::with_name("fuzz_server_id")
.long("server-id").short("s")
.help("The id of the fuzz server isntance to connect to.")
.takes_value(true).default_value("0"))
.get_matches();
let args = Args {
toml_config: matches.value_of("TOML").unwrap().to_string(),
print_queue: matches.is_present("print_queue"),
print_total_cov: matches.is_present("print_total_cov"),
skip_deterministic: matches.is_present("skip_deterministic"),
skip_non_deterministic: matches.is_present("skip_non_deterministic"),
random: matches.is_present("random"),
input_directory: matches.value_of("input_directory").map(|s| s.to_string()),
output_directory: matches.value_of("output_directory").unwrap().to_string(),
test_mode: matches.is_present("test_mode"),
jqf: analysis::JQFLevel::from_arg(matches.value_of("jqf_level").unwrap()),
fuzz_server_id: matches.value_of("fuzz_server_id").unwrap().to_string(),
seed_cycles: matches.value_of("seed_cycles").unwrap().parse::<usize>().unwrap(),
};
// "Ctrl + C" handling
let canceled = Arc::new(AtomicBool::new(false));
let c = canceled.clone();
ctrlc::set_handler(move || { c.store(true, Ordering::SeqCst); })
.expect("Error setting Ctrl-C handler");
// load test config
let config = config::Config::from_file(WORD_SIZE, &args.toml_config);
let test_size = config.get_test_size();
config.print_header();
// test runner
let srv_config = BufferedFuzzServerConfig {
test_size : test_size,
max_cycles : 200,
test_buffer_size : 64 * 1024 * 16,
coverage_buffer_size : 64 * 1024 * 16,
buffer_count: 3,
max_runs: 1024 * 16,
};
println!("Test Buffer: {} KiB", srv_config.test_buffer_size / 1024);
println!("Coverage Buffer: {} KiB", srv_config.coverage_buffer_size / 1024);
println!("Max Inputs: {}", srv_config.test_buffer_size / 16 / 3);
let server_dir = format!("{}/{}", FPGA_DIR, args.fuzz_server_id);
let mut server = find_one_fuzz_server(&server_dir, srv_config).expect("failed to find a fuzz server");
if args.test_mode {
test_mode(&mut server, &config);
} else {
fuzzer(args, canceled, config, test_size, &mut server);
}
}
fn test_mode(server: &mut FuzzServer, config: &config::Config) {
println!("⚠️ Test mode selected! ⚠️");
test::test_fuzz_server(server, config);
}
fn fuzzer(args: Args, canceled: Arc<AtomicBool>, config: config::Config,
test_size: run::TestSize, server: &mut FuzzServer) {
// starting seed
let start_cycles = args.seed_cycles;
let starting_seed = vec![0u8; test_size.input * start_cycles];
// analysis
let ranges = config.gen_ranges();
//println!("ranges:]\n{:?}", ranges);
let mut analysis = analysis::Analysis::new(test_size, ranges, args.jqf);
let seed_coverage = fuzz_one(server, &starting_seed);
let seed_analysis_res = analysis.run(start_cycles as u16, &seed_coverage);
if seed_analysis_res.is_invalid {
println!("❌ Invalid seed!");
}
if !seed_analysis_res.is_interesting {
println!("⚠️ Uninteresting seed! Might be 🐟!");
}
// TODO: support multiple seeds
// mutation
let mut_config = mutation::MutationScheduleConfig {
skip_deterministic: args.skip_deterministic,
skip_non_deterministic: args.skip_non_deterministic,
independent_random: args.random,
};
if mut_config.independent_random {
println!("⚠️ Mutation disabled. Generating independent random inputs! ⚠️");
}
let mutations = mutation::MutationSchedule::initialize(mut_config, test_size, config.get_inputs());
// statistics
let start_ts = get_time();
let mut statistics = stats::Stats::new(mutations.get_names(), start_ts.clone(), analysis.get_bitmap());
// queue
let mut q = queue::Queue::create(
&args.output_directory,
&starting_seed,
seed_analysis_res.new_cov,
!seed_analysis_res.is_invalid,
start_ts,
config.to_json(),
statistics.take_snapshot(),
&seed_coverage);
let max_entries = 1_000_000;
let max_children = 100_000; // TODO: better mechanism to determine length of the havoc stage
println!("fuzzing a maximum of {} queue entries", max_entries);
for _ in 0..max_entries {
let active_test = q.get_next_test();
let mut history = active_test.mutation_history;
let mut new_runs : u64 = 0;
q.print_entry_summary(active_test.id, &mutations);
while let Some(mut mutator) = mutations.get_mutator(&mut history, &active_test.inputs) {
// println!("running {} mutation", mutations.get_name(mutator.id()));
let mut done = false;
let mut start = 0;
while !done {
match server.run(&mut mutator, start) {
Run::Done(runs, cycles) => {
statistics.update_test_count(mutator.id().id, runs as u64, cycles);
new_runs += runs as u64;
done = true;
}
Run::Yield(ii) => { start = ii; }
}
while let Some(feedback) = server.pop_coverage() {
let rr = analysis.run(feedback.cycles, &feedback.data);
if rr.is_interesting {
if rr.is_invalid | s >= max_children { break; }
}
q.return_test(active_test.id, history);
| { println!("invalid input...."); }
let (info, interesting_input) = server.get_info(feedback.id);
let now = get_time();
statistics.update_new_discovery(info.mutator.id, now, analysis.get_bitmap());
q.add_new_test(interesting_input, info, rr.new_cov,
!rr.is_invalid, now,
statistics.take_snapshot(), &feedback.data);
}
}
}
if new_run | conditional_block |
main.rs | ::with_name("input_directory")
.long("input-directory").short("i").value_name("DIR")
.takes_value(true)
.help("The output directory of a previous run from which to resume."))
.arg(Arg::with_name("output_directory")
.long("output-directory").short("o").value_name("DIR")
.takes_value(true)
.help("Used to log this session. Must be empty!")
.required(true))
.arg(Arg::with_name("test_mode")
.long("test-mode").short("t")
.help("Test the fuzz server with known input/coverage pairs."))
.arg(Arg::with_name("fuzz_server_id")
.long("server-id").short("s")
.help("The id of the fuzz server isntance to connect to.")
.takes_value(true).default_value("0"))
.get_matches();
let args = Args {
toml_config: matches.value_of("TOML").unwrap().to_string(),
print_queue: matches.is_present("print_queue"),
print_total_cov: matches.is_present("print_total_cov"),
skip_deterministic: matches.is_present("skip_deterministic"),
skip_non_deterministic: matches.is_present("skip_non_deterministic"),
random: matches.is_present("random"),
input_directory: matches.value_of("input_directory").map(|s| s.to_string()),
output_directory: matches.value_of("output_directory").unwrap().to_string(),
test_mode: matches.is_present("test_mode"),
jqf: analysis::JQFLevel::from_arg(matches.value_of("jqf_level").unwrap()),
fuzz_server_id: matches.value_of("fuzz_server_id").unwrap().to_string(),
seed_cycles: matches.value_of("seed_cycles").unwrap().parse::<usize>().unwrap(),
};
// "Ctrl + C" handling
let canceled = Arc::new(AtomicBool::new(false));
let c = canceled.clone();
ctrlc::set_handler(move || { c.store(true, Ordering::SeqCst); })
.expect("Error setting Ctrl-C handler");
// load test config
let config = config::Config::from_file(WORD_SIZE, &args.toml_config);
let test_size = config.get_test_size();
config.print_header();
// test runner
let srv_config = BufferedFuzzServerConfig {
test_size : test_size,
max_cycles : 200,
test_buffer_size : 64 * 1024 * 16,
coverage_buffer_size : 64 * 1024 * 16,
buffer_count: 3,
max_runs: 1024 * 16,
};
println!("Test Buffer: {} KiB", srv_config.test_buffer_size / 1024);
println!("Coverage Buffer: {} KiB", srv_config.coverage_buffer_size / 1024);
println!("Max Inputs: {}", srv_config.test_buffer_size / 16 / 3);
let server_dir = format!("{}/{}", FPGA_DIR, args.fuzz_server_id);
let mut server = find_one_fuzz_server(&server_dir, srv_config).expect("failed to find a fuzz server");
if args.test_mode {
test_mode(&mut server, &config);
} else {
fuzzer(args, canceled, config, test_size, &mut server);
}
}
fn test_mode(server: &mut FuzzServer, config: &config::Config) {
println!("⚠️ Test mode selected! ⚠️");
test::test_fuzz_server(server, config);
}
fn fuzzer(args: Args, canceled: Arc<AtomicBool>, config: config::Config,
test_size: run::TestSize, server: &mut FuzzServer) {
// starting seed
let start_cycles = args.seed_cycles;
let starting_seed = vec![0u8; test_size.input * start_cycles];
// analysis
let ranges = config.gen_ranges();
//println!("ranges:]\n{:?}", ranges);
let mut analysis = analysis::Analysis::new(test_size, ranges, args.jqf);
let seed_coverage = fuzz_one(server, &starting_seed);
let seed_analysis_res = analysis.run(start_cycles as u16, &seed_coverage);
if seed_analysis_res.is_invalid {
println!("❌ Invalid seed!");
}
if !seed_analysis_res.is_interesting {
println!("⚠️ Uninteresting seed! Might be 🐟!");
}
// TODO: support multiple seeds
// mutation
let mut_config = mutation::MutationScheduleConfig {
skip_deterministic: args.skip_deterministic,
skip_non_deterministic: args.skip_non_deterministic,
independent_random: args.random,
};
if mut_config.independent_random {
println!("⚠️ Mutation disabled. Generating independent random inputs! ⚠️");
}
let mutations = mutation::MutationSchedule::initialize(mut_config, test_size, config.get_inputs());
// statistics
let start_ts = get_time();
let mut statistics = stats::Stats::new(mutations.get_names(), start_ts.clone(), analysis.get_bitmap());
// queue
let mut q = queue::Queue::create(
&args.output_directory,
&starting_seed,
seed_analysis_res.new_cov,
!seed_analysis_res.is_invalid,
start_ts,
config.to_json(),
statistics.take_snapshot(),
&seed_coverage);
let max_entries = 1_000_000;
let max_children = 100_000; // TODO: better mechanism to determine length of the havoc stage
println!("fuzzing a maximum of {} queue entries", max_entries);
for _ in 0..max_entries {
let active_test = q.get_next_test();
let mut history = active_test.mutation_history;
let mut new_runs : u64 = 0;
q.print_entry_summary(active_test.id, &mutations);
while let Some(mut mutator) = mutations.get_mutator(&mut history, &active_test.inputs) {
// println!("running {} mutation", mutations.get_name(mutator.id()));
let mut done = false;
let mut start = 0;
while !done {
match server.run(&mut mutator, start) {
Run::Done(runs, cycles) => {
statistics.update_test_count(mutator.id().id, runs as u64, cycles);
new_runs += runs as u64;
done = true;
}
Run::Yield(ii) => { start = ii; }
}
while let Some(feedback) = server.pop_coverage() {
let rr = analysis.run(feedback.cycles, &feedback.data);
if rr.is_interesting {
if rr.is_invalid { println!("invalid input...."); }
let (info, interesting_input) = server.get_info(feedback.id);
let now = get_time();
statistics.update_new_discovery(info.mutator.id, now, analysis.get_bitmap());
q.add_new_test(interesting_input, info, rr.new_cov,
!rr.is_invalid, now,
statistics.take_snapshot(), &feedback.data);
}
}
}
if new_runs >= max_children { break; }
}
q.return_test(active_test.id, history);
q.save_latest(statistics.take_snapshot());
if canceled.load(Ordering::SeqCst) {
println!("User interrupted fuzzing. Going to shut down....");
break;
}
}
server.sync();
while let Some(feedback) = server.pop_coverage() {
let rr = analysis.run(feedback.cycles, &feedback.data);
if rr.is_interesting {
if rr.is_invalid { println!("invalid input...."); }
let (info, interesting_input) = server.get_info(feedback.id);
let now = get_time();
statistics.update_new_discovery(info.mutator.id, now, analysis.get_bitmap());
q.add_new_test(interesting_input, info, rr.new_cov,
!rr.is_invalid, now,
statistics.take_snapshot(), &feedback.data);
}
}
q.save_latest(statistics.take_snapshot());
// done with the main fuzzing part
statistics.done();
// final bitmap
let bitmap = analysis.get_bitmap();
// print inputs from queue
if args.print_queue {
println!("\n");
println!("Formated Inputs and Coverage!");
for entry in q.entries() {
q.print_entry_summary(entry.id, &mutations);
config.print_inputs(&entry.inputs);
println!("Achieved Coverage:");
let coverage = fuzz_one(server, &entry.inputs);
config.print_test_coverage(&coverage);
println!("\n");
}
}
if args.print_total_cov {
println!("Total Coverage:");
config.print_bitmap(&bitmap);
}
// print statistics
print!("{}", statistics.get_final_snapshot().unwrap());
println!("Bitmap: {:?}", bitmap);
}
fn fuzz_one(server: &mut FuzzServer, input: &[u8]) -> Vec<u8> {
let mut mutator = mutation::identity(input);
if let Run::Done(count, _) = server.run(&mut mutator, 0) {
assert_eq!(count, 1);
} else { assert!(false); }
server.sync();
let feedback = server.pop_coverage().expect("should get exactly one coverage back!");
feedback.data.to_vec()
}
fn get_time() -> std::time::Duration {
let raw = time::get_ti | me();
std::time::Duration::new(raw.sec as u64, raw.nsec as u32)
} | identifier_body |
|
main.rs | struct | {
toml_config: String,
print_queue: bool,
print_total_cov: bool,
skip_deterministic: bool,
skip_non_deterministic: bool,
random: bool,
input_directory: Option<String>,
output_directory: String,
test_mode: bool,
jqf: analysis::JQFLevel,
fuzz_server_id: String,
seed_cycles: usize,
}
fn main() {
let matches = App::new(APP_NAME).version(VERSION).author(AUTHOR)
.about("AFL-style fuzzer specialized for fuzzing RTL circuits.")
.version_short("v")
.arg(Arg::with_name("TOML")
.help("TOML file describing the circuit being fuzzed")
.required(true).index(1))
.arg(Arg::with_name("print_queue")
.long("print-queue").short("q")
.help("Prints queue content at the end of a fuzzing run."))
.arg(Arg::with_name("print_total_cov")
.long("print-total-cov").short("c")
.help("Prints the union coverage at the end of a fuzzing run."))
.arg(Arg::with_name("skip_deterministic")
.long("skip-deterministic").short("d")
.help("Skip all deterministic mutation strategies."))
.arg(Arg::with_name("skip_non_deterministic")
.long("skip-non-deterministic").short("n")
.help("Skip all non-deterministic mutation strategies."))
.arg(Arg::with_name("random")
.long("random").short("r")
.help("Generate independent random inputs instead of using the fuzzing algorithm."))
.arg(Arg::with_name("jqf_level")
.long("jqf-level").short("j")
.help("Select which level of JQF to apply.")
.takes_value(true).possible_values(&["0", "1", "2"])
.default_value("2"))
.arg(Arg::with_name("seed_cycles")
.long("seed-cycles")
.help("The starting seed consits of all zeros for N cycles.")
.takes_value(true)
.default_value("5"))
.arg(Arg::with_name("input_directory")
.long("input-directory").short("i").value_name("DIR")
.takes_value(true)
.help("The output directory of a previous run from which to resume."))
.arg(Arg::with_name("output_directory")
.long("output-directory").short("o").value_name("DIR")
.takes_value(true)
.help("Used to log this session. Must be empty!")
.required(true))
.arg(Arg::with_name("test_mode")
.long("test-mode").short("t")
.help("Test the fuzz server with known input/coverage pairs."))
.arg(Arg::with_name("fuzz_server_id")
.long("server-id").short("s")
.help("The id of the fuzz server isntance to connect to.")
.takes_value(true).default_value("0"))
.get_matches();
let args = Args {
toml_config: matches.value_of("TOML").unwrap().to_string(),
print_queue: matches.is_present("print_queue"),
print_total_cov: matches.is_present("print_total_cov"),
skip_deterministic: matches.is_present("skip_deterministic"),
skip_non_deterministic: matches.is_present("skip_non_deterministic"),
random: matches.is_present("random"),
input_directory: matches.value_of("input_directory").map(|s| s.to_string()),
output_directory: matches.value_of("output_directory").unwrap().to_string(),
test_mode: matches.is_present("test_mode"),
jqf: analysis::JQFLevel::from_arg(matches.value_of("jqf_level").unwrap()),
fuzz_server_id: matches.value_of("fuzz_server_id").unwrap().to_string(),
seed_cycles: matches.value_of("seed_cycles").unwrap().parse::<usize>().unwrap(),
};
// "Ctrl + C" handling
let canceled = Arc::new(AtomicBool::new(false));
let c = canceled.clone();
ctrlc::set_handler(move || { c.store(true, Ordering::SeqCst); })
.expect("Error setting Ctrl-C handler");
// load test config
let config = config::Config::from_file(WORD_SIZE, &args.toml_config);
let test_size = config.get_test_size();
config.print_header();
// test runner
let srv_config = BufferedFuzzServerConfig {
test_size : test_size,
max_cycles : 200,
test_buffer_size : 64 * 1024 * 16,
coverage_buffer_size : 64 * 1024 * 16,
buffer_count: 3,
max_runs: 1024 * 16,
};
println!("Test Buffer: {} KiB", srv_config.test_buffer_size / 1024);
println!("Coverage Buffer: {} KiB", srv_config.coverage_buffer_size / 1024);
println!("Max Inputs: {}", srv_config.test_buffer_size / 16 / 3);
let server_dir = format!("{}/{}", FPGA_DIR, args.fuzz_server_id);
let mut server = find_one_fuzz_server(&server_dir, srv_config).expect("failed to find a fuzz server");
if args.test_mode {
test_mode(&mut server, &config);
} else {
fuzzer(args, canceled, config, test_size, &mut server);
}
}
fn test_mode(server: &mut FuzzServer, config: &config::Config) {
println!("⚠️ Test mode selected! ⚠️");
test::test_fuzz_server(server, config);
}
fn fuzzer(args: Args, canceled: Arc<AtomicBool>, config: config::Config,
test_size: run::TestSize, server: &mut FuzzServer) {
// starting seed
let start_cycles = args.seed_cycles;
let starting_seed = vec![0u8; test_size.input * start_cycles];
// analysis
let ranges = config.gen_ranges();
//println!("ranges:]\n{:?}", ranges);
let mut analysis = analysis::Analysis::new(test_size, ranges, args.jqf);
let seed_coverage = fuzz_one(server, &starting_seed);
let seed_analysis_res = analysis.run(start_cycles as u16, &seed_coverage);
if seed_analysis_res.is_invalid {
println!("❌ Invalid seed!");
}
if !seed_analysis_res.is_interesting {
println!("⚠️ Uninteresting seed! Might be 🐟!");
}
// TODO: support multiple seeds
// mutation
let mut_config = mutation::MutationScheduleConfig {
skip_deterministic: args.skip_deterministic,
skip_non_deterministic: args.skip_non_deterministic,
independent_random: args.random,
};
if mut_config.independent_random {
println!("⚠️ Mutation disabled. Generating independent random inputs! ⚠️");
}
let mutations = mutation::MutationSchedule::initialize(mut_config, test_size, config.get_inputs());
// statistics
let start_ts = get_time();
let mut statistics = stats::Stats::new(mutations.get_names(), start_ts.clone(), analysis.get_bitmap());
// queue
let mut q = queue::Queue::create(
&args.output_directory,
&starting_seed,
seed_analysis_res.new_cov,
!seed_analysis_res.is_invalid,
start_ts,
config.to_json(),
statistics.take_snapshot(),
&seed_coverage);
let max_entries = 1_000_000;
let max_children = 100_000; // TODO: better mechanism to determine length of the havoc stage
println!("fuzzing a maximum of {} queue entries", max_entries);
for _ in 0..max_entries {
let active_test = q.get_next_test();
let mut history = active_test.mutation_history;
let mut new_runs : u64 = 0;
q.print_entry_summary(active_test.id, &mutations);
while let Some(mut mutator) = mutations.get_mutator(&mut history, &active_test.inputs) {
// println!("running {} mutation", mutations.get_name(mutator.id()));
let mut done = false;
let mut start = 0;
while !done {
match server.run(&mut mutator, start) {
Run::Done(runs, cycles) => {
statistics.update_test_count(mutator.id().id, runs as u64, cycles);
new_runs += runs as u64;
done = true;
}
Run::Yield(ii) => { start = ii; }
}
while let Some(feedback) = server.pop_coverage() {
let rr = analysis.run(feedback.cycles, &feedback.data);
if rr.is_interesting {
if rr.is_invalid { println!("invalid input...."); }
let (info, interesting_input) = server.get_info(feedback.id);
let now = get_time();
statistics.update_new_discovery(info.mutator.id, now, analysis.get_bitmap());
q.add_new_test(interesting_input, info, rr.new_cov,
!rr.is_invalid, now,
statistics.take_snapshot(), &feedback.data);
}
}
}
if new_runs >= max_children { break; }
}
q.return_test(active_test.id, history);
| Args | identifier_name |
main.rs | struct Args {
toml_config: String,
print_queue: bool,
print_total_cov: bool,
skip_deterministic: bool,
skip_non_deterministic: bool,
random: bool,
input_directory: Option<String>,
output_directory: String,
test_mode: bool,
jqf: analysis::JQFLevel,
fuzz_server_id: String,
seed_cycles: usize,
}
fn main() {
let matches = App::new(APP_NAME).version(VERSION).author(AUTHOR)
.about("AFL-style fuzzer specialized for fuzzing RTL circuits.")
.version_short("v")
.arg(Arg::with_name("TOML")
.help("TOML file describing the circuit being fuzzed")
.required(true).index(1))
.arg(Arg::with_name("print_queue")
.long("print-queue").short("q")
.help("Prints queue content at the end of a fuzzing run."))
.arg(Arg::with_name("print_total_cov")
.long("print-total-cov").short("c")
.help("Prints the union coverage at the end of a fuzzing run."))
.arg(Arg::with_name("skip_deterministic")
.long("skip-deterministic").short("d")
.help("Skip all deterministic mutation strategies."))
.arg(Arg::with_name("skip_non_deterministic")
.long("skip-non-deterministic").short("n")
.help("Skip all non-deterministic mutation strategies."))
.arg(Arg::with_name("random")
.long("random").short("r")
.help("Generate independent random inputs instead of using the fuzzing algorithm."))
.arg(Arg::with_name("jqf_level")
.long("jqf-level").short("j")
.help("Select which level of JQF to apply.")
.takes_value(true).possible_values(&["0", "1", "2"])
.default_value("2"))
.arg(Arg::with_name("seed_cycles")
.long("seed-cycles")
.help("The starting seed consits of all zeros for N cycles.")
.takes_value(true)
.default_value("5"))
.arg(Arg::with_name("input_directory")
.long("input-directory").short("i").value_name("DIR")
.takes_value(true)
.help("The output directory of a previous run from which to resume."))
.arg(Arg::with_name("output_directory")
.long("output-directory").short("o").value_name("DIR")
.takes_value(true)
.help("Used to log this session. Must be empty!")
.required(true))
.arg(Arg::with_name("test_mode")
.long("test-mode").short("t")
.help("Test the fuzz server with known input/coverage pairs."))
.arg(Arg::with_name("fuzz_server_id")
.long("server-id").short("s")
.help("The id of the fuzz server isntance to connect to.")
.takes_value(true).default_value("0"))
.get_matches();
let args = Args {
toml_config: matches.value_of("TOML").unwrap().to_string(),
print_queue: matches.is_present("print_queue"),
print_total_cov: matches.is_present("print_total_cov"),
skip_deterministic: matches.is_present("skip_deterministic"),
skip_non_deterministic: matches.is_present("skip_non_deterministic"),
random: matches.is_present("random"),
input_directory: matches.value_of("input_directory").map(|s| s.to_string()),
output_directory: matches.value_of("output_directory").unwrap().to_string(),
test_mode: matches.is_present("test_mode"),
jqf: analysis::JQFLevel::from_arg(matches.value_of("jqf_level").unwrap()),
fuzz_server_id: matches.value_of("fuzz_server_id").unwrap().to_string(),
seed_cycles: matches.value_of("seed_cycles").unwrap().parse::<usize>().unwrap(),
};
// "Ctrl + C" handling
let canceled = Arc::new(AtomicBool::new(false));
let c = canceled.clone();
ctrlc::set_handler(move || { c.store(true, Ordering::SeqCst); })
.expect("Error setting Ctrl-C handler");
// load test config
let config = config::Config::from_file(WORD_SIZE, &args.toml_config);
let test_size = config.get_test_size();
config.print_header();
// test runner
let srv_config = BufferedFuzzServerConfig {
test_size : test_size,
max_cycles : 200,
test_buffer_size : 64 * 1024 * 16,
coverage_buffer_size : 64 * 1024 * 16,
buffer_count: 3,
max_runs: 1024 * 16,
};
println!("Test Buffer: {} KiB", srv_config.test_buffer_size / 1024);
println!("Coverage Buffer: {} KiB", srv_config.coverage_buffer_size / 1024);
println!("Max Inputs: {}", srv_config.test_buffer_size / 16 / 3);
| } else {
fuzzer(args, canceled, config, test_size, &mut server);
}
}
fn test_mode(server: &mut FuzzServer, config: &config::Config) {
println!("⚠️ Test mode selected! ⚠️");
test::test_fuzz_server(server, config);
}
fn fuzzer(args: Args, canceled: Arc<AtomicBool>, config: config::Config,
test_size: run::TestSize, server: &mut FuzzServer) {
// starting seed
let start_cycles = args.seed_cycles;
let starting_seed = vec![0u8; test_size.input * start_cycles];
// analysis
let ranges = config.gen_ranges();
//println!("ranges:]\n{:?}", ranges);
let mut analysis = analysis::Analysis::new(test_size, ranges, args.jqf);
let seed_coverage = fuzz_one(server, &starting_seed);
let seed_analysis_res = analysis.run(start_cycles as u16, &seed_coverage);
if seed_analysis_res.is_invalid {
println!("❌ Invalid seed!");
}
if !seed_analysis_res.is_interesting {
println!("⚠️ Uninteresting seed! Might be 🐟!");
}
// TODO: support multiple seeds
// mutation
let mut_config = mutation::MutationScheduleConfig {
skip_deterministic: args.skip_deterministic,
skip_non_deterministic: args.skip_non_deterministic,
independent_random: args.random,
};
if mut_config.independent_random {
println!("⚠️ Mutation disabled. Generating independent random inputs! ⚠️");
}
let mutations = mutation::MutationSchedule::initialize(mut_config, test_size, config.get_inputs());
// statistics
let start_ts = get_time();
let mut statistics = stats::Stats::new(mutations.get_names(), start_ts.clone(), analysis.get_bitmap());
// queue
let mut q = queue::Queue::create(
&args.output_directory,
&starting_seed,
seed_analysis_res.new_cov,
!seed_analysis_res.is_invalid,
start_ts,
config.to_json(),
statistics.take_snapshot(),
&seed_coverage);
let max_entries = 1_000_000;
let max_children = 100_000; // TODO: better mechanism to determine length of the havoc stage
println!("fuzzing a maximum of {} queue entries", max_entries);
for _ in 0..max_entries {
let active_test = q.get_next_test();
let mut history = active_test.mutation_history;
let mut new_runs : u64 = 0;
q.print_entry_summary(active_test.id, &mutations);
while let Some(mut mutator) = mutations.get_mutator(&mut history, &active_test.inputs) {
// println!("running {} mutation", mutations.get_name(mutator.id()));
let mut done = false;
let mut start = 0;
while !done {
match server.run(&mut mutator, start) {
Run::Done(runs, cycles) => {
statistics.update_test_count(mutator.id().id, runs as u64, cycles);
new_runs += runs as u64;
done = true;
}
Run::Yield(ii) => { start = ii; }
}
while let Some(feedback) = server.pop_coverage() {
let rr = analysis.run(feedback.cycles, &feedback.data);
if rr.is_interesting {
if rr.is_invalid { println!("invalid input...."); }
let (info, interesting_input) = server.get_info(feedback.id);
let now = get_time();
statistics.update_new_discovery(info.mutator.id, now, analysis.get_bitmap());
q.add_new_test(interesting_input, info, rr.new_cov,
!rr.is_invalid, now,
statistics.take_snapshot(), &feedback.data);
}
}
}
if new_runs >= max_children { break; }
}
q.return_test(active_test.id, history);
q | let server_dir = format!("{}/{}", FPGA_DIR, args.fuzz_server_id);
let mut server = find_one_fuzz_server(&server_dir, srv_config).expect("failed to find a fuzz server");
if args.test_mode {
test_mode(&mut server, &config); | random_line_split |
http_remote.rs | header_map: Headers,
}
impl<'cfg> HttpRegistry<'cfg> {
/// Creates an HTTP-rebased remote registry for `source_id`.
///
/// * `name` --- Name of a path segment where `.crate` tarballs and the
/// registry index are stored. Expected to be unique.
pub fn new(
source_id: SourceId,
config: &'cfg Config,
name: &str,
) -> CargoResult<HttpRegistry<'cfg>> {
let url = source_id.url().as_str();
// Ensure the url ends with a slash so we can concatenate paths.
if !url.ends_with('/') {
anyhow::bail!("sparse registry url must end in a slash `/`: {url}")
}
assert!(source_id.is_sparse());
let url = url
.strip_prefix("sparse+")
.expect("sparse registry needs sparse+ prefix")
.into_url()
.expect("a url with the sparse+ stripped should still be valid");
Ok(HttpRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id,
config,
url,
multi: Multi::new(),
multiplexing: false,
downloads: Downloads {
next: 0,
pending: HashMap::new(),
pending_paths: HashSet::new(),
sleeping: SleepTracker::new(),
results: HashMap::new(),
progress: RefCell::new(Some(Progress::with_style(
"Fetch",
ProgressStyle::Indeterminate,
config,
))),
downloads_finished: 0,
blocking_calls: 0,
},
fresh: HashSet::new(),
requested_update: false,
fetch_started: false,
registry_config: None,
auth_required: false,
login_url: None,
auth_error_headers: vec![],
quiet: false,
})
}
/// Splits HTTP `HEADER: VALUE` to a tuple.
fn | (buf: &[u8]) -> Option<(&str, &str)> {
if buf.is_empty() {
return None;
}
let buf = std::str::from_utf8(buf).ok()?.trim_end();
// Don't let server sneak extra lines anywhere.
if buf.contains('\n') {
return None;
}
let (tag, value) = buf.split_once(':')?;
let value = value.trim();
Some((tag, value))
}
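// Illustrative example (hypothetical input, added for clarity): a raw header
// line such as b"ETag: \"abc123\"\r\n" parses to Some(("ETag", "\"abc123\"")),
// while an empty buffer or a value containing an embedded '\n' yields None.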
/// Sets up the necessary state before the first fetch gets started.
///
/// This is a no-op if called more than once.
fn start_fetch(&mut self) -> CargoResult<()> {
if self.fetch_started {
// We only need to run the setup code once.
return Ok(());
}
self.fetch_started = true;
// We've enabled the `http2` feature of `curl` in Cargo, so treat
// failures here as fatal as it would indicate a build-time problem.
self.multiplexing = self.config.http_config()?.multiplexing.unwrap_or(true);
self.multi
.pipelining(false, self.multiplexing)
.with_context(|| "failed to enable multiplexing/pipelining in curl")?;
// let's not flood the server with connections
self.multi.set_max_host_connections(2)?;
if !self.quiet {
self.config
.shell()
.status("Updating", self.source_id.display_index())?;
}
Ok(())
}
/// Checks the results inside the [`HttpRegistry::multi`] handle, and
/// updates relevant state in [`HttpRegistry::downloads`] accordingly.
fn handle_completed_downloads(&mut self) -> CargoResult<()> {
assert_eq!(
self.downloads.pending.len(),
self.downloads.pending_paths.len()
);
// Collect the results from the Multi handle.
let results = {
let mut results = Vec::new();
let pending = &mut self.downloads.pending;
self.multi.messages(|msg| {
let token = msg.token().expect("failed to read token");
let (_, handle) = &pending[&token];
if let Some(result) = msg.result_for(handle) {
results.push((token, result));
};
});
results
};
for (token, result) in results {
let (mut download, handle) = self.downloads.pending.remove(&token).unwrap();
let was_present = self.downloads.pending_paths.remove(&download.path);
assert!(
was_present,
"expected pending_paths to contain {:?}",
download.path
);
let mut handle = self.multi.remove(handle)?;
let data = download.data.take();
let url = self.full_url(&download.path);
let result = match download.retry.r#try(|| {
result.with_context(|| format!("failed to download from `{}`", url))?;
let code = handle.response_code()?;
// Keep this list of expected status codes in sync with the codes handled in `load`
let code = match code {
200 => StatusCode::Success,
304 => StatusCode::NotModified,
401 => StatusCode::Unauthorized,
404 | 410 | 451 => StatusCode::NotFound,
_ => {
return Err(HttpNotSuccessful::new_from_handle(
&mut handle,
&url,
data,
download.header_map.take().all,
)
.into());
}
};
Ok((data, code))
}) {
RetryResult::Success((data, code)) => Ok(CompletedDownload {
response_code: code,
data,
header_map: download.header_map.take(),
}),
RetryResult::Err(e) => Err(e),
RetryResult::Retry(sleep) => {
debug!(target: "network", "download retry {:?} for {sleep}ms", download.path);
self.downloads.sleeping.push(sleep, (download, handle));
continue;
}
};
self.downloads.results.insert(download.path, result);
self.downloads.downloads_finished += 1;
}
self.downloads.tick()?;
Ok(())
}
/// Constructs the full URL to download an index file.
fn full_url(&self, path: &Path) -> String {
// self.url always ends with a slash.
format!("{}{}", self.url, path.display())
}
/// Check if an index file of `path` is up-to-date.
///
/// The `path` argument is the same as in [`RegistryData::load`].
fn is_fresh(&self, path: &Path) -> bool {
if !self.requested_update {
trace!(
"using local {} as user did not request update",
path.display()
);
true
} else if self.config.cli_unstable().no_index_update {
trace!("using local {} in no_index_update mode", path.display());
true
} else if self.config.offline() {
trace!("using local {} in offline mode", path.display());
true
} else if self.fresh.contains(path) {
trace!("using local {} as it was already fetched", path.display());
true
} else {
debug!("checking freshness of {}", path.display());
false
}
}
/// Get the cached registry configuration, if it exists.
fn config_cached(&mut self) -> CargoResult<Option<&RegistryConfig>> {
if self.registry_config.is_some() {
return Ok(self.registry_config.as_ref());
}
let config_json_path = self
.assert_index_locked(&self.index_path)
.join(RegistryConfig::NAME);
match fs::read(&config_json_path) {
Ok(raw_data) => match serde_json::from_slice(&raw_data) {
Ok(json) => {
self.registry_config = Some(json);
}
Err(e) => tracing::debug!("failed to decode cached config.json: {}", e),
},
Err(e) => {
if e.kind() != ErrorKind::NotFound {
tracing::debug!("failed to read config.json cache: {}", e)
}
}
}
Ok(self.registry_config.as_ref())
}
/// Get the registry configuration from either cache or remote.
fn config(&mut self) -> Poll<CargoResult<&RegistryConfig>> {
debug!("loading config");
let index_path = self.assert_index_locked(&self.index_path);
let config_json_path = index_path.join(RegistryConfig::NAME);
if self.is_fresh(Path::new(RegistryConfig::NAME)) && self.config_cached()?.is_some() {
return Poll::Ready(Ok(self.registry_config.as_ref().unwrap()));
}
match ready!(self.load(Path::new(""), Path::new(RegistryConfig::NAME), None)?) {
LoadResponse::Data {
raw_data,
index_version: _,
} => {
trace!("config loaded");
self.registry_config = Some(serde_json::from_slice(&raw_data)?);
if paths::create_dir_all(&config_json_path.parent().unwrap()).is_ok() {
if let Err(e) = fs::write(&config_json_path, &raw_data) {
tracing::debug!("failed to write config.json cache: {}", e);
}
}
Poll::Ready(Ok(self.registry_config.as_ref().unwrap()))
}
LoadResponse::NotFound => {
Poll::Ready(Err(anyhow::anyhow!("config.json not found in registry")))
}
LoadResponse::CacheValid => Poll::Ready(Err(crate::util::internal(
"config.json is | handle_http_header | identifier_name |
http_remote.rs | self.multi.set_max_host_connections(2)?;
if !self.quiet {
self.config
.shell()
.status("Updating", self.source_id.display_index())?;
}
Ok(())
}
/// Checks the results inside the [`HttpRegistry::multi`] handle, and
/// updates relevant state in [`HttpRegistry::downloads`] accordingly.
fn handle_completed_downloads(&mut self) -> CargoResult<()> {
assert_eq!(
self.downloads.pending.len(),
self.downloads.pending_paths.len()
);
// Collect the results from the Multi handle.
let results = {
let mut results = Vec::new();
let pending = &mut self.downloads.pending;
self.multi.messages(|msg| {
let token = msg.token().expect("failed to read token");
let (_, handle) = &pending[&token];
if let Some(result) = msg.result_for(handle) {
results.push((token, result));
};
});
results
};
for (token, result) in results {
let (mut download, handle) = self.downloads.pending.remove(&token).unwrap();
let was_present = self.downloads.pending_paths.remove(&download.path);
assert!(
was_present,
"expected pending_paths to contain {:?}",
download.path
);
let mut handle = self.multi.remove(handle)?;
let data = download.data.take();
let url = self.full_url(&download.path);
let result = match download.retry.r#try(|| {
result.with_context(|| format!("failed to download from `{}`", url))?;
let code = handle.response_code()?;
// Keep this list of expected status codes in sync with the codes handled in `load`
let code = match code {
200 => StatusCode::Success,
304 => StatusCode::NotModified,
401 => StatusCode::Unauthorized,
404 | 410 | 451 => StatusCode::NotFound,
_ => {
return Err(HttpNotSuccessful::new_from_handle(
&mut handle,
&url,
data,
download.header_map.take().all,
)
.into());
}
};
Ok((data, code))
}) {
RetryResult::Success((data, code)) => Ok(CompletedDownload {
response_code: code,
data,
header_map: download.header_map.take(),
}),
RetryResult::Err(e) => Err(e),
RetryResult::Retry(sleep) => {
debug!(target: "network", "download retry {:?} for {sleep}ms", download.path);
self.downloads.sleeping.push(sleep, (download, handle));
continue;
}
};
self.downloads.results.insert(download.path, result);
self.downloads.downloads_finished += 1;
}
self.downloads.tick()?;
Ok(())
}
/// Constructs the full URL to download an index file.
fn full_url(&self, path: &Path) -> String {
// self.url always ends with a slash.
format!("{}{}", self.url, path.display())
}
/// Check if an index file of `path` is up-to-date.
///
/// The `path` argument is the same as in [`RegistryData::load`].
fn is_fresh(&self, path: &Path) -> bool {
if !self.requested_update {
trace!(
"using local {} as user did not request update",
path.display()
);
true
} else if self.config.cli_unstable().no_index_update {
trace!("using local {} in no_index_update mode", path.display());
true
} else if self.config.offline() {
trace!("using local {} in offline mode", path.display());
true
} else if self.fresh.contains(path) {
trace!("using local {} as it was already fetched", path.display());
true
} else {
debug!("checking freshness of {}", path.display());
false
}
}
/// Get the cached registry configuration, if it exists.
fn config_cached(&mut self) -> CargoResult<Option<&RegistryConfig>> {
if self.registry_config.is_some() {
return Ok(self.registry_config.as_ref());
}
let config_json_path = self
.assert_index_locked(&self.index_path)
.join(RegistryConfig::NAME);
match fs::read(&config_json_path) {
Ok(raw_data) => match serde_json::from_slice(&raw_data) {
Ok(json) => {
self.registry_config = Some(json);
}
Err(e) => tracing::debug!("failed to decode cached config.json: {}", e),
},
Err(e) => {
if e.kind() != ErrorKind::NotFound {
tracing::debug!("failed to read config.json cache: {}", e)
}
}
}
Ok(self.registry_config.as_ref())
}
/// Get the registry configuration from either cache or remote.
fn config(&mut self) -> Poll<CargoResult<&RegistryConfig>> {
debug!("loading config");
let index_path = self.assert_index_locked(&self.index_path);
let config_json_path = index_path.join(RegistryConfig::NAME);
if self.is_fresh(Path::new(RegistryConfig::NAME)) && self.config_cached()?.is_some() {
return Poll::Ready(Ok(self.registry_config.as_ref().unwrap()));
}
match ready!(self.load(Path::new(""), Path::new(RegistryConfig::NAME), None)?) {
LoadResponse::Data {
raw_data,
index_version: _,
} => {
trace!("config loaded");
self.registry_config = Some(serde_json::from_slice(&raw_data)?);
if paths::create_dir_all(&config_json_path.parent().unwrap()).is_ok() {
if let Err(e) = fs::write(&config_json_path, &raw_data) {
tracing::debug!("failed to write config.json cache: {}", e);
}
}
Poll::Ready(Ok(self.registry_config.as_ref().unwrap()))
}
LoadResponse::NotFound => {
Poll::Ready(Err(anyhow::anyhow!("config.json not found in registry")))
}
LoadResponse::CacheValid => Poll::Ready(Err(crate::util::internal(
"config.json is never stored in the index cache",
))),
}
}
/// Moves failed [`Download`]s that are ready to retry to the pending queue.
fn add_sleepers(&mut self) -> CargoResult<()> {
for (dl, handle) in self.downloads.sleeping.to_retry() {
let mut handle = self.multi.add(handle)?;
handle.set_token(dl.token)?;
let is_new = self.downloads.pending_paths.insert(dl.path.to_path_buf());
assert!(is_new, "path queued for download more than once");
let previous = self.downloads.pending.insert(dl.token, (dl, handle));
assert!(previous.is_none(), "dl token queued more than once");
}
Ok(())
}
}
impl<'cfg> RegistryData for HttpRegistry<'cfg> {
fn prepare(&self) -> CargoResult<()> {
Ok(())
}
fn index_path(&self) -> &Filesystem {
&self.index_path
}
fn assert_index_locked<'a>(&self, path: &'a Filesystem) -> &'a Path {
self.config.assert_package_cache_locked(path)
}
fn is_updated(&self) -> bool {
self.requested_update
}
fn load(
&mut self,
_root: &Path,
path: &Path,
index_version: Option<&str>,
) -> Poll<CargoResult<LoadResponse>> {
trace!("load: {}", path.display());
if let Some(_token) = self.downloads.pending_paths.get(path) {
debug!("dependency is still pending: {}", path.display());
return Poll::Pending;
}
if let Some(index_version) = index_version {
trace!(
"local cache of {} is available at version `{}`",
path.display(),
index_version
);
if self.is_fresh(path) {
return Poll::Ready(Ok(LoadResponse::CacheValid));
}
} else if self.fresh.contains(path) {
// We have no cached copy of this file, and we already downloaded it.
debug!(
"cache did not contain previously downloaded file {}",
path.display()
);
return Poll::Ready(Ok(LoadResponse::NotFound));
}
if self.config.offline() || self.config.cli_unstable().no_index_update {
// Return NotFound in offline mode when the file doesn't exist in the cache.
// If this results in resolution failure, the resolver will suggest
// removing the --offline flag.
return Poll::Ready(Ok(LoadResponse::NotFound));
}
if let Some(result) = self.downloads.results.remove(path) {
let result =
result.with_context(|| format!("download of {} failed", path.display()))?;
let is_new = self.fresh.insert(path.to_path_buf());
assert!(
is_new,
"downloaded the index file `{}` twice",
path.display()
);
            // The status codes handled here need to be kept in sync with the codes handled
// in `handle_completed_downloads`
match result.response_code {
StatusCode::Success => {
let response_index_version = if let Some(etag) = result.header_map.etag { | format!("{}: {}", ETAG, etag)
} else if let Some(lm) = result.header_map.last_modified {
format!("{}: {}", LAST_MODIFIED, lm) | random_line_split |
|
http_remote.rs | header_map: Headers,
}
impl<'cfg> HttpRegistry<'cfg> {
    /// Creates an HTTP-rebased remote registry for `source_id`.
///
/// * `name` --- Name of a path segment where `.crate` tarballs and the
    ///   registry index are stored. Expected to be unique.
pub fn new(
source_id: SourceId,
config: &'cfg Config,
name: &str,
) -> CargoResult<HttpRegistry<'cfg>> {
let url = source_id.url().as_str();
// Ensure the url ends with a slash so we can concatenate paths.
if !url.ends_with('/') {
anyhow::bail!("sparse registry url must end in a slash `/`: {url}")
}
assert!(source_id.is_sparse());
let url = url
.strip_prefix("sparse+")
.expect("sparse registry needs sparse+ prefix")
.into_url()
.expect("a url with the sparse+ stripped should still be valid");
Ok(HttpRegistry {
index_path: config.registry_index_path().join(name),
cache_path: config.registry_cache_path().join(name),
source_id,
config,
url,
multi: Multi::new(),
multiplexing: false,
downloads: Downloads {
next: 0,
pending: HashMap::new(),
pending_paths: HashSet::new(),
sleeping: SleepTracker::new(),
results: HashMap::new(),
progress: RefCell::new(Some(Progress::with_style(
"Fetch",
ProgressStyle::Indeterminate,
config,
))),
downloads_finished: 0,
blocking_calls: 0,
},
fresh: HashSet::new(),
requested_update: false,
fetch_started: false,
registry_config: None,
auth_required: false,
login_url: None,
auth_error_headers: vec![],
quiet: false,
})
}
    /// Splits an HTTP `HEADER: VALUE` line into a tuple.
fn handle_http_header(buf: &[u8]) -> Option<(&str, &str)> {
if buf.is_empty() {
return None;
}
let buf = std::str::from_utf8(buf).ok()?.trim_end();
// Don't let server sneak extra lines anywhere.
if buf.contains('\n') {
return None;
}
let (tag, value) = buf.split_once(':')?;
let value = value.trim();
Some((tag, value))
}
    /// Sets up the necessary work before the first fetch gets started.
    ///
    /// This is a no-op if called more than once.
fn start_fetch(&mut self) -> CargoResult<()> {
if self.fetch_started {
// We only need to run the setup code once.
return Ok(());
}
self.fetch_started = true;
// We've enabled the `http2` feature of `curl` in Cargo, so treat
// failures here as fatal as it would indicate a build-time problem.
self.multiplexing = self.config.http_config()?.multiplexing.unwrap_or(true);
self.multi
.pipelining(false, self.multiplexing)
.with_context(|| "failed to enable multiplexing/pipelining in curl")?;
// let's not flood the server with connections
self.multi.set_max_host_connections(2)?;
if !self.quiet {
self.config
.shell()
.status("Updating", self.source_id.display_index())?;
}
Ok(())
}
/// Checks the results inside the [`HttpRegistry::multi`] handle, and
/// updates relevant state in [`HttpRegistry::downloads`] accordingly.
fn handle_completed_downloads(&mut self) -> CargoResult<()> | let (mut download, handle) = self.downloads.pending.remove(&token).unwrap();
let was_present = self.downloads.pending_paths.remove(&download.path);
assert!(
was_present,
"expected pending_paths to contain {:?}",
download.path
);
let mut handle = self.multi.remove(handle)?;
let data = download.data.take();
let url = self.full_url(&download.path);
let result = match download.retry.r#try(|| {
result.with_context(|| format!("failed to download from `{}`", url))?;
let code = handle.response_code()?;
// Keep this list of expected status codes in sync with the codes handled in `load`
let code = match code {
200 => StatusCode::Success,
304 => StatusCode::NotModified,
401 => StatusCode::Unauthorized,
404 | 410 | 451 => StatusCode::NotFound,
_ => {
return Err(HttpNotSuccessful::new_from_handle(
&mut handle,
&url,
data,
download.header_map.take().all,
)
.into());
}
};
Ok((data, code))
}) {
RetryResult::Success((data, code)) => Ok(CompletedDownload {
response_code: code,
data,
header_map: download.header_map.take(),
}),
RetryResult::Err(e) => Err(e),
RetryResult::Retry(sleep) => {
debug!(target: "network", "download retry {:?} for {sleep}ms", download.path);
self.downloads.sleeping.push(sleep, (download, handle));
continue;
}
};
self.downloads.results.insert(download.path, result);
self.downloads.downloads_finished += 1;
}
self.downloads.tick()?;
Ok(())
}
    /// Constructs the full URL to download an index file.
fn full_url(&self, path: &Path) -> String {
// self.url always ends with a slash.
format!("{}{}", self.url, path.display())
}
/// Check if an index file of `path` is up-to-date.
///
/// The `path` argument is the same as in [`RegistryData::load`].
fn is_fresh(&self, path: &Path) -> bool {
if !self.requested_update {
trace!(
"using local {} as user did not request update",
path.display()
);
true
} else if self.config.cli_unstable().no_index_update {
trace!("using local {} in no_index_update mode", path.display());
true
} else if self.config.offline() {
trace!("using local {} in offline mode", path.display());
true
} else if self.fresh.contains(path) {
trace!("using local {} as it was already fetched", path.display());
true
} else {
debug!("checking freshness of {}", path.display());
false
}
}
/// Get the cached registry configuration, if it exists.
fn config_cached(&mut self) -> CargoResult<Option<&RegistryConfig>> {
if self.registry_config.is_some() {
return Ok(self.registry_config.as_ref());
}
let config_json_path = self
.assert_index_locked(&self.index_path)
.join(RegistryConfig::NAME);
match fs::read(&config_json_path) {
Ok(raw_data) => match serde_json::from_slice(&raw_data) {
Ok(json) => {
self.registry_config = Some(json);
}
Err(e) => tracing::debug!("failed to decode cached config.json: {}", e),
},
Err(e) => {
if e.kind() != ErrorKind::NotFound {
tracing::debug!("failed to read config.json cache: {}", e)
}
}
}
Ok(self.registry_config.as_ref())
}
/// Get the registry configuration from either cache or remote.
fn config(&mut self) -> Poll<CargoResult<&RegistryConfig>> {
debug!("loading config");
let index_path = self.assert_index_locked(&self.index_path);
let config_json_path = index_path.join(RegistryConfig::NAME);
if self.is_fresh(Path::new(RegistryConfig::NAME)) && self.config_cached()?.is_some() {
return Poll::Ready(Ok(self.registry_config.as_ref().unwrap()));
}
match ready!(self.load(Path::new(""), Path::new(RegistryConfig::NAME), None)?) {
LoadResponse::Data {
raw_data,
index_version: _,
} => {
trace!("config loaded");
self.registry_config = Some(serde_json::from_slice(&raw_data)?);
if paths::create_dir_all(&config_json_path.parent().unwrap()).is_ok() {
if let Err(e) = fs::write(&config_json_path, &raw_data) {
tracing::debug!("failed to write config.json cache: {}", e);
}
}
Poll::Ready(Ok(self.registry_config.as_ref().unwrap()))
}
LoadResponse::NotFound => {
Poll::Ready(Err(anyhow::anyhow!("config.json not found in registry")))
}
LoadResponse::CacheValid => Poll::Ready(Err(crate::util::internal(
"config.json is | {
assert_eq!(
self.downloads.pending.len(),
self.downloads.pending_paths.len()
);
// Collect the results from the Multi handle.
let results = {
let mut results = Vec::new();
let pending = &mut self.downloads.pending;
self.multi.messages(|msg| {
let token = msg.token().expect("failed to read token");
let (_, handle) = &pending[&token];
if let Some(result) = msg.result_for(handle) {
results.push((token, result));
};
});
results
};
for (token, result) in results { | identifier_body |
lexer.rs | continuation of the current one
fn more(&mut self);
/// Tells lexer to completely ignore and not emit current token.
fn skip(&mut self);
#[doc(hidden)]
fn reset(&mut self);
#[doc(hidden)]
fn get_interpreter(&self) -> Option<&LexerATNSimulator>;
}
/// **! Usually generated by ANTLR !**
///
/// This trait combines everything that can be used to extend Lexer behavior
pub trait LexerRecog<'a, T: Recognizer<'a>>: Actions<'a, T> + Sized + 'static {
/// Callback to extend emit behavior
fn before_emit(_lexer: &mut T) {}
}
/// Default implementation of Lexer
///
/// Public fields in this struct are intended to be used by embedded actions
#[allow(missing_docs)]
pub struct BaseLexer<
'input,
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input> = CommonTokenFactory,
> {
/// `LexerATNSimulator` instance of this lexer
pub interpreter: Option<Box<LexerATNSimulator>>,
/// `CharStream` used by this lexer
pub input: Option<Input>,
recog: T,
factory: &'input TF,
error_listeners: RefCell<Vec<Box<dyn ErrorListener<'input, Self>>>>,
pub token_start_char_index: isize,
pub token_start_line: isize,
pub token_start_column: isize,
current_pos: Rc<LexerPosition>,
/// Overrides token type emitted by lexer for current token
pub token_type: isize,
/// Make it `Some` to override token that is currently being generated by lexer
pub token: Option<TF::Tok>,
hit_eof: bool,
/// Channel lexer is currently assigning tokens to
pub channel: isize,
/// stack of modes, which is used for pushMode,popMode lexer actions
pub mode_stack: Vec<usize>,
/// Mode lexer is currently in
pub mode: usize,
/// Make it `Some` to override text for token that is currently being generated by lexer
pub text: Option<<TF::Data as ToOwned>::Owned>,
}
#[derive(Debug)]
pub(crate) struct LexerPosition {
pub(crate) line: Cell<isize>,
pub(crate) char_position_in_line: Cell<isize>,
}
impl<'input, T, Input, TF> Deref for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Target = T;
fn deref(&self) -> &Self::Target |
}
impl<'input, T, Input, TF> DerefMut for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.recog
}
}
impl<'input, T, Input, TF> Recognizer<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Node = EmptyContextType<'input, TF>;
fn sempred(
&mut self,
_localctx: Option<&<Self::Node as ParserNodeType<'input>>::Type>,
rule_index: isize,
action_index: isize,
) -> bool {
<T as Actions<'input, Self>>::sempred(_localctx, rule_index, action_index, self)
}
fn action(
&mut self,
_localctx: Option<&<Self::Node as ParserNodeType<'input>>::Type>,
rule_index: isize,
action_index: isize,
) {
<T as Actions<'input, Self>>::action(_localctx, rule_index, action_index, self)
}
}
/// Default lexer mode id
pub const LEXER_DEFAULT_MODE: usize = 0;
/// Special token type to indicate that lexer should continue current token on next iteration
/// see `Lexer::more()`
pub const LEXER_MORE: isize = -2;
/// Special token type to indicate that lexer should not return current token
/// usually used to skip whitespaces and comments
/// see `Lexer::skip()`
pub const LEXER_SKIP: isize = -3;
#[doc(inline)]
pub use super::token::TOKEN_DEFAULT_CHANNEL as LEXER_DEFAULT_TOKEN_CHANNEL;
#[doc(inline)]
pub use super::token::TOKEN_HIDDEN_CHANNEL as LEXER_HIDDEN;
pub(crate) const LEXER_MIN_CHAR_VALUE: isize = 0x0000;
pub(crate) const LEXER_MAX_CHAR_VALUE: isize = 0x10FFFF;
impl<'input, T, Input, TF> BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
fn emit_token(&mut self, token: TF::Tok) {
self.token = Some(token);
}
fn emit(&mut self) {
<T as LexerRecog<Self>>::before_emit(self);
let stop = self.get_char_index() - 1;
let token = self.factory.create(
Some(self.input.as_mut().unwrap()),
self.token_type,
self.text.take(),
self.channel,
self.token_start_char_index,
stop,
self.token_start_line,
self.token_start_column,
);
self.emit_token(token);
}
fn emit_eof(&mut self) {
let token = self.factory.create(
None::<&mut Input>,
super::int_stream::EOF,
None,
LEXER_DEFAULT_TOKEN_CHANNEL,
self.get_char_index(),
self.get_char_index() - 1,
self.get_line(),
self.get_char_position_in_line(),
);
self.emit_token(token)
}
/// Current position in input stream
pub fn get_char_index(&self) -> isize {
self.input.as_ref().unwrap().index()
}
/// Current token text
pub fn get_text<'a>(&'a self) -> Cow<'a, TF::Data>
where
'input: 'a,
{
self.text
.as_ref()
.map(|it| Borrowed(it.borrow()))
// .unwrap_or("")
.unwrap_or_else(|| {
let text = self
.input
.as_ref()
.unwrap()
.get_text(self.token_start_char_index, self.get_char_index() - 1);
TF::get_data(text)
})
}
/// Used from lexer actions to override text of the token that will be emitted next
pub fn set_text(&mut self, _text: <TF::Data as ToOwned>::Owned) {
self.text = Some(_text);
}
// fn get_all_tokens(&mut self) -> Vec<TF::Tok> { unimplemented!() }
// fn get_char_error_display(&self, _c: char) -> String { unimplemented!() }
/// Add error listener
pub fn add_error_listener(&mut self, listener: Box<dyn ErrorListener<'input, Self>>) {
self.error_listeners.borrow_mut().push(listener);
}
/// Remove and drop all error listeners
pub fn remove_error_listeners(&mut self) {
self.error_listeners.borrow_mut().clear();
}
/// Creates new lexer instance
pub fn new_base_lexer(
input: Input,
interpreter: LexerATNSimulator,
recog: T,
factory: &'input TF,
) -> Self {
let mut lexer = Self {
interpreter: Some(Box::new(interpreter)),
input: Some(input),
recog,
factory,
error_listeners: RefCell::new(vec![Box::new(ConsoleErrorListener {})]),
token_start_char_index: 0,
token_start_line: 0,
token_start_column: 0,
current_pos: Rc::new(LexerPosition {
line: Cell::new(1),
char_position_in_line: Cell::new(0),
}),
token_type: super::token::TOKEN_INVALID_TYPE,
text: None,
token: None,
hit_eof: false,
channel: super::token::TOKEN_DEFAULT_CHANNEL,
// token_factory_source_pair: None,
mode_stack: Vec::new(),
mode: self::LEXER_DEFAULT_MODE,
};
let pos = lexer.current_pos.clone();
lexer.interpreter.as_mut().unwrap().current_pos = pos;
lexer
}
}
impl<'input, T, Input, TF> TokenAware<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type TF = TF;
}
impl<'input, T, Input, TF> TokenSource<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory | {
&self.recog
} | identifier_body |
lexer.rs | ` to override token that is currently being generated by lexer
pub token: Option<TF::Tok>,
hit_eof: bool,
/// Channel lexer is currently assigning tokens to
pub channel: isize,
/// stack of modes, which is used for pushMode,popMode lexer actions
pub mode_stack: Vec<usize>,
/// Mode lexer is currently in
pub mode: usize,
/// Make it `Some` to override text for token that is currently being generated by lexer
pub text: Option<<TF::Data as ToOwned>::Owned>,
}
#[derive(Debug)]
pub(crate) struct LexerPosition {
pub(crate) line: Cell<isize>,
pub(crate) char_position_in_line: Cell<isize>,
}
impl<'input, T, Input, TF> Deref for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Target = T;
fn deref(&self) -> &Self::Target {
&self.recog
}
}
impl<'input, T, Input, TF> DerefMut for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.recog
}
}
impl<'input, T, Input, TF> Recognizer<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Node = EmptyContextType<'input, TF>;
fn sempred(
&mut self,
_localctx: Option<&<Self::Node as ParserNodeType<'input>>::Type>,
rule_index: isize,
action_index: isize,
) -> bool {
<T as Actions<'input, Self>>::sempred(_localctx, rule_index, action_index, self)
}
fn action(
&mut self,
_localctx: Option<&<Self::Node as ParserNodeType<'input>>::Type>,
rule_index: isize,
action_index: isize,
) {
<T as Actions<'input, Self>>::action(_localctx, rule_index, action_index, self)
}
}
/// Default lexer mode id
pub const LEXER_DEFAULT_MODE: usize = 0;
/// Special token type to indicate that lexer should continue current token on next iteration
/// see `Lexer::more()`
pub const LEXER_MORE: isize = -2;
/// Special token type to indicate that lexer should not return current token
/// usually used to skip whitespaces and comments
/// see `Lexer::skip()`
pub const LEXER_SKIP: isize = -3;
#[doc(inline)]
pub use super::token::TOKEN_DEFAULT_CHANNEL as LEXER_DEFAULT_TOKEN_CHANNEL;
#[doc(inline)]
pub use super::token::TOKEN_HIDDEN_CHANNEL as LEXER_HIDDEN;
pub(crate) const LEXER_MIN_CHAR_VALUE: isize = 0x0000;
pub(crate) const LEXER_MAX_CHAR_VALUE: isize = 0x10FFFF;
impl<'input, T, Input, TF> BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
fn emit_token(&mut self, token: TF::Tok) {
self.token = Some(token);
}
fn emit(&mut self) {
<T as LexerRecog<Self>>::before_emit(self);
let stop = self.get_char_index() - 1;
let token = self.factory.create(
Some(self.input.as_mut().unwrap()),
self.token_type,
self.text.take(),
self.channel,
self.token_start_char_index,
stop,
self.token_start_line,
self.token_start_column,
);
self.emit_token(token);
}
fn emit_eof(&mut self) {
let token = self.factory.create(
None::<&mut Input>,
super::int_stream::EOF,
None,
LEXER_DEFAULT_TOKEN_CHANNEL,
self.get_char_index(),
self.get_char_index() - 1,
self.get_line(),
self.get_char_position_in_line(),
);
self.emit_token(token)
}
/// Current position in input stream
pub fn get_char_index(&self) -> isize {
self.input.as_ref().unwrap().index()
}
/// Current token text
pub fn get_text<'a>(&'a self) -> Cow<'a, TF::Data>
where
'input: 'a,
{
self.text
.as_ref()
.map(|it| Borrowed(it.borrow()))
// .unwrap_or("")
.unwrap_or_else(|| {
let text = self
.input
.as_ref()
.unwrap()
.get_text(self.token_start_char_index, self.get_char_index() - 1);
TF::get_data(text)
})
}
/// Used from lexer actions to override text of the token that will be emitted next
pub fn set_text(&mut self, _text: <TF::Data as ToOwned>::Owned) {
self.text = Some(_text);
}
// fn get_all_tokens(&mut self) -> Vec<TF::Tok> { unimplemented!() }
// fn get_char_error_display(&self, _c: char) -> String { unimplemented!() }
/// Add error listener
pub fn add_error_listener(&mut self, listener: Box<dyn ErrorListener<'input, Self>>) {
self.error_listeners.borrow_mut().push(listener);
}
/// Remove and drop all error listeners
pub fn remove_error_listeners(&mut self) {
self.error_listeners.borrow_mut().clear();
}
/// Creates new lexer instance
pub fn new_base_lexer(
input: Input,
interpreter: LexerATNSimulator,
recog: T,
factory: &'input TF,
) -> Self {
let mut lexer = Self {
interpreter: Some(Box::new(interpreter)),
input: Some(input),
recog,
factory,
error_listeners: RefCell::new(vec![Box::new(ConsoleErrorListener {})]),
token_start_char_index: 0,
token_start_line: 0,
token_start_column: 0,
current_pos: Rc::new(LexerPosition {
line: Cell::new(1),
char_position_in_line: Cell::new(0),
}),
token_type: super::token::TOKEN_INVALID_TYPE,
text: None,
token: None,
hit_eof: false,
channel: super::token::TOKEN_DEFAULT_CHANNEL,
// token_factory_source_pair: None,
mode_stack: Vec::new(),
mode: self::LEXER_DEFAULT_MODE,
};
let pos = lexer.current_pos.clone();
lexer.interpreter.as_mut().unwrap().current_pos = pos;
lexer
}
}
impl<'input, T, Input, TF> TokenAware<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type TF = TF;
}
impl<'input, T, Input, TF> TokenSource<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type TF = TF;
#[inline]
#[allow(unused_labels)]
fn next_token(&mut self) -> <Self::TF as TokenFactory<'input>>::Tok {
assert!(self.input.is_some());
let _marker = self.input().mark();
'outer: loop {
if self.hit_eof {
self.emit_eof();
break;
}
self.token = None;
self.channel = LEXER_DEFAULT_TOKEN_CHANNEL;
self.token_start_column = self
.interpreter
.as_ref()
.unwrap()
.get_char_position_in_line();
self.token_start_line = self.interpreter.as_ref().unwrap().get_line();
self.text = None;
let index = self.input().index();
self.token_start_char_index = index;
'inner: loop {
self.token_type = TOKEN_INVALID_TYPE;
// detach from self, to allow self to be passed deeper
let mut interpreter = self.interpreter.take().unwrap();
// let mut input = self.input.take().unwrap();
let result = interpreter.match_token(self.mode, self);
self.interpreter = Some(interpreter);
let ttype = result.unwrap_or_else(|err| {
// println!("error, recovering");
notify_listeners(&mut self.error_listeners.borrow_mut(), &err, self);
self.interpreter
.as_mut()
.unwrap()
.recover(err, self.input.as_mut().unwrap());
LEXER_SKIP
});
// self.input = Some(input)
if self.input().la(1) == super::int_stream::EOF | {
self.hit_eof = true;
} | conditional_block |
|
lexer.rs | mut self,
_localctx: Option<&<Self::Node as ParserNodeType<'input>>::Type>,
rule_index: isize,
action_index: isize,
) {
<T as Actions<'input, Self>>::action(_localctx, rule_index, action_index, self)
}
}
/// Default lexer mode id
pub const LEXER_DEFAULT_MODE: usize = 0;
/// Special token type to indicate that lexer should continue current token on next iteration
/// see `Lexer::more()`
pub const LEXER_MORE: isize = -2;
/// Special token type to indicate that lexer should not return current token
/// usually used to skip whitespaces and comments
/// see `Lexer::skip()`
pub const LEXER_SKIP: isize = -3;
#[doc(inline)]
pub use super::token::TOKEN_DEFAULT_CHANNEL as LEXER_DEFAULT_TOKEN_CHANNEL;
#[doc(inline)]
pub use super::token::TOKEN_HIDDEN_CHANNEL as LEXER_HIDDEN;
pub(crate) const LEXER_MIN_CHAR_VALUE: isize = 0x0000;
pub(crate) const LEXER_MAX_CHAR_VALUE: isize = 0x10FFFF;
impl<'input, T, Input, TF> BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
fn emit_token(&mut self, token: TF::Tok) {
self.token = Some(token);
}
fn emit(&mut self) {
<T as LexerRecog<Self>>::before_emit(self);
let stop = self.get_char_index() - 1;
let token = self.factory.create(
Some(self.input.as_mut().unwrap()),
self.token_type,
self.text.take(),
self.channel,
self.token_start_char_index,
stop,
self.token_start_line,
self.token_start_column,
);
self.emit_token(token);
}
fn emit_eof(&mut self) {
let token = self.factory.create(
None::<&mut Input>,
super::int_stream::EOF,
None,
LEXER_DEFAULT_TOKEN_CHANNEL,
self.get_char_index(),
self.get_char_index() - 1,
self.get_line(),
self.get_char_position_in_line(),
);
self.emit_token(token)
}
/// Current position in input stream
pub fn get_char_index(&self) -> isize {
self.input.as_ref().unwrap().index()
}
/// Current token text
pub fn get_text<'a>(&'a self) -> Cow<'a, TF::Data>
where
'input: 'a,
{
self.text
.as_ref()
.map(|it| Borrowed(it.borrow()))
// .unwrap_or("")
.unwrap_or_else(|| {
let text = self
.input
.as_ref()
.unwrap()
.get_text(self.token_start_char_index, self.get_char_index() - 1);
TF::get_data(text)
})
}
/// Used from lexer actions to override text of the token that will be emitted next
pub fn set_text(&mut self, _text: <TF::Data as ToOwned>::Owned) {
self.text = Some(_text);
}
// fn get_all_tokens(&mut self) -> Vec<TF::Tok> { unimplemented!() }
// fn get_char_error_display(&self, _c: char) -> String { unimplemented!() }
/// Add error listener
pub fn add_error_listener(&mut self, listener: Box<dyn ErrorListener<'input, Self>>) {
self.error_listeners.borrow_mut().push(listener);
}
/// Remove and drop all error listeners
pub fn remove_error_listeners(&mut self) {
self.error_listeners.borrow_mut().clear();
}
/// Creates new lexer instance
pub fn new_base_lexer(
input: Input,
interpreter: LexerATNSimulator,
recog: T,
factory: &'input TF,
) -> Self {
let mut lexer = Self {
interpreter: Some(Box::new(interpreter)),
input: Some(input),
recog,
factory,
error_listeners: RefCell::new(vec![Box::new(ConsoleErrorListener {})]),
token_start_char_index: 0,
token_start_line: 0,
token_start_column: 0,
current_pos: Rc::new(LexerPosition {
line: Cell::new(1),
char_position_in_line: Cell::new(0),
}),
token_type: super::token::TOKEN_INVALID_TYPE,
text: None,
token: None,
hit_eof: false,
channel: super::token::TOKEN_DEFAULT_CHANNEL,
// token_factory_source_pair: None,
mode_stack: Vec::new(),
mode: self::LEXER_DEFAULT_MODE,
};
let pos = lexer.current_pos.clone();
lexer.interpreter.as_mut().unwrap().current_pos = pos;
lexer
}
}
impl<'input, T, Input, TF> TokenAware<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type TF = TF;
}
impl<'input, T, Input, TF> TokenSource<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type TF = TF;
#[inline]
#[allow(unused_labels)]
fn next_token(&mut self) -> <Self::TF as TokenFactory<'input>>::Tok {
assert!(self.input.is_some());
let _marker = self.input().mark();
'outer: loop {
if self.hit_eof {
self.emit_eof();
break;
}
self.token = None;
self.channel = LEXER_DEFAULT_TOKEN_CHANNEL;
self.token_start_column = self
.interpreter
.as_ref()
.unwrap()
.get_char_position_in_line();
self.token_start_line = self.interpreter.as_ref().unwrap().get_line();
self.text = None;
let index = self.input().index();
self.token_start_char_index = index;
'inner: loop {
self.token_type = TOKEN_INVALID_TYPE;
// detach from self, to allow self to be passed deeper
let mut interpreter = self.interpreter.take().unwrap();
// let mut input = self.input.take().unwrap();
let result = interpreter.match_token(self.mode, self);
self.interpreter = Some(interpreter);
let ttype = result.unwrap_or_else(|err| {
// println!("error, recovering");
notify_listeners(&mut self.error_listeners.borrow_mut(), &err, self);
self.interpreter
.as_mut()
.unwrap()
.recover(err, self.input.as_mut().unwrap());
LEXER_SKIP
});
// self.input = Some(input)
if self.input().la(1) == super::int_stream::EOF {
self.hit_eof = true;
}
if self.token_type == TOKEN_INVALID_TYPE {
self.token_type = ttype;
}
if self.token_type == LEXER_SKIP {
continue 'outer;
}
if self.token_type != LEXER_MORE {
break;
}
}
if self.token.is_none() {
self.emit();
break;
}
}
self.input().release(_marker);
self.token.take().unwrap()
}
fn get_line(&self) -> isize {
self.current_pos.line.get()
}
fn get_char_position_in_line(&self) -> isize {
self.current_pos.char_position_in_line.get()
}
fn get_input_stream(&mut self) -> Option<&mut dyn IntStream> {
match &mut self.input {
None => None,
Some(x) => Some(x as _),
}
}
fn get_source_name(&self) -> String {
self.input
.as_ref()
.map(|it| it.get_source_name())
.unwrap_or("<none>".to_string())
}
// fn set_token_factory<'c: 'b>(&mut self, f: &'c TokenFactory) {
// self.factory = f;
// }
fn get_token_factory(&self) -> &'input TF {
self.factory
}
}
#[cold]
#[inline(never)]
fn notify_listeners<'input, T, Input, TF>(
    listeners: &mut Vec<Box<dyn ErrorListener<'input, BaseLexer<'input, T, Input, TF>>>>,
e: &ANTLRError,
lexer: &BaseLexer<'input, T, Input, TF>,
) where
T: LexerRecog<'input, BaseLexer<'input, T, Input, TF>> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
let inner = lexer
.input
.as_ref()
.unwrap()
.get_text(lexer.token_start_char_index, lexer.get_char_index());
let text = format!(
"token recognition error at: '{}'",
TF::get_data(inner).to_display()
);
    for listener in listeners.iter_mut() { | listener.syntax_error(
lexer,
None,
lexer.token_start_line, | random_line_split |
|
lexer.rs | a continuation of the current one
fn more(&mut self);
/// Tells lexer to completely ignore and not emit current token.
fn skip(&mut self);
#[doc(hidden)]
fn reset(&mut self);
#[doc(hidden)]
fn get_interpreter(&self) -> Option<&LexerATNSimulator>;
}
/// **! Usually generated by ANTLR !**
///
/// This trait combines everything that can be used to extend Lexer behavior
pub trait LexerRecog<'a, T: Recognizer<'a>>: Actions<'a, T> + Sized + 'static {
/// Callback to extend emit behavior
fn before_emit(_lexer: &mut T) {}
}
/// Default implementation of Lexer
///
/// Public fields in this struct are intended to be used by embedded actions
#[allow(missing_docs)]
pub struct BaseLexer<
'input,
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input> = CommonTokenFactory,
> {
/// `LexerATNSimulator` instance of this lexer
pub interpreter: Option<Box<LexerATNSimulator>>,
/// `CharStream` used by this lexer
pub input: Option<Input>,
recog: T,
factory: &'input TF,
error_listeners: RefCell<Vec<Box<dyn ErrorListener<'input, Self>>>>,
pub token_start_char_index: isize,
pub token_start_line: isize,
pub token_start_column: isize,
current_pos: Rc<LexerPosition>,
/// Overrides token type emitted by lexer for current token
pub token_type: isize,
/// Make it `Some` to override token that is currently being generated by lexer
pub token: Option<TF::Tok>,
hit_eof: bool,
/// Channel lexer is currently assigning tokens to
pub channel: isize,
/// stack of modes, which is used for pushMode,popMode lexer actions
pub mode_stack: Vec<usize>,
/// Mode lexer is currently in
pub mode: usize,
/// Make it `Some` to override text for token that is currently being generated by lexer
pub text: Option<<TF::Data as ToOwned>::Owned>,
}
#[derive(Debug)]
pub(crate) struct LexerPosition {
pub(crate) line: Cell<isize>,
pub(crate) char_position_in_line: Cell<isize>,
}
impl<'input, T, Input, TF> Deref for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Target = T;
fn deref(&self) -> &Self::Target {
&self.recog
}
}
impl<'input, T, Input, TF> DerefMut for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.recog
}
}
impl<'input, T, Input, TF> Recognizer<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type Node = EmptyContextType<'input, TF>;
fn sempred(
&mut self,
_localctx: Option<&<Self::Node as ParserNodeType<'input>>::Type>,
rule_index: isize,
action_index: isize,
) -> bool {
<T as Actions<'input, Self>>::sempred(_localctx, rule_index, action_index, self)
}
fn action(
&mut self,
_localctx: Option<&<Self::Node as ParserNodeType<'input>>::Type>,
rule_index: isize,
action_index: isize,
) {
<T as Actions<'input, Self>>::action(_localctx, rule_index, action_index, self)
}
}
/// Default lexer mode id
pub const LEXER_DEFAULT_MODE: usize = 0;
/// Special token type to indicate that lexer should continue current token on next iteration
/// see `Lexer::more()`
pub const LEXER_MORE: isize = -2;
/// Special token type to indicate that lexer should not return current token
/// usually used to skip whitespaces and comments
/// see `Lexer::skip()`
pub const LEXER_SKIP: isize = -3;
#[doc(inline)]
pub use super::token::TOKEN_DEFAULT_CHANNEL as LEXER_DEFAULT_TOKEN_CHANNEL;
#[doc(inline)]
pub use super::token::TOKEN_HIDDEN_CHANNEL as LEXER_HIDDEN;
pub(crate) const LEXER_MIN_CHAR_VALUE: isize = 0x0000;
pub(crate) const LEXER_MAX_CHAR_VALUE: isize = 0x10FFFF;
impl<'input, T, Input, TF> BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
fn emit_token(&mut self, token: TF::Tok) {
self.token = Some(token);
}
fn emit(&mut self) {
<T as LexerRecog<Self>>::before_emit(self);
let stop = self.get_char_index() - 1;
let token = self.factory.create(
Some(self.input.as_mut().unwrap()),
self.token_type,
self.text.take(),
self.channel,
self.token_start_char_index,
stop,
self.token_start_line,
self.token_start_column,
);
self.emit_token(token);
}
fn emit_eof(&mut self) {
let token = self.factory.create(
None::<&mut Input>,
super::int_stream::EOF,
None,
LEXER_DEFAULT_TOKEN_CHANNEL,
self.get_char_index(),
self.get_char_index() - 1,
self.get_line(),
self.get_char_position_in_line(),
);
self.emit_token(token)
}
/// Current position in input stream
pub fn get_char_index(&self) -> isize {
self.input.as_ref().unwrap().index()
}
/// Current token text
pub fn get_text<'a>(&'a self) -> Cow<'a, TF::Data>
where
'input: 'a,
{
self.text
.as_ref()
.map(|it| Borrowed(it.borrow()))
// .unwrap_or("")
.unwrap_or_else(|| {
let text = self
.input
.as_ref()
.unwrap()
.get_text(self.token_start_char_index, self.get_char_index() - 1);
TF::get_data(text)
})
}
/// Used from lexer actions to override text of the token that will be emitted next
pub fn | (&mut self, _text: <TF::Data as ToOwned>::Owned) {
self.text = Some(_text);
}
// fn get_all_tokens(&mut self) -> Vec<TF::Tok> { unimplemented!() }
// fn get_char_error_display(&self, _c: char) -> String { unimplemented!() }
/// Add error listener
pub fn add_error_listener(&mut self, listener: Box<dyn ErrorListener<'input, Self>>) {
self.error_listeners.borrow_mut().push(listener);
}
/// Remove and drop all error listeners
pub fn remove_error_listeners(&mut self) {
self.error_listeners.borrow_mut().clear();
}
/// Creates new lexer instance
pub fn new_base_lexer(
input: Input,
interpreter: LexerATNSimulator,
recog: T,
factory: &'input TF,
) -> Self {
let mut lexer = Self {
interpreter: Some(Box::new(interpreter)),
input: Some(input),
recog,
factory,
error_listeners: RefCell::new(vec![Box::new(ConsoleErrorListener {})]),
token_start_char_index: 0,
token_start_line: 0,
token_start_column: 0,
current_pos: Rc::new(LexerPosition {
line: Cell::new(1),
char_position_in_line: Cell::new(0),
}),
token_type: super::token::TOKEN_INVALID_TYPE,
text: None,
token: None,
hit_eof: false,
channel: super::token::TOKEN_DEFAULT_CHANNEL,
// token_factory_source_pair: None,
mode_stack: Vec::new(),
mode: self::LEXER_DEFAULT_MODE,
};
let pos = lexer.current_pos.clone();
lexer.interpreter.as_mut().unwrap().current_pos = pos;
lexer
}
}
impl<'input, T, Input, TF> TokenAware<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory<'input>,
{
type TF = TF;
}
impl<'input, T, Input, TF> TokenSource<'input> for BaseLexer<'input, T, Input, TF>
where
T: LexerRecog<'input, Self> + 'static,
Input: CharStream<TF::From>,
TF: TokenFactory | set_text | identifier_name |
app_2_wl.py | down("[Tweetru ministre gui eub walu wergu yaram ](https://twitter.com/MinisteredelaS1)")
st.sidebar.markdown("[Booleb xéeti mbir ak màndargaay jumtukaayu ](https://github.com/maelfabien/COVID-19-Senegal)")
st.sidebar.markdown("---")
st.sidebar.header("Jokko ak wa ministere")
st.sidebar.markdown("Ministre gui eub walu wergu yaram ak boolem boko / Fann Residence")
st.sidebar.markdown("Rue Aimé Césaire, Dakar, Senegal")
st.sidebar.markdown("+221 800 00 50 50 - [email protected]")
st.sidebar.markdown("---")
st.sidebar.markdown("Ñi ka derale moye [Maël Fabien](https://maelfabien.github.io/) ak [Dakar Institute of Technology](https://dit.sn/)")
# I. Dataframe
df = pd.read_csv("COVID_Dakar.csv", sep=";")
df['Date'] = pd.to_datetime(df['Date'], dayfirst=True)
#st.write(df)
evol_cases = df[['Date', 'Positif', 'Negatif', 'Décédé', 'Guéri']].groupby("Date").sum().cumsum()
st.subheader("Ci tënkk")
total_positif = evol_cases.tail(1)['Positif'][0]
total_negatif = evol_cases.tail(1)['Negatif'][0]
total_decede = evol_cases.tail(1)['Décédé'][0]
total_geuri = evol_cases.tail(1)['Guéri'][0]
st.markdown("Limu ñi feebar: <span style='font-size:1.5em;'>%s</span>"%(total_positif - total_geuri), unsafe_allow_html=True)
st.markdown("Limu ñi faatu: <span style='font-size:1.5em;'>%s</span>"%(total_decede), unsafe_allow_html=True)
st.markdown("Limu ñi wer: <span style='font-size:1.5em;'>%s</span>"%(total_geuri), unsafe_allow_html=True)
st.markdown("dayob ñi wer : <span style='font-size:1.5em;'>%s</span>"%(np.round(total_geuri / total_positif, 3) * 100), unsafe_allow_html=True)
st.markdown("dàyob yoqute ñi feebar bis bu ay : <span style='font-size:1.5em;'>%s</span>"%(np.round(pd.DataFrame(np.sqrt(evol_cases['Positif'].pct_change(periods=2)+1)-1).tail(1)['Positif'][0] * 100, 2)), unsafe_allow_html=True)
st.markdown("Mboolem ñi ame Koronaa: <span style='font-size:1.5em;'>%s</span>"%(total_positif), unsafe_allow_html=True)
st.markdown("Mboolem ñi ñu saytu te ñu mùcc ci feebar bi: <span style='font-size:1.5em;'>%s</span>"%(total_negatif), unsafe_allow_html=True)
st.markdown("Mboolem ñi ñu saytu: <span style='font-size:1.5em;'>%s</span>"%(total_positif + total_negatif), unsafe_allow_html=True)
st.markdown("dayob ñi ame feebar bi ci ñi ñu saytu: <span style='font-size:1.5em;'>%s</span>"%(np.round(total_positif / (total_positif + total_negatif), 3) * 100), unsafe_allow_html=True)
# II. Map
st.markdown("---")
st.subheader("ñi ame feebar bi fu ñu féete")
shapefile = 'app/ne_110m_admin_0_countries.shp'
#Read shapefile using Geopandas
gdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]
gdf.columns = ['country', 'country_code', 'geometry']
gdf = gdf[gdf['country']=="Senegal"]
grid_crs=gdf.crs
gdf_json = json.loads(gdf.to_json())
grid = json.dumps(gdf_json)
cities = pd.read_csv("city_coordinates.csv", index_col=0)
def find_lat(x):
try:
return float(cities[cities['Ville'] == x]['Latitude'])
except TypeError:
return None
def find_long(x):
try:
return float(cities[cities['Ville'] == x]['Longitude'])
except TypeError:
return None
summary = df[['Positif', 'Ville']].groupby("Ville").sum().reset_index()
summary['latitude'] = summary['Ville'].apply(lambda x: find_lat(x))
summary['longitude'] = summary['Ville'].apply(lambda x: find_long(x))
geosource = GeoJSONDataSource(geojson = grid)
pointsource = ColumnDataSource(summary)
hover = HoverTool(
tooltips = [('Ville', '@Ville'), ('Limu ñi ame Koronaa ', '@Positif')]
)
#Create figure object.
p = figure(plot_height = 550 , plot_width = 700, tools=[hover, 'pan', 'wheel_zoom'])
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.xaxis.visible = False
p.yaxis.visible = False
p.outline_line_color = None
patch = p.patches('xs','ys', source = geosource, fill_color = '#fff7bc',
line_color = 'black', line_width = 0.35, fill_alpha = 1,
hover_fill_color="#fec44f")
#Add patch renderer to figure.
patch = p.patches('xs','ys', source = geosource, fill_color = 'lightgrey',
line_color = 'black', line_width = 0.25, fill_alpha = 1)
p.circle('longitude','latitude',source=pointsource, size=15)
st.bokeh_chart(p)
# III. Map
st.markdown("---")
st.subheader(" Yoqute limu ñi ame Koronaa ci Senegal")
highlight = alt.selection(type='single', on='mouseover',
fields=['Positif'], nearest=True)
chart = alt.Chart(evol_cases.reset_index()).mark_line(point=True, strokeWidth=5).encode(
x='Date:T',
y='Positif:Q',
tooltip='Positif:Q'
).add_selection(
highlight
).properties(height=400, width=700)
st.write(chart.interactive())
st.markdown("---")
st.subheader("Mingalé rewu Pays-Bas")
st.write("Senegaal rewle bigua xamanetané limu way-dëkké dafa méggo ak rewu Pays-bas (Fukk ak jurrom benn million), ba taxna ab mégele meuna dox di diganté ñaari dëkk yoyé. Doneté yoqute Jangorëy Koronaa gui ci rewum Senegaal la geune yéxé ci sinu dioni yalla taye, luñu setlu ci ni Jangoro gui di doxé diarna bayi xel wayé itameu lathe na niou xalate ci.Fi gua xamené mome leu rewu Senegaal tolu ci Jangorëy Koronaa dafa mengo ci fukki fan ak juroom ci guinaw fi rew mi di Pays-Bas Tolone,wayé xayma gogu boye seteu juroom ñaari faney le guir rew pays-bas té Senegaal fukki fan ak juroom ñeet. Lim yi aju ci rewu Pays-Bas ñuguike jeulé ci Wikipedia: https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_the_Netherlands")
df_nl = pd.read_csv("df_nl.csv")
plt.figure(figsize=(16,10))
plt.plot(df_nl['Netherlands'], linestyle="--", linewidth=5, label="Pays-Bas")
plt.plot(df_nl['Senegal'],label="Sénégal", linewidth=5)
plt.figtext(.5,.9,'Evolution des cas au Sénégal et aux Pays-Bas', fontsize=30, ha='center')
plt.legend()
st.pyplot(plt)
# IV. Contamination
| st.markdown("---")
st.subheader("Tassarok Jangorogui")
st.write("Ñugui xamé ñeneu ñu jeulé Jangoroji ci ñu jugué bimeu rew, ci niit ñu feebar yigua xamené ño waleu ñeni niit.Limu ñigua xamné ño ameu Jangoroji té jeuléko ci biir rewmi, moye waleu gui geuna ragalu ci walanté Jangoroji..")
| random_line_split |