file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
jsonast.go | // oneOf:
// - External ARM resources
// oneOf:
// allOf:
// $ref: {{ base resource for ARM specific stuff like locks, deployments, etc }}
// oneOf:
// - ARM specific resources. I'm not 100% sure why...
//
// allOf acts like composition which composites each schema from the child oneOf with the base reference from allOf.
func (scanner *SchemaScanner) GenerateDefinitions(ctx context.Context, schema *gojsonschema.SubSchema, opts ...BuilderOption) ([]astmodel.TypeDefiner, error) {
ctx, span := tab.StartSpan(ctx, "GenerateDefinitions")
defer span.End()
for _, opt := range opts {
if err := opt(scanner); err != nil {
return nil, err
}
}
// get initial topic from ID and Title:
url := schema.ID.GetUrl()
if schema.Title == nil {
return nil, fmt.Errorf("Given schema has no Title")
}
rootName := *schema.Title
rootGroup, err := groupOf(url)
if err != nil {
return nil, fmt.Errorf("Unable to extract group for schema: %w", err)
}
rootVersion, err := versionOf(url)
if err != nil {
return nil, fmt.Errorf("Unable to extract version for schema: %w", err)
}
rootPackage := astmodel.NewLocalPackageReference(
scanner.idFactory.CreateGroupName(rootGroup),
scanner.idFactory.CreatePackageNameFromVersion(rootVersion))
rootTypeName := astmodel.NewTypeName(*rootPackage, rootName)
_, err = generateDefinitionsFor(ctx, scanner, rootTypeName, false, url, schema)
if err != nil {
return nil, err
}
// produce the results
var defs []astmodel.TypeDefiner
for _, def := range scanner.definitions {
defs = append(defs, def)
}
return defs, nil
}
// DefaultTypeHandlers will create a default map of JSONType to AST transformers
func DefaultTypeHandlers() map[SchemaType]TypeHandler {
return map[SchemaType]TypeHandler{
Array: arrayHandler,
OneOf: oneOfHandler,
AnyOf: anyOfHandler,
AllOf: allOfHandler,
Ref: refHandler,
Object: objectHandler,
Enum: enumHandler,
String: fixedTypeHandler(astmodel.StringType, "string"),
Int: fixedTypeHandler(astmodel.IntType, "int"),
Number: fixedTypeHandler(astmodel.FloatType, "number"),
Bool: fixedTypeHandler(astmodel.BoolType, "bool"),
}
}
func enumHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
_, span := tab.StartSpan(ctx, "enumHandler")
defer span.End()
// Default to a string base type
baseType := astmodel.StringType
for _, t := range []SchemaType{Bool, Int, Number, String} {
if schema.Types.Contains(string(t)) {
bt, err := getPrimitiveType(t)
if err != nil {
return nil, err
}
baseType = bt
}
}
var values []astmodel.EnumValue
for _, v := range schema.Enum {
id := scanner.idFactory.CreateIdentifier(v, astmodel.Exported)
values = append(values, astmodel.EnumValue{Identifier: id, Value: v})
}
enumType := astmodel.NewEnumType(baseType, values)
return enumType, nil
}
func fixedTypeHandler(typeToReturn astmodel.Type, handlerName string) TypeHandler {
return func(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
_, span := tab.StartSpan(ctx, handlerName+"Handler")
defer span.End()
return typeToReturn, nil
}
}
func objectHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
ctx, span := tab.StartSpan(ctx, "objectHandler")
defer span.End()
fields, err := getFields(ctx, scanner, schema)
if err != nil {
return nil, err
}
// if we _only_ have an 'additionalProperties' field, then we are making
// a dictionary-like type, and we won't generate a struct; instead, we
// will just use the 'additionalProperties' type directly
if len(fields) == 1 && fields[0].FieldName() == "additionalProperties" {
return fields[0].FieldType(), nil
}
structDefinition := astmodel.NewStructType(fields...)
return structDefinition, nil
}
func generateFieldDefinition(ctx context.Context, scanner *SchemaScanner, prop *gojsonschema.SubSchema) (*astmodel.FieldDefinition, error) {
fieldName := scanner.idFactory.CreateFieldName(prop.Property, astmodel.Exported)
schemaType, err := getSubSchemaType(prop)
if _, ok := err.(*UnknownSchemaError); ok {
// if we don't know the type, we still need to provide the property, we will just provide open interface
field := astmodel.NewFieldDefinition(fieldName, prop.Property, astmodel.AnyType)
return field, nil
}
if err != nil {
return nil, err
}
propType, err := scanner.RunHandler(ctx, schemaType, prop)
if _, ok := err.(*UnknownSchemaError); ok {
// if we don't know the type, we still need to provide the property, we will just provide open interface
field := astmodel.NewFieldDefinition(fieldName, prop.Property, astmodel.AnyType)
return field, nil
}
if err != nil {
return nil, err
}
field := astmodel.NewFieldDefinition(fieldName, prop.Property, propType)
return field, nil
}
func getFields(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) ([]*astmodel.FieldDefinition, error) {
ctx, span := tab.StartSpan(ctx, "getFields")
defer span.End()
var fields []*astmodel.FieldDefinition
for _, prop := range schema.PropertiesChildren {
fieldDefinition, err := generateFieldDefinition(ctx, scanner, prop)
if err != nil |
// add documentation
fieldDefinition = fieldDefinition.WithDescription(prop.Description)
// add validations
isRequired := false
for _, required := range schema.Required {
if prop.Property == required {
isRequired = true
break
}
}
if isRequired {
fieldDefinition = fieldDefinition.MakeRequired()
} else {
fieldDefinition = fieldDefinition.MakeOptional()
}
fields = append(fields, fieldDefinition)
}
// see: https://json-schema.org/understanding-json-schema/reference/object.html#properties
if schema.AdditionalProperties == nil {
// if not specified, any additional properties are allowed (TODO: tell all Azure teams this fact and get them to update their API definitions)
// for now we aren't following the spec 100% as it pollutes the generated code
// only generate this field if there are no other fields:
if len(fields) == 0 {
// TODO: for JSON serialization this needs to be unpacked into "parent"
additionalPropsField := astmodel.NewFieldDefinition("additionalProperties", "additionalProperties", astmodel.NewStringMapType(astmodel.AnyType))
fields = append(fields, additionalPropsField)
}
} else if schema.AdditionalProperties != false {
// otherwise, if not false then it is a type for all additional fields
// TODO: for JSON serialization this needs to be unpacked into "parent"
additionalPropsType, err := scanner.RunHandlerForSchema(ctx, schema.AdditionalProperties.(*gojsonschema.SubSchema))
if err != nil {
return nil, err
}
additionalPropsField := astmodel.NewFieldDefinition(astmodel.FieldName("additionalProperties"), "additionalProperties", astmodel.NewStringMapType(additionalPropsType))
fields = append(fields, additionalPropsField)
}
return fields, nil
}
func refHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
ctx, span := tab.StartSpan(ctx, "refHandler")
defer span.End()
url := schema.Ref.GetUrl()
if url.Fragment == expressionFragment {
// skip expressions
return nil, nil
}
// make a new topic based on the ref URL
name, err := objectTypeOf(url)
if err != nil {
return nil, err
}
group, err := groupOf(url)
if err != nil {
return nil, err
}
version, err := versionOf(url)
if err != nil {
return nil, err
}
isResource := isResource(url)
// produce a usable name:
typeName := astmodel.NewTypeName(
*astmodel.NewLocalPackageReference(
scanner.idFactory.CreateGroupName(group),
scanner.idFactory.CreatePackageNameFromVersion(version)),
scanner.idFactory.CreateIdentifier(name, astmodel.Exported))
return generateDefinitionsFor(ctx, scanner, typeName, isResource, url, schema.RefSchema)
}
func generateDefinitionsFor(ctx context.Context, scanner *SchemaScanner, typeName *astmodel.TypeName, isResource bool, url *url.URL, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
| {
return nil, err
} | conditional_block |
jsonast.go | }}
// oneOf:
// - External ARM resources
// oneOf:
// allOf:
// $ref: {{ base resource for ARM specific stuff like locks, deployments, etc }}
// oneOf:
// - ARM specific resources. I'm not 100% sure why...
//
// allOf acts like composition which composites each schema from the child oneOf with the base reference from allOf.
func (scanner *SchemaScanner) GenerateDefinitions(ctx context.Context, schema *gojsonschema.SubSchema, opts ...BuilderOption) ([]astmodel.TypeDefiner, error) {
ctx, span := tab.StartSpan(ctx, "GenerateDefinitions")
defer span.End()
for _, opt := range opts {
if err := opt(scanner); err != nil {
return nil, err
}
}
// get initial topic from ID and Title:
url := schema.ID.GetUrl()
if schema.Title == nil {
return nil, fmt.Errorf("Given schema has no Title")
}
rootName := *schema.Title
rootGroup, err := groupOf(url)
if err != nil {
return nil, fmt.Errorf("Unable to extract group for schema: %w", err)
}
rootVersion, err := versionOf(url)
if err != nil {
return nil, fmt.Errorf("Unable to extract version for schema: %w", err)
}
rootPackage := astmodel.NewLocalPackageReference(
scanner.idFactory.CreateGroupName(rootGroup),
scanner.idFactory.CreatePackageNameFromVersion(rootVersion))
rootTypeName := astmodel.NewTypeName(*rootPackage, rootName)
_, err = generateDefinitionsFor(ctx, scanner, rootTypeName, false, url, schema)
if err != nil {
return nil, err
}
// produce the results
var defs []astmodel.TypeDefiner
for _, def := range scanner.definitions {
defs = append(defs, def)
}
return defs, nil
}
// DefaultTypeHandlers will create a default map of JSONType to AST transformers
func DefaultTypeHandlers() map[SchemaType]TypeHandler {
return map[SchemaType]TypeHandler{
Array: arrayHandler,
OneOf: oneOfHandler,
AnyOf: anyOfHandler,
AllOf: allOfHandler,
Ref: refHandler,
Object: objectHandler,
Enum: enumHandler,
String: fixedTypeHandler(astmodel.StringType, "string"),
Int: fixedTypeHandler(astmodel.IntType, "int"),
Number: fixedTypeHandler(astmodel.FloatType, "number"),
Bool: fixedTypeHandler(astmodel.BoolType, "bool"),
}
}
func enumHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
_, span := tab.StartSpan(ctx, "enumHandler")
defer span.End()
// Default to a string base type
baseType := astmodel.StringType
for _, t := range []SchemaType{Bool, Int, Number, String} {
if schema.Types.Contains(string(t)) {
bt, err := getPrimitiveType(t)
if err != nil {
return nil, err
}
baseType = bt
}
}
var values []astmodel.EnumValue
for _, v := range schema.Enum {
id := scanner.idFactory.CreateIdentifier(v, astmodel.Exported)
values = append(values, astmodel.EnumValue{Identifier: id, Value: v})
}
enumType := astmodel.NewEnumType(baseType, values)
return enumType, nil
}
func fixedTypeHandler(typeToReturn astmodel.Type, handlerName string) TypeHandler {
return func(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
_, span := tab.StartSpan(ctx, handlerName+"Handler")
defer span.End()
return typeToReturn, nil
}
}
func objectHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
ctx, span := tab.StartSpan(ctx, "objectHandler")
defer span.End()
fields, err := getFields(ctx, scanner, schema)
if err != nil {
return nil, err
}
// if we _only_ have an 'additionalProperties' field, then we are making
// a dictionary-like type, and we won't generate a struct; instead, we
// will just use the 'additionalProperties' type directly
if len(fields) == 1 && fields[0].FieldName() == "additionalProperties" {
return fields[0].FieldType(), nil
}
structDefinition := astmodel.NewStructType(fields...)
return structDefinition, nil
}
func generateFieldDefinition(ctx context.Context, scanner *SchemaScanner, prop *gojsonschema.SubSchema) (*astmodel.FieldDefinition, error) {
fieldName := scanner.idFactory.CreateFieldName(prop.Property, astmodel.Exported)
schemaType, err := getSubSchemaType(prop)
if _, ok := err.(*UnknownSchemaError); ok {
// if we don't know the type, we still need to provide the property, we will just provide open interface
field := astmodel.NewFieldDefinition(fieldName, prop.Property, astmodel.AnyType)
return field, nil
}
if err != nil {
return nil, err
}
propType, err := scanner.RunHandler(ctx, schemaType, prop)
if _, ok := err.(*UnknownSchemaError); ok {
// if we don't know the type, we still need to provide the property, we will just provide open interface
field := astmodel.NewFieldDefinition(fieldName, prop.Property, astmodel.AnyType)
return field, nil
}
if err != nil {
return nil, err
}
field := astmodel.NewFieldDefinition(fieldName, prop.Property, propType)
return field, nil
}
func getFields(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) ([]*astmodel.FieldDefinition, error) {
ctx, span := tab.StartSpan(ctx, "getFields")
defer span.End()
var fields []*astmodel.FieldDefinition
for _, prop := range schema.PropertiesChildren {
fieldDefinition, err := generateFieldDefinition(ctx, scanner, prop)
if err != nil {
return nil, err
}
// add documentation
fieldDefinition = fieldDefinition.WithDescription(prop.Description)
// add validations
isRequired := false
for _, required := range schema.Required {
if prop.Property == required { | if isRequired {
fieldDefinition = fieldDefinition.MakeRequired()
} else {
fieldDefinition = fieldDefinition.MakeOptional()
}
fields = append(fields, fieldDefinition)
}
// see: https://json-schema.org/understanding-json-schema/reference/object.html#properties
if schema.AdditionalProperties == nil {
// if not specified, any additional properties are allowed (TODO: tell all Azure teams this fact and get them to update their API definitions)
// for now we aren't following the spec 100% as it pollutes the generated code
// only generate this field if there are no other fields:
if len(fields) == 0 {
// TODO: for JSON serialization this needs to be unpacked into "parent"
additionalPropsField := astmodel.NewFieldDefinition("additionalProperties", "additionalProperties", astmodel.NewStringMapType(astmodel.AnyType))
fields = append(fields, additionalPropsField)
}
} else if schema.AdditionalProperties != false {
// otherwise, if not false then it is a type for all additional fields
// TODO: for JSON serialization this needs to be unpacked into "parent"
additionalPropsType, err := scanner.RunHandlerForSchema(ctx, schema.AdditionalProperties.(*gojsonschema.SubSchema))
if err != nil {
return nil, err
}
additionalPropsField := astmodel.NewFieldDefinition(astmodel.FieldName("additionalProperties"), "additionalProperties", astmodel.NewStringMapType(additionalPropsType))
fields = append(fields, additionalPropsField)
}
return fields, nil
}
func refHandler(ctx context.Context, scanner *SchemaScanner, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
ctx, span := tab.StartSpan(ctx, "refHandler")
defer span.End()
url := schema.Ref.GetUrl()
if url.Fragment == expressionFragment {
// skip expressions
return nil, nil
}
// make a new topic based on the ref URL
name, err := objectTypeOf(url)
if err != nil {
return nil, err
}
group, err := groupOf(url)
if err != nil {
return nil, err
}
version, err := versionOf(url)
if err != nil {
return nil, err
}
isResource := isResource(url)
// produce a usable name:
typeName := astmodel.NewTypeName(
*astmodel.NewLocalPackageReference(
scanner.idFactory.CreateGroupName(group),
scanner.idFactory.CreatePackageNameFromVersion(version)),
scanner.idFactory.CreateIdentifier(name, astmodel.Exported))
return generateDefinitionsFor(ctx, scanner, typeName, isResource, url, schema.RefSchema)
}
func generateDefinitionsFor(ctx context.Context, scanner *SchemaScanner, typeName *astmodel.TypeName, isResource bool, url *url.URL, schema *gojsonschema.SubSchema) (astmodel.Type, error) {
s | isRequired = true
break
}
}
| random_line_split |
main.js | out blurb, change html, change active state, fade in
$(".pill").on('click', function(event) {
event.preventDefault();
// deactivate all pills
deactivateAll();
// activate THIS one
$(this).addClass("active");
// fade out title and text, change each mid fade
var id = checkId($(this).attr("id"));
$("#title").fadeOut(150, function() {
$("#title").text(id[0]);
});
$("#text").fadeOut(150, function() {
$("#text").text(id[1]);
});
// fade back in
$("#title").fadeIn();
$("#text").fadeIn();
});
// Shut off all menu pills before reactivating selected one
function deactivateAll() {
$.each($(".pill"), function() {
$(this).removeClass("active");
});
}
// Return id of selected pill
function checkId(id) {
if(id === "about") {
return ["About Us", aboutText];
}
else if(id === "links") {
return ["Links", linksText];
}
else if(id === "contact") {
return ["Contact", contactText];
}
else if(id === "legal") {
return ["Legal", legalText];
}
}
// ----------- QUESTION JS -------------
var solved = false;
// Hint Button Click Listener
$("#hintButton").on('click', function(event) {
if(solved != true) {
event.preventDefault();
$("#resultBoxTitle").addClass("text-info");
$("#resultBoxTitle").text("Hint:")
$("#hintBox").fadeTo(500, 1);
}
});
// Submit Button Click Listener
// $("#questionSubmit").on('click', function() {
// if(solved != true) {
// checkAnswer();
// }
// });
// Next Page Button Click Listener.
// $("#nextPageButton").on('click', function() {
// if(solved == true) {
// nextPage();
// }
// });
// Check if answer is correct or not, return true if correct, false if wrong
function checkAnswer() |
// Answer is correct, highlight answer label green, change resultBox h4 text to Green "Correct!", fadeTo box
function correct(label) {
$(label).css("background", "rgba(88, 217, 88, 0.6)");
$("#resultBoxTitle").css("color", "green");
$("#resultBoxTitle").removeClass("text-info");
$("#resultBoxTitle").text("Correct!");
$("#resultBoxText").text("");
$("#resultBox").fadeTo(500, 1);
// reveal "next question" button that has "href" to next page (PAGINATE)
$("#nextPageButton").delay(700).fadeTo(500, 1);
}
// Answer is wrong, highlight answer label red, Change resultBox elements, show result box
function wrong(label) {
$(label).css("background", "rgba(238, 21, 21, 0.52)");
$("#resultBoxTitle").css("color", "rgba(238, 21, 21, 0.52)");
$("#resultBoxTitle").removeClass("text-info");
$("#resultBoxTitle").text("Incorrect: ");
$("#resultBox").fadeTo(500, 1);
}
// If question is solved, get current url, increment last path element, and redirect
// function nextPage(){
// var url = window.location.pathname,
// parts = url.split("/"),
// questionNum = parts[parts.length-2],
// newQuestion = Number(questionNum) + 1,
// url = "http://127.0.0.1:8000/question/" + String(newQuestion);
// window.location.replace(url);
// }
// ----------- Profile JS -------------
// Statistics Charts
// https://mdbootstrap.com/docs/jquery/javascript/charts/
// Right/Wrong Answers
var accuracy = JSON.parse(document.getElementById('accuracy').textContent);
var ctxP = document.getElementById("accuracyPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'pie',
data: {
labels: ["Correct", "Incorrect"],
datasets: [{
data: [accuracy['correctAnswers'], accuracy['wrongAnswers']],
backgroundColor: ["#18c45a", "#F7464A"],
hoverBackgroundColor: ["#15d14e", "#FF5A5E"]
}]
},
options: {
responsive: true
}
});
// How many Questions answered from each test
var subject_distro = JSON.parse(document.getElementById('subject_distribution').textContent);
var ctxP = document.getElementById("subjectDistroPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'pie',
data: {
labels: ["Math", "Reading", "Science"],
datasets: [{
data: [subject_distro["Math_Distro"], subject_distro["Reading_Distro"], subject_distro["Science_Distro"]],
backgroundColor: ["#5860a6", "#b02a2a", "#4ec267"],
hoverBackgroundColor: ["#7981c7", "#d16464", "#6bd682"]
}]
},
options: {
responsive: true
}
});
// How many Questions answered from each test
var test_distro = JSON.parse(document.getElementById('test_distribution').textContent);
var ctxP = document.getElementById("testDistroPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'pie',
data: {
labels: ["ACT", "SAT", "GRE"],
datasets: [{
data: [test_distro["ACT_Distro"], test_distro["SAT_Distro"], test_distro["GRE_Distro"]],
backgroundColor: ["#4542f5", "#f542cb", "#f5de33"],
hoverBackgroundColor: ["#4272f5", "#f069f0", "#ffec61"]
}]
},
options: {
responsive: true
}
});
// ------------------------- STATS PAGE ------------------------- //
// --------------- FILTER BY TEST TYPE --------------- //
// Right/Wrong Answers
var ctxP = document.getElementById("accuracyPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'pie',
data: {
labels: ["Correct", "Incorrect"],
datasets: [{
data: [12, 15],
backgroundColor: ['rgba(105, 0, 132, .2)', 'rgba(0, 137, 132, .2)'],
borderColor: ['rgba(200, 99, 132, .7)', 'rgba(0, 10, 130, .7)']
}]
},
options: {
responsive: true
}
});
// How many Questions answered from each test
var ctxP = document.getElementById("subjectDistroPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'bar',
data: {
labels: ["Math", "Reading", "Science"],
datasets: [{
label: '# of Questions Answered',
data: [15, 4, 22],
backgroundColor: ['rgba(105, 0, 132, .2)', 'rgba(0, 137, 132, .2)', 'rgba(255, 99, 132, 0.2)'],
borderColor: ['rgba(200, 99, 132, .7)', 'rgba(0, 10, 130, .7)', 'rgba(255,99,132,1)'],
borderWidth: 1
}]
},
options: {
responsive: true,
scales: {
yAxes: [{
ticks: {
beginAtZero: true
}
}]
}
}
});
// How many Questions answered from each test
var ctxP = document.getElementById("testDistroPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'radar',
data: {
labels: ["ACT", "SAT", "GRE"],
datasets: [{
label: "# of Questions Answered",
data: [22, 42, 15],
backgroundColor: ['rgba(105, 0, 132, .2)', 'rgba(0, 137, 132, .2)'],
borderColor: ['rgba(200, 99, 132, .7)', 'rgba(0, 10, 130, .7)']
}]
},
options: {
responsive: true
| {
var radios = $("input")
var labels = $("label")
for(var i = 0; i < radios.length; i++){
if(radios[i].checked) {
if ($(radios[i]).hasClass("correct")) {
correct(labels[i]);
solved = true;
} else {
wrong(labels[i]);
}
}
}
} | identifier_body |
main.js | Fade out blurb, change html, change active state, fade in
$(".pill").on('click', function(event) {
event.preventDefault();
// deactivate all pills
deactivateAll();
// activate THIS one
$(this).addClass("active");
// fade out title and text, change each mid fade
var id = checkId($(this).attr("id"));
$("#title").fadeOut(150, function() {
$("#title").text(id[0]);
});
$("#text").fadeOut(150, function() {
$("#text").text(id[1]);
});
// fade back in
$("#title").fadeIn();
$("#text").fadeIn();
});
// Shut off all menu pills before reactivating selected one
function deactivateAll() {
$.each($(".pill"), function() {
$(this).removeClass("active");
});
}
// Return id of selected pill
function checkId(id) {
if(id === "about") {
return ["About Us", aboutText];
}
else if(id === "links") {
return ["Links", linksText];
}
else if(id === "contact") {
return ["Contact", contactText];
}
else if(id === "legal") {
return ["Legal", legalText];
}
}
// ----------- QUESTION JS -------------
var solved = false;
// Hint Button Click Listener
$("#hintButton").on('click', function(event) {
if(solved != true) {
event.preventDefault();
$("#resultBoxTitle").addClass("text-info");
$("#resultBoxTitle").text("Hint:")
$("#hintBox").fadeTo(500, 1);
}
});
// Submit Button Click Listener
// $("#questionSubmit").on('click', function() {
// if(solved != true) {
// checkAnswer();
// }
// });
// Next Page Button Click Listener.
// $("#nextPageButton").on('click', function() {
// if(solved == true) {
// nextPage();
// }
// });
// Check if answer is correct or not, return true if correct, false if wrong
function checkAnswer() {
var radios = $("input")
var labels = $("label")
for(var i = 0; i < radios.length; i++){
if(radios[i].checked) {
if ($(radios[i]).hasClass("correct")) {
correct(labels[i]);
solved = true;
} else {
wrong(labels[i]);
}
}
}
}
// Answer is correct, highlight answer label green, change resultBox h4 text to Green "Correct!", fadeTo box
function | (label) {
$(label).css("background", "rgba(88, 217, 88, 0.6)");
$("#resultBoxTitle").css("color", "green");
$("#resultBoxTitle").removeClass("text-info");
$("#resultBoxTitle").text("Correct!");
$("#resultBoxText").text("");
$("#resultBox").fadeTo(500, 1);
// reveal "next question" button that has "href" to next page (PAGINATE)
$("#nextPageButton").delay(700).fadeTo(500, 1);
}
// Answer is wrong, highlight answer label red, Change resultBox elements, show result box
function wrong(label) {
$(label).css("background", "rgba(238, 21, 21, 0.52)");
$("#resultBoxTitle").css("color", "rgba(238, 21, 21, 0.52)");
$("#resultBoxTitle").removeClass("text-info");
$("#resultBoxTitle").text("Incorrect: ");
$("#resultBox").fadeTo(500, 1);
}
// If question is solved, get current url, increment last path element, and redirect
// function nextPage(){
// var url = window.location.pathname,
// parts = url.split("/"),
// questionNum = parts[parts.length-2],
// newQuestion = Number(questionNum) + 1,
// url = "http://127.0.0.1:8000/question/" + String(newQuestion);
// window.location.replace(url);
// }
// ----------- Profile JS -------------
// Statistics Charts
// https://mdbootstrap.com/docs/jquery/javascript/charts/
// Right/Wrong Answers
var accuracy = JSON.parse(document.getElementById('accuracy').textContent);
var ctxP = document.getElementById("accuracyPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'pie',
data: {
labels: ["Correct", "Incorrect"],
datasets: [{
data: [accuracy['correctAnswers'], accuracy['wrongAnswers']],
backgroundColor: ["#18c45a", "#F7464A"],
hoverBackgroundColor: ["#15d14e", "#FF5A5E"]
}]
},
options: {
responsive: true
}
});
// How many Questions answered from each test
var subject_distro = JSON.parse(document.getElementById('subject_distribution').textContent);
var ctxP = document.getElementById("subjectDistroPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'pie',
data: {
labels: ["Math", "Reading", "Science"],
datasets: [{
data: [subject_distro["Math_Distro"], subject_distro["Reading_Distro"], subject_distro["Science_Distro"]],
backgroundColor: ["#5860a6", "#b02a2a", "#4ec267"],
hoverBackgroundColor: ["#7981c7", "#d16464", "#6bd682"]
}]
},
options: {
responsive: true
}
});
// How many Questions answered from each test
var test_distro = JSON.parse(document.getElementById('test_distribution').textContent);
var ctxP = document.getElementById("testDistroPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'pie',
data: {
labels: ["ACT", "SAT", "GRE"],
datasets: [{
data: [test_distro["ACT_Distro"], test_distro["SAT_Distro"], test_distro["GRE_Distro"]],
backgroundColor: ["#4542f5", "#f542cb", "#f5de33"],
hoverBackgroundColor: ["#4272f5", "#f069f0", "#ffec61"]
}]
},
options: {
responsive: true
}
});
// ------------------------- STATS PAGE ------------------------- //
// --------------- FILTER BY TEST TYPE --------------- //
// Right/Wrong Answers
var ctxP = document.getElementById("accuracyPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'pie',
data: {
labels: ["Correct", "Incorrect"],
datasets: [{
data: [12, 15],
backgroundColor: ['rgba(105, 0, 132, .2)', 'rgba(0, 137, 132, .2)'],
borderColor: ['rgba(200, 99, 132, .7)', 'rgba(0, 10, 130, .7)']
}]
},
options: {
responsive: true
}
});
// How many Questions answered from each test
var ctxP = document.getElementById("subjectDistroPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'bar',
data: {
labels: ["Math", "Reading", "Science"],
datasets: [{
label: '# of Questions Answered',
data: [15, 4, 22],
backgroundColor: ['rgba(105, 0, 132, .2)', 'rgba(0, 137, 132, .2)', 'rgba(255, 99, 132, 0.2)'],
borderColor: ['rgba(200, 99, 132, .7)', 'rgba(0, 10, 130, .7)', 'rgba(255,99,132,1)'],
borderWidth: 1
}]
},
options: {
responsive: true,
scales: {
yAxes: [{
ticks: {
beginAtZero: true
}
}]
}
}
});
// How many Questions answered from each test
var ctxP = document.getElementById("testDistroPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'radar',
data: {
labels: ["ACT", "SAT", "GRE"],
datasets: [{
label: "# of Questions Answered",
data: [22, 42, 15],
backgroundColor: ['rgba(105, 0, 132, .2)', 'rgba(0, 137, 132, .2)'],
borderColor: ['rgba(200, 99, 132, .7)', 'rgba(0, 10, 130, .7)']
}]
},
options: {
responsive: true
| correct | identifier_name |
main.js | else if(id === "links") {
return ["Links", linksText];
}
else if(id === "contact") {
return ["Contact", contactText];
}
else if(id === "legal") {
return ["Legal", legalText];
}
}
// ----------- QUESTION JS -------------
var solved = false;
// Hint Button Click Listener
$("#hintButton").on('click', function(event) {
if(solved != true) {
event.preventDefault();
$("#resultBoxTitle").addClass("text-info");
$("#resultBoxTitle").text("Hint:")
$("#hintBox").fadeTo(500, 1);
}
});
// Submit Button Click Listener
// $("#questionSubmit").on('click', function() {
// if(solved != true) {
// checkAnswer();
// }
// });
// Next Page Button Click Listener.
// $("#nextPageButton").on('click', function() {
// if(solved == true) {
// nextPage();
// }
// });
// Check if answer is correct or not, return true if correct, false if wrong
function checkAnswer() {
var radios = $("input")
var labels = $("label")
for(var i = 0; i < radios.length; i++){
if(radios[i].checked) {
if ($(radios[i]).hasClass("correct")) {
correct(labels[i]);
solved = true;
} else {
wrong(labels[i]);
}
}
}
}
// Answer is correct, highlight answer label green, change resultBox h4 text to Green "Correct!", fadeTo box
function correct(label) {
$(label).css("background", "rgba(88, 217, 88, 0.6)");
$("#resultBoxTitle").css("color", "green");
$("#resultBoxTitle").removeClass("text-info");
$("#resultBoxTitle").text("Correct!");
$("#resultBoxText").text("");
$("#resultBox").fadeTo(500, 1);
// reveal "next question" button that has "href" to next page (PAGINATE)
$("#nextPageButton").delay(700).fadeTo(500, 1);
}
// Answer is wrong, highlight answer label red, Change resultBox elements, show result box
function wrong(label) {
$(label).css("background", "rgba(238, 21, 21, 0.52)");
$("#resultBoxTitle").css("color", "rgba(238, 21, 21, 0.52)");
$("#resultBoxTitle").removeClass("text-info");
$("#resultBoxTitle").text("Incorrect: ");
$("#resultBox").fadeTo(500, 1);
}
// If question is solved, get current url, increment last path element, and redirect
// function nextPage(){
// var url = window.location.pathname,
// parts = url.split("/"),
// questionNum = parts[parts.length-2],
// newQuestion = Number(questionNum) + 1,
// url = "http://127.0.0.1:8000/question/" + String(newQuestion);
// window.location.replace(url);
// }
// ----------- Profile JS -------------
// Statistics Charts
// https://mdbootstrap.com/docs/jquery/javascript/charts/
// Right/Wrong Answers
var accuracy = JSON.parse(document.getElementById('accuracy').textContent);
var ctxP = document.getElementById("accuracyPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'pie',
data: {
labels: ["Correct", "Incorrect"],
datasets: [{
data: [accuracy['correctAnswers'], accuracy['wrongAnswers']],
backgroundColor: ["#18c45a", "#F7464A"],
hoverBackgroundColor: ["#15d14e", "#FF5A5E"]
}]
},
options: {
responsive: true
}
});
// How many Questions answered from each test
var subject_distro = JSON.parse(document.getElementById('subject_distribution').textContent);
var ctxP = document.getElementById("subjectDistroPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'pie',
data: {
labels: ["Math", "Reading", "Science"],
datasets: [{
data: [subject_distro["Math_Distro"], subject_distro["Reading_Distro"], subject_distro["Science_Distro"]],
backgroundColor: ["#5860a6", "#b02a2a", "#4ec267"],
hoverBackgroundColor: ["#7981c7", "#d16464", "#6bd682"]
}]
},
options: {
responsive: true
}
});
// How many Questions answered from each test
var test_distro = JSON.parse(document.getElementById('test_distribution').textContent);
var ctxP = document.getElementById("testDistroPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'pie',
data: {
labels: ["ACT", "SAT", "GRE"],
datasets: [{
data: [test_distro["ACT_Distro"], test_distro["SAT_Distro"], test_distro["GRE_Distro"]],
backgroundColor: ["#4542f5", "#f542cb", "#f5de33"],
hoverBackgroundColor: ["#4272f5", "#f069f0", "#ffec61"]
}]
},
options: {
responsive: true
}
});
// ------------------------- STATS PAGE ------------------------- //
// --------------- FILTER BY TEST TYPE --------------- //
// Right/Wrong Answers
var ctxP = document.getElementById("accuracyPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'pie',
data: {
labels: ["Correct", "Incorrect"],
datasets: [{
data: [12, 15],
backgroundColor: ['rgba(105, 0, 132, .2)', 'rgba(0, 137, 132, .2)'],
borderColor: ['rgba(200, 99, 132, .7)', 'rgba(0, 10, 130, .7)']
}]
},
options: {
responsive: true
}
});
// How many Questions answered from each test
var ctxP = document.getElementById("subjectDistroPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'bar',
data: {
labels: ["Math", "Reading", "Science"],
datasets: [{
label: '# of Questions Answered',
data: [15, 4, 22],
backgroundColor: ['rgba(105, 0, 132, .2)', 'rgba(0, 137, 132, .2)', 'rgba(255, 99, 132, 0.2)'],
borderColor: ['rgba(200, 99, 132, .7)', 'rgba(0, 10, 130, .7)', 'rgba(255,99,132,1)'],
borderWidth: 1
}]
},
options: {
responsive: true,
scales: {
yAxes: [{
ticks: {
beginAtZero: true
}
}]
}
}
});
// How many Questions answered from each test
var ctxP = document.getElementById("testDistroPieChart").getContext('2d');
var myPieChart = new Chart(ctxP, {
type: 'radar',
data: {
labels: ["ACT", "SAT", "GRE"],
datasets: [{
label: "# of Questions Answered",
data: [22, 42, 15],
backgroundColor: ['rgba(105, 0, 132, .2)', 'rgba(0, 137, 132, .2)'],
borderColor: ['rgba(200, 99, 132, .7)', 'rgba(0, 10, 130, .7)']
}]
},
options: {
responsive: true
}
});
// --------------- FILTER BY TEST TYPE --------------- //
// Accuracy Over Time Line Chart
var ctxL = document.getElementById("accuracyOverTimeChart").getContext('2d');
var myLineChart = new Chart(ctxL, {
type: 'line',
data: {
labels: ["January", "February", "March", "April", "May", "June", "July"],
datasets: [{
label: "My First dataset",
data: [65, 59, 80, 81, 56, 55, 40],
backgroundColor: ['rgba(105, 0, 132, .2)',
],
borderColor: ['rgba(200, 99, 132, .7)',
],
borderWidth: 2
}]
},
options: {
responsive: true
}
});
|
// Total Accuracy Pie Chart
var ctxP = document.getElementById("totalAccuracyPieChart").getContext('2d'); | random_line_split |
|
search.py | param word: String
:return: Stemmed word
"""
return stem(word)
def sanitize(text, stop_word_list):
"""
Reads a text, remove stop words, stem the words.
:param text: String
:return: List of words
"""
# convert the text into Unicode
text = unicode(text)
#print(type(text))
# replace dot with space
text = text.translate({ord("."): ord(" ")})
# replace dash with space
text = text.translate({ord("-"): ord(" ")})
# split the text on white-space
words = text.split()
sanitized_words = []
for w in words:
# ignore numbers
if w.isnumeric():
continue
# print("Word (Before Punctuation): " + w)
# remove punctuation
# Ref: https://stackoverflow.com/questions/265960/best-way-to-strip-punctuation-from-a-string-in-python
# w = w.translate(None, string.punctuation)
# The above method does not work for Unicode strings
# Ref: https://stackoverflow.com/questions/23175809/typeerror-translate-takes-one-argument-2-given-python#23306505
# print(type(w))
# replace punctuations with None
w = w.translate({ord(c): None for c in string.punctuation})
w = w.lower()
# print("Word (After Punctuation): "+w)
# Note: Remove stop-words before Stemming, or else the stop-word
# matching will not work.
# If the word is in Stop Word List
try:
i = stop_word_list.index(w.lower())
# skip further processing of word loop
# print("Stop Word Removed: "+w)
continue
except ValueError:
pass
w = stemm_word(w)
# hack, hack, hack
if w == '':
continue
# add the sanitized word into return list
sanitized_words.append(w)
return sanitized_words
def get_links(query_terms):
"""
Get all the links which contains the terms in the query string.
:param query_terms as list of strings
:return
"""
# the set of links all of which contains all the terms in the query string
final_links = None
for term in query_terms:
# get all links containing the term and put in a set
links = Set(index_data.get(term))
#print("\n\nQuery Term: %s" % term)
#print(links)
# special case for first iteration, because: empty & anything = empty
if final_links == None:
final_links = links
# take intersection of links set
final_links = final_links & links
#print(final_links)
# convert the Set to List and return
return list(final_links)
def rank_links(tf_idf_table, query_terms, links):
"""
Rank the list of given links in terms of relevance.
:param TF-IDF table
:param query_terms as list of strings
:param links List of URLs
:return List of URLs ranked
"""
tf = {}
for w in query_terms:
f = query_terms.count(w)
tf[w] = f
q_tf_idf = {}
for term in tf:
# if the query term is found in files
if tf_idf_table.has_key(term):
q_tf_idf[term] = tf.get(term) # * log(N/1)
else:
# if the query term is NOT found in files, set IDF to 0
q_tf_idf[term] = 0
# score of all docs for this query
doc_vals = {}
# Wiq denominator in CosSim
DWiq = 0
for t in tf_idf_table:
DWiq = q_tf_idf.get(t)
# if the term is not in query, ignore
if DWiq == None:
continue
#print("Term: %s \t\t Query TF-IDF: %d" % (t, q_tf_idf.get(t)))
idf_row = tf_idf_table.get(t)
# if the query term is in our corpus
if idf_row != None:
#print(idf_row)
# get the document frequency
df = float(len(idf_row))
#print("DF: %d" % (df))
# Wij denominator in CosSim
DWij = 0
# Numerator in CosSim
Njq = 0
# calculate values of each document specific
for doc in idf_row:
#print(doc)
# The "df" should not be processed
if doc == "df":
continue
# skip any link that are not relevant
try:
_ = links.index(doc)
except: |
#print("Doc ID: %s \tTF: %d" % (doc, idf_row.get(doc)))
DWij = idf_row.get(doc)
#Njq = q_tf_idf.get(t) * idf_row.get(doc)
if doc_vals.has_key(doc):
vals = doc_vals.get(doc)
vals["DWiq"] += pow(DWiq, 2)
vals["DWij"] += pow(DWij, 2)
vals["NWjq"] += DWij * DWiq
doc_vals[doc] = vals
else:
vals = {}
vals["DWiq"] = pow(DWiq, 2)
vals["DWij"] = pow(DWij, 2)
vals["NWjq"] = DWij * DWiq
doc_vals[doc] = vals
#print(doc_vals)
# Calculate the CosSim value
doc_score = {}
for doc in doc_vals:
#print(doc)
vals = doc_vals.get(doc)
#print(vals)
#n = vals.get("NWjq")
#d = float(pow(vals.get("DWij") * vals.get("DWiq"),0.5))
#print(n)
#print(d)
#print(float(n/float(d)))
doc_score[doc] = float(vals.get("NWjq"))/float(pow(vals.get("DWij") * vals.get("DWiq"),0.5))
#print(doc_score[doc])
#print(doc_score)
sorted_by_score = sorted(doc_score.items(), key=operator.itemgetter(1), reverse=True)
#print(sorted_by_score)
sorted_score = collections.OrderedDict()
for url, score in sorted_by_score:
sorted_score[url] = score
#print(sorted_score)
return sorted_score
def print_scores(ranked_list, links, topN=10):
"""
Takes an OrderdDict and print topN entries which are also in links
"""
n = topN
for url, score in ranked_list.items():
try:
_ = links.index(url)
print("Score: %f \t URL: %s" %(score, url))
n -= 1
if n <= 0:
break
except:
pass
def normalize_scores(scores):
"""
Normalize the scores so that their sum is equal to 1.
"""
#print(scores)
keys = scores.keys()
sum = 0.0
for k in keys:
#print("%06f\t" % scores.get(k)),
sum += scores.get(k)
if sum == 1.0:
return scores
new_scores = {}
for k in keys:
new_scores[k] = scores.get(k)/float(sum)
return new_scores
def calculate_pagerank_with_teleport(Graph, epsilon, iterations=50):
prev_score = None
score = None
iteration = 1
#print("No. of Nodes: %d" % len(Graph))
# Loop
while True:
#print("\nIteration: "+str(iteration))
iteration += 1
# first iteration
if score is None:
score = {}
no_of_nodes = len(Graph.keys())
for node in Graph:
score[node] = 1/float(no_of_nodes)
else:
# normal iterations
score = {}
for A in Graph:
#print("-"*10)
#print("Node: "+A)
# Reinitialize the score
score[A] = epsilon/float(no_of_nodes)
for B in Graph:
#print("Link from: "+B)
#print(Graph.get(B))
#print(Graph.get(B).index(A))
try:
_ = Graph.get(B).index(A)
#print(B+" points to "+A)
degree_B = len(Graph.get(B))
#print("Score: "+str(prev_score[B]))
#print("Degree: "+str(degree_B))
#print("Adding "+str(prev_score[B]/float(degree_B))+ " to "+str(score[A]))
score[A] += (1-epsilon) * prev_score[B]/float(degree_B)
#print("New score:"+str(score[A]))
except ValueError:
#print(A +" not in "+B)
pass
score[A] = round(score[A], ROUND_DIGITS)
#print("Before Normalization")
# | continue | random_line_split |
search.py |
def stemm_word(word):
"""
Use Porter stemmer to stem words.
:param word: String
:return: Stemmed word
"""
return stem(word)
def sanitize(text, stop_word_list):
"""
Reads a text, remove stop words, stem the words.
:param text: String
:return: List of words
"""
# convert the text into Unicode
text = unicode(text)
#print(type(text))
# replace dot with space
text = text.translate({ord("."): ord(" ")})
# replace dash with space
text = text.translate({ord("-"): ord(" ")})
# split the text on white-space
words = text.split()
sanitized_words = []
for w in words:
# ignore numbers
if w.isnumeric():
continue
# print("Word (Before Punctuation): " + w)
# remove punctuation
# Ref: https://stackoverflow.com/questions/265960/best-way-to-strip-punctuation-from-a-string-in-python
# w = w.translate(None, string.punctuation)
# The above method does not work for Unicode strings
# Ref: https://stackoverflow.com/questions/23175809/typeerror-translate-takes-one-argument-2-given-python#23306505
# print(type(w))
# replace punctuations with None
w = w.translate({ord(c): None for c in string.punctuation})
w = w.lower()
# print("Word (After Punctuation): "+w)
# Note: Remove stop-words before Stemming, or else the stop-word
# matching will not work.
# If the word is in Stop Word List
try:
i = stop_word_list.index(w.lower())
# skip further processing of word loop
# print("Stop Word Removed: "+w)
continue
except ValueError:
pass
w = stemm_word(w)
# hack, hack, hack
if w == '':
continue
# add the sanitized word into return list
sanitized_words.append(w)
return sanitized_words
def get_links(query_terms):
"""
Get all the links which contains the terms in the query string.
:param query_terms as list of strings
:return
"""
# the set of links all of which contains all the terms in the query string
final_links = None
for term in query_terms:
# get all links containing the term and put in a set
links = Set(index_data.get(term))
#print("\n\nQuery Term: %s" % term)
#print(links)
# special case for first iteration, because: empty & anything = empty
if final_links == None:
final_links = links
# take intersection of links set
final_links = final_links & links
#print(final_links)
# convert the Set to List and return
return list(final_links)
def rank_links(tf_idf_table, query_terms, links):
"""
Rank the list of given links in terms of relevance.
:param TF-IDF table
:param query_terms as list of strings
:param links List of URLs
:return List of URLs ranked
"""
tf = {}
for w in query_terms:
f = query_terms.count(w)
tf[w] = f
q_tf_idf = {}
for term in tf:
# if the query term is found in files
if tf_idf_table.has_key(term):
q_tf_idf[term] = tf.get(term) # * log(N/1)
else:
# if the query term is NOT found in files, set IDF to 0
q_tf_idf[term] = 0
# score of all docs for this query
doc_vals = {}
# Wiq denominator in CosSim
DWiq = 0
for t in tf_idf_table:
DWiq = q_tf_idf.get(t)
# if the term is not in query, ignore
if DWiq == None:
continue
#print("Term: %s \t\t Query TF-IDF: %d" % (t, q_tf_idf.get(t)))
idf_row = tf_idf_table.get(t)
# if the query term is in our corpus
if idf_row != None:
#print(idf_row)
# get the document frequency
df = float(len(idf_row))
#print("DF: %d" % (df))
# Wij denominator in CosSim
DWij = 0
# Numerator in CosSim
Njq = 0
# calculate values of each document specific
for doc in idf_row:
#print(doc)
# The "df" should not be processed
if doc == "df":
continue
# skip any link that are not relevant
try:
_ = links.index(doc)
except:
continue
#print("Doc ID: %s \tTF: %d" % (doc, idf_row.get(doc)))
DWij = idf_row.get(doc)
#Njq = q_tf_idf.get(t) * idf_row.get(doc)
if doc_vals.has_key(doc):
vals = doc_vals.get(doc)
vals["DWiq"] += pow(DWiq, 2)
vals["DWij"] += pow(DWij, 2)
vals["NWjq"] += DWij * DWiq
doc_vals[doc] = vals
else:
vals = {}
vals["DWiq"] = pow(DWiq, 2)
vals["DWij"] = pow(DWij, 2)
vals["NWjq"] = DWij * DWiq
doc_vals[doc] = vals
#print(doc_vals)
# Calculate the CosSim value
doc_score = {}
for doc in doc_vals:
#print(doc)
vals = doc_vals.get(doc)
#print(vals)
#n = vals.get("NWjq")
#d = float(pow(vals.get("DWij") * vals.get("DWiq"),0.5))
#print(n)
#print(d)
#print(float(n/float(d)))
doc_score[doc] = float(vals.get("NWjq"))/float(pow(vals.get("DWij") * vals.get("DWiq"),0.5))
#print(doc_score[doc])
#print(doc_score)
sorted_by_score = sorted(doc_score.items(), key=operator.itemgetter(1), reverse=True)
#print(sorted_by_score)
sorted_score = collections.OrderedDict()
for url, score in sorted_by_score:
sorted_score[url] = score
#print(sorted_score)
return sorted_score
def print_scores(ranked_list, links, topN=10):
"""
Takes an OrderdDict and print topN entries which are also in links
"""
n = topN
for url, score in ranked_list.items():
try:
_ = links.index(url)
print("Score: %f \t URL: %s" %(score, url))
n -= 1
if n <= 0:
break
except:
pass
def normalize_scores(scores):
"""
Normalize the scores so that their sum is equal to 1.
"""
#print(scores)
keys = scores.keys()
sum = 0.0
for k in keys:
#print("%06f\t" % scores.get(k)),
sum += scores.get(k)
if sum == 1.0:
return scores
new_scores = {}
for k in keys:
new_scores[k] = scores.get(k)/float(sum)
return new_scores
def calculate_pagerank_with_teleport(Graph, epsilon, iterations=50):
prev_score = None
score = None
iteration = 1
#print("No. of Nodes: %d" % len(Graph))
# Loop
while True:
#print("\nIteration: "+str(iteration))
iteration += 1
# first iteration
if score is None:
score = {}
no_of_nodes = len(Graph.keys())
for node in Graph:
score[node] = 1/float(no_of_nodes)
else:
# normal iterations
score = {}
for A in Graph:
#print("-"*10)
#print("Node: "+A)
# Reinitialize the score
score[A] = epsilon/float(no_of_nodes)
for B in Graph:
#print("Link from: "+B)
#print(Graph.get(B))
#print(Graph.get(B).index(A))
try:
_ = Graph.get(B).index(A)
#print(B+" points to "+A)
degree_B = len(Graph.get(B))
#print("Score: "+str(prev_score[B]))
#print("Degree: | stop_word_list = None
if stop_word_list_file_path == None:
stop_word_list = set(stopwords.words('english'))
else:
fd = open(stop_word_list_file_path, "r")
txt = fd.readlines()
fd.close()
stop_word_list = []
for l in txt:
stop_word_list.append(l.lstrip().rstrip())
return stop_word_list | identifier_body |
|
search.py | word: String
:return: Stemmed word
"""
return stem(word)
def sanitize(text, stop_word_list):
"""
Reads a text, remove stop words, stem the words.
:param text: String
:return: List of words
"""
# convert the text into Unicode
text = unicode(text)
#print(type(text))
# replace dot with space
text = text.translate({ord("."): ord(" ")})
# replace dash with space
text = text.translate({ord("-"): ord(" ")})
# split the text on white-space
words = text.split()
sanitized_words = []
for w in words:
# ignore numbers
if w.isnumeric():
continue
# print("Word (Before Punctuation): " + w)
# remove punctuation
# Ref: https://stackoverflow.com/questions/265960/best-way-to-strip-punctuation-from-a-string-in-python
# w = w.translate(None, string.punctuation)
# The above method does not work for Unicode strings
# Ref: https://stackoverflow.com/questions/23175809/typeerror-translate-takes-one-argument-2-given-python#23306505
# print(type(w))
# replace punctuations with None
w = w.translate({ord(c): None for c in string.punctuation})
w = w.lower()
# print("Word (After Punctuation): "+w)
# Note: Remove stop-words before Stemming, or else the stop-word
# matching will not work.
# If the word is in Stop Word List
try:
i = stop_word_list.index(w.lower())
# skip further processing of word loop
# print("Stop Word Removed: "+w)
continue
except ValueError:
pass
w = stemm_word(w)
# hack, hack, hack
if w == '':
continue
# add the sanitized word into return list
sanitized_words.append(w)
return sanitized_words
def get_links(query_terms):
"""
Get all the links which contains the terms in the query string.
:param query_terms as list of strings
:return
"""
# the set of links all of which contains all the terms in the query string
final_links = None
for term in query_terms:
# get all links containing the term and put in a set
links = Set(index_data.get(term))
#print("\n\nQuery Term: %s" % term)
#print(links)
# special case for first iteration, because: empty & anything = empty
if final_links == None:
final_links = links
# take intersection of links set
final_links = final_links & links
#print(final_links)
# convert the Set to List and return
return list(final_links)
def rank_links(tf_idf_table, query_terms, links):
"""
Rank the list of given links in terms of relevance.
:param TF-IDF table
:param query_terms as list of strings
:param links List of URLs
:return List of URLs ranked
"""
tf = {}
for w in query_terms:
f = query_terms.count(w)
tf[w] = f
q_tf_idf = {}
for term in tf:
# if the query term is found in files
|
# score of all docs for this query
doc_vals = {}
# Wiq denominator in CosSim
DWiq = 0
for t in tf_idf_table:
DWiq = q_tf_idf.get(t)
# if the term is not in query, ignore
if DWiq == None:
continue
#print("Term: %s \t\t Query TF-IDF: %d" % (t, q_tf_idf.get(t)))
idf_row = tf_idf_table.get(t)
# if the query term is in our corpus
if idf_row != None:
#print(idf_row)
# get the document frequency
df = float(len(idf_row))
#print("DF: %d" % (df))
# Wij denominator in CosSim
DWij = 0
# Numerator in CosSim
Njq = 0
# calculate values of each document specific
for doc in idf_row:
#print(doc)
# The "df" should not be processed
if doc == "df":
continue
# skip any link that are not relevant
try:
_ = links.index(doc)
except:
continue
#print("Doc ID: %s \tTF: %d" % (doc, idf_row.get(doc)))
DWij = idf_row.get(doc)
#Njq = q_tf_idf.get(t) * idf_row.get(doc)
if doc_vals.has_key(doc):
vals = doc_vals.get(doc)
vals["DWiq"] += pow(DWiq, 2)
vals["DWij"] += pow(DWij, 2)
vals["NWjq"] += DWij * DWiq
doc_vals[doc] = vals
else:
vals = {}
vals["DWiq"] = pow(DWiq, 2)
vals["DWij"] = pow(DWij, 2)
vals["NWjq"] = DWij * DWiq
doc_vals[doc] = vals
#print(doc_vals)
# Calculate the CosSim value
doc_score = {}
for doc in doc_vals:
#print(doc)
vals = doc_vals.get(doc)
#print(vals)
#n = vals.get("NWjq")
#d = float(pow(vals.get("DWij") * vals.get("DWiq"),0.5))
#print(n)
#print(d)
#print(float(n/float(d)))
doc_score[doc] = float(vals.get("NWjq"))/float(pow(vals.get("DWij") * vals.get("DWiq"),0.5))
#print(doc_score[doc])
#print(doc_score)
sorted_by_score = sorted(doc_score.items(), key=operator.itemgetter(1), reverse=True)
#print(sorted_by_score)
sorted_score = collections.OrderedDict()
for url, score in sorted_by_score:
sorted_score[url] = score
#print(sorted_score)
return sorted_score
def print_scores(ranked_list, links, topN=10):
"""
Takes an OrderdDict and print topN entries which are also in links
"""
n = topN
for url, score in ranked_list.items():
try:
_ = links.index(url)
print("Score: %f \t URL: %s" %(score, url))
n -= 1
if n <= 0:
break
except:
pass
def normalize_scores(scores):
"""
Normalize the scores so that their sum is equal to 1.
"""
#print(scores)
keys = scores.keys()
sum = 0.0
for k in keys:
#print("%06f\t" % scores.get(k)),
sum += scores.get(k)
if sum == 1.0:
return scores
new_scores = {}
for k in keys:
new_scores[k] = scores.get(k)/float(sum)
return new_scores
def calculate_pagerank_with_teleport(Graph, epsilon, iterations=50):
prev_score = None
score = None
iteration = 1
#print("No. of Nodes: %d" % len(Graph))
# Loop
while True:
#print("\nIteration: "+str(iteration))
iteration += 1
# first iteration
if score is None:
score = {}
no_of_nodes = len(Graph.keys())
for node in Graph:
score[node] = 1/float(no_of_nodes)
else:
# normal iterations
score = {}
for A in Graph:
#print("-"*10)
#print("Node: "+A)
# Reinitialize the score
score[A] = epsilon/float(no_of_nodes)
for B in Graph:
#print("Link from: "+B)
#print(Graph.get(B))
#print(Graph.get(B).index(A))
try:
_ = Graph.get(B).index(A)
#print(B+" points to "+A)
degree_B = len(Graph.get(B))
#print("Score: "+str(prev_score[B]))
#print("Degree: "+str(degree_B))
#print("Adding "+str(prev_score[B]/float(degree_B))+ " to "+str(score[A]))
score[A] += (1-epsilon) * prev_score[B]/float(degree_B)
#print("New score:"+str(score[A]))
except ValueError:
#print(A +" not in "+B)
pass
score[A] = round(score[A], ROUND_DIGITS)
#print("Before Normalization")
#print | if tf_idf_table.has_key(term):
q_tf_idf[term] = tf.get(term) # * log(N/1)
else:
# if the query term is NOT found in files, set IDF to 0
q_tf_idf[term] = 0 | conditional_block |
search.py | _words = []
for w in words:
# ignore numbers
if w.isnumeric():
continue
# print("Word (Before Punctuation): " + w)
# remove punctuation
# Ref: https://stackoverflow.com/questions/265960/best-way-to-strip-punctuation-from-a-string-in-python
# w = w.translate(None, string.punctuation)
# The above method does not work for Unicode strings
# Ref: https://stackoverflow.com/questions/23175809/typeerror-translate-takes-one-argument-2-given-python#23306505
# print(type(w))
# replace punctuations with None
w = w.translate({ord(c): None for c in string.punctuation})
w = w.lower()
# print("Word (After Punctuation): "+w)
# Note: Remove stop-words before Stemming, or else the stop-word
# matching will not work.
# If the word is in Stop Word List
try:
i = stop_word_list.index(w.lower())
# skip further processing of word loop
# print("Stop Word Removed: "+w)
continue
except ValueError:
pass
w = stemm_word(w)
# hack, hack, hack
if w == '':
continue
# add the sanitized word into return list
sanitized_words.append(w)
return sanitized_words
def get_links(query_terms):
"""
Get all the links which contains the terms in the query string.
:param query_terms as list of strings
:return
"""
# the set of links all of which contains all the terms in the query string
final_links = None
for term in query_terms:
# get all links containing the term and put in a set
links = Set(index_data.get(term))
#print("\n\nQuery Term: %s" % term)
#print(links)
# special case for first iteration, because: empty & anything = empty
if final_links == None:
final_links = links
# take intersection of links set
final_links = final_links & links
#print(final_links)
# convert the Set to List and return
return list(final_links)
def rank_links(tf_idf_table, query_terms, links):
"""
Rank the list of given links in terms of relevance.
:param TF-IDF table
:param query_terms as list of strings
:param links List of URLs
:return List of URLs ranked
"""
tf = {}
for w in query_terms:
f = query_terms.count(w)
tf[w] = f
q_tf_idf = {}
for term in tf:
# if the query term is found in files
if tf_idf_table.has_key(term):
q_tf_idf[term] = tf.get(term) # * log(N/1)
else:
# if the query term is NOT found in files, set IDF to 0
q_tf_idf[term] = 0
# score of all docs for this query
doc_vals = {}
# Wiq denominator in CosSim
DWiq = 0
for t in tf_idf_table:
DWiq = q_tf_idf.get(t)
# if the term is not in query, ignore
if DWiq == None:
continue
#print("Term: %s \t\t Query TF-IDF: %d" % (t, q_tf_idf.get(t)))
idf_row = tf_idf_table.get(t)
# if the query term is in our corpus
if idf_row != None:
#print(idf_row)
# get the document frequency
df = float(len(idf_row))
#print("DF: %d" % (df))
# Wij denominator in CosSim
DWij = 0
# Numerator in CosSim
Njq = 0
# calculate values of each document specific
for doc in idf_row:
#print(doc)
# The "df" should not be processed
if doc == "df":
continue
# skip any link that are not relevant
try:
_ = links.index(doc)
except:
continue
#print("Doc ID: %s \tTF: %d" % (doc, idf_row.get(doc)))
DWij = idf_row.get(doc)
#Njq = q_tf_idf.get(t) * idf_row.get(doc)
if doc_vals.has_key(doc):
vals = doc_vals.get(doc)
vals["DWiq"] += pow(DWiq, 2)
vals["DWij"] += pow(DWij, 2)
vals["NWjq"] += DWij * DWiq
doc_vals[doc] = vals
else:
vals = {}
vals["DWiq"] = pow(DWiq, 2)
vals["DWij"] = pow(DWij, 2)
vals["NWjq"] = DWij * DWiq
doc_vals[doc] = vals
#print(doc_vals)
# Calculate the CosSim value
doc_score = {}
for doc in doc_vals:
#print(doc)
vals = doc_vals.get(doc)
#print(vals)
#n = vals.get("NWjq")
#d = float(pow(vals.get("DWij") * vals.get("DWiq"),0.5))
#print(n)
#print(d)
#print(float(n/float(d)))
doc_score[doc] = float(vals.get("NWjq"))/float(pow(vals.get("DWij") * vals.get("DWiq"),0.5))
#print(doc_score[doc])
#print(doc_score)
sorted_by_score = sorted(doc_score.items(), key=operator.itemgetter(1), reverse=True)
#print(sorted_by_score)
sorted_score = collections.OrderedDict()
for url, score in sorted_by_score:
sorted_score[url] = score
#print(sorted_score)
return sorted_score
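# Worked toy example (illustrative, not part of the original module). Assume:
#
#   tf_idf_table = {"fox": {"df": 2, "a.html": 3, "b.html": 1},
#                   "dog": {"df": 1, "b.html": 2}}
#   query_terms  = ["fox", "dog"]          # each query term has tf = 1
#   links        = ["a.html", "b.html"]
#
# For a.html only "fox" contributes: NWjq = 3, DWij = 9, DWiq = 1, so its
# CosSim is 3 / sqrt(9 * 1) = 1.0. For b.html both terms contribute:
# NWjq = 1 + 2 = 3, DWij = 1 + 4 = 5, DWiq = 1 + 1 = 2, giving
# 3 / sqrt(5 * 2) ~= 0.95. rank_links would therefore order a.html first.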
def print_scores(ranked_list, links, topN=10):
"""
Takes an OrderedDict and prints the topN entries which are also in links
"""
n = topN
for url, score in ranked_list.items():
try:
_ = links.index(url)
print("Score: %f \t URL: %s" %(score, url))
n -= 1
if n <= 0:
break
except:
pass
def normalize_scores(scores):
"""
Normalize the scores so that their sum is equal to 1.
"""
#print(scores)
keys = scores.keys()
sum = 0.0
for k in keys:
#print("%06f\t" % scores.get(k)),
sum += scores.get(k)
if sum == 1.0:
return scores
new_scores = {}
for k in keys:
new_scores[k] = scores.get(k)/float(sum)
return new_scores
def calculate_pagerank_with_teleport(Graph, epsilon, iterations=50):
prev_score = None
score = None
iteration = 1
#print("No. of Nodes: %d" % len(Graph))
# Loop
while True:
#print("\nIteration: "+str(iteration))
iteration += 1
# first iteration
if score is None:
score = {}
no_of_nodes = len(Graph.keys())
for node in Graph:
score[node] = 1/float(no_of_nodes)
else:
# normal iterations
score = {}
for A in Graph:
#print("-"*10)
#print("Node: "+A)
# Reinitialize the score
score[A] = epsilon/float(no_of_nodes)
for B in Graph:
#print("Link from: "+B)
#print(Graph.get(B))
#print(Graph.get(B).index(A))
try:
_ = Graph.get(B).index(A)
#print(B+" points to "+A)
degree_B = len(Graph.get(B))
#print("Score: "+str(prev_score[B]))
#print("Degree: "+str(degree_B))
#print("Adding "+str(prev_score[B]/float(degree_B))+ " to "+str(score[A]))
score[A] += (1-epsilon) * prev_score[B]/float(degree_B)
#print("New score:"+str(score[A]))
except ValueError:
#print(A +" not in "+B)
pass
score[A] = round(score[A], ROUND_DIGITS)
#print("Before Normalization")
#print_scores(score)
#normalize the scores
#print("After Normalization")
score = normalize_scores(score)
#print_scores(score)
# check for convergence
if score == prev_score:
break
prev_score = score
if iteration > iterations:
break
# sort by score
sorted_by_score = sorted(score.items(), key=operator.itemgetter(1), reverse=True)
#print(sorted_by_score)
sorted_score = collections.OrderedDict()
for url, score in sorted_by_score:
sorted_score[url] = score
#print(sorted_score)
return sorted_score
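# Illustrative call (not in the original): for a tiny link graph
#
#   graph = {"a": ["b"], "b": ["a", "c"], "c": ["a"]}
#   ranks = calculate_pagerank_with_teleport(graph, epsilon=0.15)
#
# every node starts at 1/3; on each pass a node receives epsilon/N plus
# (1 - epsilon) * prev_score[B] / out_degree(B) from every node B that links to
# it, the scores are rounded and re-normalized, and iteration stops once the
# scores repeat exactly (or after `iterations` passes). The result is an
# OrderedDict of nodes sorted by score, highest first.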
def | build_graph | identifier_name |
|
offline_replica.rs | ::{
Cmd, CmdResponse, CreateRegister, EditRegister, Query, QueryResponse, RegisterCmd,
RegisterQuery, Request, Response, SignedRegisterCreate, SignedRegisterEdit,
},
register::{Action, Entry, EntryHash, Permissions, Policy, Register as RegisterReplica, User},
};
use bincode::serialize;
use std::collections::{BTreeSet, LinkedList};
use xor_name::XorName;
/// Ops made to an offline Register instance are applied locally only,
/// and accumulated till the user explicitly calls 'sync'. The user can
/// switch back to sync with the network for every op by invoking `online` API.
pub struct RegisterOffline {
client: Client,
register: RegisterReplica,
ops: LinkedList<RegisterCmd>, // Cached operations.
}
impl RegisterOffline {
/// Create a new Register offline.
pub fn create(client: Client, name: XorName, tag: u64) -> Result<Self> {
Self::new(client, name, tag)
}
/// Retrieve a Register from the network to work on it offline.
pub(super) async fn retrieve(client: Client, name: XorName, tag: u64) -> Result<Self> {
let register = Self::get_register(&client, name, tag).await?;
Ok(Self {
client,
register,
ops: LinkedList::new(),
})
}
/// Instantiate a ReplicaOffline from a given Register instance.
pub(super) fn from(replica: Register) -> Self {
Self {
client: replica.offline_reg.client,
register: replica.offline_reg.register,
ops: LinkedList::new(),
}
}
/// Switch to 'online' mode where each op made locally is immediately pushed to the network.
pub async fn online(mut self) -> Result<Register> {
self.push().await?;
Ok(Register { offline_reg: self })
}
/// Return the Policy of the Register.
pub fn policy(&self) -> &Policy {
self.register.policy()
}
/// Return the XorName of the Register.
pub fn name(&self) -> &XorName {
self.register.name()
}
/// Return the tag value of the Register.
pub fn tag(&self) -> u64 {
self.register.tag()
}
/// Return the number of items held in the register
pub fn size(&self) -> u64 {
self.register.size()
}
/// Return a value corresponding to the provided 'hash', if present.
pub fn get(&self, hash: EntryHash) -> Result<&Entry> {
let entry = self.register.get(hash)?;
Ok(entry)
}
/// Read the last entry, or entries when there are branches, if the register is not empty.
pub fn read(&self) -> BTreeSet<(EntryHash, Entry)> {
self.register.read()
}
/// Write a new value onto the Register atop latest value.
/// It returns an error if it finds branches in the content/entries; if it is
/// required to merge/resolve the branches, invoke the `write_merging_branches` API.
pub fn | (&mut self, entry: &[u8]) -> Result<()> {
let children = self.register.read();
if children.len() > 1 {
return Err(Error::ContentBranchDetected(children));
}
self.write_atop(entry, children.into_iter().map(|(hash, _)| hash).collect())
}
/// Write a new value onto the Register atop latest value.
/// If there are branches of content/entries, it automatically merges them
/// all leaving the new value as a single latest value of the Register.
/// Note you can use `write` API instead if you need to handle
/// content/entries branches in a different way.
pub fn write_merging_branches(&mut self, entry: &[u8]) -> Result<()> {
let children: BTreeSet<EntryHash> = self
.register
.read()
.into_iter()
.map(|(hash, _)| hash)
.collect();
self.write_atop(entry, children)
}
/// Write a new value onto the Register atop the set of branches/entries
/// referenced by the provided list of their corresponding entry hash.
/// Note you can use `write_merging_branches` API instead if you
/// want to write atop all existing branches/entries.
pub fn write_atop(&mut self, entry: &[u8], children: BTreeSet<EntryHash>) -> Result<()> {
// we need to check permissions first
let public_key = self.client.signer_pk();
self.register
.check_permissions(Action::Write, Some(User::Key(public_key)))?;
let (_hash, edit) = self.register.write(entry.into(), children)?;
let op = EditRegister {
address: *self.register.address(),
edit,
};
let auth = DataAuthority {
public_key,
signature: self.client.sign(&serialize(&op)?),
};
let cmd = RegisterCmd::Edit(SignedRegisterEdit { op, auth });
self.ops.push_front(cmd);
Ok(())
}
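// Illustrative usage sketch (not part of the original source; `client`, `name`
// and `tag` are assumed to exist in the caller's scope):
//
//     let mut reg = RegisterOffline::create(client, name, tag)?;
//     reg.write(b"first entry")?;              // cached locally as an Edit cmd
//     reg.write_merging_branches(b"second")?;  // also cached, merges any branches
//     reg.sync().await?;                       // merge remote state, then push cached cmds
//
// Until `sync`, `push` or `online` is called, every cmd only lives in `self.ops`.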
/// Sync this Register with the replicas on the network.
pub async fn sync(&mut self) -> Result<()> {
debug!("Syncing Register at {}, {}!", self.name(), self.tag(),);
// FIXME: handle the scenario where the Register doesn't exist on the network yet
let remote_replica = Self::get_register(&self.client, *self.name(), self.tag()).await?;
self.register.merge(remote_replica);
self.push().await
}
/// Push all operations made locally to the replicas of this Register on the network.
pub async fn push(&mut self) -> Result<()> {
let ops_len = self.ops.len();
if ops_len > 0 {
let name = *self.name();
let tag = self.tag();
debug!("Pushing {ops_len} cached Register cmds at {name}, {tag}!",);
// TODO: send them all concurrently
while let Some(cmd) = self.ops.pop_back() {
let result = match cmd {
RegisterCmd::Create { .. } => self.publish_register_create(cmd.clone()).await,
RegisterCmd::Edit { .. } => self.publish_register_edit(cmd.clone()).await,
};
if let Err(err) = result {
warn!("Did not push Register cmd on all nodes in the close group!: {err}");
// We keep the cmd for next sync to retry
self.ops.push_back(cmd);
return Err(err);
}
}
debug!("Successfully pushed {ops_len} Register cmds at {name}, {tag}!",);
}
Ok(())
}
// ********* Private helpers *********
// Create a new RegisterOffline instance with the given name and tag.
fn new(client: Client, name: XorName, tag: u64) -> Result<Self> {
let public_key = client.signer_pk();
let owner = User::Key(public_key);
let policy = Policy {
owner,
permissions: [(User::Anyone, Permissions::new(true))]
.into_iter()
.collect(),
};
let op = CreateRegister {
name,
tag,
policy: policy.clone(),
};
let auth = DataAuthority {
public_key,
signature: client.sign(&serialize(&op)?),
};
let create_cmd = RegisterCmd::Create(SignedRegisterCreate { op, auth });
let register = RegisterReplica::new(owner, name, tag, policy);
let reg = Self {
client,
register,
ops: LinkedList::from([create_cmd]),
};
Ok(reg)
}
// Publish a `Register` creation command on the network.
async fn publish_register_create(&self, cmd: RegisterCmd) -> Result<()> {
debug!("Publishing Register create cmd: {:?}", cmd.dst());
let request = Request::Cmd(Cmd::Register(cmd));
let responses = self.client.send_to_closest(request).await?;
let all_ok = responses
.iter()
.all(|resp| matches!(resp, Ok(Response::Cmd(CmdResponse::CreateRegister(Ok(()))))));
if all_ok {
return Ok(());
}
// If not all were Ok, we will return the first error sent to us.
for resp in responses.iter().flatten() {
if let Response::Cmd(CmdResponse::CreateRegister(result)) = resp {
result.clone()?;
};
}
// If there were no success or fail to the expected query,
// we check if there were any send errors.
for resp in responses {
let _ = resp?;
}
// If there were no register errors, then we had unexpected responses.
Err(Error::Protocol(ProtocolError::UnexpectedResponses))
}
// Publish a `Register` edit command in the network.
async fn publish_register_edit(&self, cmd: RegisterCmd) -> Result<()> {
debug!("Publishing Register edit cmd: {:?}", cmd.dst());
let request = Request::Cmd(Cmd::Register(cmd));
let responses = self.client.send_to_closest(request).await?;
let all_ok = responses
.iter()
.all(|resp| matches!(resp, Ok(Response::Cmd(CmdResponse::EditRegister(Ok(()))))));
if all_ok {
return Ok(());
}
// If not all were Ok, we will return the first error sent to us.
for resp in responses.iter().flatten() {
if let Response::Cmd(C | write | identifier_name |
offline_replica.rs | ::{
Cmd, CmdResponse, CreateRegister, EditRegister, Query, QueryResponse, RegisterCmd,
RegisterQuery, Request, Response, SignedRegisterCreate, SignedRegisterEdit,
},
register::{Action, Entry, EntryHash, Permissions, Policy, Register as RegisterReplica, User},
};
use bincode::serialize;
use std::collections::{BTreeSet, LinkedList};
use xor_name::XorName;
/// Ops made to an offline Register instance are applied locally only,
/// and accumulated till the user explicitly calls 'sync'. The user can
/// switch back to sync with the network for every op by invoking `online` API.
pub struct RegisterOffline {
client: Client,
register: RegisterReplica,
ops: LinkedList<RegisterCmd>, // Cached operations.
}
impl RegisterOffline {
/// Create a new Register offline.
pub fn create(client: Client, name: XorName, tag: u64) -> Result<Self> {
Self::new(client, name, tag)
}
/// Retrieve a Register from the network to work on it offline.
pub(super) async fn retrieve(client: Client, name: XorName, tag: u64) -> Result<Self> {
let register = Self::get_register(&client, name, tag).await?;
Ok(Self {
client,
register,
ops: LinkedList::new(),
})
}
/// Instantiate a ReplicaOffline from a given Register instance.
pub(super) fn from(replica: Register) -> Self {
Self {
client: replica.offline_reg.client,
register: replica.offline_reg.register,
ops: LinkedList::new(),
}
}
/// Switch to 'online' mode where each op made locally is immediately pushed to the network.
pub async fn online(mut self) -> Result<Register> {
self.push().await?;
Ok(Register { offline_reg: self })
}
/// Return the Policy of the Register.
pub fn policy(&self) -> &Policy {
self.register.policy()
}
/// Return the XorName of the Register.
pub fn name(&self) -> &XorName {
self.register.name()
}
/// Return the tag value of the Register.
pub fn tag(&self) -> u64 {
self.register.tag()
}
/// Return the number of items held in the register
pub fn size(&self) -> u64 {
self.register.size()
}
/// Return a value corresponding to the provided 'hash', if present.
pub fn get(&self, hash: EntryHash) -> Result<&Entry> {
let entry = self.register.get(hash)?;
Ok(entry)
}
/// Read the last entry, or entries when there are branches, if the register is not empty.
pub fn read(&self) -> BTreeSet<(EntryHash, Entry)> {
self.register.read()
}
/// Write a new value onto the Register atop latest value.
/// It returns an error if it finds branches in the content/entries; if it is
/// required to merge/resolve the branches, invoke the `write_merging_branches` API.
pub fn write(&mut self, entry: &[u8]) -> Result<()> {
let children = self.register.read();
if children.len() > 1 {
return Err(Error::ContentBranchDetected(children));
}
self.write_atop(entry, children.into_iter().map(|(hash, _)| hash).collect())
}
/// Write a new value onto the Register atop latest value.
/// If there are branches of content/entries, it automatically merges them
/// all leaving the new value as a single latest value of the Register.
/// Note you can use `write` API instead if you need to handle
/// content/entries branches in a different way.
pub fn write_merging_branches(&mut self, entry: &[u8]) -> Result<()> {
let children: BTreeSet<EntryHash> = self
.register
.read()
.into_iter()
.map(|(hash, _)| hash)
.collect();
self.write_atop(entry, children)
}
/// Write a new value onto the Register atop the set of branches/entries
/// referenced by the provided list of their corresponding entry hash.
/// Note you can use `write_merging_branches` API instead if you
/// want to write atop all existing branches/entries.
pub fn write_atop(&mut self, entry: &[u8], children: BTreeSet<EntryHash>) -> Result<()> {
// we need to check permissions first
let public_key = self.client.signer_pk();
self.register
.check_permissions(Action::Write, Some(User::Key(public_key)))?;
let (_hash, edit) = self.register.write(entry.into(), children)?;
let op = EditRegister {
address: *self.register.address(),
edit,
};
let auth = DataAuthority {
public_key,
signature: self.client.sign(&serialize(&op)?),
};
let cmd = RegisterCmd::Edit(SignedRegisterEdit { op, auth });
self.ops.push_front(cmd);
Ok(())
}
/// Sync this Register with the replicas on the network.
pub async fn sync(&mut self) -> Result<()> {
debug!("Syncing Register at {}, {}!", self.name(), self.tag(),);
// FIXME: handle the scenario where the Register doesn't exist on the network yet
let remote_replica = Self::get_register(&self.client, *self.name(), self.tag()).await?;
self.register.merge(remote_replica);
self.push().await
}
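// Sketch of how the wrapping online `Register` type (or other code in this
// module tree) could use this replica (illustrative only, not in the original):
//
//     let mut reg = RegisterOffline::retrieve(client, name, tag).await?;
//     reg.write(b"offline edit")?;   // queued in `ops`
//     reg.sync().await?;             // merge the remote replica, then push the queue
//
// If pushing a cmd fails it is pushed back onto `ops`, so a later `sync` or
// `push` will retry it (see `push` below).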
/// Push all operations made locally to the replicas of this Register on the network.
pub async fn push(&mut self) -> Result<()> {
let ops_len = self.ops.len();
if ops_len > 0 {
let name = *self.name();
let tag = self.tag();
debug!("Pushing {ops_len} cached Register cmds at {name}, {tag}!",);
// TODO: send them all concurrently
while let Some(cmd) = self.ops.pop_back() {
let result = match cmd {
RegisterCmd::Create { .. } => self.publish_register_create(cmd.clone()).await,
RegisterCmd::Edit { .. } => self.publish_register_edit(cmd.clone()).await,
};
if let Err(err) = result {
warn!("Did not push Register cmd on all nodes in the close group!: {err}");
// We keep the cmd for next sync to retry
self.ops.push_back(cmd);
return Err(err);
}
}
debug!("Successfully pushed {ops_len} Register cmds at {name}, {tag}!",);
}
Ok(())
}
// ********* Private helpers *********
// Create a new RegisterOffline instance with the given name and tag.
fn new(client: Client, name: XorName, tag: u64) -> Result<Self> {
let public_key = client.signer_pk();
let owner = User::Key(public_key);
let policy = Policy {
owner,
permissions: [(User::Anyone, Permissions::new(true))]
.into_iter()
.collect(),
};
let op = CreateRegister {
name,
tag,
policy: policy.clone(),
};
let auth = DataAuthority {
public_key,
signature: client.sign(&serialize(&op)?),
};
let create_cmd = RegisterCmd::Create(SignedRegisterCreate { op, auth });
let register = RegisterReplica::new(owner, name, tag, policy);
let reg = Self {
client,
register,
ops: LinkedList::from([create_cmd]),
};
Ok(reg)
}
// Publish a `Register` creation command on the network.
async fn publish_register_create(&self, cmd: RegisterCmd) -> Result<()> {
debug!("Publishing Register create cmd: {:?}", cmd.dst());
let request = Request::Cmd(Cmd::Register(cmd));
let responses = self.client.send_to_closest(request).await?;
let all_ok = responses
.iter()
.all(|resp| matches!(resp, Ok(Response::Cmd(CmdResponse::CreateRegister(Ok(()))))));
if all_ok {
return Ok(());
}
// If not all were Ok, we will return the first error sent to us.
for resp in responses.iter().flatten() {
if let Response::Cmd(CmdResponse::CreateRegister(result)) = resp {
result.clone()?;
};
}
// If there were no success or fail to the expected query,
// we check if there were any send errors.
for resp in responses {
let _ = resp?;
}
// If there were no register errors, then we had unexpected responses.
Err(Error::Protocol(ProtocolError::UnexpectedResponses))
}
// Publish a `Register` edit command in the network.
async fn publish_register_edit(&self, cmd: RegisterCmd) -> Result<()> {
debug!("Publishing Register edit cmd: {:?}", cmd.dst());
let request = Request::Cmd(Cmd::Register(cmd));
let responses = self.client.send_to_closest(request).await?;
let all_ok = responses
.iter()
.all(|resp| matches!(resp, Ok(Response::Cmd(CmdResponse::EditRegister(Ok(()))))));
if all_ok |
// If not all were Ok, we will return the first error sent to us.
for resp in responses.iter().flatten() {
if let Response::Cmd | {
return Ok(());
} | conditional_block |
offline_replica.rs | ::{
Cmd, CmdResponse, CreateRegister, EditRegister, Query, QueryResponse, RegisterCmd,
RegisterQuery, Request, Response, SignedRegisterCreate, SignedRegisterEdit,
},
register::{Action, Entry, EntryHash, Permissions, Policy, Register as RegisterReplica, User},
};
use bincode::serialize;
use std::collections::{BTreeSet, LinkedList};
use xor_name::XorName;
/// Ops made to an offline Register instance are applied locally only,
/// and accumulated till the user explicitly calls 'sync'. The user can
/// switch back to sync with the network for every op by invoking `online` API.
pub struct RegisterOffline {
client: Client,
register: RegisterReplica,
ops: LinkedList<RegisterCmd>, // Cached operations.
}
impl RegisterOffline {
/// Create a new Register offline.
pub fn create(client: Client, name: XorName, tag: u64) -> Result<Self> {
Self::new(client, name, tag)
}
/// Retrieve a Register from the network to work on it offline.
pub(super) async fn retrieve(client: Client, name: XorName, tag: u64) -> Result<Self> {
let register = Self::get_register(&client, name, tag).await?;
Ok(Self {
client,
register,
ops: LinkedList::new(),
})
}
/// Instantiate a ReplicaOffline from a given Register instance.
pub(super) fn from(replica: Register) -> Self {
Self {
client: replica.offline_reg.client,
register: replica.offline_reg.register,
ops: LinkedList::new(),
}
}
/// Switch to 'online' mode where each op made locally is immediately pushed to the network.
pub async fn online(mut self) -> Result<Register> {
self.push().await?;
Ok(Register { offline_reg: self })
}
/// Return the Policy of the Register.
pub fn policy(&self) -> &Policy {
self.register.policy()
}
/// Return the XorName of the Register.
pub fn name(&self) -> &XorName {
self.register.name()
}
/// Return the tag value of the Register.
pub fn tag(&self) -> u64 |
/// Return the number of items held in the register
pub fn size(&self) -> u64 {
self.register.size()
}
/// Return a value corresponding to the provided 'hash', if present.
pub fn get(&self, hash: EntryHash) -> Result<&Entry> {
let entry = self.register.get(hash)?;
Ok(entry)
}
/// Read the last entry, or entries when there are branches, if the register is not empty.
pub fn read(&self) -> BTreeSet<(EntryHash, Entry)> {
self.register.read()
}
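// Illustrative read (not in the original): `read` returns every current "tip"
// of the underlying CRDT, so after two concurrent writes it may yield more
// than one (EntryHash, Entry) pair:
//
//     for (hash, entry) in reg.read() {
//         // inspect each branch tip; `reg` is an assumed RegisterOffline value
//         println!("tip {:?} -> {:?}", hash, entry);
//     }
//
// (This assumes EntryHash and Entry implement Debug, which is not shown here.)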
/// Write a new value onto the Register atop latest value.
/// It returns an error if it finds branches in the content/entries; if it is
/// required to merge/resolve the branches, invoke the `write_merging_branches` API.
pub fn write(&mut self, entry: &[u8]) -> Result<()> {
let children = self.register.read();
if children.len() > 1 {
return Err(Error::ContentBranchDetected(children));
}
self.write_atop(entry, children.into_iter().map(|(hash, _)| hash).collect())
}
/// Write a new value onto the Register atop latest value.
/// If there are branches of content/entries, it automatically merges them
/// all leaving the new value as a single latest value of the Register.
/// Note you can use `write` API instead if you need to handle
/// content/entries branches in a different way.
pub fn write_merging_branches(&mut self, entry: &[u8]) -> Result<()> {
let children: BTreeSet<EntryHash> = self
.register
.read()
.into_iter()
.map(|(hash, _)| hash)
.collect();
self.write_atop(entry, children)
}
/// Write a new value onto the Register atop the set of branches/entries
/// referenced by the provided list of their corresponding entry hash.
/// Note you can use `write_merging_branches` API instead if you
/// want to write atop all existing branches/entries.
pub fn write_atop(&mut self, entry: &[u8], children: BTreeSet<EntryHash>) -> Result<()> {
// we need to check permissions first
let public_key = self.client.signer_pk();
self.register
.check_permissions(Action::Write, Some(User::Key(public_key)))?;
let (_hash, edit) = self.register.write(entry.into(), children)?;
let op = EditRegister {
address: *self.register.address(),
edit,
};
let auth = DataAuthority {
public_key,
signature: self.client.sign(&serialize(&op)?),
};
let cmd = RegisterCmd::Edit(SignedRegisterEdit { op, auth });
self.ops.push_front(cmd);
Ok(())
}
/// Sync this Register with the replicas on the network.
pub async fn sync(&mut self) -> Result<()> {
debug!("Syncing Register at {}, {}!", self.name(), self.tag(),);
// FIXME: handle the scenario where the Register doesn't exist on the network yet
let remote_replica = Self::get_register(&self.client, *self.name(), self.tag()).await?;
self.register.merge(remote_replica);
self.push().await
}
/// Push all operations made locally to the replicas of this Register on the network.
pub async fn push(&mut self) -> Result<()> {
let ops_len = self.ops.len();
if ops_len > 0 {
let name = *self.name();
let tag = self.tag();
debug!("Pushing {ops_len} cached Register cmds at {name}, {tag}!",);
// TODO: send them all concurrently
while let Some(cmd) = self.ops.pop_back() {
let result = match cmd {
RegisterCmd::Create { .. } => self.publish_register_create(cmd.clone()).await,
RegisterCmd::Edit { .. } => self.publish_register_edit(cmd.clone()).await,
};
if let Err(err) = result {
warn!("Did not push Register cmd on all nodes in the close group!: {err}");
// We keep the cmd for next sync to retry
self.ops.push_back(cmd);
return Err(err);
}
}
debug!("Successfully pushed {ops_len} Register cmds at {name}, {tag}!",);
}
Ok(())
}
// ********* Private helpers *********
// Create a new RegisterOffline instance with the given name and tag.
fn new(client: Client, name: XorName, tag: u64) -> Result<Self> {
let public_key = client.signer_pk();
let owner = User::Key(public_key);
let policy = Policy {
owner,
permissions: [(User::Anyone, Permissions::new(true))]
.into_iter()
.collect(),
};
let op = CreateRegister {
name,
tag,
policy: policy.clone(),
};
let auth = DataAuthority {
public_key,
signature: client.sign(&serialize(&op)?),
};
let create_cmd = RegisterCmd::Create(SignedRegisterCreate { op, auth });
let register = RegisterReplica::new(owner, name, tag, policy);
let reg = Self {
client,
register,
ops: LinkedList::from([create_cmd]),
};
Ok(reg)
}
// Publish a `Register` creation command on the network.
async fn publish_register_create(&self, cmd: RegisterCmd) -> Result<()> {
debug!("Publishing Register create cmd: {:?}", cmd.dst());
let request = Request::Cmd(Cmd::Register(cmd));
let responses = self.client.send_to_closest(request).await?;
let all_ok = responses
.iter()
.all(|resp| matches!(resp, Ok(Response::Cmd(CmdResponse::CreateRegister(Ok(()))))));
if all_ok {
return Ok(());
}
// If not all were Ok, we will return the first error sent to us.
for resp in responses.iter().flatten() {
if let Response::Cmd(CmdResponse::CreateRegister(result)) = resp {
result.clone()?;
};
}
// If there were no success or fail to the expected query,
// we check if there were any send errors.
for resp in responses {
let _ = resp?;
}
// If there were no register errors, then we had unexpected responses.
Err(Error::Protocol(ProtocolError::UnexpectedResponses))
}
// Publish a `Register` edit command in the network.
async fn publish_register_edit(&self, cmd: RegisterCmd) -> Result<()> {
debug!("Publishing Register edit cmd: {:?}", cmd.dst());
let request = Request::Cmd(Cmd::Register(cmd));
let responses = self.client.send_to_closest(request).await?;
let all_ok = responses
.iter()
.all(|resp| matches!(resp, Ok(Response::Cmd(CmdResponse::EditRegister(Ok(()))))));
if all_ok {
return Ok(());
}
// If not all were Ok, we will return the first error sent to us.
for resp in responses.iter().flatten() {
if let Response::Cmd | {
self.register.tag()
} | identifier_body |
offline_replica.rs | messages::{
Cmd, CmdResponse, CreateRegister, EditRegister, Query, QueryResponse, RegisterCmd,
RegisterQuery, Request, Response, SignedRegisterCreate, SignedRegisterEdit,
},
register::{Action, Entry, EntryHash, Permissions, Policy, Register as RegisterReplica, User},
};
use bincode::serialize;
use std::collections::{BTreeSet, LinkedList};
use xor_name::XorName;
/// Ops made to an offline Register instance are applied locally only,
/// and accumulated till the user explicitly calls 'sync'. The user can
/// switch back to sync with the network for every op by invoking `online` API.
pub struct RegisterOffline {
client: Client,
register: RegisterReplica,
ops: LinkedList<RegisterCmd>, // Cached operations.
}
impl RegisterOffline {
/// Create a new Register offline.
pub fn create(client: Client, name: XorName, tag: u64) -> Result<Self> {
Self::new(client, name, tag)
}
/// Retrieve a Register from the network to work on it offline.
pub(super) async fn retrieve(client: Client, name: XorName, tag: u64) -> Result<Self> {
let register = Self::get_register(&client, name, tag).await?;
Ok(Self {
client,
register,
ops: LinkedList::new(),
})
}
/// Instantiate a ReplicaOffline from a given Register instance.
pub(super) fn from(replica: Register) -> Self {
Self {
client: replica.offline_reg.client,
register: replica.offline_reg.register,
ops: LinkedList::new(),
}
}
/// Switch to 'online' mode where each op made locally is immediately pushed to the network.
pub async fn online(mut self) -> Result<Register> {
self.push().await?;
Ok(Register { offline_reg: self })
}
/// Return the Policy of the Register.
pub fn policy(&self) -> &Policy {
self.register.policy()
}
/// Return the XorName of the Register.
pub fn name(&self) -> &XorName {
self.register.name()
}
/// Return the tag value of the Register.
pub fn tag(&self) -> u64 {
self.register.tag()
}
/// Return the number of items held in the register
pub fn size(&self) -> u64 {
self.register.size()
}
/// Return a value corresponding to the provided 'hash', if present.
pub fn get(&self, hash: EntryHash) -> Result<&Entry> {
let entry = self.register.get(hash)?;
Ok(entry)
}
/// Read the last entry, or entries when there are branches, if the register is not empty.
pub fn read(&self) -> BTreeSet<(EntryHash, Entry)> {
self.register.read()
}
/// Write a new value onto the Register atop latest value.
/// It returns an error if it finds branches in the content/entries; if it is | }
self.write_atop(entry, children.into_iter().map(|(hash, _)| hash).collect())
}
/// Write a new value onto the Register atop latest value.
/// If there are branches of content/entries, it automatically merges them
/// all leaving the new value as a single latest value of the Register.
/// Note you can use `write` API instead if you need to handle
/// content/entries branches in a different way.
pub fn write_merging_branches(&mut self, entry: &[u8]) -> Result<()> {
let children: BTreeSet<EntryHash> = self
.register
.read()
.into_iter()
.map(|(hash, _)| hash)
.collect();
self.write_atop(entry, children)
}
/// Write a new value onto the Register atop the set of branches/entries
/// referenced by the provided list of their corresponding entry hash.
/// Note you can use `write_merging_branches` API instead if you
/// want to write atop all existing branches/entries.
pub fn write_atop(&mut self, entry: &[u8], children: BTreeSet<EntryHash>) -> Result<()> {
// we need to check permissions first
let public_key = self.client.signer_pk();
self.register
.check_permissions(Action::Write, Some(User::Key(public_key)))?;
let (_hash, edit) = self.register.write(entry.into(), children)?;
let op = EditRegister {
address: *self.register.address(),
edit,
};
let auth = DataAuthority {
public_key,
signature: self.client.sign(&serialize(&op)?),
};
let cmd = RegisterCmd::Edit(SignedRegisterEdit { op, auth });
self.ops.push_front(cmd);
Ok(())
}
/// Sync this Register with the replicas on the network.
pub async fn sync(&mut self) -> Result<()> {
debug!("Syncing Register at {}, {}!", self.name(), self.tag(),);
// FIXME: handle the scenario where the Register doesn't exist on the network yet
let remote_replica = Self::get_register(&self.client, *self.name(), self.tag()).await?;
self.register.merge(remote_replica);
self.push().await
}
/// Push all operations made locally to the replicas of this Register on the network.
pub async fn push(&mut self) -> Result<()> {
let ops_len = self.ops.len();
if ops_len > 0 {
let name = *self.name();
let tag = self.tag();
debug!("Pushing {ops_len} cached Register cmds at {name}, {tag}!",);
// TODO: send them all concurrently
while let Some(cmd) = self.ops.pop_back() {
let result = match cmd {
RegisterCmd::Create { .. } => self.publish_register_create(cmd.clone()).await,
RegisterCmd::Edit { .. } => self.publish_register_edit(cmd.clone()).await,
};
if let Err(err) = result {
warn!("Did not push Register cmd on all nodes in the close group!: {err}");
// We keep the cmd for next sync to retry
self.ops.push_back(cmd);
return Err(err);
}
}
debug!("Successfully pushed {ops_len} Register cmds at {name}, {tag}!",);
}
Ok(())
}
// ********* Private helpers *********
// Create a new RegisterOffline instance with the given name and tag.
fn new(client: Client, name: XorName, tag: u64) -> Result<Self> {
let public_key = client.signer_pk();
let owner = User::Key(public_key);
let policy = Policy {
owner,
permissions: [(User::Anyone, Permissions::new(true))]
.into_iter()
.collect(),
};
let op = CreateRegister {
name,
tag,
policy: policy.clone(),
};
let auth = DataAuthority {
public_key,
signature: client.sign(&serialize(&op)?),
};
let create_cmd = RegisterCmd::Create(SignedRegisterCreate { op, auth });
let register = RegisterReplica::new(owner, name, tag, policy);
let reg = Self {
client,
register,
ops: LinkedList::from([create_cmd]),
};
Ok(reg)
}
// Publish a `Register` creation command on the network.
async fn publish_register_create(&self, cmd: RegisterCmd) -> Result<()> {
debug!("Publishing Register create cmd: {:?}", cmd.dst());
let request = Request::Cmd(Cmd::Register(cmd));
let responses = self.client.send_to_closest(request).await?;
let all_ok = responses
.iter()
.all(|resp| matches!(resp, Ok(Response::Cmd(CmdResponse::CreateRegister(Ok(()))))));
if all_ok {
return Ok(());
}
// If not all were Ok, we will return the first error sent to us.
for resp in responses.iter().flatten() {
if let Response::Cmd(CmdResponse::CreateRegister(result)) = resp {
result.clone()?;
};
}
// If there were no success or fail to the expected query,
// we check if there were any send errors.
for resp in responses {
let _ = resp?;
}
// If there were no register errors, then we had unexpected responses.
Err(Error::Protocol(ProtocolError::UnexpectedResponses))
}
// Publish a `Register` edit command in the network.
async fn publish_register_edit(&self, cmd: RegisterCmd) -> Result<()> {
debug!("Publishing Register edit cmd: {:?}", cmd.dst());
let request = Request::Cmd(Cmd::Register(cmd));
let responses = self.client.send_to_closest(request).await?;
let all_ok = responses
.iter()
.all(|resp| matches!(resp, Ok(Response::Cmd(CmdResponse::EditRegister(Ok(()))))));
if all_ok {
return Ok(());
}
// If not all were Ok, we will return the first error sent to us.
for resp in responses.iter().flatten() {
if let Response::Cmd(Cmd | /// required to merge/resolve the branches, invoke the `write_merging_branches` API.
pub fn write(&mut self, entry: &[u8]) -> Result<()> {
let children = self.register.read();
if children.len() > 1 {
return Err(Error::ContentBranchDetected(children)); | random_line_split |
scene_test.js | 50, 280, 0, new THREE.Vector3(-500, 360, -2300), new THREE.Quaternion(Math.sin(Math.PI * 0.5), 0, 0, Math.cos(Math.PI * 0.5)), baseMaterialRed);
var cone1 = createCone(400, 250, 0, new THREE.Vector3(500, 175, -1200), quat, baseMaterialRed);
var cone2 = createCone(150, 120, 0, new THREE.Vector3(600, 340, -2700), new THREE.Quaternion(Math.sin(Math.PI * 0.5), 0, 0, Math.cos(Math.PI * 0.5)), baseMaterialRed);
// cylinders
createCylinder(50.0, 510, 0, new THREE.Vector3(-400, 245, -1800), quat, baseMaterialRed);
createCylinder(50.0, 510, 0, new THREE.Vector3(0, 245, -1500), quat, baseMaterialRed);
createCylinder(50.0, 510, 0, new THREE.Vector3(50, 245, -700), quat, baseMaterialRed);
createCylinder(50.0, 510, 0, new THREE.Vector3(500, 245, -700), quat, baseMaterialRed);
console.log("change5");
}
function createParalellepiped(sx, sy, sz, mass, pos, quat, material) {
var threeObject = new THREE.Mesh(new THREE.BoxBufferGeometry(sx, sy, sz, 1, 1, 1), material);
var shape = new Ammo.btBoxShape(new Ammo.btVector3(sx * 0.5, sy * 0.5, sz * 0.5));
shape.setMargin(margin);
createRigidBody(threeObject, shape, mass, pos, quat);
return threeObject;
}
function createSphere(radius, mass, pos, quat, material) {
var threeObject = new THREE.Mesh(new THREE.SphereBufferGeometry(radius, 20, 20), material);
var shape = new Ammo.btSphereShape(radius);
shape.setMargin(margin);
createRigidBody(threeObject, shape, mass, pos, quat);
return threeObject;
}
function createCone(radius, height, mass, pos, quat, material) {
var threeObject = new THREE.Mesh(new THREE.ConeBufferGeometry(radius, height, 20, 2), material);
var shape = new Ammo.btConeShape(radius, height);
shape.setMargin(margin);
createRigidBody(threeObject, shape, mass, pos, quat);
return threeObject;
}
function createCylinder(radius, height, mass, pos, quat, material) {
var threeObject = new THREE.Mesh(new THREE.CylinderBufferGeometry(radius, radius, height, 20, 1), material);
var shape = new Ammo.btCylinderShape(new Ammo.btVector3(radius, height * 0.5, radius));
shape.setMargin(margin);
createRigidBody(threeObject, shape, mass, pos, quat);
return threeObject;
}
function createRigidBody(object, physicsShape, mass, pos, quat, vel, angVel) {
if (pos) {
object.position.copy(pos);
} else {
pos = object.position;
}
if (quat) {
object.quaternion.copy(quat);
} else {
quat = object.quaternion;
}
var transform = new Ammo.btTransform();
transform.setIdentity();
transform.setOrigin(new Ammo.btVector3(pos.x, pos.y, pos.z));
transform.setRotation(new Ammo.btQuaternion(quat.x, quat.y, quat.z, quat.w));
var motionState = new Ammo.btDefaultMotionState(transform);
var localInertia = new Ammo.btVector3(0, 0, 0);
physicsShape.calculateLocalInertia(mass, localInertia);
var rbInfo = new Ammo.btRigidBodyConstructionInfo(mass, motionState, physicsShape, localInertia);
var body = new Ammo.btRigidBody(rbInfo);
body.setFriction(0.5);
if (vel) {
body.setLinearVelocity(new Ammo.btVector3(vel.x, vel.y, vel.z));
}
if (angVel) {
body.setAngularVelocity(new Ammo.btVector3(angVel.x, angVel.y, angVel.z));
}
object.userData.physicsBody = body;
object.userData.collided = false;
scene.add(object);
if (mass > 0) {
rigidBodies.push(object);
// Disable deactivation
body.setActivationState(4);
}
physicsWorld.addRigidBody(body);
return body;
}
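// Illustrative only (mirrors how the helper functions above call this): a
// dynamic 1x1x1 box dropped from y = 30 could be registered like so:
//
//   var boxBody = createRigidBody(
//       new THREE.Mesh(new THREE.BoxBufferGeometry(1, 1, 1), createMaterial()),
//       new Ammo.btBoxShape(new Ammo.btVector3(0.5, 0.5, 0.5)),
//       1,                                // mass > 0, so it joins rigidBodies
//       new THREE.Vector3(0, 30, 0),
//       new THREE.Quaternion(0, 0, 0, 1));
//
// Zero-mass bodies stay static; positive-mass bodies are kept in rigidBodies so
// the render loop can copy their physics transforms back onto the meshes.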
function createMaterial() {
return new THREE.MeshPhongMaterial({ color: Math.floor(Math.random() * (1 << 24)) });
}
function generateNewSphere() {
var threeObject = null;
var shape = null;
var objectSize = 2;
var margin = 0.05;
// Sphere
var radius = 1 + Math.random() * objectSize;
// threeObject = new THREE.Mesh(new THREE.SphereBufferGeometry(radius, 20, 20), createMaterial());
var material = new THREE.MeshBasicMaterial({ color: 0xffffff, envMap: scene.background, refractionRatio: 0.5 });
material.envMap.mapping = THREE.CubeRefractionMapping;
threeObject = new THREE.Mesh(new THREE.SphereBufferGeometry(radius, 20, 20), material);
shape = new Ammo.btSphereShape(radius);
shape.setMargin(margin);
threeObject.position.set(Math.random() - 0.5, objectSize + 25, Math.random() - 0.5);
var mass = 0.1;
createRigidBody(threeObject, shape, mass, threeObject.position, new THREE.Quaternion(0, 0, 0, 1));
}
function initInput() {
// keyboard control of arm
window.addEventListener('keydown', function (event) {
switch (event.keyCode) {
// Q
case 81:
armMovement = 1;
break;
// A
case 65:
armMovement = - 1;
break;
// S
case 83:
armMovement = 0;
break;
case 87:
generateNewSphere();
console.log(GEN.test(rigidBodies));
break;
}
}, false);
window.addEventListener('keyup', function () {
// armMovement = 0;
}, false);
// mouse control of shooting
window.addEventListener('mousedown', function (event) {
// triggered by right button
if (event.which != 3) {
return;
}
mouseCoords.set(
(event.clientX / window.innerWidth) * 2 - 1,
- (event.clientY / window.innerHeight) * 2 + 1
);
raycaster.setFromCamera(mouseCoords, camera);
// Creates a ball and throws it
var ballMass = 35;
var ballRadius = 40;
var ball = new THREE.Mesh(new THREE.SphereBufferGeometry(ballRadius, 14, 10), ballMaterial);
ball.castShadow = true;
ball.receiveShadow = true;
var ballShape = new Ammo.btSphereShape(ballRadius);
ballShape.setMargin(margin);
var pos = new THREE.Vector3();
var quat = new THREE.Quaternion();
pos.copy(raycaster.ray.direction);
pos.add(raycaster.ray.origin);
quat.set(0, 0, 0, 1);
var ballBody = createRigidBody(ball, ballShape, ballMass, pos, quat);
var vel = new THREE.Vector3();
vel.copy(raycaster.ray.direction);
vel.multiplyScalar(5000);
ballBody.setLinearVelocity(new Ammo.btVector3(vel.x, vel.y, vel.z));
}, false);
}
function onWindowResize() {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize(window.innerWidth, window.innerHeight);
}
function animate() {
requestAnimationFrame(animate);
render();
stats.update();
}
function render() {
var deltaTime = clock.getDelta();
updatePhysics(deltaTime);
renderer.render(scene, camera);
}
function updatePhysics(deltaTime) {
// Step world
physicsWorld.stepSimulation(deltaTime, 10);
// Update rigid bodies
for (var i = 0, il = rigidBodies.length; i < il; i++) | {
var objThree = rigidBodies[i];
var objPhys = objThree.userData.physicsBody;
var ms = objPhys.getMotionState();
if (ms) {
ms.getWorldTransform(transformAux1);
var p = transformAux1.getOrigin();
var q = transformAux1.getRotation();
objThree.position.set(p.x(), p.y(), p.z());
objThree.quaternion.set(q.x(), q.y(), q.z(), q.w());
}
} | conditional_block |
|
scene_test.js | , 30, 30);
light.castShadow = true;
var d = 30;
light.shadow.camera.left = - d;
light.shadow.camera.right = d;
light.shadow.camera.top = d;
light.shadow.camera.bottom = - d;
light.shadow.camera.near = 1; | light.shadow.camera.far = 100;
light.shadow.mapSize.x = 1024;
light.shadow.mapSize.y = 1024;
scene.add(light);
stats = new Stats();
stats.domElement.style.position = 'absolute';
stats.domElement.style.top = '0px';
container.appendChild(stats.domElement);
// cubeCamera for reflection effect
cubeCamera = new THREE.CubeCamera(1, 1000, 256);
cubeCamera.renderTarget.texture.generateMipmaps = true;
cubeCamera.renderTarget.texture.minFilter = THREE.LinearMipmapLinearFilter;
// cubeCamera.renderTarget.texture.mapping = THREE.CubeReflectionMapping;
cubeCamera.position.set(0, 12, 0);
scene.add(cubeCamera);
window.addEventListener('resize', onWindowResize, false);
}
function initPhysics() {
// Physics configuration
collisionConfiguration = new Ammo.btSoftBodyRigidBodyCollisionConfiguration();
dispatcher = new Ammo.btCollisionDispatcher(collisionConfiguration);
broadphase = new Ammo.btDbvtBroadphase();
solver = new Ammo.btSequentialImpulseConstraintSolver();
softBodySolver = new Ammo.btDefaultSoftBodySolver();
physicsWorld = new Ammo.btSoftRigidDynamicsWorld(dispatcher, broadphase, solver, collisionConfiguration, softBodySolver);
physicsWorld.setGravity(new Ammo.btVector3(0, gravityConstant * 300, 0));
physicsWorld.getWorldInfo().set_m_gravity(new Ammo.btVector3(0, gravityConstant, 0));
transformAux1 = new Ammo.btTransform();
}
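// Note added for clarity (not in the original file): the rigid-body world uses
// the scaled gravity (gravityConstant * 300) while the soft-body world info
// keeps the unscaled gravityConstant; `gravityConstant` is defined earlier in
// the file and is assumed to be negative (pointing down the y axis). Changing
// both together would look like:
//
//   physicsWorld.setGravity(new Ammo.btVector3(0, g, 0));
//   physicsWorld.getWorldInfo().set_m_gravity(new Ammo.btVector3(0, g, 0));
//
// where g is a hypothetical shared gravity value.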
function createObjects() {
/* only for showing the axis direction */
var ball0 = new THREE.Mesh(new THREE.SphereBufferGeometry(50, 20, 20), new THREE.MeshPhongMaterial({ color: 0xffffff }));
var ballx = new THREE.Mesh(new THREE.SphereBufferGeometry(50, 20, 20), new THREE.MeshPhongMaterial({ color: 0xff0000 }));
ballx.position.set(1000, 0, 0);
var bally = new THREE.Mesh(new THREE.SphereBufferGeometry(50, 20, 20), new THREE.MeshPhongMaterial({ color: 0x00ff00 }));
bally.position.set(0, 1000, 0);
var ballz = new THREE.Mesh(new THREE.SphereBufferGeometry(50, 20, 20), new THREE.MeshPhongMaterial({ color: 0x0000ff }));
ballz.position.set(0, 0, 1000);
scene.add(ball0);
scene.add(ballx);
scene.add(bally);
scene.add(ballz);
/* add physical objects to scene */
var pos = new THREE.Vector3();
var quat = new THREE.Quaternion();
var baseMaterialRed = new THREE.MeshPhongMaterial({ color: 0xaa0000 });
var baseMaterialYel = new THREE.MeshPhongMaterial({ color: 0xa0a000 });
var baseMaterialGreen = new THREE.MeshPhongMaterial({ color: 0x00a000 });
// boxes of the glsl's quads
var theta = Math.atan(0.1);
var slope = createParalellepiped(2000, 20, 3115.46, 0, new THREE.Vector3(0, 145, -3100 * 0.5), new THREE.Quaternion(Math.sin(theta / 2), 0, 0, Math.cos(theta / 2)), baseMaterialRed);
var back = createParalellepiped(2000, 510, 20, 0, new THREE.Vector3(0, 245, -3000), quat, baseMaterialYel);
var left = createParalellepiped(20, 510, 3100, 0, new THREE.Vector3(-1000, 245, -3000 * 0.5), quat, baseMaterialYel);
var right = createParalellepiped(20, 510, 3100, 0, new THREE.Vector3(1000, 245, -3000 * 0.5), quat, baseMaterialYel);
// boxes of the glsl's boxes
var box0 = createParalellepiped(164, 340, 160, 0, new THREE.Vector3(180, 170, -350), new THREE.Quaternion(0, Math.sin(Math.PI * 0.1), 0, Math.cos(Math.PI * 0.1)), baseMaterialYel);
var box1 = createParalellepiped(172, 170, 160, 0, new THREE.Vector3(0, 300, -2500), new THREE.Quaternion(0, Math.sin(-Math.PI * 0.05), 0, Math.cos(-Math.PI * 0.05)), baseMaterialGreen);
// spheres
var sphere0 = createSphere(80, 0, new THREE.Vector3(500, 270, -2000), quat, baseMaterialRed);
var sphere1 = createSphere(100, 0, new THREE.Vector3(-400, 190, -1000), quat, baseMaterialGreen);
var sphere2 = createSphere(70, 0, new THREE.Vector3(-700, 100, -300), quat, baseMaterialYel);
// cones
var cone0 = createCone(150, 280, 0, new THREE.Vector3(-500, 360, -2300), new THREE.Quaternion(Math.sin(Math.PI * 0.5), 0, 0, Math.cos(Math.PI * 0.5)), baseMaterialRed);
var cone1 = createCone(400, 250, 0, new THREE.Vector3(500, 175, -1200), quat, baseMaterialRed);
var cone2 = createCone(150, 120, 0, new THREE.Vector3(600, 340, -2700), new THREE.Quaternion(Math.sin(Math.PI * 0.5), 0, 0, Math.cos(Math.PI * 0.5)), baseMaterialRed);
// cylinders
createCylinder(50.0, 510, 0, new THREE.Vector3(-400, 245, -1800), quat, baseMaterialRed);
createCylinder(50.0, 510, 0, new THREE.Vector3(0, 245, -1500), quat, baseMaterialRed);
createCylinder(50.0, 510, 0, new THREE.Vector3(50, 245, -700), quat, baseMaterialRed);
createCylinder(50.0, 510, 0, new THREE.Vector3(500, 245, -700), quat, baseMaterialRed);
console.log("change5");
}
function createParalellepiped(sx, sy, sz, mass, pos, quat, material) {
var threeObject = new THREE.Mesh(new THREE.BoxBufferGeometry(sx, sy, sz, 1, 1, 1), material);
var shape = new Ammo.btBoxShape(new Ammo.btVector3(sx * 0.5, sy * 0.5, sz * 0.5));
shape.setMargin(margin);
createRigidBody(threeObject, shape, mass, pos, quat);
return threeObject;
}
function createSphere(radius, mass, pos, quat, material) {
var threeObject = new THREE.Mesh(new THREE.SphereBufferGeometry(radius, 20, 20), material);
var shape = new Ammo.btSphereShape(radius);
shape.setMargin(margin);
createRigidBody(threeObject, shape, mass, pos, quat);
return threeObject;
}
function createCone(radius, height, mass, pos, quat, material) {
var threeObject = new THREE.Mesh(new THREE.ConeBufferGeometry(radius, height, 20, 2), material);
var shape = new Ammo.btConeShape(radius, height);
shape.setMargin(margin);
createRigidBody(threeObject, shape, mass, pos, quat);
return threeObject;
}
function createCylinder(radius, height, mass, pos, quat, material) {
var threeObject = new | random_line_split |
|
scene_test.js | 00), new THREE.Quaternion(0, Math.sin(-Math.PI * 0.05), 0, Math.cos(-Math.PI * 0.05)), baseMaterialGreen);
// spheres
var sphere0 = createSphere(80, 0, new THREE.Vector3(500, 270, -2000), quat, baseMaterialRed);
var sphere1 = createSphere(100, 0, new THREE.Vector3(-400, 190, -1000), quat, baseMaterialGreen);
var sphere2 = createSphere(70, 0, new THREE.Vector3(-700, 100, -300), quat, baseMaterialYel);
// cones
var cone0 = createCone(150, 280, 0, new THREE.Vector3(-500, 360, -2300), new THREE.Quaternion(Math.sin(Math.PI * 0.5), 0, 0, Math.cos(Math.PI * 0.5)), baseMaterialRed);
var cone1 = createCone(400, 250, 0, new THREE.Vector3(500, 175, -1200), quat, baseMaterialRed);
var cone2 = createCone(150, 120, 0, new THREE.Vector3(600, 340, -2700), new THREE.Quaternion(Math.sin(Math.PI * 0.5), 0, 0, Math.cos(Math.PI * 0.5)), baseMaterialRed);
// cylinders
createCylinder(50.0, 510, 0, new THREE.Vector3(-400, 245, -1800), quat, baseMaterialRed);
createCylinder(50.0, 510, 0, new THREE.Vector3(0, 245, -1500), quat, baseMaterialRed);
createCylinder(50.0, 510, 0, new THREE.Vector3(50, 245, -700), quat, baseMaterialRed);
createCylinder(50.0, 510, 0, new THREE.Vector3(500, 245, -700), quat, baseMaterialRed);
console.log("change5");
}
function createParalellepiped(sx, sy, sz, mass, pos, quat, material) {
var threeObject = new THREE.Mesh(new THREE.BoxBufferGeometry(sx, sy, sz, 1, 1, 1), material);
var shape = new Ammo.btBoxShape(new Ammo.btVector3(sx * 0.5, sy * 0.5, sz * 0.5));
shape.setMargin(margin);
createRigidBody(threeObject, shape, mass, pos, quat);
return threeObject;
}
function createSphere(radius, mass, pos, quat, material) {
var threeObject = new THREE.Mesh(new THREE.SphereBufferGeometry(radius, 20, 20), material);
var shape = new Ammo.btSphereShape(radius);
shape.setMargin(margin);
createRigidBody(threeObject, shape, mass, pos, quat);
return threeObject;
}
function createCone(radius, height, mass, pos, quat, material) {
var threeObject = new THREE.Mesh(new THREE.ConeBufferGeometry(radius, height, 20, 2), material);
var shape = new Ammo.btConeShape(radius, height);
shape.setMargin(margin);
createRigidBody(threeObject, shape, mass, pos, quat);
return threeObject;
}
function createCylinder(radius, height, mass, pos, quat, material) {
var threeObject = new THREE.Mesh(new THREE.CylinderBufferGeometry(radius, radius, height, 20, 1), material);
var shape = new Ammo.btCylinderShape(new Ammo.btVector3(radius, height * 0.5, radius));
shape.setMargin(margin);
createRigidBody(threeObject, shape, mass, pos, quat);
return threeObject;
}
function createRigidBody(object, physicsShape, mass, pos, quat, vel, angVel) {
if (pos) {
object.position.copy(pos);
} else {
pos = object.position;
}
if (quat) {
object.quaternion.copy(quat);
} else {
quat = object.quaternion;
}
var transform = new Ammo.btTransform();
transform.setIdentity();
transform.setOrigin(new Ammo.btVector3(pos.x, pos.y, pos.z));
transform.setRotation(new Ammo.btQuaternion(quat.x, quat.y, quat.z, quat.w));
var motionState = new Ammo.btDefaultMotionState(transform);
var localInertia = new Ammo.btVector3(0, 0, 0);
physicsShape.calculateLocalInertia(mass, localInertia);
var rbInfo = new Ammo.btRigidBodyConstructionInfo(mass, motionState, physicsShape, localInertia);
var body = new Ammo.btRigidBody(rbInfo);
body.setFriction(0.5);
if (vel) {
body.setLinearVelocity(new Ammo.btVector3(vel.x, vel.y, vel.z));
}
if (angVel) {
body.setAngularVelocity(new Ammo.btVector3(angVel.x, angVel.y, angVel.z));
}
object.userData.physicsBody = body;
object.userData.collided = false;
scene.add(object);
if (mass > 0) {
rigidBodies.push(object);
// Disable deactivation
body.setActivationState(4);
}
physicsWorld.addRigidBody(body);
return body;
}
function createMaterial() {
return new THREE.MeshPhongMaterial({ color: Math.floor(Math.random() * (1 << 24)) });
}
function generateNewSphere() {
var threeObject = null;
var shape = null;
var objectSize = 2;
var margin = 0.05;
// Sphere
var radius = 1 + Math.random() * objectSize;
// threeObject = new THREE.Mesh(new THREE.SphereBufferGeometry(radius, 20, 20), createMaterial());
var material = new THREE.MeshBasicMaterial({ color: 0xffffff, envMap: scene.background, refractionRatio: 0.5 });
material.envMap.mapping = THREE.CubeRefractionMapping;
threeObject = new THREE.Mesh(new THREE.SphereBufferGeometry(radius, 20, 20), material);
shape = new Ammo.btSphereShape(radius);
shape.setMargin(margin);
threeObject.position.set(Math.random() - 0.5, objectSize + 25, Math.random() - 0.5);
var mass = 0.1;
createRigidBody(threeObject, shape, mass, threeObject.position, new THREE.Quaternion(0, 0, 0, 1));
}
function initInput() {
// keyboard control of arm
window.addEventListener('keydown', function (event) {
switch (event.keyCode) {
// Q
case 81:
armMovement = 1;
break;
// A
case 65:
armMovement = - 1;
break;
// S
case 83:
armMovement = 0;
break;
case 87:
generateNewSphere();
console.log(GEN.test(rigidBodies));
break;
}
}, false);
window.addEventListener('keyup', function () {
// armMovement = 0;
}, false);
// mouse control of shooting
window.addEventListener('mousedown', function (event) {
// triggered by right button
if (event.which != 3) {
return;
}
mouseCoords.set(
(event.clientX / window.innerWidth) * 2 - 1,
- (event.clientY / window.innerHeight) * 2 + 1
);
raycaster.setFromCamera(mouseCoords, camera);
// Creates a ball and throws it
var ballMass = 35;
var ballRadius = 40;
var ball = new THREE.Mesh(new THREE.SphereBufferGeometry(ballRadius, 14, 10), ballMaterial);
ball.castShadow = true;
ball.receiveShadow = true;
var ballShape = new Ammo.btSphereShape(ballRadius);
ballShape.setMargin(margin);
var pos = new THREE.Vector3();
var quat = new THREE.Quaternion();
pos.copy(raycaster.ray.direction);
pos.add(raycaster.ray.origin);
quat.set(0, 0, 0, 1);
var ballBody = createRigidBody(ball, ballShape, ballMass, pos, quat);
var vel = new THREE.Vector3();
vel.copy(raycaster.ray.direction);
vel.multiplyScalar(5000);
ballBody.setLinearVelocity(new Ammo.btVector3(vel.x, vel.y, vel.z));
}, false);
}
function onWindowResize() {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize(window.innerWidth, window.innerHeight);
}
function animate() | {
requestAnimationFrame(animate);
render();
stats.update();
} | identifier_body |
|
scene_test.js | .Quaternion();
var baseMaterialRed = new THREE.MeshPhongMaterial({ color: 0xaa0000 });
var baseMaterialYel = new THREE.MeshPhongMaterial({ color: 0xa0a000 });
var baseMaterialGreen = new THREE.MeshPhongMaterial({ color: 0x00a000 });
// boxes of the glsl's quads
var theta = Math.atan(0.1);
var slope = createParalellepiped(2000, 20, 3115.46, 0, new THREE.Vector3(0, 145, -3100 * 0.5), new THREE.Quaternion(Math.sin(theta / 2), 0, 0, Math.cos(theta / 2)), baseMaterialRed);
var back = createParalellepiped(2000, 510, 20, 0, new THREE.Vector3(0, 245, -3000), quat, baseMaterialYel);
var left = createParalellepiped(20, 510, 3100, 0, new THREE.Vector3(-1000, 245, -3000 * 0.5), quat, baseMaterialYel);
var right = createParalellepiped(20, 510, 3100, 0, new THREE.Vector3(1000, 245, -3000 * 0.5), quat, baseMaterialYel);
// boxes of the glsl's boxes
var box0 = createParalellepiped(164, 340, 160, 0, new THREE.Vector3(180, 170, -350), new THREE.Quaternion(0, Math.sin(Math.PI * 0.1), 0, Math.cos(Math.PI * 0.1)), baseMaterialYel);
var box1 = createParalellepiped(172, 170, 160, 0, new THREE.Vector3(0, 300, -2500), new THREE.Quaternion(0, Math.sin(-Math.PI * 0.05), 0, Math.cos(-Math.PI * 0.05)), baseMaterialGreen);
// spheres
var sphere0 = createSphere(80, 0, new THREE.Vector3(500, 270, -2000), quat, baseMaterialRed);
var sphere1 = createSphere(100, 0, new THREE.Vector3(-400, 190, -1000), quat, baseMaterialGreen);
var sphere2 = createSphere(70, 0, new THREE.Vector3(-700, 100, -300), quat, baseMaterialYel);
// cones
var cone0 = createCone(150, 280, 0, new THREE.Vector3(-500, 360, -2300), new THREE.Quaternion(Math.sin(Math.PI * 0.5), 0, 0, Math.cos(Math.PI * 0.5)), baseMaterialRed);
var cone1 = createCone(400, 250, 0, new THREE.Vector3(500, 175, -1200), quat, baseMaterialRed);
var cone2 = createCone(150, 120, 0, new THREE.Vector3(600, 340, -2700), new THREE.Quaternion(Math.sin(Math.PI * 0.5), 0, 0, Math.cos(Math.PI * 0.5)), baseMaterialRed);
// cylinders
createCylinder(50.0, 510, 0, new THREE.Vector3(-400, 245, -1800), quat, baseMaterialRed);
createCylinder(50.0, 510, 0, new THREE.Vector3(0, 245, -1500), quat, baseMaterialRed);
createCylinder(50.0, 510, 0, new THREE.Vector3(50, 245, -700), quat, baseMaterialRed);
createCylinder(50.0, 510, 0, new THREE.Vector3(500, 245, -700), quat, baseMaterialRed);
console.log("change5");
}
function createParalellepiped(sx, sy, sz, mass, pos, quat, material) {
var threeObject = new THREE.Mesh(new THREE.BoxBufferGeometry(sx, sy, sz, 1, 1, 1), material);
var shape = new Ammo.btBoxShape(new Ammo.btVector3(sx * 0.5, sy * 0.5, sz * 0.5));
shape.setMargin(margin);
createRigidBody(threeObject, shape, mass, pos, quat);
return threeObject;
}
function createSphere(radius, mass, pos, quat, material) {
var threeObject = new THREE.Mesh(new THREE.SphereBufferGeometry(radius, 20, 20), material);
var shape = new Ammo.btSphereShape(radius);
shape.setMargin(margin);
createRigidBody(threeObject, shape, mass, pos, quat);
return threeObject;
}
function createCone(radius, height, mass, pos, quat, material) {
var threeObject = new THREE.Mesh(new THREE.ConeBufferGeometry(radius, height, 20, 2), material);
var shape = new Ammo.btConeShape(radius, height);
shape.setMargin(margin);
createRigidBody(threeObject, shape, mass, pos, quat);
return threeObject;
}
function createCylinder(radius, height, mass, pos, quat, material) {
var threeObject = new THREE.Mesh(new THREE.CylinderBufferGeometry(radius, radius, height, 20, 1), material);
var shape = new Ammo.btCylinderShape(new Ammo.btVector3(radius, height * 0.5, radius));
shape.setMargin(margin);
createRigidBody(threeObject, shape, mass, pos, quat);
return threeObject;
}
function createRigidBody(object, physicsShape, mass, pos, quat, vel, angVel) {
if (pos) {
object.position.copy(pos);
} else {
pos = object.position;
}
if (quat) {
object.quaternion.copy(quat);
} else {
quat = object.quaternion;
}
var transform = new Ammo.btTransform();
transform.setIdentity();
transform.setOrigin(new Ammo.btVector3(pos.x, pos.y, pos.z));
transform.setRotation(new Ammo.btQuaternion(quat.x, quat.y, quat.z, quat.w));
var motionState = new Ammo.btDefaultMotionState(transform);
var localInertia = new Ammo.btVector3(0, 0, 0);
physicsShape.calculateLocalInertia(mass, localInertia);
var rbInfo = new Ammo.btRigidBodyConstructionInfo(mass, motionState, physicsShape, localInertia);
var body = new Ammo.btRigidBody(rbInfo);
body.setFriction(0.5);
if (vel) {
body.setLinearVelocity(new Ammo.btVector3(vel.x, vel.y, vel.z));
}
if (angVel) {
body.setAngularVelocity(new Ammo.btVector3(angVel.x, angVel.y, angVel.z));
}
object.userData.physicsBody = body;
object.userData.collided = false;
scene.add(object);
if (mass > 0) {
rigidBodies.push(object);
// Disable deactivation
body.setActivationState(4);
}
physicsWorld.addRigidBody(body);
return body;
}
function createMaterial() {
return new THREE.MeshPhongMaterial({ color: Math.floor(Math.random() * (1 << 24)) });
}
function generateNewSphere() {
var threeObject = null;
var shape = null;
var objectSize = 2;
var margin = 0.05;
// Sphere
var radius = 1 + Math.random() * objectSize;
// threeObject = new THREE.Mesh(new THREE.SphereBufferGeometry(radius, 20, 20), createMaterial());
var material = new THREE.MeshBasicMaterial({ color: 0xffffff, envMap: scene.background, refractionRatio: 0.5 });
material.envMap.mapping = THREE.CubeRefractionMapping;
threeObject = new THREE.Mesh(new THREE.SphereBufferGeometry(radius, 20, 20), material);
shape = new Ammo.btSphereShape(radius);
shape.setMargin(margin);
threeObject.position.set(Math.random() - 0.5, objectSize + 25, Math.random() - 0.5);
var mass = 0.1;
createRigidBody(threeObject, shape, mass, threeObject.position, new THREE.Quaternion(0, 0, 0, 1));
}
function | initInput | identifier_name |
|
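The helper functions above register every dynamic body in `rigidBodies` and stash the `Ammo.btRigidBody` in `userData.physicsBody`, but the per-frame step that consumes that bookkeeping is not part of this excerpt. Below is a minimal sketch of the usual three.js/ammo.js update loop; `physicsWorld` and `rigidBodies` come from the code above, while `updatePhysics`, `transformAux1`, and the delta-time source are illustrative names assumed for the example.
var transformAux1 = new Ammo.btTransform(); // reusable scratch transform (assumed, not from this file)
function updatePhysics(deltaTime) {
    // Advance the simulation, allowing up to 10 fixed substeps per frame.
    physicsWorld.stepSimulation(deltaTime, 10);
    // Copy each rigid body's world transform back onto its THREE.Mesh.
    for (var i = 0, il = rigidBodies.length; i < il; i++) {
        var objThree = rigidBodies[i];
        var objPhys = objThree.userData.physicsBody;
        var ms = objPhys.getMotionState();
        if (ms) {
            ms.getWorldTransform(transformAux1);
            var p = transformAux1.getOrigin();
            var q = transformAux1.getRotation();
            objThree.position.set(p.x(), p.y(), p.z());
            objThree.quaternion.set(q.x(), q.y(), q.z(), q.w());
        }
    }
}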
main.rs | ::TermionBackend;
use tui::Terminal;
// use simplelog::{CombinedLogger, WriteLogger, LevelFilter, Config as LogConfig};
// use std::fs::File;
use std::io::{stdout, BufWriter};
fn main() {
if let Err(e) = run() {
// If an error was raised during an interactive mode call while the alternate screen is in
// use, we have to flush stdout here or the user will not see the error message.
std::io::stdout().flush().unwrap();
// Seems like this initial newline is necessary or the error will be printed with an offset
eprintln!("\nError: {:#}", e);
std::process::exit(1);
}
}
type OurTerminal = Terminal<TermionBackend<AlternateScreen<RawTerminal<Stdout>>>>;
/// Set up/create the terminal for use in interactive mode.
fn get_terminal() -> anyhow::Result<OurTerminal> {
let stdout = std::io::stdout().into_raw_mode()?;
let stdout = AlternateScreen::from(stdout);
let backend = TermionBackend::new(stdout);
let t = Terminal::new(backend)?;
Ok(t)
}
fn | () -> anyhow::Result<()> {
// CombinedLogger::init(
// vec![
// WriteLogger::new(LevelFilter::Debug, LogConfig::default(), File::create("rhc.log").unwrap()),
// ]
// ).unwrap();
let args: Args = Args::from_args();
let output_file = args
.output_file
.map(|path_buf| {
OpenOptions::new()
.create_new(true)
.write(true)
.open(path_buf.as_path())
})
.transpose()?;
let writer: Box<dyn std::io::Write> = match &output_file {
Some(f) => Box::new(f),
None => Box::new(stdout()),
};
let mut writer = BufWriter::new(writer);
// If the user specifies a config location, make sure there's actually a file there
args.config.as_ref().map_or(Ok(()), |c| {
if c.is_file() {
Ok(())
} else {
Err(anyhow!("No config file found at `{}`", c.to_string_lossy()))
}
})?;
// Load the config file using this priority:
// 1. The file specified with the --config arg, if present
// 2. $XDG_CONFIG_HOME/rhc/config.toml, if XDG_CONFIG_HOME is defined
// 3. ~/.config/rhc/config.toml, if present
// If none of the above exist, use the default Config.
let raw_config_location: PathBuf = args.config.unwrap_or_else(|| {
match env::var_os("XDG_CONFIG_HOME") {
Some(xdg_config_home) => PathBuf::from(xdg_config_home),
None => PathBuf::from("~/.config"),
}
.join("rhc")
.join("config.toml")
});
let raw_config_location = raw_config_location.to_string_lossy();
let config_location: Cow<str> = shellexpand::tilde(raw_config_location.as_ref());
let config_path = Path::new(config_location.as_ref());
if args.verbose {
writeln!(
stdout(),
"Looking for config file at {}",
config_path.display()
)?;
}
let config = {
if config_path.is_file() {
Config::new(config_path).context(format!(
"Could not load config file at {}",
config_path.to_string_lossy()
))?
} else {
writeln!(
stdout(),
"No config file found at {}, falling back to default config",
config_path.display()
)?;
Config::default()
}
};
let is_tty = atty::is(Stream::Stdout);
// These two are necessary for use in interactive mode; but conversely, when not at an
// interactive shell, trying to create this `Terminal` will cause an error. So they start as
// None, and will be created on-demand if necessary (no request definition file provided, or
// unbound variables exist).
let mut keys: Option<Keys<AsyncReader>> = None;
let mut terminal: Option<OurTerminal> = None;
// If the user specified a request definition file, just use that; otherwise, enter interactive
// mode to allow them to choose a request definition. In either case, we need to keep track of
// the file names for the request definition that's either provided or selected, as well as the
// environment being used (if any), as these are required for the prompt_for_variables
// function.
let result: anyhow::Result<Option<SelectedValues>> = {
match &args.file {
Some(path) => {
let def: RequestDefinition =
load_file(&path, RequestDefinition::new, "request definition")?;
let env_path: Option<PathBuf> = args.environment;
let env: Option<Environment> = env_path
.as_deref()
.map(|path| load_file(&path, Environment::new, "environment"))
.transpose()?;
Ok(Some(SelectedValues { def, env }))
}
None => {
if is_tty {
// If we have to enter interactive mode, check if there is at least one request
// definition file available. If not, there's nothing that can be done, so
// print a warning and exit.
if get_all_toml_files(&config.request_definition_directory).is_empty() {
Err(anyhow!("No TOML files found under {}. Running rhc in interactive mode requres at least one request definition file.", &config.request_definition_directory))
} else {
// `terminal` and `keys` must be None at this point, so just create them
terminal = Some(get_terminal()?);
keys = Some(termion::async_stdin().keys());
let interactive_result = interactive::interactive_mode(
&config,
args.environment.as_deref(),
&mut keys.as_mut().unwrap(),
&mut terminal.as_mut().unwrap(),
)?;
Ok(interactive_result)
}
} else {
Err(anyhow!("Running in interactive mode requires a TTY"))
}
}
}
};
let result = result?;
// `interactive_mode` will return None if they Ctrl-C out without selecting anything.
// if let Some((mut request_definition, mut vars)) = result {
if let Some(SelectedValues { mut def, env }) = result {
// Split up the variables and environment name immediately to avoid difficulties with borrowing
// `env` later on
let (mut vars, env_name): (Vec<KeyValue>, String) =
env.map_or((vec![], "<none>".to_string()), |e| (e.variables, e.name));
vars.sort();
if let Some(bindings) = args.binding {
for binding in bindings {
match vars.binary_search_by(|item| item.name.cmp(&binding.name)) {
Ok(index) => {
// If variable is already present, overwrite it with the one passed on the
// command line (these have the highest priority)
vars.remove(index);
vars.insert(index, binding);
}
Err(index) => vars.insert(index, binding),
};
}
}
// Substitute the variables that we have at this point into all the places of the
// RequestDefinitions that they can be used (URL, headers, body, query string)
templating::substitute_all(&mut def, &vars);
// // If any unbound variables remain, prompt the user to enter them interactively
let unbound_variables = templating::list_unbound_variables(&def);
let additional_vars: anyhow::Result<Option<Vec<KeyValue>>> = {
if !unbound_variables.is_empty() {
if is_tty {
// `terminal` and `keys` could have been initialized above, so only initialize them
// here if necessary.
if keys.is_none() {
terminal = Some(get_terminal()?);
keys = Some(termion::async_stdin().keys());
}
interactive::prompt_for_variables(
&config,
unbound_variables,
&env_name,
&mut keys.as_mut().unwrap(),
&mut terminal.as_mut().unwrap(),
)
} else {
Err(anyhow!("Running in interactive mode requires a TTY"))
}
} else {
Ok(Some(vec![]))
}
};
// Switch back to the original screen
drop(terminal);
// Flush stdout so the interactive terminal screen is cleared immediately
std::io::stdout().flush().ok();
let additional_vars = additional_vars?;
// `prompt_for_variables` returning None means the user aborted with Ctrl-C and we
// should not send the request
if let Some(additional_vars) = additional_vars {
// Do the final substitution with the user-provided variables
templating::substitute_all(&mut def, &additional_vars);
let mut sp: Option<Spinner> = None;
if is_tty {
sp = Some(Spinner::new(Spinners::Dots, "Sending request...".into()));
}
let res = http::send_request(def, &config).context("Failed sending request")?;
if let Some(s) = sp {
s.stop();
writeln!(writer, "\n")?;
}
let headers = res.headers();
if !(& | run | identifier_name |
main.rs | backend::TermionBackend;
use tui::Terminal;
// use simplelog::{CombinedLogger, WriteLogger, LevelFilter, Config as LogConfig};
// use std::fs::File;
use std::io::{stdout, BufWriter};
fn main() {
if let Err(e) = run() {
// If an error was raised during an interactive mode call while the alternate screen is in
// use, we have to flush stdout here or the user will not see the error message.
std::io::stdout().flush().unwrap();
// Seems like this initial newline is necessary or the error will be printed with an offset
eprintln!("\nError: {:#}", e);
std::process::exit(1);
}
}
type OurTerminal = Terminal<TermionBackend<AlternateScreen<RawTerminal<Stdout>>>>;
/// Set up/create the terminal for use in interactive mode.
fn get_terminal() -> anyhow::Result<OurTerminal> {
let stdout = std::io::stdout().into_raw_mode()?;
let stdout = AlternateScreen::from(stdout);
let backend = TermionBackend::new(stdout);
let t = Terminal::new(backend)?;
Ok(t)
}
fn run() -> anyhow::Result<()> {
// CombinedLogger::init(
// vec![
// WriteLogger::new(LevelFilter::Debug, LogConfig::default(), File::create("rhc.log").unwrap()),
// ]
// ).unwrap();
let args: Args = Args::from_args();
let output_file = args
.output_file
.map(|path_buf| {
OpenOptions::new()
.create_new(true)
.write(true)
.open(path_buf.as_path())
})
.transpose()?;
let writer: Box<dyn std::io::Write> = match &output_file {
Some(f) => Box::new(f),
None => Box::new(stdout()),
};
let mut writer = BufWriter::new(writer);
// If the user specifies a config location, make sure there's actually a file there
args.config.as_ref().map_or(Ok(()), |c| {
if c.is_file() {
Ok(())
} else {
Err(anyhow!("No config file found at `{}`", c.to_string_lossy()))
}
})?;
// Load the config file using this priority:
// 1. The file specified with the --config arg, if present
// 2. $XDG_CONFIG_HOME/rhc/config.toml, if XDG_CONFIG_HOME is defined
// 3. ~/.config/rhc/config.toml, if present
// If none of the above exist, use the default Config.
let raw_config_location: PathBuf = args.config.unwrap_or_else(|| {
match env::var_os("XDG_CONFIG_HOME") {
Some(xdg_config_home) => PathBuf::from(xdg_config_home),
None => PathBuf::from("~/.config"),
}
.join("rhc")
.join("config.toml")
});
let raw_config_location = raw_config_location.to_string_lossy();
let config_location: Cow<str> = shellexpand::tilde(raw_config_location.as_ref());
let config_path = Path::new(config_location.as_ref());
if args.verbose {
writeln!(
stdout(),
"Looking for config file at {}",
config_path.display()
)?;
}
let config = {
if config_path.is_file() {
Config::new(config_path).context(format!(
"Could not load config file at {}",
config_path.to_string_lossy()
))?
} else {
writeln!(
stdout(),
"No config file found at {}, falling back to default config",
config_path.display()
)?;
Config::default()
}
};
let is_tty = atty::is(Stream::Stdout);
// These two are necessary for use in interactive mode; but conversely, when not at an
// interactive shell, trying to create this `Terminal` will cause an error. So they start as
// None, and will be created on-demand if necessary (no request definition file provided, or
// unbound variables exist).
let mut keys: Option<Keys<AsyncReader>> = None;
let mut terminal: Option<OurTerminal> = None;
// If the user specified a request definition file, just use that; otherwise, enter interactive
// mode to allow them to choose a request definition. In either case, we need to keep track of | match &args.file {
Some(path) => {
let def: RequestDefinition =
load_file(&path, RequestDefinition::new, "request definition")?;
let env_path: Option<PathBuf> = args.environment;
let env: Option<Environment> = env_path
.as_deref()
.map(|path| load_file(&path, Environment::new, "environment"))
.transpose()?;
Ok(Some(SelectedValues { def, env }))
}
None => {
if is_tty {
// If we have to enter interactive mode, check if there is at least one request
// definition file available. If not, there's nothing that can be done, so
// print a warning and exit.
if get_all_toml_files(&config.request_definition_directory).is_empty() {
Err(anyhow!("No TOML files found under {}. Running rhc in interactive mode requres at least one request definition file.", &config.request_definition_directory))
} else {
// `terminal` and `keys` must be None at this point, so just create them
terminal = Some(get_terminal()?);
keys = Some(termion::async_stdin().keys());
let interactive_result = interactive::interactive_mode(
&config,
args.environment.as_deref(),
&mut keys.as_mut().unwrap(),
&mut terminal.as_mut().unwrap(),
)?;
Ok(interactive_result)
}
} else {
Err(anyhow!("Running in interactive mode requires a TTY"))
}
}
}
};
let result = result?;
// `interactive_mode` will return None if they Ctrl-C out without selecting anything.
// if let Some((mut request_definition, mut vars)) = result {
if let Some(SelectedValues { mut def, env }) = result {
// Split up the variables and environment name immediately to avoid difficulties with borrowing
// `env` later on
let (mut vars, env_name): (Vec<KeyValue>, String) =
env.map_or((vec![], "<none>".to_string()), |e| (e.variables, e.name));
vars.sort();
if let Some(bindings) = args.binding {
for binding in bindings {
match vars.binary_search_by(|item| item.name.cmp(&binding.name)) {
Ok(index) => {
// If variable is already present, overwrite it with the one passed on the
// command line (these have the highest priority)
vars.remove(index);
vars.insert(index, binding);
}
Err(index) => vars.insert(index, binding),
};
}
}
// Substitute the variables that we have at this point into all the places of the
// RequestDefinitions that they can be used (URL, headers, body, query string)
templating::substitute_all(&mut def, &vars);
// // If any unbound variables remain, prompt the user to enter them interactively
let unbound_variables = templating::list_unbound_variables(&def);
let additional_vars: anyhow::Result<Option<Vec<KeyValue>>> = {
if !unbound_variables.is_empty() {
if is_tty {
// `terminal` and `keys` could have been initialized above, so only initialize them
// here if necessary.
if keys.is_none() {
terminal = Some(get_terminal()?);
keys = Some(termion::async_stdin().keys());
}
interactive::prompt_for_variables(
&config,
unbound_variables,
&env_name,
&mut keys.as_mut().unwrap(),
&mut terminal.as_mut().unwrap(),
)
} else {
Err(anyhow!("Running in interactive mode requires a TTY"))
}
} else {
Ok(Some(vec![]))
}
};
// Switch back to the original screen
drop(terminal);
// Flush stdout so the interactive terminal screen is cleared immediately
std::io::stdout().flush().ok();
let additional_vars = additional_vars?;
// `prompt_for_variables` returning None means the user aborted with Ctrl-C and we
// should not send the request
if let Some(additional_vars) = additional_vars {
// Do the final substitution with the user-provided variables
templating::substitute_all(&mut def, &additional_vars);
let mut sp: Option<Spinner> = None;
if is_tty {
sp = Some(Spinner::new(Spinners::Dots, "Sending request...".into()));
}
let res = http::send_request(def, &config).context("Failed sending request")?;
if let Some(s) = sp {
s.stop();
writeln!(writer, "\n")?;
}
let headers = res.headers();
if !(&args | // the file names for the request definition that's either provided or selected, as well as the
// environment being used (if any), as these are required for the prompt_for_variables
// function.
let result: anyhow::Result<Option<SelectedValues>> = { | random_line_split |
main.rs | ::TermionBackend;
use tui::Terminal;
// use simplelog::{CombinedLogger, WriteLogger, LevelFilter, Config as LogConfig};
// use std::fs::File;
use std::io::{stdout, BufWriter};
fn main() {
if let Err(e) = run() {
// If an error was raised during an interactive mode call while the alternate screen is in
// use, we have to flush stdout here or the user will not see the error message.
std::io::stdout().flush().unwrap();
// Seems like this initial newline is necessary or the error will be printed with an offset
eprintln!("\nError: {:#}", e);
std::process::exit(1);
}
}
type OurTerminal = Terminal<TermionBackend<AlternateScreen<RawTerminal<Stdout>>>>;
/// Set up/create the terminal for use in interactive mode.
fn get_terminal() -> anyhow::Result<OurTerminal> {
let stdout = std::io::stdout().into_raw_mode()?;
let stdout = AlternateScreen::from(stdout);
let backend = TermionBackend::new(stdout);
let t = Terminal::new(backend)?;
Ok(t)
}
fn run() -> anyhow::Result<()> {
// CombinedLogger::init(
// vec![
// WriteLogger::new(LevelFilter::Debug, LogConfig::default(), File::create("rhc.log").unwrap()),
// ]
// ).unwrap();
let args: Args = Args::from_args();
let output_file = args
.output_file
.map(|path_buf| {
OpenOptions::new()
.create_new(true)
.write(true)
.open(path_buf.as_path())
})
.transpose()?;
let writer: Box<dyn std::io::Write> = match &output_file {
Some(f) => Box::new(f),
None => Box::new(stdout()),
};
let mut writer = BufWriter::new(writer);
// If the user specifies a config location, make sure there's actually a file there
args.config.as_ref().map_or(Ok(()), |c| {
if c.is_file() {
Ok(())
} else |
})?;
// Load the config file using this priority:
// 1. The file specified with the --config arg, if present
// 2. $XDG_CONFIG_HOME/rhc/config.toml, if XDG_CONFIG_HOME is defined
// 3. ~/.config/rhc/config.toml, if present
// If none of the above exist, use the default Config.
let raw_config_location: PathBuf = args.config.unwrap_or_else(|| {
match env::var_os("XDG_CONFIG_HOME") {
Some(xdg_config_home) => PathBuf::from(xdg_config_home),
None => PathBuf::from("~/.config"),
}
.join("rhc")
.join("config.toml")
});
let raw_config_location = raw_config_location.to_string_lossy();
let config_location: Cow<str> = shellexpand::tilde(raw_config_location.as_ref());
let config_path = Path::new(config_location.as_ref());
if args.verbose {
writeln!(
stdout(),
"Looking for config file at {}",
config_path.display()
)?;
}
let config = {
if config_path.is_file() {
Config::new(config_path).context(format!(
"Could not load config file at {}",
config_path.to_string_lossy()
))?
} else {
writeln!(
stdout(),
"No config file found at {}, falling back to default config",
config_path.display()
)?;
Config::default()
}
};
let is_tty = atty::is(Stream::Stdout);
// These two are necessary for use in interactive mode; but conversely, when not at an
// interactive shell, trying to create this `Terminal` will cause an error. So they start as
// None, and will be created on-demand if necessary (no request definition file provided, or
// unbound variables exist).
let mut keys: Option<Keys<AsyncReader>> = None;
let mut terminal: Option<OurTerminal> = None;
// If the user specified a request definition file, just use that; otherwise, enter interactive
// mode to allow them to choose a request definition. In either case, we need to keep track of
// the file names for the request definition that's either provided or selected, as well as the
// environment being used (if any), as these are required for the prompt_for_variables
// function.
let result: anyhow::Result<Option<SelectedValues>> = {
match &args.file {
Some(path) => {
let def: RequestDefinition =
load_file(&path, RequestDefinition::new, "request definition")?;
let env_path: Option<PathBuf> = args.environment;
let env: Option<Environment> = env_path
.as_deref()
.map(|path| load_file(&path, Environment::new, "environment"))
.transpose()?;
Ok(Some(SelectedValues { def, env }))
}
None => {
if is_tty {
// If we have to enter interactive mode, check if there is at least one request
// definition file available. If not, there's nothing that can be done, so
// print a warning and exit.
if get_all_toml_files(&config.request_definition_directory).is_empty() {
Err(anyhow!("No TOML files found under {}. Running rhc in interactive mode requres at least one request definition file.", &config.request_definition_directory))
} else {
// `terminal` and `keys` must be None at this point, so just create them
terminal = Some(get_terminal()?);
keys = Some(termion::async_stdin().keys());
let interactive_result = interactive::interactive_mode(
&config,
args.environment.as_deref(),
&mut keys.as_mut().unwrap(),
&mut terminal.as_mut().unwrap(),
)?;
Ok(interactive_result)
}
} else {
Err(anyhow!("Running in interactive mode requires a TTY"))
}
}
}
};
let result = result?;
// `interactive_mode` will return None if they Ctrl-C out without selecting anything.
// if let Some((mut request_definition, mut vars)) = result {
if let Some(SelectedValues { mut def, env }) = result {
// Split up the variables and environment name immediately to avoid difficulties with borrowing
// `env` later on
let (mut vars, env_name): (Vec<KeyValue>, String) =
env.map_or((vec![], "<none>".to_string()), |e| (e.variables, e.name));
vars.sort();
if let Some(bindings) = args.binding {
for binding in bindings {
match vars.binary_search_by(|item| item.name.cmp(&binding.name)) {
Ok(index) => {
// If variable is already present, overwrite it with the one passed on the
// command line (these have the highest priority)
vars.remove(index);
vars.insert(index, binding);
}
Err(index) => vars.insert(index, binding),
};
}
}
// Substitute the variables that we have at this point into all the places of the
// RequestDefinitions that they can be used (URL, headers, body, query string)
templating::substitute_all(&mut def, &vars);
// // If any unbound variables remain, prompt the user to enter them interactively
let unbound_variables = templating::list_unbound_variables(&def);
let additional_vars: anyhow::Result<Option<Vec<KeyValue>>> = {
if !unbound_variables.is_empty() {
if is_tty {
// `terminal` and `keys` could have been initialized above, so only initialize them
// here if necessary.
if keys.is_none() {
terminal = Some(get_terminal()?);
keys = Some(termion::async_stdin().keys());
}
interactive::prompt_for_variables(
&config,
unbound_variables,
&env_name,
&mut keys.as_mut().unwrap(),
&mut terminal.as_mut().unwrap(),
)
} else {
Err(anyhow!("Running in interactive mode requires a TTY"))
}
} else {
Ok(Some(vec![]))
}
};
// Switch back to the original screen
drop(terminal);
// Flush stdout so the interactive terminal screen is cleared immediately
std::io::stdout().flush().ok();
let additional_vars = additional_vars?;
// `prompt_for_variables` returning None means the user aborted with Ctrl-C and we
// should not send the request
if let Some(additional_vars) = additional_vars {
// Do the final substitution with the user-provided variables
templating::substitute_all(&mut def, &additional_vars);
let mut sp: Option<Spinner> = None;
if is_tty {
sp = Some(Spinner::new(Spinners::Dots, "Sending request...".into()));
}
let res = http::send_request(def, &config).context("Failed sending request")?;
if let Some(s) = sp {
s.stop();
writeln!(writer, "\n")?;
}
let headers = res.headers();
if ! | {
Err(anyhow!("No config file found at `{}`", c.to_string_lossy()))
} | conditional_block |
main.rs | ::TermionBackend;
use tui::Terminal;
// use simplelog::{CombinedLogger, WriteLogger, LevelFilter, Config as LogConfig};
// use std::fs::File;
use std::io::{stdout, BufWriter};
fn main() |
type OurTerminal = Terminal<TermionBackend<AlternateScreen<RawTerminal<Stdout>>>>;
/// Set up/create the terminal for use in interactive mode.
fn get_terminal() -> anyhow::Result<OurTerminal> {
let stdout = std::io::stdout().into_raw_mode()?;
let stdout = AlternateScreen::from(stdout);
let backend = TermionBackend::new(stdout);
let t = Terminal::new(backend)?;
Ok(t)
}
fn run() -> anyhow::Result<()> {
// CombinedLogger::init(
// vec![
// WriteLogger::new(LevelFilter::Debug, LogConfig::default(), File::create("rhc.log").unwrap()),
// ]
// ).unwrap();
let args: Args = Args::from_args();
let output_file = args
.output_file
.map(|path_buf| {
OpenOptions::new()
.create_new(true)
.write(true)
.open(path_buf.as_path())
})
.transpose()?;
let writer: Box<dyn std::io::Write> = match &output_file {
Some(f) => Box::new(f),
None => Box::new(stdout()),
};
let mut writer = BufWriter::new(writer);
// If the user specifies a config location, make sure there's actually a file there
args.config.as_ref().map_or(Ok(()), |c| {
if c.is_file() {
Ok(())
} else {
Err(anyhow!("No config file found at `{}`", c.to_string_lossy()))
}
})?;
// Load the config file using this priority:
// 1. The file specified with the --config arg, if present
// 2. $XDG_CONFIG_HOME/rhc/config.toml, if XDG_CONFIG_HOME is defined
// 3. ~/.config/rhc/config.toml, if present
// If none of the above exist, use the default Config.
let raw_config_location: PathBuf = args.config.unwrap_or_else(|| {
match env::var_os("XDG_CONFIG_HOME") {
Some(xdg_config_home) => PathBuf::from(xdg_config_home),
None => PathBuf::from("~/.config"),
}
.join("rhc")
.join("config.toml")
});
let raw_config_location = raw_config_location.to_string_lossy();
let config_location: Cow<str> = shellexpand::tilde(raw_config_location.as_ref());
let config_path = Path::new(config_location.as_ref());
if args.verbose {
writeln!(
stdout(),
"Looking for config file at {}",
config_path.display()
)?;
}
let config = {
if config_path.is_file() {
Config::new(config_path).context(format!(
"Could not load config file at {}",
config_path.to_string_lossy()
))?
} else {
writeln!(
stdout(),
"No config file found at {}, falling back to default config",
config_path.display()
)?;
Config::default()
}
};
let is_tty = atty::is(Stream::Stdout);
// These two are necessary for use in interactive mode; but conversely, when not at an
// interactive shell, trying to create this `Terminal` will cause an error. So they start as
// None, and will be created on-demand if necessary (no request definition file provided, or
// unbound variables exist).
let mut keys: Option<Keys<AsyncReader>> = None;
let mut terminal: Option<OurTerminal> = None;
// If the user specified a request definition file, just use that; otherwise, enter interactive
// mode to allow them to choose a request definition. In either case, we need to keep track of
// the file names for the request definition that's either provided or selected, as well as the
// environment being used (if any), as these are required for the prompt_for_variables
// function.
let result: anyhow::Result<Option<SelectedValues>> = {
match &args.file {
Some(path) => {
let def: RequestDefinition =
load_file(&path, RequestDefinition::new, "request definition")?;
let env_path: Option<PathBuf> = args.environment;
let env: Option<Environment> = env_path
.as_deref()
.map(|path| load_file(&path, Environment::new, "environment"))
.transpose()?;
Ok(Some(SelectedValues { def, env }))
}
None => {
if is_tty {
// If we have to enter interactive mode, check if there is at least one request
// definition file available. If not, there's nothing that can be done, so
// print a warning and exit.
if get_all_toml_files(&config.request_definition_directory).is_empty() {
Err(anyhow!("No TOML files found under {}. Running rhc in interactive mode requres at least one request definition file.", &config.request_definition_directory))
} else {
// `terminal` and `keys` must be None at this point, so just create them
terminal = Some(get_terminal()?);
keys = Some(termion::async_stdin().keys());
let interactive_result = interactive::interactive_mode(
&config,
args.environment.as_deref(),
&mut keys.as_mut().unwrap(),
&mut terminal.as_mut().unwrap(),
)?;
Ok(interactive_result)
}
} else {
Err(anyhow!("Running in interactive mode requires a TTY"))
}
}
}
};
let result = result?;
// `interactive_mode` will return None if they Ctrl-C out without selecting anything.
// if let Some((mut request_definition, mut vars)) = result {
if let Some(SelectedValues { mut def, env }) = result {
// Split up the variables and environment name immediately to avoid difficulties with borrowing
// `env` later on
let (mut vars, env_name): (Vec<KeyValue>, String) =
env.map_or((vec![], "<none>".to_string()), |e| (e.variables, e.name));
vars.sort();
if let Some(bindings) = args.binding {
for binding in bindings {
match vars.binary_search_by(|item| item.name.cmp(&binding.name)) {
Ok(index) => {
// If variable is already present, overwrite it with the one passed on the
// command line (these have the highest priority)
vars.remove(index);
vars.insert(index, binding);
}
Err(index) => vars.insert(index, binding),
};
}
}
// Substitute the variables that we have at this point into all the places of the
// RequestDefinitions that they can be used (URL, headers, body, query string)
templating::substitute_all(&mut def, &vars);
// // If any unbound variables remain, prompt the user to enter them interactively
let unbound_variables = templating::list_unbound_variables(&def);
let additional_vars: anyhow::Result<Option<Vec<KeyValue>>> = {
if !unbound_variables.is_empty() {
if is_tty {
// `terminal` and `keys` could have been initialized above, so only initialize them
// here if necessary.
if keys.is_none() {
terminal = Some(get_terminal()?);
keys = Some(termion::async_stdin().keys());
}
interactive::prompt_for_variables(
&config,
unbound_variables,
&env_name,
&mut keys.as_mut().unwrap(),
&mut terminal.as_mut().unwrap(),
)
} else {
Err(anyhow!("Running in interactive mode requires a TTY"))
}
} else {
Ok(Some(vec![]))
}
};
// Switch back to the original screen
drop(terminal);
// Flush stdout so the interactive terminal screen is cleared immediately
std::io::stdout().flush().ok();
let additional_vars = additional_vars?;
// `prompt_for_variables` returning None means the user aborted with Ctrl-C and we
// should not send the request
if let Some(additional_vars) = additional_vars {
// Do the final substition with the user-provided variables
templating::substitute_all(&mut def, &additional_vars);
let mut sp: Option<Spinner> = None;
if is_tty {
sp = Some(Spinner::new(Spinners::Dots, "Sending request...".into()));
}
let res = http::send_request(def, &config).context("Failed sending request")?;
if let Some(s) = sp {
s.stop();
writeln!(writer, "\n")?;
}
let headers = res.headers();
if ! | {
if let Err(e) = run() {
// If an error was raised during an interactive mode call while the alternate screen is in
// use, we have to flush stdout here or the user will not see the error message.
std::io::stdout().flush().unwrap();
// Seems like this initial newline is necessary or the error will be printed with an offset
eprintln!("\nError: {:#}", e);
std::process::exit(1);
}
} | identifier_body |
jwt.rs | .kid
.as_ref()
.ok_or_else(|| Error::Input("Token does not specify the key id".to_string()))?;
let key = self
.jwks
.find(key_id)
.filter(|key| key.alg == header.alg && key.r#use == "sig")
.ok_or_else(|| {
Error::Configuration(format!("Signing key {:?} can't be found", key_id))
})?;
jsonwebtoken::DecodingKey::from_rsa_components(&key.n, &key.e)
}
alg => return Err(Error::Input(format!("Unsupported algorithm: {:?}", alg))),
};
match jsonwebtoken::decode::<Claims>(token, &key, &self.validation) {
Ok(data) => Ok(User::Authenticated {
name: data.claims.sub,
permissions: data.claims.permissions,
}),
Err(err) => Err(Error::Validation(err.to_string())),
}
}
}
#[cfg(test)]
mod tests {
use std::io::BufWriter;
use std::{io::Write, path::Path};
use super::*;
const KID: &str = "test-key";
const AUDIENCE: &str = "xsnippet-api-tests-aud";
const ISSUER: &str = "xsnippet-api-tests-iss";
const N: &str = "qN5dCh1M2RA3aF6ZH4IRXIQYKvaWRG59F7lpQIUyUtkDiURmVem7x86EuGmmTmQPSRhx6fL4jF0GBkduAhYnFn_A8T6WfQXzXyI2cyqXsKaTkDMKvl7nHnGttQIuG8W2m7H74pklsPKxp0rPJ0zjV1N_lr_nZG_dayJhtEnChHpScaoTwqMitcOJfVacNxcRbTSDy1IZY5AW0TLdATUmc-ddJLQXxSV7bMur_4S1MP45tHrttChgtnmPpL3q4MHZjHR8aNRYPurkJkPwY0t6nrERTPk9DE4Mk5NtNzqZRBY7eT94pmodVUBGTVYhh7bFDGB26oyTk8_5aedO6syB6w==";
const E: &str = "AQAB";
const USER_TOKEN: &str = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Qta2V5In0.eyJzdWIiOiJ1c2VyIiwiYXVkIjoieHNuaXBwZXQtYXBpLXRlc3RzLWF1ZCIsImlzcyI6InhzbmlwcGV0LWFwaS10ZXN0cy1pc3MiLCJleHAiOjQ3NzAzNzU1NDQsInBlcm1pc3Npb25zIjpbXX0.doA6EeVLnp-MLNRTRUzg03rw9oUn5vDGv59zNysrcFfvkEiiYAtZMu-YW_N3YtE0qv2FTaGAXHryMqsEk8rsFv4uepDuOpzutnRoB4JDFTpvJkKYE4HZjsd8eHSAjFEuCvDjm7wnxoW0zDXH_zj1FITht-c3ua6KbgeevvDjpUgaR52Zou9HRyNa6ns5OKO7yJofA32IZaO7QH69iQiZ4o9WA8PfFNyuVqyQVkvZwpr68JLgl4qTTX4NIWV4wU4OWbIGN6-p4QSkS_Ljkau9sRKjnx4NYPbICMGWVThn_MKOfg26DjGZlI_0HFYDBLogJkTmmyT-5IIIWUqBgUKWYA";
const EXPIRED_TOKEN: &str = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Qta2V5In0.eyJzdWIiOiJ1c2VyIiwiYXVkIjoieHNuaXBwZXQtYXBpLXRlc3RzLWF1ZCIsImlzcyI6InhzbmlwcGV0LWFwaS10ZXN0cy1pc3MiLCJleHAiOjE2MTY3NzIwMDYsInBlcm1pc3Npb25zIjpbXX0.AkC-xzeJ7OXi5fN-DXs43vKAjgsep5Cwx2e1c3hbv1jPpJVnwTD2M_A8Bphd8-mzMsuO017a_rZQIj30dzt3I5Z730Z4zHA_xPV4nl_6zsGzCYTwecT1qmOhTuiyP1PhdgveVQz-ImNDbAzD80PTUwW8Bv-r4R1wyrc5lRtj2ofF1h2_rqxWtRbQwvqmm_J4K8oklYWOrBPNFXJVOGVcji97LelBY6llWbfVUO2unNZBA7MbJLDMtuQHMIRSHn1PXSLA4MJbxOzT-kZC01OlpQWtGstxnITHc34ZDVe5M0v092PSe5J0o3_OBVCR405-rPK_EjLD8saPE3SK7X0Cfw";
const INVALID_TOKEN: &str = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Qta2V5In0.eyJzdWIiOiJ1c2VyIiwiYXVkIjoieHNuaXBwZXQtYXBpLXRlc3RzLWF1ZCIsImlzcyI6InhzbmlwcGV0LWFwaS10ZXN0cy1pc3MiLCJleHAiOjQ3NzAzNzU1NDQsInBlcm1pc3Npb25zIjpbXX0.doA6EeVLnp-MLNRTRUzg03rw9oUn5vDGv59zNysrcFfvkEiiYAtZMu-YW_N3YtE0qv2FTaGAXHryMqsEk8rsFv4uepDuOpzutnRoB4JDFTpvJkKYE4HZjsd8eHSAjFEuCvDjm7wnxoW0zDXH_zj1FITht-c3ua6KbgeevvDjpUgaR52Zou9HRyNa6ns5OKO7yJofA32IZaO7QH69iQiZ4o9WA8PfFNyuVqyQVkvZwpr68JLgl4qTTX4NIWV4wU4OWbIGN6-p3QSkS_Ljkau9sRKjnx4NYPbICMGWVThn_MKOfg26DjGZlI_0HFYDBLogJkTmmyT-5IIIWUqBgUKWYA";
const INVALID_HEADER_TOKEN: &str = "eyJ0eXAiOiJKV1QiLCJraWQiOiJ0ZXN0LWtleSJ9.eyJzdWIiOiJ1c2VyIiwiYXVkIjoieHNuaXBwZXQtYXBpLXRlc3RzLWF1ZCIsImlzcyI6InhzbmlwcGV0LWFwaS10ZXN0cy1pc3MiLCJleHAiOjQ3NzAzNzU1NDQsInBlcm1pc3Npb25zIjpbXX0.doA6EeVLnp-MLNRTRUzg03rw9oUn5vDGv59zNysrcFfvkEiiYAtZMu-YW_N3YtE0qv2FTaGAXHryMqsEk8rsFv4uepDuOpzutnRoB4JDFTpvJkKYE4HZjsd8eHSAj | jsonwebtoken::decode_header(token).map_err(|err| Error::Input(err.to_string()))?;
let key = match header.alg {
Algorithm::RS256 | Algorithm::RS384 | Algorithm::RS512 => {
let key_id = header | random_line_split |
|
jwt.rs | (&self, key_id: &str) -> Option<&Key> {
self.keys.iter().find(|key| key.kid == key_id)
}
}
/// A set of values encoded in a JWT that the issuer claims to be true.
#[derive(Debug, Deserialize)]
struct Claims {
/// Audience (who or that the token is intended for). E.g. "https://api.xsnippet.org".
#[allow(unused)]
aud: String,
/// Issuer (who created and signed this token). E.g. "https://xsnippet.eu.auth0.com/".
#[allow(unused)]
iss: String,
/// Subject (whom the token refers to). E.g. "[email protected]".
sub: String,
/// Expiration time (seconds since Unix epoch).
#[allow(unused)]
exp: usize,
/// Subject permissions (e.g. vec!["import"])
permissions: Vec<Permission>,
}
impl Jwks {
/// Returns a Jwks retrieved from the location identified by the given URI.
pub fn from_uri(uri: &str) -> Result<Self> {
let load_err = Error::Configuration(format!("Can't load Jwks state from {}", uri));
let json = match uri.split_once("://") {
Some(("https", _)) => reqwest::blocking::get(uri)
.and_then(|response| response.text())
.map_err(|_| load_err)?,
Some(("file", path)) => std::fs::read_to_string(path).map_err(|_| load_err)?,
_ => {
return Err(Error::Configuration(
"URI scheme is not supported or URI is invalid".to_string(),
))
}
};
let jwks = serde_json::from_slice::<Jwks>(json.as_bytes())
.map_err(|_| Error::Configuration("Can't parse Jwks state as JSON".to_string()))?;
if !jwks.keys.is_empty() {
Ok(jwks)
} else {
Err(Error::Configuration("Jwks is empty".to_string()))
}
}
}
/// A facade for validation of JWT values.
pub struct JwtValidator {
jwks: Jwks,
validation: Validation,
}
impl JwtValidator {
/// Returns a new JwtValidator constructed from the given parameters.
///
/// # Arguments
///
/// * `audience` - The intended recipient of the tokens (e.g. "https://api.xsnippet.org")
/// * `issuer` - The principal that issues the tokens (e.g. "https://xsnippet.eu.auth0.com/")
/// * `jwks_uri` - The location of JWT Key Set with keys used to validate the tokens (e.g. "https://xsnippet.eu.auth0.com/.well-known/jwks.json")
pub fn new(audience: String, issuer: String, jwks_uri: &str) -> Result<Self> {
let jwks = Jwks::from_uri(jwks_uri)?;
// The following token properties are going to be verified:
// * the expiration time
// * the issuer
// * the intended audience
let validation = Validation {
algorithms: SUPPORTED_ALGORITHMS.to_vec(),
aud: Some(std::iter::once(audience).collect()),
iss: Some(issuer),
..Validation::default()
};
Ok(JwtValidator { jwks, validation })
}
/// Returns a new JwtValidator constructed from the application config.
pub fn from_config(config: &Config) -> Result<Self> {
JwtValidator::new(
config.jwt_audience.to_owned(),
config.jwt_issuer.to_owned(),
&config.jwt_jwks_uri,
)
}
}
impl AuthValidator for JwtValidator {
fn validate(&self, token: &str) -> Result<User> {
let header =
jsonwebtoken::decode_header(token).map_err(|err| Error::Input(err.to_string()))?;
let key = match header.alg {
Algorithm::RS256 | Algorithm::RS384 | Algorithm::RS512 => {
let key_id = header
.kid
.as_ref()
.ok_or_else(|| Error::Input("Token does not specify the key id".to_string()))?;
let key = self
.jwks
.find(key_id)
.filter(|key| key.alg == header.alg && key.r#use == "sig")
.ok_or_else(|| {
Error::Configuration(format!("Signing key {:?} can't be found", key_id))
})?;
jsonwebtoken::DecodingKey::from_rsa_components(&key.n, &key.e)
}
alg => return Err(Error::Input(format!("Unsupported algorithm: {:?}", alg))),
};
match jsonwebtoken::decode::<Claims>(token, &key, &self.validation) {
Ok(data) => Ok(User::Authenticated {
name: data.claims.sub,
permissions: data.claims.permissions,
}),
Err(err) => Err(Error::Validation(err.to_string())),
}
}
}
#[cfg(test)]
mod tests {
use std::io::BufWriter;
use std::{io::Write, path::Path};
use super::*;
const KID: &str = "test-key";
const AUDIENCE: &str = "xsnippet-api-tests-aud";
const ISSUER: &str = "xsnippet-api-tests-iss";
const N: &str = "qN5dCh1M2RA3aF6ZH4IRXIQYKvaWRG59F7lpQIUyUtkDiURmVem7x86EuGmmTmQPSRhx6fL4jF0GBkduAhYnFn_A8T6WfQXzXyI2cyqXsKaTkDMKvl7nHnGttQIuG8W2m7H74pklsPKxp0rPJ0zjV1N_lr_nZG_dayJhtEnChHpScaoTwqMitcOJfVacNxcRbTSDy1IZY5AW0TLdATUmc-ddJLQXxSV7bMur_4S1MP45tHrttChgtnmPpL3q4MHZjHR8aNRYPurkJkPwY0t6nrERTPk9DE4Mk5NtNzqZRBY7eT94pmodVUBGTVYhh7bFDGB26oyTk8_5aedO6syB6w==";
const E: &str = "AQAB";
const USER_TOKEN: &str = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Qta2V5In0.eyJzdWIiOiJ1c2VyIiwiYXVkIjoieHNuaXBwZXQtYXBpLXRlc3RzLWF1ZCIsImlzcyI6InhzbmlwcGV0LWFwaS10ZXN0cy1pc3MiLCJleHAiOjQ3NzAzNzU1NDQsInBlcm1pc3Npb25zIjpbXX0.doA6EeVLnp-MLNRTRUzg03rw9oUn5vDGv59zNysrcFfvkEiiYAtZMu-YW_N3YtE0qv2FTaGAXHryMqsEk8rsFv4uepDuOpzutnRoB4JDFTpvJkKYE4HZjsd8eHSAjFEuCvDjm7wnxoW0zDXH_zj1FITht-c3ua6KbgeevvDjpUgaR52Zou9HRyNa6ns5OKO7yJofA32IZaO7QH69iQiZ4o9WA8PfFNyuVqyQVkvZwpr68JLgl4qTTX4NIWV4wU4OWbIGN6-p4QSkS_Ljkau9sRKjnx4NYPbICMGWVThn_MKOfg26DjGZlI_0HFYDBLogJkTmmyT-5IIIWUqBgUKWYA";
const EXPIRED_TOKEN: &str = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Qta2V5In0.eyJzdWIiOiJ1c2VyIiwiYXVkIjoieHNuaXBwZXQtYXBpLXRlc3RzLWF1ZCIsImlzcyI6InhzbmlwcGV0LWFwaS10ZXN0cy1pc3MiLCJleHAiOjE2MTY3NzIwMDYsInBlcm1pc3Npb25zIjpbXX0.AkC-xzeJ7OXi5fN-DXs43vKAjgsep5Cwx2e1c3hbv1jPpJVnwTD2M_A8Bphd | find | identifier_name |
|
jwt.rs | }
alg => return Err(Error::Input(format!("Unsupported algorithm: {:?}", alg))),
};
match jsonwebtoken::decode::<Claims>(token, &key, &self.validation) {
Ok(data) => Ok(User::Authenticated {
name: data.claims.sub,
permissions: data.claims.permissions,
}),
Err(err) => Err(Error::Validation(err.to_string())),
}
}
}
#[cfg(test)]
mod tests {
use std::io::BufWriter;
use std::{io::Write, path::Path};
use super::*;
const KID: &str = "test-key";
const AUDIENCE: &str = "xsnippet-api-tests-aud";
const ISSUER: &str = "xsnippet-api-tests-iss";
const N: &str = "qN5dCh1M2RA3aF6ZH4IRXIQYKvaWRG59F7lpQIUyUtkDiURmVem7x86EuGmmTmQPSRhx6fL4jF0GBkduAhYnFn_A8T6WfQXzXyI2cyqXsKaTkDMKvl7nHnGttQIuG8W2m7H74pklsPKxp0rPJ0zjV1N_lr_nZG_dayJhtEnChHpScaoTwqMitcOJfVacNxcRbTSDy1IZY5AW0TLdATUmc-ddJLQXxSV7bMur_4S1MP45tHrttChgtnmPpL3q4MHZjHR8aNRYPurkJkPwY0t6nrERTPk9DE4Mk5NtNzqZRBY7eT94pmodVUBGTVYhh7bFDGB26oyTk8_5aedO6syB6w==";
const E: &str = "AQAB";
const USER_TOKEN: &str = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Qta2V5In0.eyJzdWIiOiJ1c2VyIiwiYXVkIjoieHNuaXBwZXQtYXBpLXRlc3RzLWF1ZCIsImlzcyI6InhzbmlwcGV0LWFwaS10ZXN0cy1pc3MiLCJleHAiOjQ3NzAzNzU1NDQsInBlcm1pc3Npb25zIjpbXX0.doA6EeVLnp-MLNRTRUzg03rw9oUn5vDGv59zNysrcFfvkEiiYAtZMu-YW_N3YtE0qv2FTaGAXHryMqsEk8rsFv4uepDuOpzutnRoB4JDFTpvJkKYE4HZjsd8eHSAjFEuCvDjm7wnxoW0zDXH_zj1FITht-c3ua6KbgeevvDjpUgaR52Zou9HRyNa6ns5OKO7yJofA32IZaO7QH69iQiZ4o9WA8PfFNyuVqyQVkvZwpr68JLgl4qTTX4NIWV4wU4OWbIGN6-p4QSkS_Ljkau9sRKjnx4NYPbICMGWVThn_MKOfg26DjGZlI_0HFYDBLogJkTmmyT-5IIIWUqBgUKWYA";
const EXPIRED_TOKEN: &str = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Qta2V5In0.eyJzdWIiOiJ1c2VyIiwiYXVkIjoieHNuaXBwZXQtYXBpLXRlc3RzLWF1ZCIsImlzcyI6InhzbmlwcGV0LWFwaS10ZXN0cy1pc3MiLCJleHAiOjE2MTY3NzIwMDYsInBlcm1pc3Npb25zIjpbXX0.AkC-xzeJ7OXi5fN-DXs43vKAjgsep5Cwx2e1c3hbv1jPpJVnwTD2M_A8Bphd8-mzMsuO017a_rZQIj30dzt3I5Z730Z4zHA_xPV4nl_6zsGzCYTwecT1qmOhTuiyP1PhdgveVQz-ImNDbAzD80PTUwW8Bv-r4R1wyrc5lRtj2ofF1h2_rqxWtRbQwvqmm_J4K8oklYWOrBPNFXJVOGVcji97LelBY6llWbfVUO2unNZBA7MbJLDMtuQHMIRSHn1PXSLA4MJbxOzT-kZC01OlpQWtGstxnITHc34ZDVe5M0v092PSe5J0o3_OBVCR405-rPK_EjLD8saPE3SK7X0Cfw";
const INVALID_TOKEN: &str = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Qta2V5In0.eyJzdWIiOiJ1c2VyIiwiYXVkIjoieHNuaXBwZXQtYXBpLXRlc3RzLWF1ZCIsImlzcyI6InhzbmlwcGV0LWFwaS10ZXN0cy1pc3MiLCJleHAiOjQ3NzAzNzU1NDQsInBlcm1pc3Npb25zIjpbXX0.doA6EeVLnp-MLNRTRUzg03rw9oUn5vDGv59zNysrcFfvkEiiYAtZMu-YW_N3YtE0qv2FTaGAXHryMqsEk8rsFv4uepDuOpzutnRoB4JDFTpvJkKYE4HZjsd8eHSAjFEuCvDjm7wnxoW0zDXH_zj1FITht-c3ua6KbgeevvDjpUgaR52Zou9HRyNa6ns5OKO7yJofA32IZaO7QH69iQiZ4o9WA8PfFNyuVqyQVkvZwpr68JLgl4qTTX4NIWV4wU4OWbIGN6-p3QSkS_Ljkau9sRKjnx4NYPbICMGWVThn_MKOfg26DjGZlI_0HFYDBLogJkTmmyT-5IIIWUqBgUKWYA";
const INVALID_HEADER_TOKEN: &str = "eyJ0eXAiOiJKV1QiLCJraWQiOiJ0ZXN0LWtleSJ9.eyJzdWIiOiJ1c2VyIiwiYXVkIjoieHNuaXBwZXQtYXBpLXRlc3RzLWF1ZCIsImlzcyI6InhzbmlwcGV0LWFwaS10ZXN0cy1pc3MiLCJleHAiOjQ3NzAzNzU1NDQsInBlcm1pc3Npb25zIjpbXX0.doA6EeVLnp-MLNRTRUzg03rw9oUn5vDGv59zNysrcFfvkEiiYAtZMu-YW_N3YtE0qv2FTaGAXHryMqsEk8rsFv4uepDuOpzutnRoB4JDFTpvJkKYE4HZjs | {
let header =
jsonwebtoken::decode_header(token).map_err(|err| Error::Input(err.to_string()))?;
let key = match header.alg {
Algorithm::RS256 | Algorithm::RS384 | Algorithm::RS512 => {
let key_id = header
.kid
.as_ref()
.ok_or_else(|| Error::Input("Token does not specify the key id".to_string()))?;
let key = self
.jwks
.find(key_id)
.filter(|key| key.alg == header.alg && key.r#use == "sig")
.ok_or_else(|| {
Error::Configuration(format!("Signing key {:?} can't be found", key_id))
})?;
jsonwebtoken::DecodingKey::from_rsa_components(&key.n, &key.e) | identifier_body |
|
stylesheet.js | }
}
};
function StyleSheet(seed, name) {
var head,
node,
sheet,
cssRules = {},
_rules,
_insertRule,
_deleteRule;
// Factory or constructor
if (!(this instanceof arguments.callee)) {
return new arguments.callee(seed,name);
}
head = d.getElementsByTagName('head')[0];
if (!head) {
Y.fail('HEAD element not found to append STYLE node');
}
node = seed && (seed.nodeName ? seed :
d.getElementById(seed.replace(/^#/,'')));
if (seed && sheets[seed]) {
return sheets[seed];
} else if (node && sheets[Y.stamp(node)]) {
return sheets[Y.stamp(node)];
}
if (!node || !/^(?:style|link)$/i.test(node.nodeName)) {
node = d.createElement('style');
node.type = 'text/css';
}
if (typeof seed === 'string') {
// Create entire sheet from seed cssText
if (seed.indexOf('{') != -1) {
// Not a load-time fork because low run-time impact and IE fails
// test for s.styleSheet at page load time (oddly)
if (node.styleSheet) {
node.styleSheet.cssText = seed;
} else {
node.appendChild(d.createTextNode(seed));
}
} else if (!name) {
name = seed;
}
}
if (node.parentNode !== head) {
// styleSheet isn't available on the style node in FF2 until appended
// to the head element. style nodes appended to body do not affect
// change in Safari.
head.appendChild(node);
}
// IE stores StyleSheet under the "styleSheet" property
sheet = node.sheet || node.styleSheet;
// IE stores the rules collection under the "rules" property
_rules = ('cssRules' in sheet) ? 'cssRules' : 'rules';
// IE supports removeRule
_deleteRule = ('deleteRule' in sheet) ?
function (i) { sheet.deleteRule(i); } :
function (i) { sheet.removeRule(i); };
// IE supports addRule with different signature
_insertRule = ('insertRule' in sheet) ?
function (sel,css,i) { sheet.insertRule(sel+' '+css,i); } :
function (sel,css,i) { sheet.addRule(sel,css,i); };
// Initialize the cssRules map from the node
// TODO if xdomain link node, copy to a local style block and replace the
// link node with the style node. CAVEAT: alternate stylesheet, @media
// TODO: test existing node with funky selectors
// TODO: Split comma delimited rules
var i,r,sel;
for (i = sheet[_rules].length - 1; i >= 0; --i) {
r = sheet[_rules][i];
sel = r.selectorText;
if (cssRules[sel]) {
cssRules[sel].style.cssText += ';' + r.style.cssText;
_deleteRule(i);
} else {
cssRules[sel] = r;
}
}
// Cache the sheet by the generated Id
StyleSheet.register(Y.stamp(node),this);
if (name) {
StyleSheet.register(name,this);
}
// Public API
Y.mix(this,{
getId : function () { return Y.stamp(node); },
// Enabling/disabling the stylesheet. Changes may be made to rules
// while disabled.
enable : function () { sheet.disabled = false; return this; },
disable : function () { sheet.disabled = true; return this; },
isEnabled : function () { return !sheet.disabled; },
/**
* Update style for a rule. Add the rule if it's not present already.
*
*/
set : function (sel,css) {
var rule = cssRules[sel],
multi = sel.split(/\s*,\s*/),i,
idx;
// IE's addRule doesn't support multiple comma delimited selectors
if (multi.length > 1) |
// Some selector values can cause IE to hang
if (!StyleSheet.isValidSelector(sel)) {
return this;
}
// Opera throws an error if there's a syntax error in assigned
// cssText. Avoid this using a worker style collection, then
// assigning the resulting cssText.
if (rule) {
rule.style.cssText = StyleSheet.toCssText(css,rule.style.cssText);
} else {
idx = sheet[_rules].length;
_insertRule(sel, '{'+StyleSheet.toCssText(css)+'}', idx);
// Safari replaces the rules collection, but maintains the rule
// instances in the new collection when rules are added/removed
cssRules[sel] = sheet[_rules][idx];
}
return this;
},
// remove rule properties or an entire rule
unset : function (sel,css) {
var rule = cssRules[sel],
remove = !css,
rules, i;
if (rule) {
if (!remove) {
css = Y.Array(css);
style.cssText = rule.style.cssText;
for (i = css.length - 1; i >= 0; --i) {
_unsetProperty(style,css[i]);
}
if (style.cssText) {
rule.style.cssText = style.cssText;
} else {
remove = true;
}
}
if (remove) { // remove the rule altogether
rules = sheet[_rules];
for (i = rules.length - 1; i >= 0; --i) {
if (rules[i] === rule) {
delete cssRules[sel];
_deleteRule(i);
break;
}
}
}
}
return this;
}
},true);
}
_toCssText = function (css,base) {
var f = css.styleFloat || css.cssFloat || css['float'], prop;
style.cssText = base || '';
if (f && !css[floatAttr]) {
css = Y.merge(css);
delete css.styleFloat; delete css.cssFloat; delete css['float'];
css[floatAttr] = f;
}
for (prop in css) {
if (css.hasOwnProperty(prop)) {
// IE throws Invalid Value errors
try {
// IE doesn't like values with whitespace ala ' red' or 'red '
style[prop] = Y.Lang.trim(css[prop]);
}
catch (e) {
}
}
}
return style.cssText;
};
Y.mix(StyleSheet, {
// Wrap IE's toCssText to catch opacity. The copy/merge is to preserve the
// input object's integrity, but if float and opacity are set, the input will
// be copied twice in IE. Is there a way to avoid this without increasing the
// byte count?
toCssText : ('opacity' in style) ? _toCssText :
function (css, cssText) {
if ('opacity' in css) {
css = Y.merge(css,{
filter: 'alpha(opacity='+(css.opacity*100)+')'
});
delete css.opacity;
}
return _toCssText(css,cssText);
},
register : function (name,sheet) {
return !!(name && sheet instanceof StyleSheet &&
!sheets[name] && (sheets[name] = sheet));
},
// TODO: Selector should provide
isValidSelector : function (sel) {
// IE locks up on addRule(BAD_SELECTOR, '{..}');
// BAD_SELECTOR : unescaped `~!@$%^&()+=|{}[];'"?< or space, ., or #
// followed by anything other than an alphanumeric
// -abc or .-abc or #_abc or '# ' all fail (prob more)
// TODO: this will fail tag[prop=val] tests
return !/[^\\][`~!@$%\^&()+=|{}\[\];'"?<]|^\s*[^a-z0-9*#.]|[\s.#][^a-z0-9]/i.test(sel);
}
});
Y.StyleSheet = StyleSheet;
/*
NOTES
* Style node must be added to the head element. Safari does not honor styles
applied to StyleSheet objects on style nodes in the body.
* StyleSheet object is created on the style node when the style node is added
to the head element in Firefox 2 (and maybe 3?)
* The cssRules collection is replaced after insertRule/deleteRule calls in
Safari 3.1. Existing Rules are used in the new collection, so the collection
cannot be cached, but the rules can be.
* Opera requires that the index be passed with insertRule.
* Same-domain restrictions prevent modifying StyleSheet objects attached to
link elements with remote href (or "about:blank" or "javascript:false")
* IE names StyleSheet related properties and methods differently (see code)
* IE converts tag names to upper case in the Rule's selectorText
* IE converts empty string assignment to complex properties to value settings
for all child properties. E.g. style.background = '' sets non-'' values on
style.backgroundPosition, style.backgroundColor, etc. | {
for (i = multi.length - 1; i >= 0; --i) {
this.set(multi[i], css);
}
return this;
} | conditional_block |
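The StyleSheet constructor above exposes set(), unset(), enable() and disable(); the following short, purely illustrative sketch shows how that API is typically driven from a YUI 3 sandbox (the module name 'stylesheet' is YUI's own, but the sheet name, selectors and property values are invented for the example).
YUI().use('stylesheet', function (Y) {
    var sheet = new Y.StyleSheet('demo-skin');              // registered by name for later reuse
    sheet.set('.promo, .promo a', { color: '#fff', backgroundColor: '#036' }); // comma list is split into one rule per selector
    sheet.set('.promo', { opacity: 0.8 });                  // on old IE this becomes filter:alpha(opacity=80) via toCssText
    sheet.unset('.promo a', 'color');                       // remove a single property
    sheet.unset('.promo');                                  // remove the whole rule
    sheet.disable();                                        // rules can still be edited while the sheet is disabled
});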
stylesheet.js | }
}
};
function StyleSheet(seed, name) | d.getElementById(seed.replace(/^#/,'')));
if (seed && sheets[seed]) {
return sheets[seed];
} else if (node && sheets[Y.stamp(node)]) {
return sheets[Y.stamp(node)];
}
if (!node || !/^(?:style|link)$/i.test(node.nodeName)) {
node = d.createElement('style');
node.type = 'text/css';
}
if (typeof seed === 'string') {
// Create entire sheet from seed cssText
if (seed.indexOf('{') != -1) {
// Not a load-time fork because low run-time impact and IE fails
// test for s.styleSheet at page load time (oddly)
if (node.styleSheet) {
node.styleSheet.cssText = seed;
} else {
node.appendChild(d.createTextNode(seed));
}
} else if (!name) {
name = seed;
}
}
if (node.parentNode !== head) {
// styleSheet isn't available on the style node in FF2 until appended
// to the head element. style nodes appended to body do not affect
// change in Safari.
head.appendChild(node);
}
// IE stores StyleSheet under the "styleSheet" property
sheet = node.sheet || node.styleSheet;
// IE stores the rules collection under the "rules" property
_rules = ('cssRules' in sheet) ? 'cssRules' : 'rules';
// IE supports removeRule
_deleteRule = ('deleteRule' in sheet) ?
function (i) { sheet.deleteRule(i); } :
function (i) { sheet.removeRule(i); };
// IE supports addRule with different signature
_insertRule = ('insertRule' in sheet) ?
function (sel,css,i) { sheet.insertRule(sel+' '+css,i); } :
function (sel,css,i) { sheet.addRule(sel,css,i); };
// Initialize the cssRules map from the node
// TODO if xdomain link node, copy to a local style block and replace the
// link node with the style node. CAVEAT: alternate stylesheet, @media
// TODO: test existing node with funky selectors
// TODO: Split comma delimited rules
var i,r,sel;
for (i = sheet[_rules].length - 1; i >= 0; --i) {
r = sheet[_rules][i];
sel = r.selectorText;
if (cssRules[sel]) {
cssRules[sel].style.cssText += ';' + r.style.cssText;
_deleteRule(i);
} else {
cssRules[sel] = r;
}
}
// Cache the sheet by the generated Id
StyleSheet.register(Y.stamp(node),this);
if (name) {
StyleSheet.register(name,this);
}
// Public API
Y.mix(this,{
getId : function () { return Y.stamp(node); },
// Enabling/disabling the stylesheet. Changes may be made to rules
// while disabled.
enable : function () { sheet.disabled = false; return this; },
disable : function () { sheet.disabled = true; return this; },
isEnabled : function () { return !sheet.disabled; },
/**
* Update style for a rule. Add the rule if it's not present already.
*
*/
set : function (sel,css) {
var rule = cssRules[sel],
multi = sel.split(/\s*,\s*/),i,
idx;
// IE's addRule doesn't support multiple comma delimited selectors
if (multi.length > 1) {
for (i = multi.length - 1; i >= 0; --i) {
this.set(multi[i], css);
}
return this;
}
// Some selector values can cause IE to hang
if (!StyleSheet.isValidSelector(sel)) {
return this;
}
// Opera throws an error if there's a syntax error in assigned
// cssText. Avoid this using a worker style collection, then
// assigning the resulting cssText.
if (rule) {
rule.style.cssText = StyleSheet.toCssText(css,rule.style.cssText);
} else {
idx = sheet[_rules].length;
_insertRule(sel, '{'+StyleSheet.toCssText(css)+'}', idx);
// Safari replaces the rules collection, but maintains the rule
// instances in the new collection when rules are added/removed
cssRules[sel] = sheet[_rules][idx];
}
return this;
},
// remove rule properties or an entire rule
unset : function (sel,css) {
var rule = cssRules[sel],
remove = !css,
rules, i;
if (rule) {
if (!remove) {
css = Y.Array(css);
style.cssText = rule.style.cssText;
for (i = css.length - 1; i >= 0; --i) {
_unsetProperty(style,css[i]);
}
if (style.cssText) {
rule.style.cssText = style.cssText;
} else {
remove = true;
}
}
if (remove) { // remove the rule altogether
rules = sheet[_rules];
for (i = rules.length - 1; i >= 0; --i) {
if (rules[i] === rule) {
delete cssRules[sel];
_deleteRule(i);
break;
}
}
}
}
return this;
}
},true);
}
_toCssText = function (css,base) {
var f = css.styleFloat || css.cssFloat || css['float'], prop;
style.cssText = base || '';
if (f && !css[floatAttr]) {
css = Y.merge(css);
delete css.styleFloat; delete css.cssFloat; delete css['float'];
css[floatAttr] = f;
}
for (prop in css) {
if (css.hasOwnProperty(prop)) {
// IE throws Invalid Value errors
try {
// IE doesn't like values with whitespace ala ' red' or 'red '
style[prop] = Y.Lang.trim(css[prop]);
}
catch (e) {
}
}
}
return style.cssText;
};
Y.mix(StyleSheet, {
// Wrap IE's toCssText to catch opacity. The copy/merge is to preserve the
// input object's integrity, but if float and opacity are set, the input will
// be copied twice in IE. Is there a way to avoid this without increasing the
// byte count?
toCssText : ('opacity' in style) ? _toCssText :
function (css, cssText) {
if ('opacity' in css) {
css = Y.merge(css,{
filter: 'alpha(opacity='+(css.opacity*100)+')'
});
delete css.opacity;
}
return _toCssText(css,cssText);
},
register : function (name,sheet) {
return !!(name && sheet instanceof StyleSheet &&
!sheets[name] && (sheets[name] = sheet));
},
// TODO: Selector should provide
isValidSelector : function (sel) {
// IE locks up on addRule(BAD_SELECTOR, '{..}');
// BAD_SELECTOR : unescaped `~!@$%^&()+=|{}[];'"?< or space, ., or #
// followed by anything other than an alphanumeric
// -abc or .-abc or #_abc or '# ' all fail (prob more)
// TODO: this will fail tag[prop=val] tests
return !/[^\\][`~!@$%\^&()+=|{}\[\];'"?<]|^\s*[^a-z0-9*#.]|[\s.#][^a-z0-9]/i.test(sel);
}
});
Y.StyleSheet = StyleSheet;
/*
NOTES
* Style node must be added to the head element. Safari does not honor styles
applied to StyleSheet objects on style nodes in the body.
* StyleSheet object is created on the style node when the style node is added
to the head element in Firefox 2 (and maybe 3?)
* The cssRules collection is replaced after insertRule/deleteRule calls in
Safari 3.1. Existing Rules are used in the new collection, so the collection
cannot be cached, but the rules can be.
* Opera requires that the index be passed with insertRule.
* Same-domain restrictions prevent modifying StyleSheet objects attached to
link elements with remote href (or "about:blank" or "javascript:false")
* IE names StyleSheet related properties and methods differently (see code)
* IE converts tag names to upper case in the Rule's selectorText
* IE converts empty string assignment to complex properties to value settings
for all child properties. E.g. style.background = '' sets non-'' values on
style.backgroundPosition, style.backgroundColor, etc. | {
var head,
node,
sheet,
cssRules = {},
_rules,
_insertRule,
_deleteRule;
// Factory or constructor
if (!(this instanceof arguments.callee)) {
return new arguments.callee(seed,name);
}
head = d.getElementsByTagName('head')[0];
if (!head) {
Y.fail('HEAD element not found to append STYLE node');
}
node = seed && (seed.nodeName ? seed : | identifier_body |
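The Y.mix block above wraps toCssText so that opacity is rewritten into IE's alpha filter and float is normalized to the per-browser attribute, while isValidSelector guards against selectors that hang IE's addRule. A rough, non-authoritative illustration of the intended behaviour follows; the exact cssText serialization varies by browser.
var out = StyleSheet.toCssText({ cssFloat: 'left', opacity: 0.5 });
// On engines whose style object lacks 'opacity' (old IE), roughly:
//   "FLOAT: left; FILTER: alpha(opacity=50)"
// Elsewhere, roughly:
//   "float: left; opacity: 0.5;"
StyleSheet.isValidSelector('.promo');   // true
StyleSheet.isValidSelector('.-promo');  // false - "." followed by a non-alphanumeric would lock up IE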
stylesheet.js | }
}
};
function | (seed, name) {
var head,
node,
sheet,
cssRules = {},
_rules,
_insertRule,
_deleteRule;
// Factory or constructor
if (!(this instanceof arguments.callee)) {
return new arguments.callee(seed,name);
}
head = d.getElementsByTagName('head')[0];
if (!head) {
Y.fail('HEAD element not found to append STYLE node');
}
node = seed && (seed.nodeName ? seed :
d.getElementById(seed.replace(/^#/,'')));
if (seed && sheets[seed]) {
return sheets[seed];
} else if (node && sheets[Y.stamp(node)]) {
return sheets[Y.stamp(node)];
}
if (!node || !/^(?:style|link)$/i.test(node.nodeName)) {
node = d.createElement('style');
node.type = 'text/css';
}
if (typeof seed === 'string') {
// Create entire sheet from seed cssText
if (seed.indexOf('{') != -1) {
// Not a load-time fork because low run-time impact and IE fails
// test for s.styleSheet at page load time (oddly)
if (node.styleSheet) {
node.styleSheet.cssText = seed;
} else {
node.appendChild(d.createTextNode(seed));
}
} else if (!name) {
name = seed;
}
}
if (node.parentNode !== head) {
// styleSheet isn't available on the style node in FF2 until appended
// to the head element. style nodes appended to body do not affect
// change in Safari.
head.appendChild(node);
}
// IE stores StyleSheet under the "styleSheet" property
sheet = node.sheet || node.styleSheet;
// IE stores the rules collection under the "rules" property
_rules = ('cssRules' in sheet) ? 'cssRules' : 'rules';
// IE supports removeRule
_deleteRule = ('deleteRule' in sheet) ?
function (i) { sheet.deleteRule(i); } :
function (i) { sheet.removeRule(i); };
// IE supports addRule with different signature
_insertRule = ('insertRule' in sheet) ?
function (sel,css,i) { sheet.insertRule(sel+' '+css,i); } :
function (sel,css,i) { sheet.addRule(sel,css,i); };
// Initialize the cssRules map from the node
// TODO if xdomain link node, copy to a local style block and replace the
// link node with the style node. CAVEAT: alternate stylesheet, @media
// TODO: test existing node with funky selectors
// TODO: Split comma delimited rules
var i,r,sel;
for (i = sheet[_rules].length - 1; i >= 0; --i) {
r = sheet[_rules][i];
sel = r.selectorText;
if (cssRules[sel]) {
cssRules[sel].style.cssText += ';' + r.style.cssText;
_deleteRule(i);
} else {
cssRules[sel] = r;
}
}
// Cache the sheet by the generated Id
StyleSheet.register(Y.stamp(node),this);
if (name) {
StyleSheet.register(name,this);
}
// Public API
Y.mix(this,{
getId : function () { return Y.stamp(node); },
// Enabling/disabling the stylesheet. Changes may be made to rules
// while disabled.
enable : function () { sheet.disabled = false; return this; },
disable : function () { sheet.disabled = true; return this; },
isEnabled : function () { return !sheet.disabled; },
/**
* Update style for a rule. Add the rule if it's not present already.
*
*/
set : function (sel,css) {
var rule = cssRules[sel],
multi = sel.split(/\s*,\s*/),i,
idx;
// IE's addRule doesn't support multiple comma delimited selectors
if (multi.length > 1) {
for (i = multi.length - 1; i >= 0; --i) {
this.set(multi[i], css);
}
return this;
}
// Some selector values can cause IE to hang
if (!StyleSheet.isValidSelector(sel)) {
return this;
}
// Opera throws an error if there's a syntax error in assigned
            // cssText. Avoid this by using a worker style collection, then
// assigning the resulting cssText.
if (rule) {
rule.style.cssText = StyleSheet.toCssText(css,rule.style.cssText);
} else {
idx = sheet[_rules].length;
_insertRule(sel, '{'+StyleSheet.toCssText(css)+'}', idx);
// Safari replaces the rules collection, but maintains the rule
// instances in the new collection when rules are added/removed
cssRules[sel] = sheet[_rules][idx];
}
return this;
},
// remove rule properties or an entire rule
unset : function (sel,css) {
var rule = cssRules[sel],
remove = !css,
rules, i;
if (rule) {
if (!remove) {
css = Y.Array(css);
style.cssText = rule.style.cssText;
for (i = css.length - 1; i >= 0; --i) {
_unsetProperty(style,css[i]);
}
if (style.cssText) {
rule.style.cssText = style.cssText;
} else {
remove = true;
}
}
if (remove) { // remove the rule altogether
rules = sheet[_rules];
for (i = rules.length - 1; i >= 0; --i) {
if (rules[i] === rule) {
delete cssRules[sel];
_deleteRule(i);
break;
}
}
}
}
return this;
}
},true);
}
_toCssText = function (css,base) {
var f = css.styleFloat || css.cssFloat || css['float'], prop;
style.cssText = base || '';
if (f && !css[floatAttr]) {
css = Y.merge(css);
delete css.styleFloat; delete css.cssFloat; delete css['float'];
css[floatAttr] = f;
}
for (prop in css) {
if (css.hasOwnProperty(prop)) {
// IE throws Invalid Value errors
try {
// IE doesn't like values with whitespace ala ' red' or 'red '
style[prop] = Y.Lang.trim(css[prop]);
}
catch (e) {
}
}
}
return style.cssText;
};
Y.mix(StyleSheet, {
// Wrap IE's toCssText to catch opacity. The copy/merge is to preserve the
// input object's integrity, but if float and opacity are set, the input will
// be copied twice in IE. Is there a way to avoid this without increasing the
// byte count?
toCssText : ('opacity' in style) ? _toCssText :
function (css, cssText) {
if ('opacity' in css) {
css = Y.merge(css,{
filter: 'alpha(opacity='+(css.opacity*100)+')'
});
delete css.opacity;
}
return _toCssText(css,cssText);
},
register : function (name,sheet) {
return !!(name && sheet instanceof StyleSheet &&
!sheets[name] && (sheets[name] = sheet));
},
// TODO: Selector should provide
isValidSelector : function (sel) {
// IE locks up on addRule(BAD_SELECTOR, '{..}');
// BAD_SELECTOR : unescaped `~!@$%^&()+=|{}[];'"?< or space, ., or #
// followed by anything other than an alphanumeric
// -abc or .-abc or #_abc or '# ' all fail (prob more)
// TODO: this will fail tag[prop=val] tests
return !/[^\\][`~!@$%\^&()+=|{}\[\];'"?<]|^\s*[^a-z0-9*#.]|[\s.#][^a-z0-9]/i.test(sel);
}
});
Y.StyleSheet = StyleSheet;
/*
NOTES
* Style node must be added to the head element. Safari does not honor styles
applied to StyleSheet objects on style nodes in the body.
* StyleSheet object is created on the style node when the style node is added
to the head element in Firefox 2 (and maybe 3?)
* The cssRules collection is replaced after insertRule/deleteRule calls in
Safari 3.1. Existing Rules are used in the new collection, so the collection
cannot be cached, but the rules can be.
* Opera requires that the index be passed with insertRule.
* Same-domain restrictions prevent modifying StyleSheet objects attached to
link elements with remote href (or "about:blank" or "javascript:false")
* IE names StyleSheet related properties and methods differently (see code)
* IE converts tag names to upper case in the Rule's selectorText
* IE converts empty string assignment to complex properties to value settings
for all child properties. E.g. style.background = '' sets non-'' values on
style.backgroundPosition, style.backgroundColor, etc. | StyleSheet | identifier_name |
stylesheet.js | }
}
};
function StyleSheet(seed, name) {
var head,
node,
sheet,
cssRules = {},
_rules,
_insertRule,
_deleteRule;
// Factory or constructor
if (!(this instanceof arguments.callee)) {
return new arguments.callee(seed,name);
}
head = d.getElementsByTagName('head')[0];
if (!head) {
Y.fail('HEAD element not found to append STYLE node');
}
node = seed && (seed.nodeName ? seed :
d.getElementById(seed.replace(/^#/,'')));
if (seed && sheets[seed]) {
return sheets[seed];
} else if (node && sheets[Y.stamp(node)]) {
return sheets[Y.stamp(node)];
}
if (!node || !/^(?:style|link)$/i.test(node.nodeName)) {
node = d.createElement('style');
node.type = 'text/css';
}
if (typeof seed === 'string') {
// Create entire sheet from seed cssText
if (seed.indexOf('{') != -1) {
// Not a load-time fork because low run-time impact and IE fails
// test for s.styleSheet at page load time (oddly)
if (node.styleSheet) {
node.styleSheet.cssText = seed;
} else {
node.appendChild(d.createTextNode(seed));
}
} else if (!name) {
name = seed;
}
}
if (node.parentNode !== head) {
// styleSheet isn't available on the style node in FF2 until appended
// to the head element. style nodes appended to body do not affect
// change in Safari.
head.appendChild(node);
}
// IE stores StyleSheet under the "styleSheet" property
sheet = node.sheet || node.styleSheet;
// IE stores the rules collection under the "rules" property
_rules = ('cssRules' in sheet) ? 'cssRules' : 'rules';
// IE supports removeRule
_deleteRule = ('deleteRule' in sheet) ?
function (i) { sheet.deleteRule(i); } :
function (i) { sheet.removeRule(i); };
// IE supports addRule with different signature
_insertRule = ('insertRule' in sheet) ?
function (sel,css,i) { sheet.insertRule(sel+' '+css,i); } :
function (sel,css,i) { sheet.addRule(sel,css,i); };
// Initialize the cssRules map from the node
// TODO if xdomain link node, copy to a local style block and replace the
// link node with the style node. CAVEAT: alternate stylesheet, @media
// TODO: test existing node with funky selectors
// TODO: Split comma delimited rules
var i,r,sel;
for (i = sheet[_rules].length - 1; i >= 0; --i) {
r = sheet[_rules][i];
sel = r.selectorText;
if (cssRules[sel]) {
cssRules[sel].style.cssText += ';' + r.style.cssText;
_deleteRule(i);
} else {
cssRules[sel] = r;
}
}
// Cache the sheet by the generated Id
StyleSheet.register(Y.stamp(node),this);
if (name) {
StyleSheet.register(name,this);
}
// Public API
Y.mix(this,{
getId : function () { return Y.stamp(node); },
// Enabling/disabling the stylesheet. Changes may be made to rules
// while disabled.
enable : function () { sheet.disabled = false; return this; },
disable : function () { sheet.disabled = true; return this; },
isEnabled : function () { return !sheet.disabled; },
/**
* Update style for a rule. Add the rule if it's not present already.
*
*/
set : function (sel,css) {
var rule = cssRules[sel],
multi = sel.split(/\s*,\s*/),i,
idx;
// IE's addRule doesn't support multiple comma delimited selectors
if (multi.length > 1) {
for (i = multi.length - 1; i >= 0; --i) {
this.set(multi[i], css);
}
return this;
}
// Some selector values can cause IE to hang
if (!StyleSheet.isValidSelector(sel)) {
return this;
}
// Opera throws an error if there's a syntax error in assigned
            // cssText. Avoid this by using a worker style collection, then
// assigning the resulting cssText.
if (rule) {
rule.style.cssText = StyleSheet.toCssText(css,rule.style.cssText);
} else {
idx = sheet[_rules].length;
_insertRule(sel, '{'+StyleSheet.toCssText(css)+'}', idx);
// Safari replaces the rules collection, but maintains the rule
// instances in the new collection when rules are added/removed
cssRules[sel] = sheet[_rules][idx];
}
return this;
},
// remove rule properties or an entire rule
unset : function (sel,css) {
var rule = cssRules[sel],
remove = !css,
rules, i;
if (rule) {
if (!remove) {
css = Y.Array(css);
style.cssText = rule.style.cssText;
for (i = css.length - 1; i >= 0; --i) {
_unsetProperty(style,css[i]);
}
if (style.cssText) {
rule.style.cssText = style.cssText;
} else {
remove = true;
} |
if (remove) { // remove the rule altogether
rules = sheet[_rules];
for (i = rules.length - 1; i >= 0; --i) {
if (rules[i] === rule) {
delete cssRules[sel];
_deleteRule(i);
break;
}
}
}
}
return this;
}
},true);
}
_toCssText = function (css,base) {
var f = css.styleFloat || css.cssFloat || css['float'], prop;
style.cssText = base || '';
if (f && !css[floatAttr]) {
css = Y.merge(css);
delete css.styleFloat; delete css.cssFloat; delete css['float'];
css[floatAttr] = f;
}
for (prop in css) {
if (css.hasOwnProperty(prop)) {
// IE throws Invalid Value errors
try {
// IE doesn't like values with whitespace ala ' red' or 'red '
style[prop] = Y.Lang.trim(css[prop]);
}
catch (e) {
}
}
}
return style.cssText;
};
Y.mix(StyleSheet, {
// Wrap IE's toCssText to catch opacity. The copy/merge is to preserve the
// input object's integrity, but if float and opacity are set, the input will
// be copied twice in IE. Is there a way to avoid this without increasing the
// byte count?
toCssText : ('opacity' in style) ? _toCssText :
function (css, cssText) {
if ('opacity' in css) {
css = Y.merge(css,{
filter: 'alpha(opacity='+(css.opacity*100)+')'
});
delete css.opacity;
}
return _toCssText(css,cssText);
},
register : function (name,sheet) {
return !!(name && sheet instanceof StyleSheet &&
!sheets[name] && (sheets[name] = sheet));
},
// TODO: Selector should provide
isValidSelector : function (sel) {
// IE locks up on addRule(BAD_SELECTOR, '{..}');
// BAD_SELECTOR : unescaped `~!@$%^&()+=|{}[];'"?< or space, ., or #
// followed by anything other than an alphanumeric
// -abc or .-abc or #_abc or '# ' all fail (prob more)
// TODO: this will fail tag[prop=val] tests
return !/[^\\][`~!@$%\^&()+=|{}\[\];'"?<]|^\s*[^a-z0-9*#.]|[\s.#][^a-z0-9]/i.test(sel);
}
});
Y.StyleSheet = StyleSheet;
/*
NOTES
* Style node must be added to the head element. Safari does not honor styles
applied to StyleSheet objects on style nodes in the body.
* StyleSheet object is created on the style node when the style node is added
to the head element in Firefox 2 (and maybe 3?)
* The cssRules collection is replaced after insertRule/deleteRule calls in
Safari 3.1. Existing Rules are used in the new collection, so the collection
cannot be cached, but the rules can be.
* Opera requires that the index be passed with insertRule.
* Same-domain restrictions prevent modifying StyleSheet objects attached to
link elements with remote href (or "about:blank" or "javascript:false")
* IE names StyleSheet related properties and methods differently (see code)
* IE converts tag names to upper case in the Rule's selectorText
* IE converts empty string assignment to complex properties to value settings
for all child properties. E.g. style.background = '' sets non-'' values on
style.backgroundPosition, style.backgroundColor, etc. All | } | random_line_split |
Puspesh_farmer1.py | last block of the chain.
def get_previous_block(self):
return self.chain[-1]
    #It runs a loop and checks whether the hash of new proof^2 - previous proof^2 contains 4 leading zeroes.
    #If yes, it returns the new proof; otherwise it increments the new proof by 1 and iterates again.
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(str(new_proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
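    # Illustrative check (not part of the original file): a candidate proof p is accepted when
    # hashlib.sha256(str(p**2 - previous_proof**2).encode()).hexdigest() starts with '0000',
    # so on average roughly 1 in 16**4 (65536) candidates succeeds.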
#- It returns the hash of the block using sha256
def hash(self, block):
encoded_block = json.dumps(block, sort_keys = True).encode()
return hashlib.sha256(encoded_block).hexdigest()
    #It iterates over the chain and checks that each block's previous_hash equals the hash returned by the hash function,
    #then it checks whether the hash of (current block's proof^2 - previous block's proof^2) contains 4 leading zeroes.
    #If either check fails, the chain is not valid.
def | (self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
    #- It creates the private key using RSA.generate(1024), then derives the public key,
    # the hash of the transaction (the hash of the concatenated hashes of name, crop_name, quantity and rate),
    # data (the transaction hash converted to an int),
    # and the signature (data raised to the power privatekey.d modulo privatekey.n).
    # It then appends a dictionary containing all this information in hashed form to the list farmer_details
    # and returns the index of the block that will hold it.
def add_farmerdetails(self, name, crop_name, quantity,rate):
privatekey = RSA.generate(1024)
publickey = privatekey.publickey()
hash_of_transaction=hashlib.sha256((hashlib.sha256(name.encode()).hexdigest()+hashlib.sha256(crop_name.encode()).hexdigest()+hashlib.sha256(str(quantity).encode()).hexdigest()+hashlib.sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()
data=int(hash_of_transaction,16)
signature=pow(data,privatekey.d,privatekey.n)
self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.encode()).hexdigest(),
'crop_name': hashlib.sha256(crop_name.encode()).hexdigest(),
'quantity_inkg': hashlib.sha256(str(quantity).encode()).hexdigest(),
'rate_perkg': hashlib.sha256(str(rate).encode()).hexdigest(),
'hash_of_transaction': hash_of_transaction,
'signature': signature
})
previous_block = self.get_previous_block()
return previous_block['index'] + 1
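    # Sketch of a verification step this class does not implement (assumption, for illustration only):
    # the signature could be checked with the matching public key via
    # pow(signature, publickey.e, publickey.n) == data, since signature = data**d mod n above.
    # Note that the public key is currently discarded when add_farmerdetails returns.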
    #It parses the address with urlparse and adds its network location to the node set self.nodes.
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
    #It visits every node in the node set, queries each one's chain via its get_chain endpoint (described below),
    # and replaces the current chain with the longest valid chain found across all nodes.
def replace_chain(self):
network = self.nodes
longest_chain = None
max_length = len(self.chain)
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
if longest_chain:
self.chain = longest_chain
return True
return False
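    # Illustrative flow (not in the original file): after peers are registered through /connect_node,
    # calling replace_chain() adopts the longest chain that passes is_chain_valid() among all peers.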
# Part 2 - Mining our Blockchain
# Creating a Web App
app = Flask(__name__)
# Creating an address for the node on Port 5001
node_address = str(uuid4()).replace('-', '')
# Creating a Blockchain
blockchain = Blockchain()
# Mining a new block
#- It accesses the previous block by calling get_previous_block(),
#reads the previous proof from previous_block['proof'],
#creates a new proof with proof_of_work(previous_proof),
#computes the hash of the previous block with blockchain.hash(previous_block),
#then calls create_block(proof, previous_hash) and finally computes the hash of the newly mined block.
#It builds a response containing all the details of the new block, jsonifies it and returns it.
@app.route('/mine_block', methods = ['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
#blockchain.add_transaction(sender = node_address, receiver = 'Hadelin', amount = 1)
block = blockchain.create_block(proof, previous_hash)
current_block=blockchain.get_previous_block()
current_hash=blockchain.hash(current_block)
response = {'message': 'Congratulations, you just mined a block!',
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
'farmer': block['farmer_details'],
'current_hash': current_hash}
return jsonify(response), 200
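# Example request against a locally running node (illustrative; assumes the app is served on port 5001):
#   curl http://127.0.0.1:5001/mine_block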
# Getting the full Blockchain
#- It creates an empty list chain_till_now, then iterates over every block in the blockchain and finds its hash.
#It then checks whether the block's farmer_details list is empty.
#If it is empty, it appends a dictionary containing the current block's index, timestamp, proof, previous_hash, current_hash and farmer_details.
#If farmer_details is not empty, it first takes the length of the list,
#then iterates over it and concatenates the hash_of_transaction values
#stored in each dictionary of farmer_details. It then hashes this concatenated string; the result is the merged hash.
#Next it creates a dictionary containing the merged hash, index, timestamp, proof, previous_hash, farmer_details and current hash,
#and appends this dictionary to chain_till_now.
#Finally it creates the response containing chain_till_now and the length of the blockchain, jsonifies it and returns it.
@app.route('/print_chain',methods=['GET'])
def print_chain():
chain_till_now =[]
for xblock in blockchain.chain:
xcurrent_hash=blockchain.hash(xblock)
if len(xblock['farmer_details'])==0:
chain_till_now.append({'index': xblock['index'],
'timestamp': xblock['timestamp'],
'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'],
'farmer': xblock['farmer_details'],
'current_hash': xcurrent_hash})
else:
l=len(xblock['farmer_details'])
sum=""
l-=1
while(l>=0):
sum=xblock['farmer_details'][l]['hash_of_transaction']+sum
l-=1
chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode()).hexdigest(),
'index': xblock['index'],
'timestamp': xblock['timestamp'],
'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'],
'farmer': xblock['farmer_details'],
'current_hash': xcurrent_hash})
response = {'chain': chain_till_now,
'length': len(blockchain.chain)}
return jsonify(response), 200
#- It creates the response containing blockchain.chain and its length, jsonifies it and returns it.
@app.route('/get_chain', methods = ['GET'])
def get_chain():
response = {'chain': blockchain.chain,
'length': len(blockchain.chain)}
return jsonify(response), 200
# Checking if the Blockchain is valid
#- It calls the function is_chain_valid and returns a string as response based on whether the chain is valid or not.
@app.route('/is_valid', methods = ['GET'])
def is_valid():
is_valid = blockchain.is_chain_valid(blockchain.chain)
if is_valid:
response = {'message': 'All good. The Blockchain is valid.'}
else:
response = {'message': 'Houston, we have a problem. The Blockchain is not valid.'}
return jsonify(response), 200
# Adding a new transaction to the Blockchain
#It takes the input in Jason format and checks if all the keys in the farmer keys(name_of_farmer,crop_name,quantity_inkg | is_chain_valid | identifier_name |
Puspesh_farmer1.py | block of the chain.
def get_previous_block(self):
return self.chain[-1]
    #It runs a loop and checks whether the hash of new proof^2 - previous proof^2 contains 4 leading zeroes.
    #If yes, it returns the new proof; otherwise it increments the new proof by 1 and iterates again.
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(str(new_proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
#- It returns the hash of the block using sha256
def hash(self, block):
encoded_block = json.dumps(block, sort_keys = True).encode()
return hashlib.sha256(encoded_block).hexdigest()
    #It iterates over the chain and checks that each block's previous_hash equals the hash returned by the hash function,
    #then it checks whether the hash of (current block's proof^2 - previous block's proof^2) contains 4 leading zeroes.
    #If either check fails, the chain is not valid.
def is_chain_valid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
    #- It creates the private key using RSA.generate(1024), then derives the public key,
    # the hash of the transaction (the hash of the concatenated hashes of name, crop_name, quantity and rate),
    # data (the transaction hash converted to an int),
    # and the signature (data raised to the power privatekey.d modulo privatekey.n).
    # It then appends a dictionary containing all this information in hashed form to the list farmer_details
    # and returns the index of the block that will hold it.
def add_farmerdetails(self, name, crop_name, quantity,rate):
privatekey = RSA.generate(1024)
publickey = privatekey.publickey()
hash_of_transaction=hashlib.sha256((hashlib.sha256(name.encode()).hexdigest()+hashlib.sha256(crop_name.encode()).hexdigest()+hashlib.sha256(str(quantity).encode()).hexdigest()+hashlib.sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()
data=int(hash_of_transaction,16)
signature=pow(data,privatekey.d,privatekey.n)
self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.encode()).hexdigest(),
'crop_name': hashlib.sha256(crop_name.encode()).hexdigest(),
'quantity_inkg': hashlib.sha256(str(quantity).encode()).hexdigest(),
'rate_perkg': hashlib.sha256(str(rate).encode()).hexdigest(),
'hash_of_transaction': hash_of_transaction,
'signature': signature
})
previous_block = self.get_previous_block()
return previous_block['index'] + 1
    #It parses the address with urlparse and adds its network location to the node set self.nodes.
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
    #It visits every node in the node set, queries each one's chain via its get_chain endpoint (described below),
    # and replaces the current chain with the longest valid chain found across all nodes.
def replace_chain(self):
network = self.nodes
longest_chain = None
max_length = len(self.chain)
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
if longest_chain:
self.chain = longest_chain
return True
return False
# Part 2 - Mining our Blockchain
# Creating a Web App
app = Flask(__name__)
# Creating an address for the node on Port 5001
node_address = str(uuid4()).replace('-', '')
# Creating a Blockchain
blockchain = Blockchain()
# Mining a new block
#- It accesses the previous block by calling get_previous_block(),
#reads the previous proof from previous_block['proof'],
#creates a new proof with proof_of_work(previous_proof),
#computes the hash of the previous block with blockchain.hash(previous_block),
#then calls create_block(proof, previous_hash) and finally computes the hash of the newly mined block.
#It builds a response containing all the details of the new block, jsonifies it and returns it.
@app.route('/mine_block', methods = ['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
#blockchain.add_transaction(sender = node_address, receiver = 'Hadelin', amount = 1)
block = blockchain.create_block(proof, previous_hash)
current_block=blockchain.get_previous_block()
current_hash=blockchain.hash(current_block)
response = {'message': 'Congratulations, you just mined a block!',
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
'farmer': block['farmer_details'],
'current_hash': current_hash}
return jsonify(response), 200
# Getting the full Blockchain
#- It creates an empty list chain_till_now, then iterates over every block in the blockchain and finds its hash.
#It then checks whether the block's farmer_details list is empty.
#If it is empty, it appends a dictionary containing the current block's index, timestamp, proof, previous_hash, current_hash and farmer_details.
#If farmer_details is not empty, it first takes the length of the list,
#then iterates over it and concatenates the hash_of_transaction values
#stored in each dictionary of farmer_details. It then hashes this concatenated string; the result is the merged hash.
#Next it creates a dictionary containing the merged hash, index, timestamp, proof, previous_hash, farmer_details and current hash,
#and appends this dictionary to chain_till_now.
#Finally it creates the response containing chain_till_now and the length of the blockchain, jsonifies it and returns it.
@app.route('/print_chain',methods=['GET'])
def print_chain():
chain_till_now =[]
for xblock in blockchain.chain:
xcurrent_hash=blockchain.hash(xblock)
if len(xblock['farmer_details'])==0:
chain_till_now.append({'index': xblock['index'],
'timestamp': xblock['timestamp'],
'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'],
'farmer': xblock['farmer_details'],
'current_hash': xcurrent_hash})
else:
l=len(xblock['farmer_details'])
sum=""
l-=1
while(l>=0):
sum=xblock['farmer_details'][l]['hash_of_transaction']+sum
l-=1
chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode()).hexdigest(),
'index': xblock['index'],
'timestamp': xblock['timestamp'],
'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'],
'farmer': xblock['farmer_details'],
'current_hash': xcurrent_hash})
response = {'chain': chain_till_now,
'length': len(blockchain.chain)}
return jsonify(response), 200
#- It creates the response containing blockchain.chain and its length, jsonifies it and returns it.
@app.route('/get_chain', methods = ['GET'])
def get_chain():
response = {'chain': blockchain.chain,
'length': len(blockchain.chain)}
return jsonify(response), 200
# Checking if the Blockchain is valid
#- It calls the function is_chain_valid and returns a string as response based on whether the chain is valid or not.
@app.route('/is_valid', methods = ['GET'])
def is_valid():
is_valid = blockchain.is_chain_valid(blockchain.chain)
if is_valid:
response = {'message': 'All good. The Blockchain is valid.'}
else:
response = {'message': 'Houston, we have a problem. The Blockchain is not valid.'}
return jsonify(response), 200
# Adding a new transaction to the Blockchain
#It takes the input in Jason format and checks if all the keys in the farmer keys(name_of_farmer,crop_name,quantity_inkg, | random_line_split |
|
Puspesh_farmer1.py | _hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
    #- It creates the private key using RSA.generate(1024), then derives the public key,
    # the hash of the transaction (the hash of the concatenated hashes of name, crop_name, quantity and rate),
    # data (the transaction hash converted to an int),
    # and the signature (data raised to the power privatekey.d modulo privatekey.n).
    # It then appends a dictionary containing all this information in hashed form to the list farmer_details
    # and returns the index of the block that will hold it.
def add_farmerdetails(self, name, crop_name, quantity,rate):
privatekey = RSA.generate(1024)
publickey = privatekey.publickey()
hash_of_transaction=hashlib.sha256((hashlib.sha256(name.encode()).hexdigest()+hashlib.sha256(crop_name.encode()).hexdigest()+hashlib.sha256(str(quantity).encode()).hexdigest()+hashlib.sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()
data=int(hash_of_transaction,16)
signature=pow(data,privatekey.d,privatekey.n)
self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.encode()).hexdigest(),
'crop_name': hashlib.sha256(crop_name.encode()).hexdigest(),
'quantity_inkg': hashlib.sha256(str(quantity).encode()).hexdigest(),
'rate_perkg': hashlib.sha256(str(rate).encode()).hexdigest(),
'hash_of_transaction': hash_of_transaction,
'signature': signature
})
previous_block = self.get_previous_block()
return previous_block['index'] + 1
    #It parses the address with urlparse and adds its network location to the node set self.nodes.
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
    #It visits every node in the node set, queries each one's chain via its get_chain endpoint (described below),
    # and replaces the current chain with the longest valid chain found across all nodes.
def replace_chain(self):
network = self.nodes
longest_chain = None
max_length = len(self.chain)
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
if longest_chain:
self.chain = longest_chain
return True
return False
# Part 2 - Mining our Blockchain
# Creating a Web App
app = Flask(__name__)
# Creating an address for the node on Port 5001
node_address = str(uuid4()).replace('-', '')
# Creating a Blockchain
blockchain = Blockchain()
# Mining a new block
#- It accesses the previous block by calling get_previous_block(),
#reads the previous proof from previous_block['proof'],
#creates a new proof with proof_of_work(previous_proof),
#computes the hash of the previous block with blockchain.hash(previous_block),
#then calls create_block(proof, previous_hash) and finally computes the hash of the newly mined block.
#It builds a response containing all the details of the new block, jsonifies it and returns it.
@app.route('/mine_block', methods = ['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
#blockchain.add_transaction(sender = node_address, receiver = 'Hadelin', amount = 1)
block = blockchain.create_block(proof, previous_hash)
current_block=blockchain.get_previous_block()
current_hash=blockchain.hash(current_block)
response = {'message': 'Congratulations, you just mined a block!',
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
'farmer': block['farmer_details'],
'current_hash': current_hash}
return jsonify(response), 200
# Getting the full Blockchain
#- It creates an empty list chain_till_now, then iterates over every block in the blockchain and finds its hash.
#It then checks whether the block's farmer_details list is empty.
#If it is empty, it appends a dictionary containing the current block's index, timestamp, proof, previous_hash, current_hash and farmer_details.
#If farmer_details is not empty, it first takes the length of the list,
#then iterates over it and concatenates the hash_of_transaction values
#stored in each dictionary of farmer_details. It then hashes this concatenated string; the result is the merged hash.
#Next it creates a dictionary containing the merged hash, index, timestamp, proof, previous_hash, farmer_details and current hash,
#and appends this dictionary to chain_till_now.
#Finally it creates the response containing chain_till_now and the length of the blockchain, jsonifies it and returns it.
@app.route('/print_chain',methods=['GET'])
def print_chain():
chain_till_now =[]
for xblock in blockchain.chain:
xcurrent_hash=blockchain.hash(xblock)
if len(xblock['farmer_details'])==0:
chain_till_now.append({'index': xblock['index'],
'timestamp': xblock['timestamp'],
'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'],
'farmer': xblock['farmer_details'],
'current_hash': xcurrent_hash})
else:
l=len(xblock['farmer_details'])
sum=""
l-=1
while(l>=0):
sum=xblock['farmer_details'][l]['hash_of_transaction']+sum
l-=1
chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode()).hexdigest(),
'index': xblock['index'],
'timestamp': xblock['timestamp'],
'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'],
'farmer': xblock['farmer_details'],
'current_hash': xcurrent_hash})
response = {'chain': chain_till_now,
'length': len(blockchain.chain)}
return jsonify(response), 200
#- It creates the response containing blockchain.chain and its length, jsonifies it and returns it.
@app.route('/get_chain', methods = ['GET'])
def get_chain():
response = {'chain': blockchain.chain,
'length': len(blockchain.chain)}
return jsonify(response), 200
# Checking if the Blockchain is valid
#- It calls the function is_chain_valid and returns a string as response based on whether the chain is valid or not.
@app.route('/is_valid', methods = ['GET'])
def is_valid():
is_valid = blockchain.is_chain_valid(blockchain.chain)
if is_valid:
response = {'message': 'All good. The Blockchain is valid.'}
else:
response = {'message': 'Houston, we have a problem. The Blockchain is not valid.'}
return jsonify(response), 200
# Adding a new transaction to the Blockchain
#It takes the input in JSON format and checks that all the farmer keys (name_of_farmer, crop_name, quantity_inkg, rate_perkg) are present in the JSON body.
#If not, it returns a message that some elements are missing;
#otherwise it calls blockchain.add_farmerdetails with the farmer details from the JSON body and
#returns the index of the block in which these details will be added.
@app.route('/add_farmerdetails', methods = ['POST'])
def add_farmer_details():
json = request.get_json()
farmer_keys = ['name_of_farmer', 'crop_name', 'quantity_inkg','rate_perkg']
if not all(key in json for key in farmer_keys):
return 'Some elements of the farmer_details are missing', 400
index = blockchain.add_farmerdetails(json['name_of_farmer'], json['crop_name'], json['quantity_inkg'], json['rate_perkg'])
response = {'message': f'These details will be added to Block {index}'}
return jsonify(response), 201
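# Example request (illustrative values; assumes the app is served on port 5001):
#   curl -X POST http://127.0.0.1:5001/add_farmerdetails \
#        -H "Content-Type: application/json" \
#        -d '{"name_of_farmer": "A", "crop_name": "wheat", "quantity_inkg": 100, "rate_perkg": 20}'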
# Part 3 - Decentralizing our Blockchain
# Connecting new nodes
#It takes a JSON body as the request and first checks whether it contains any nodes.
# If it does, it calls blockchain.add_node for each of them.
#Then it returns the list of blockchain.nodes as the response.
@app.route('/connect_node', methods = ['POST'])
def connect_node():
json = request.get_json()
nodes = json.get('nodes')
if nodes is None:
return "No n | ode", 400
for no | conditional_block |
|
Puspesh_farmer1.py | return self.chain[-1]
    #It runs a loop and checks whether the hash of new proof^2 - previous proof^2 contains 4 leading zeroes.
    #If yes, it returns the new proof; otherwise it increments the new proof by 1 and iterates again.
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = hashlib.sha256(str(new_proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
#- It returns the hash of the block using sha256
def hash(self, block):
encoded_block = json.dumps(block, sort_keys = True).encode()
return hashlib.sha256(encoded_block).hexdigest()
    #It iterates over the chain and checks that each block's previous_hash equals the hash returned by the hash function,
    #then it checks whether the hash of (current block's proof^2 - previous block's proof^2) contains 4 leading zeroes.
    #If either check fails, the chain is not valid.
def is_chain_valid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block['previous_hash'] != self.hash(previous_block):
return False
previous_proof = previous_block['proof']
proof = block['proof']
hash_operation = hashlib.sha256(str(proof**2 - previous_proof**2).encode()).hexdigest()
if hash_operation[:4] != '0000':
return False
previous_block = block
block_index += 1
return True
    #- It creates the private key using RSA.generate(1024), then derives the public key,
    # the hash of the transaction (the hash of the concatenated hashes of name, crop_name, quantity and rate),
    # data (the transaction hash converted to an int),
    # and the signature (data raised to the power privatekey.d modulo privatekey.n).
    # It then appends a dictionary containing all this information in hashed form to the list farmer_details
    # and returns the index of the block that will hold it.
def add_farmerdetails(self, name, crop_name, quantity,rate):
privatekey = RSA.generate(1024)
publickey = privatekey.publickey()
hash_of_transaction=hashlib.sha256((hashlib.sha256(name.encode()).hexdigest()+hashlib.sha256(crop_name.encode()).hexdigest()+hashlib.sha256(str(quantity).encode()).hexdigest()+hashlib.sha256(str(rate).encode()).hexdigest()).encode()).hexdigest()
data=int(hash_of_transaction,16)
signature=pow(data,privatekey.d,privatekey.n)
self.farmer_details.append({'name_of_farmer': hashlib.sha256(name.encode()).hexdigest(),
'crop_name': hashlib.sha256(crop_name.encode()).hexdigest(),
'quantity_inkg': hashlib.sha256(str(quantity).encode()).hexdigest(),
'rate_perkg': hashlib.sha256(str(rate).encode()).hexdigest(),
'hash_of_transaction': hash_of_transaction,
'signature': signature
})
previous_block = self.get_previous_block()
return previous_block['index'] + 1
    #It parses the address with urlparse and adds its network location to the node set self.nodes.
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
    #It visits every node in the node set, queries each one's chain via its get_chain endpoint (described below),
    # and replaces the current chain with the longest valid chain found across all nodes.
def replace_chain(self):
network = self.nodes
longest_chain = None
max_length = len(self.chain)
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
if longest_chain:
self.chain = longest_chain
return True
return False
# Part 2 - Mining our Blockchain
# Creating a Web App
app = Flask(__name__)
# Creating an address for the node on Port 5001
node_address = str(uuid4()).replace('-', '')
# Creating a Blockchain
blockchain = Blockchain()
# Mining a new block
#- It accesses the previous block by calling get_previous_block(),
#reads the previous proof from previous_block['proof'],
#creates a new proof with proof_of_work(previous_proof),
#computes the hash of the previous block with blockchain.hash(previous_block),
#then calls create_block(proof, previous_hash) and finally computes the hash of the newly mined block.
#It builds a response containing all the details of the new block, jsonifies it and returns it.
@app.route('/mine_block', methods = ['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block['proof']
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
#blockchain.add_transaction(sender = node_address, receiver = 'Hadelin', amount = 1)
block = blockchain.create_block(proof, previous_hash)
current_block=blockchain.get_previous_block()
current_hash=blockchain.hash(current_block)
response = {'message': 'Congratulations, you just mined a block!',
'index': block['index'],
'timestamp': block['timestamp'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
'farmer': block['farmer_details'],
'current_hash': current_hash}
return jsonify(response), 200
# Getting the full Blockchain
#- It creates an empty list chain_till_now, then iterates over every block in the blockchain and finds its hash.
#It then checks whether the block's farmer_details list is empty.
#If it is empty, it appends a dictionary containing the current block's index, timestamp, proof, previous_hash, current_hash and farmer_details.
#If farmer_details is not empty, it first takes the length of the list,
#then iterates over it and concatenates the hash_of_transaction values
#stored in each dictionary of farmer_details. It then hashes this concatenated string; the result is the merged hash.
#Next it creates a dictionary containing the merged hash, index, timestamp, proof, previous_hash, farmer_details and current hash,
#and appends this dictionary to chain_till_now.
#Finally it creates the response containing chain_till_now and the length of the blockchain, jsonifies it and returns it.
@app.route('/print_chain',methods=['GET'])
def print_chain():
chain_till_now =[]
for xblock in blockchain.chain:
xcurrent_hash=blockchain.hash(xblock)
if len(xblock['farmer_details'])==0:
chain_till_now.append({'index': xblock['index'],
'timestamp': xblock['timestamp'],
'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'],
'farmer': xblock['farmer_details'],
'current_hash': xcurrent_hash})
else:
l=len(xblock['farmer_details'])
sum=""
l-=1
while(l>=0):
sum=xblock['farmer_details'][l]['hash_of_transaction']+sum
l-=1
chain_till_now.append({'Merged_hash': hashlib.sha256(sum.encode()).hexdigest(),
'index': xblock['index'],
'timestamp': xblock['timestamp'],
'proof': xblock['proof'],
'previous_hash': xblock['previous_hash'],
'farmer': xblock['farmer_details'],
'current_hash': xcurrent_hash})
response = {'chain': chain_till_now,
'length': len(blockchain.chain)}
return jsonify(response), 200
#- It creates the response containing blockchain.chain and its length, jsonifies it and returns it.
@app.route('/get_chain', methods = ['GET'])
def get_chain():
response = {' | def __init__(self):
self.chain = []
self.farmer_details = []
self.create_block(proof = 1, previous_hash = '0')
self.nodes = set()
    #It creates a dictionary block which contains index (length of chain + 1), timestamp (via the datetime module),
    #proof (passed as a parameter), previous_hash (passed as a parameter),
    #and farmer_details (from self), and appends this block to the chain.
def create_block(self, proof, previous_hash):
block = {'index': len(self.chain) + 1,
'timestamp': str(datetime.datetime.now()),
'proof': proof,
'previous_hash': previous_hash,
'farmer_details': self.farmer_details}
self.farmer_details = []
self.chain.append(block)
return block
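    # Illustrative shape of a stored block (values are examples only):
    # {'index': 1, 'timestamp': '2024-01-01 00:00:00.000000', 'proof': 1,
    #  'previous_hash': '0', 'farmer_details': []}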
#It returns the last block of the chain.
def get_previous_block(self):
| identifier_body |
|
main_utils.py | " + "=" * 30)
logger.add_line(str(model))
logger.add_line("=" * 30 + " Parameters " + "=" * 30)
logger.add_line(parameter_description(model))
return model
def distribute_model_to_cuda(models, args, batch_size, num_workers, ngpus_per_node):
squeeze = False
if not isinstance(models, list):
models = [models]
squeeze = True
for i in range(len(models)):
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
models[i].cuda(args.gpu)
models[i] = torch.nn.parallel.DistributedDataParallel(models[i], device_ids=[args.gpu])
else:
models[i].cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
models[i] = torch.nn.parallel.DistributedDataParallel(models[i])
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
models[i] = models[i].cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
models[i] = torch.nn.DataParallel(models[i]).cuda()
if squeeze:
models = models[0]
if args.distributed and args.gpu is not None:
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
batch_size = int(batch_size / ngpus_per_node)
num_workers = int((num_workers + ngpus_per_node - 1) / ngpus_per_node)
return models, args, batch_size, num_workers
def build_criterion(cfg, logger=None):
import criterions
if cfg['name'] == 'contrastive':
criterion = criterions.ContrastiveLoss(**cfg['args'])
elif cfg['name'] == 'contrastive-hard':
criterion = criterions.HardContrastiveLoss(**cfg['args'])
else:
criterion = criterions.__dict__[cfg['name']](**cfg['args'])
if logger is not None:
logger.add_line("=" * 30 + " Criterion " + "=" * 30)
logger.add_line(str(criterion))
logger.add_line("=" * 30 + " Criterion Parameters " + "=" * 30)
logger.add_line(parameter_description(criterion))
logger.add_line("")
return criterion
def build_optimizer(params, cfg, logger=None):
if cfg['name'] == 'sgd':
optimizer = torch.optim.SGD(
params=params,
lr=cfg['lr']['base_lr'],
momentum=cfg['momentum'],
weight_decay=cfg['weight_decay'],
nesterov=cfg['nesterov']
)
elif cfg['name'] == 'adam':
optimizer = torch.optim.Adam(
params=params,
lr=cfg['lr']['base_lr'], | else:
raise ValueError('Unknown optimizer.')
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg['lr']['milestones'], gamma=cfg['lr']['gamma'])
if 'warmup' in cfg:
scheduler = GradualWarmupScheduler(optimizer,
multiplier=cfg['warmup']['multiplier'],
total_epoch=cfg['warmup']['epochs'],
after_scheduler=scheduler)
logger.add_line("=" * 30 + " Optimizer " + "=" * 30)
logger.add_line(str(optimizer))
return optimizer, scheduler
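# Example of a config this builder accepts (illustrative values, not taken from the repo's config files):
#   cfg = {'name': 'sgd', 'momentum': 0.9, 'weight_decay': 1e-4, 'nesterov': False,
#          'lr': {'base_lr': 0.1, 'milestones': [30, 60], 'gamma': 0.1}}
#   optimizer, scheduler = build_optimizer(model.parameters(), cfg, logger)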
def build_dataloaders(cfg, num_workers, distributed, logger):
train_loader = build_dataloader(cfg, cfg['train'], num_workers, distributed)
logger.add_line("\n"+"="*30+" Train data "+"="*30)
logger.add_line(str(train_loader.dataset))
test_loader = build_dataloader(cfg, cfg['test'], num_workers, distributed)
    logger.add_line("\n"+"="*30+" Test data "+"="*30)
    logger.add_line(str(test_loader.dataset))
return train_loader, test_loader
def build_dataloader(db_cfg, split_cfg, num_workers, distributed):
import torch.utils.data as data
import torch.utils.data.distributed
if db_cfg['name'] == 'yt360':
db = build_360_dataset(db_cfg, split_cfg)
else:
db = build_video_dataset(db_cfg, split_cfg)
if distributed:
sampler = torch.utils.data.distributed.DistributedSampler(db)
else:
sampler = None
loader = torch.utils.data.DataLoader(
db,
batch_size=db_cfg['batch_size'],
shuffle=False,
drop_last=split_cfg['drop_last'],
num_workers=num_workers,
pin_memory=True,
sampler=sampler)
return loader
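# Note (assumption based on standard PyTorch practice, not shown here): when `distributed` is True the
# caller is expected to call loader.sampler.set_epoch(epoch) at the start of each epoch so that
# DistributedSampler reshuffles the data differently per epoch.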
def build_360_dataset(db_cfg, split_cfg):
from datasets import preprocessing
import datasets
assert db_cfg['name'] == 'yt360'
if '360to2D' in db_cfg and db_cfg['360to2D']:
joint_transform = preprocessing.Spatial2Planar(
size=(db_cfg['frame_size'], db_cfg['frame_size'])
)
video_transform = preprocessing.VideoPrep_MSC_CJ(
crop=(db_cfg['crop_size'], db_cfg['crop_size']),
augment=split_cfg['use_augmentation'],
num_frames=int(db_cfg['video_fps'] * db_cfg['video_clip_duration']),
pad_missing=True,
)
else:
if 'horizon_only' not in db_cfg:
db_cfg['horizon_only'] = False
if 'crop_margin' not in db_cfg:
db_cfg['crop_margin'] = 0.
joint_transform = preprocessing.SpatialVideoCropTool(
size=(db_cfg['crop_size'], db_cfg['crop_size']),
hfov_lims=db_cfg['hfov_lims'],
horizon_only=db_cfg['horizon_only'],
margin=db_cfg['crop_margin'],
pos=db_cfg['crop_method'],
audio_input=db_cfg['audio_input'],
num_crops=1 if db_cfg['use_temporal_augm'] else db_cfg['augm_per_clip'],
)
video_transform = preprocessing.VideoPrep_CJ(
augment=split_cfg['use_augmentation'],
num_frames=int(db_cfg['video_fps'] * db_cfg['video_clip_duration']),
pad_missing=True,
)
audio_transforms = [
preprocessing.AudioPrep(
mono=db_cfg['audio_input']=='mono',
duration=db_cfg['audio_clip_duration'],
augment=split_cfg['use_augmentation']),
preprocessing.LogMelSpectrogram(
db_cfg['audio_fps'],
n_mels=db_cfg['n_mels'],
n_fft=db_cfg['n_fft'],
hop_size=1. / db_cfg['spectrogram_fps'],
normalize=True)
]
clips_per_video = split_cfg['clips_per_video'] if 'clips_per_video' in split_cfg else 1
db = datasets.YT360(
subset=split_cfg['subset'],
full_res=db_cfg['full_res'],
sampling=db_cfg['sampling'],
return_video=True,
video_clip_duration=db_cfg['video_clip_duration'],
video_fps=db_cfg['video_fps'],
return_audio=True,
audio_clip_duration=db_cfg['audio_clip_duration'],
audio_fps=db_cfg['audio_fps'],
spect_fps=db_cfg['spectrogram_fps'],
joint_transform=joint_transform,
video_transform=video_transform,
audio_transform=audio_transforms,
max_offsync_augm=0.,
return_position=True,
return_labels=False,
mode='clip',
clips_per_video=clips_per_video,
augm_per_clip=db_cfg['augm_per_clip'] if db_cfg['use_temporal_augm'] else 1,
use_temporal_augm=db_cfg['use_temporal_augm'],
use_spatial_augm=db_cfg['use_spatial_augm'],
misalign=db_cfg['misalign'] if 'misalign' in db_cfg else False,
rotate_mode=db_cfg['rotate_mode'] if 'rotate_mode' in db_cfg else 'quat',
shuffle=True,
)
return db
def build_video_dataset(db_cfg, split_cfg):
from datasets import preprocessing
import datasets
video_transform = preprocessing.VideoPrep_MSC_CJ(
crop=(db_cfg['crop_size'], db_cfg['crop_size']),
augment=split_cfg['use_augmentation'],
num_frames=int(db_cfg['video_fps'] * db_cfg['video_clip_duration']),
pad_missing=True,
)
audio_transforms = [
preprocessing.AudioPrep(
mono=True,
duration=db_cfg['audio_clip_duration'],
augment=split_cfg['use_augmentation']),
preprocessing.LogMelSpectrogram(
db_cfg['audio_fps'],
n_mels=db_cfg['n_mels'],
n_fft=db_cfg['n_fft'],
hop_size=1. / db_cfg['spectrogram_fps'],
normalize=True)
]
if db_cfg['name'] == 'ucf-101':
dataset = datasets.UCF
elif db_cfg['name'] == 'hmdb':
dataset = datasets.HMDB
else:
raise ValueError('Unknown dataset')
clips_per_video = split_cfg['cl | weight_decay=cfg['weight_decay']
)
| random_line_split |
main_utils.py | _cfg['audio_clip_duration'],
audio_fps=db_cfg['audio_fps'],
spect_fps=db_cfg['spectrogram_fps'],
joint_transform=joint_transform,
video_transform=video_transform,
audio_transform=audio_transforms,
max_offsync_augm=0.,
return_position=True,
return_labels=False,
mode='clip',
clips_per_video=clips_per_video,
augm_per_clip=db_cfg['augm_per_clip'] if db_cfg['use_temporal_augm'] else 1,
use_temporal_augm=db_cfg['use_temporal_augm'],
use_spatial_augm=db_cfg['use_spatial_augm'],
misalign=db_cfg['misalign'] if 'misalign' in db_cfg else False,
rotate_mode=db_cfg['rotate_mode'] if 'rotate_mode' in db_cfg else 'quat',
shuffle=True,
)
return db
def build_video_dataset(db_cfg, split_cfg):
from datasets import preprocessing
import datasets
video_transform = preprocessing.VideoPrep_MSC_CJ(
crop=(db_cfg['crop_size'], db_cfg['crop_size']),
augment=split_cfg['use_augmentation'],
num_frames=int(db_cfg['video_fps'] * db_cfg['video_clip_duration']),
pad_missing=True,
)
audio_transforms = [
preprocessing.AudioPrep(
mono=True,
duration=db_cfg['audio_clip_duration'],
augment=split_cfg['use_augmentation']),
preprocessing.LogMelSpectrogram(
db_cfg['audio_fps'],
n_mels=db_cfg['n_mels'],
n_fft=db_cfg['n_fft'],
hop_size=1. / db_cfg['spectrogram_fps'],
normalize=True)
]
if db_cfg['name'] == 'ucf-101':
dataset = datasets.UCF
elif db_cfg['name'] == 'hmdb':
dataset = datasets.HMDB
else:
raise ValueError('Unknown dataset')
clips_per_video = split_cfg['clips_per_video'] if 'clips_per_video' in split_cfg else 1
db = dataset(
subset=split_cfg['subset'],
full_res=db_cfg['full_res'],
return_video=True,
video_clip_duration=db_cfg['video_clip_duration'],
video_fps=db_cfg['video_fps'],
return_audio=True,
audio_clip_duration=db_cfg['audio_clip_duration'],
audio_fps=db_cfg['audio_fps'],
spect_fps=db_cfg['spectrogram_fps'],
video_transform=video_transform,
audio_transform=audio_transforms,
max_offsync_augm=0.5 if split_cfg['use_augmentation'] else 0,
return_labels=False if 'return_labels' not in db_cfg else db_cfg['return_labels'],
mode='clip',
clips_per_video=clips_per_video,
augm_per_clip=db_cfg['augm_per_clip'],
shuffle=True,
)
return db
def save_checkpoint(state, is_best, model_dir='.', filename=None):
if filename is None:
filename = '{}/checkpoint.pth.tar'.format(model_dir)
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, '{}/model_best.pth.tar'.format(model_dir))
class CheckpointManager(object):
def __init__(self, checkpoint_dir, rank=0):
self.checkpoint_dir = checkpoint_dir
self.rank = rank
self.best_metric = 0.
def save(self, epoch, filename=None, eval_metric=0., **kwargs):
if self.rank != 0:
return
is_best = False
if eval_metric > self.best_metric:
self.best_metric = eval_metric
is_best = True
state = {'epoch': epoch}
for k in kwargs:
state[k] = kwargs[k].state_dict()
if filename is None:
save_checkpoint(state=state, is_best=is_best, model_dir=self.checkpoint_dir)
else:
save_checkpoint(state=state, is_best=False, filename='{}/{}'.format(self.checkpoint_dir, filename))
def last_checkpoint_fn(self):
return '{}/checkpoint.pth.tar'.format(self.checkpoint_dir)
def best_checkpoint_fn(self):
return '{}/model_best.pth.tar'.format(self.checkpoint_dir)
def checkpoint_fn(self, last=False, best=False):
assert best or last
assert not (last and best)
if last:
return self.last_checkpoint_fn()
if best:
return self.best_checkpoint_fn()
def checkpoint_exists(self, last=False, best=False):
return os.path.isfile(self.checkpoint_fn(last, best))
def restore(self, fn=None, restore_last=False, restore_best=False, **kwargs):
checkpoint_fn = fn if fn is not None else self.checkpoint_fn(restore_last, restore_best)
ckp = torch.load(checkpoint_fn, map_location={'cuda:0': 'cpu'})
start_epoch = ckp['epoch']
for k in kwargs:
try:
kwargs[k].load_state_dict(ckp[k])
except RuntimeError:
torch.nn.DataParallel(kwargs[k]).load_state_dict(ckp[k])
return start_epoch
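# Illustrative usage (names assumed): resume training from the most recent checkpoint.
#   ckp_manager = CheckpointManager('./checkpoints', rank=args.rank)
#   start_epoch = ckp_manager.restore(restore_last=True, model=model, optimizer=optimizer)
#   ckp_manager.save(epoch, eval_metric=top1, model=model, optimizer=optimizer)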
class Logger(object):
def __init__(self, quiet=False, log_fn=None, rank=0, prefix=""):
self.rank = rank if rank is not None else 0
self.quiet = quiet
self.log_fn = log_fn
self.prefix = ""
if prefix:
self.prefix = prefix + ' | '
self.file_pointers = []
if self.rank == 0:
if self.quiet:
open(log_fn, 'w').close()
def add_line(self, content):
if self.rank == 0:
msg = self.prefix+content
if self.quiet:
fp = open(self.log_fn, 'a')
fp.write(msg+'\n')
fp.flush()
fp.close()
else:
print(msg)
sys.stdout.flush()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f', window_size=0):
self.name = name
self.fmt = fmt
self.window_size = window_size
self.reset()
def reset(self):
if self.window_size > 0:
self.q = deque(maxlen=self.window_size)
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
if self.window_size > 0:
self.q.append((val, n))
self.count = sum([n for v, n in self.q])
self.sum = sum([v * n for v, n in self.q])
else:
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
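# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the two accumulation
# modes of AverageMeter above -- a running average over everything seen so
# far, and a sliding-window average when window_size > 0.
def _average_meter_example():
    full = AverageMeter('Loss', fmt=':.4e')                       # running average
    recent = AverageMeter('Loss/50', fmt=':.4e', window_size=50)  # average of the last 50 updates
    for loss in [0.9, 0.7, 0.5]:
        full.update(loss, n=16)    # n is the batch size, used as the weight
        recent.update(loss, n=16)
    print(full)   # "Loss 5.0000e-01 (7.0000e-01)" -> current value and running average
    print(recent)
# ---------------------------------------------------------------------------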
class ProgressMeter(object):
def __init__(self, num_batches, meters, phase, epoch=None, logger=None, tb_writter=None):
self.batches_per_epoch = num_batches
self.batch_fmtstr = self._get_batch_fmtstr(epoch, num_batches)
self.meters = meters
self.phase = phase
self.epoch = epoch
self.logger = logger
self.tb_writter = tb_writter
def display(self, batch):
step = self.epoch * self.batches_per_epoch + batch
date = str(datetime.datetime.now())
entries = ['{} | {} {}'.format(date, self.phase, self.batch_fmtstr.format(batch))]
entries += [str(meter) for meter in self.meters]
if self.logger is None:
print('\t'.join(entries))
else:
self.logger.add_line('\t'.join(entries))
if self.tb_writter is not None:
for meter in self.meters:
self.tb_writter.add_scalar('{}/Batch-{}'.format(self.phase, meter.name), meter.val, step)
def _get_batch_fmtstr(self, epoch, num_batches):
num_digits = len(str(num_batches))
fmt = '{:' + str(num_digits) + 'd}'
epoch_str = '[{}]'.format(epoch) if epoch is not None else ''
return epoch_str+'[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)  # reshape() also handles a non-contiguous slice
res.append(correct_k.mul_(100.0 / batch_size))
return res
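# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): accuracy() on a toy
# batch of two samples over four classes.
def _accuracy_example():
    output = torch.tensor([[0.1, 0.60, 0.20, 0.10],   # top-1 prediction: class 1
                           [0.8, 0.05, 0.05, 0.10]])  # top-1: class 0, top-2: classes 0 and 3
    target = torch.tensor([1, 3])
    top1, top2 = accuracy(output, target, topk=(1, 2))
    # the first sample is right at top-1, the second only enters at top-2:
    print(top1.item(), top2.item())  # 50.0 100.0
# ---------------------------------------------------------------------------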
def parameter_description(model):
desc = ''
for n, p in model.named_parameters():
| desc += "{:70} | {:10} | {:30} | {}\n".format(
n, 'Trainable' if p.requires_grad else 'Frozen',
' x '.join([str(s) for s in p.size()]), str(np.prod(p.size()))) | conditional_block |
|
main_utils.py | _cfg['use_augmentation'],
num_frames=int(db_cfg['video_fps'] * db_cfg['video_clip_duration']),
pad_missing=True,
)
audio_transforms = [
preprocessing.AudioPrep(
mono=db_cfg['audio_input']=='mono',
duration=db_cfg['audio_clip_duration'],
augment=split_cfg['use_augmentation']),
preprocessing.LogMelSpectrogram(
db_cfg['audio_fps'],
n_mels=db_cfg['n_mels'],
n_fft=db_cfg['n_fft'],
hop_size=1. / db_cfg['spectrogram_fps'],
normalize=True)
]
clips_per_video = split_cfg['clips_per_video'] if 'clips_per_video' in split_cfg else 1
db = datasets.YT360(
subset=split_cfg['subset'],
full_res=db_cfg['full_res'],
sampling=db_cfg['sampling'],
return_video=True,
video_clip_duration=db_cfg['video_clip_duration'],
video_fps=db_cfg['video_fps'],
return_audio=True,
audio_clip_duration=db_cfg['audio_clip_duration'],
audio_fps=db_cfg['audio_fps'],
spect_fps=db_cfg['spectrogram_fps'],
joint_transform=joint_transform,
video_transform=video_transform,
audio_transform=audio_transforms,
max_offsync_augm=0.,
return_position=True,
return_labels=False,
mode='clip',
clips_per_video=clips_per_video,
augm_per_clip=db_cfg['augm_per_clip'] if db_cfg['use_temporal_augm'] else 1,
use_temporal_augm=db_cfg['use_temporal_augm'],
use_spatial_augm=db_cfg['use_spatial_augm'],
misalign=db_cfg['misalign'] if 'misalign' in db_cfg else False,
rotate_mode=db_cfg['rotate_mode'] if 'rotate_mode' in db_cfg else 'quat',
shuffle=True,
)
return db
def build_video_dataset(db_cfg, split_cfg):
from datasets import preprocessing
import datasets
video_transform = preprocessing.VideoPrep_MSC_CJ(
crop=(db_cfg['crop_size'], db_cfg['crop_size']),
augment=split_cfg['use_augmentation'],
num_frames=int(db_cfg['video_fps'] * db_cfg['video_clip_duration']),
pad_missing=True,
)
audio_transforms = [
preprocessing.AudioPrep(
mono=True,
duration=db_cfg['audio_clip_duration'],
augment=split_cfg['use_augmentation']),
preprocessing.LogMelSpectrogram(
db_cfg['audio_fps'],
n_mels=db_cfg['n_mels'],
n_fft=db_cfg['n_fft'],
hop_size=1. / db_cfg['spectrogram_fps'],
normalize=True)
]
if db_cfg['name'] == 'ucf-101':
dataset = datasets.UCF
elif db_cfg['name'] == 'hmdb':
dataset = datasets.HMDB
else:
raise ValueError('Unknown dataset')
clips_per_video = split_cfg['clips_per_video'] if 'clips_per_video' in split_cfg else 1
db = dataset(
subset=split_cfg['subset'],
full_res=db_cfg['full_res'],
return_video=True,
video_clip_duration=db_cfg['video_clip_duration'],
video_fps=db_cfg['video_fps'],
return_audio=True,
audio_clip_duration=db_cfg['audio_clip_duration'],
audio_fps=db_cfg['audio_fps'],
spect_fps=db_cfg['spectrogram_fps'],
video_transform=video_transform,
audio_transform=audio_transforms,
max_offsync_augm=0.5 if split_cfg['use_augmentation'] else 0,
return_labels=False if 'return_labels' not in db_cfg else db_cfg['return_labels'],
mode='clip',
clips_per_video=clips_per_video,
augm_per_clip=db_cfg['augm_per_clip'],
shuffle=True,
)
return db
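# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the configuration keys
# read by the two dataset builders above.  The values are placeholders, not
# the settings used in the original experiments.
_example_db_cfg = {
    'name': 'ucf-101',           # or 'hmdb'; 'yt360' selects build_360_dataset instead
    'full_res': False,
    'batch_size': 32,            # consumed by the dataloader builder elsewhere in this file
    'crop_size': 112,
    'video_clip_duration': 1.0,  # seconds
    'video_fps': 16,
    'audio_clip_duration': 2.0,
    'audio_fps': 24000,
    'spectrogram_fps': 64,
    'n_mels': 128,
    'n_fft': 512,
    'augm_per_clip': 1,
    # 'return_labels': True,     # optional; defaults to False above
}
_example_split_cfg = {
    'subset': 'train',
    'use_augmentation': True,
    'drop_last': True,           # consumed by the dataloader builder
    # 'clips_per_video': 1,      # optional
}
# ---------------------------------------------------------------------------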
def save_checkpoint(state, is_best, model_dir='.', filename=None):
if filename is None:
filename = '{}/checkpoint.pth.tar'.format(model_dir)
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, '{}/model_best.pth.tar'.format(model_dir))
class CheckpointManager(object):
def __init__(self, checkpoint_dir, rank=0):
self.checkpoint_dir = checkpoint_dir
self.rank = rank
self.best_metric = 0.
def save(self, epoch, filename=None, eval_metric=0., **kwargs):
if self.rank != 0:
return
is_best = False
if eval_metric > self.best_metric:
self.best_metric = eval_metric
is_best = True
state = {'epoch': epoch}
for k in kwargs:
state[k] = kwargs[k].state_dict()
if filename is None:
save_checkpoint(state=state, is_best=is_best, model_dir=self.checkpoint_dir)
else:
save_checkpoint(state=state, is_best=False, filename='{}/{}'.format(self.checkpoint_dir, filename))
def last_checkpoint_fn(self):
return '{}/checkpoint.pth.tar'.format(self.checkpoint_dir)
def best_checkpoint_fn(self):
return '{}/model_best.pth.tar'.format(self.checkpoint_dir)
def checkpoint_fn(self, last=False, best=False):
assert best or last
assert not (last and best)
if last:
return self.last_checkpoint_fn()
if best:
return self.best_checkpoint_fn()
def checkpoint_exists(self, last=False, best=False):
return os.path.isfile(self.checkpoint_fn(last, best))
def restore(self, fn=None, restore_last=False, restore_best=False, **kwargs):
checkpoint_fn = fn if fn is not None else self.checkpoint_fn(restore_last, restore_best)
ckp = torch.load(checkpoint_fn, map_location={'cuda:0': 'cpu'})
start_epoch = ckp['epoch']
for k in kwargs:
try:
kwargs[k].load_state_dict(ckp[k])
except RuntimeError:
torch.nn.DataParallel(kwargs[k]).load_state_dict(ckp[k])
return start_epoch
class Logger(object):
def __init__(self, quiet=False, log_fn=None, rank=0, prefix=""):
self.rank = rank if rank is not None else 0
self.quiet = quiet
self.log_fn = log_fn
self.prefix = ""
if prefix:
self.prefix = prefix + ' | '
self.file_pointers = []
if self.rank == 0:
if self.quiet:
open(log_fn, 'w').close()
def add_line(self, content):
if self.rank == 0:
msg = self.prefix+content
if self.quiet:
fp = open(self.log_fn, 'a')
fp.write(msg+'\n')
fp.flush()
fp.close()
else:
print(msg)
sys.stdout.flush()
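# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): Logger above prints
# when quiet=False and appends to log_fn when quiet=True; non-zero ranks stay
# silent so only one process writes in distributed runs.
def _logger_example(log_dir='.'):
    logger = Logger(quiet=False, log_fn='{}/train.log'.format(log_dir), rank=0, prefix='Epoch 0')
    logger.add_line('starting training')     # printed as "Epoch 0 | starting training"
    silent = Logger(quiet=False, log_fn=None, rank=1)
    silent.add_line('this line is dropped')  # rank != 0, so add_line is a no-op
# ---------------------------------------------------------------------------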
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f', window_size=0):
self.name = name
self.fmt = fmt
self.window_size = window_size
self.reset()
def reset(self):
if self.window_size > 0:
self.q = deque(maxlen=self.window_size)
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
if self.window_size > 0:
self.q.append((val, n))
self.count = sum([n for v, n in self.q])
self.sum = sum([v * n for v, n in self.q])
else:
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, phase, epoch=None, logger=None, tb_writter=None):
self.batches_per_epoch = num_batches
self.batch_fmtstr = self._get_batch_fmtstr(epoch, num_batches)
self.meters = meters
self.phase = phase
self.epoch = epoch
self.logger = logger
self.tb_writter = tb_writter
def display(self, batch):
step = self.epoch * self.batches_per_epoch + batch
date = str(datetime.datetime.now())
entries = ['{} | {} {}'.format(date, self.phase, self.batch_fmtstr.format(batch))]
entries += [str(meter) for meter in self.meters]
if self.logger is None:
print('\t'.join(entries))
else:
self.logger.add_line('\t'.join(entries))
if self.tb_writter is not None:
for meter in self.meters:
self.tb_writter.add_scalar('{}/Batch-{}'.format(self.phase, meter.name), meter.val, step)
def _get_batch_fmtstr(self, epoch, num_batches):
num_digits = len(str(num_batches))
fmt = '{:' + str(num_digits) + 'd}'
epoch_str = '[{}]'.format(epoch) if epoch is not None else ''
return epoch_str+'[' + fmt + '/' + fmt.format(num_batches) + ']'
def | accuracy | identifier_name |
|
main_utils.py | as data
import torch.utils.data.distributed
if db_cfg['name'] == 'yt360':
db = build_360_dataset(db_cfg, split_cfg)
else:
db = build_video_dataset(db_cfg, split_cfg)
if distributed:
sampler = torch.utils.data.distributed.DistributedSampler(db)
else:
sampler = None
loader = torch.utils.data.DataLoader(
db,
batch_size=db_cfg['batch_size'],
shuffle=False,
drop_last=split_cfg['drop_last'],
num_workers=num_workers,
pin_memory=True,
sampler=sampler)
return loader
def build_360_dataset(db_cfg, split_cfg):
from datasets import preprocessing
import datasets
assert db_cfg['name'] == 'yt360'
if '360to2D' in db_cfg and db_cfg['360to2D']:
joint_transform = preprocessing.Spatial2Planar(
size=(db_cfg['frame_size'], db_cfg['frame_size'])
)
video_transform = preprocessing.VideoPrep_MSC_CJ(
crop=(db_cfg['crop_size'], db_cfg['crop_size']),
augment=split_cfg['use_augmentation'],
num_frames=int(db_cfg['video_fps'] * db_cfg['video_clip_duration']),
pad_missing=True,
)
else:
if 'horizon_only' not in db_cfg:
db_cfg['horizon_only'] = False
if 'crop_margin' not in db_cfg:
db_cfg['crop_margin'] = 0.
joint_transform = preprocessing.SpatialVideoCropTool(
size=(db_cfg['crop_size'], db_cfg['crop_size']),
hfov_lims=db_cfg['hfov_lims'],
horizon_only=db_cfg['horizon_only'],
margin=db_cfg['crop_margin'],
pos=db_cfg['crop_method'],
audio_input=db_cfg['audio_input'],
num_crops=1 if db_cfg['use_temporal_augm'] else db_cfg['augm_per_clip'],
)
video_transform = preprocessing.VideoPrep_CJ(
augment=split_cfg['use_augmentation'],
num_frames=int(db_cfg['video_fps'] * db_cfg['video_clip_duration']),
pad_missing=True,
)
audio_transforms = [
preprocessing.AudioPrep(
mono=db_cfg['audio_input']=='mono',
duration=db_cfg['audio_clip_duration'],
augment=split_cfg['use_augmentation']),
preprocessing.LogMelSpectrogram(
db_cfg['audio_fps'],
n_mels=db_cfg['n_mels'],
n_fft=db_cfg['n_fft'],
hop_size=1. / db_cfg['spectrogram_fps'],
normalize=True)
]
clips_per_video = split_cfg['clips_per_video'] if 'clips_per_video' in split_cfg else 1
db = datasets.YT360(
subset=split_cfg['subset'],
full_res=db_cfg['full_res'],
sampling=db_cfg['sampling'],
return_video=True,
video_clip_duration=db_cfg['video_clip_duration'],
video_fps=db_cfg['video_fps'],
return_audio=True,
audio_clip_duration=db_cfg['audio_clip_duration'],
audio_fps=db_cfg['audio_fps'],
spect_fps=db_cfg['spectrogram_fps'],
joint_transform=joint_transform,
video_transform=video_transform,
audio_transform=audio_transforms,
max_offsync_augm=0.,
return_position=True,
return_labels=False,
mode='clip',
clips_per_video=clips_per_video,
augm_per_clip=db_cfg['augm_per_clip'] if db_cfg['use_temporal_augm'] else 1,
use_temporal_augm=db_cfg['use_temporal_augm'],
use_spatial_augm=db_cfg['use_spatial_augm'],
misalign=db_cfg['misalign'] if 'misalign' in db_cfg else False,
rotate_mode=db_cfg['rotate_mode'] if 'rotate_mode' in db_cfg else 'quat',
shuffle=True,
)
return db
def build_video_dataset(db_cfg, split_cfg):
from datasets import preprocessing
import datasets
video_transform = preprocessing.VideoPrep_MSC_CJ(
crop=(db_cfg['crop_size'], db_cfg['crop_size']),
augment=split_cfg['use_augmentation'],
num_frames=int(db_cfg['video_fps'] * db_cfg['video_clip_duration']),
pad_missing=True,
)
audio_transforms = [
preprocessing.AudioPrep(
mono=True,
duration=db_cfg['audio_clip_duration'],
augment=split_cfg['use_augmentation']),
preprocessing.LogMelSpectrogram(
db_cfg['audio_fps'],
n_mels=db_cfg['n_mels'],
n_fft=db_cfg['n_fft'],
hop_size=1. / db_cfg['spectrogram_fps'],
normalize=True)
]
if db_cfg['name'] == 'ucf-101':
dataset = datasets.UCF
elif db_cfg['name'] == 'hmdb':
dataset = datasets.HMDB
else:
raise ValueError('Unknown dataset')
clips_per_video = split_cfg['clips_per_video'] if 'clips_per_video' in split_cfg else 1
db = dataset(
subset=split_cfg['subset'],
full_res=db_cfg['full_res'],
return_video=True,
video_clip_duration=db_cfg['video_clip_duration'],
video_fps=db_cfg['video_fps'],
return_audio=True,
audio_clip_duration=db_cfg['audio_clip_duration'],
audio_fps=db_cfg['audio_fps'],
spect_fps=db_cfg['spectrogram_fps'],
video_transform=video_transform,
audio_transform=audio_transforms,
max_offsync_augm=0.5 if split_cfg['use_augmentation'] else 0,
return_labels=False if 'return_labels' not in db_cfg else db_cfg['return_labels'],
mode='clip',
clips_per_video=clips_per_video,
augm_per_clip=db_cfg['augm_per_clip'],
shuffle=True,
)
return db
def save_checkpoint(state, is_best, model_dir='.', filename=None):
if filename is None:
filename = '{}/checkpoint.pth.tar'.format(model_dir)
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, '{}/model_best.pth.tar'.format(model_dir))
class CheckpointManager(object):
def __init__(self, checkpoint_dir, rank=0):
self.checkpoint_dir = checkpoint_dir
self.rank = rank
self.best_metric = 0.
def save(self, epoch, filename=None, eval_metric=0., **kwargs):
if self.rank != 0:
return
is_best = False
if eval_metric > self.best_metric:
self.best_metric = eval_metric
is_best = True
state = {'epoch': epoch}
for k in kwargs:
state[k] = kwargs[k].state_dict()
if filename is None:
save_checkpoint(state=state, is_best=is_best, model_dir=self.checkpoint_dir)
else:
save_checkpoint(state=state, is_best=False, filename='{}/{}'.format(self.checkpoint_dir, filename))
def last_checkpoint_fn(self):
return '{}/checkpoint.pth.tar'.format(self.checkpoint_dir)
def best_checkpoint_fn(self):
return '{}/model_best.pth.tar'.format(self.checkpoint_dir)
def checkpoint_fn(self, last=False, best=False):
assert best or last
assert not (last and best)
if last:
return self.last_checkpoint_fn()
if best:
return self.best_checkpoint_fn()
def checkpoint_exists(self, last=False, best=False):
return os.path.isfile(self.checkpoint_fn(last, best))
def restore(self, fn=None, restore_last=False, restore_best=False, **kwargs):
checkpoint_fn = fn if fn is not None else self.checkpoint_fn(restore_last, restore_best)
ckp = torch.load(checkpoint_fn, map_location={'cuda:0': 'cpu'})
start_epoch = ckp['epoch']
for k in kwargs:
try:
kwargs[k].load_state_dict(ckp[k])
except RuntimeError:
torch.nn.DataParallel(kwargs[k]).load_state_dict(ckp[k])
return start_epoch
class Logger(object):
def __init__(self, quiet=False, log_fn=None, rank=0, prefix=""):
self.rank = rank if rank is not None else 0
self.quiet = quiet
self.log_fn = log_fn
self.prefix = ""
if prefix:
self.prefix = prefix + ' | '
self.file_pointers = []
if self.rank == 0:
if self.quiet:
open(log_fn, 'w').close()
def add_line(self, content):
if self.rank == 0:
msg = self.prefix+content
if self.quiet:
fp = open(self.log_fn, 'a')
fp.write(msg+'\n')
fp.flush()
fp.close()
else:
print(msg)
sys.stdout.flush()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f', window_size=0):
self.name = name
self.fmt = fmt
self.window_size = window_size
self.reset()
def reset(self):
| if self.window_size > 0:
self.q = deque(maxlen=self.window_size)
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0 | identifier_body |
|
main.go | mode)"`
OutputStripFileExt string ` long:"output-strip-ext" description:"strip file extension from written files (also available in multi file mode)"`
Once string ` long:"once" description:"replace search term only once in a file, keep duplicates (keep, default) or remove them (unique)" optional:"true" optional-value:"keep" choice:"keep" choice:"unique"`
Regex bool ` long:"regex" description:"treat pattern as regex"`
RegexBackref bool ` long:"regex-backrefs" description:"enable backreferences in replace term"`
RegexPosix bool ` long:"regex-posix" description:"parse regex term as POSIX regex"`
Path string ` long:"path" description:"use files in this path"`
PathPattern string ` long:"path-pattern" description:"file pattern (* for wildcard, only basename of file)"`
PathRegex string ` long:"path-regex" description:"file pattern (regex, full path)"`
IgnoreEmpty bool ` long:"ignore-empty" description:"ignore empty file list, otherwise this will result in an error"`
Verbose bool `short:"v" long:"verbose" description:"verbose mode"`
DryRun bool ` long:"dry-run" description:"dry run mode"`
ShowVersion bool `short:"V" long:"version" description:"show version and exit"`
ShowOnlyVersion bool ` long:"dumpversion" description:"show only version number and exit"`
ShowHelp bool `short:"h" long:"help" description:"show this help message"`
}
var pathFilterDirectories = []string{"autom4te.cache", "blib", "_build", ".bzr", ".cdv", "cover_db", "CVS", "_darcs", "~.dep", "~.dot", ".git", ".hg", "~.nib", ".pc", "~.plst", "RCS", "SCCS", "_sgbak", ".svn", "_obj", ".idea"}
// Apply changesets to file
func applyChangesetsToFile(fileitem fileitem, changesets []changeset) (string, bool, error) {
var (
output string = ""
status bool = true
)
// try open file
file, err := os.Open(fileitem.Path)
if err != nil {
return output, false, err
}
writeBufferToFile := false
var buffer bytes.Buffer
r := bufio.NewReader(file)
line, e := Readln(r)
for e == nil {
newLine, lineChanged, skipLine := applyChangesetsToLine(line, changesets)
if lineChanged || skipLine {
writeBufferToFile = true
}
if !skipLine {
buffer.WriteString(newLine + "\n")
}
line, e = Readln(r)
}
file.Close()
// --mode=lineinfile
if opts.ModeIsLineInFile {
lifBuffer, lifStatus := handleLineInFile(changesets, buffer)
if lifStatus {
buffer.Reset()
buffer.WriteString(lifBuffer.String())
writeBufferToFile = lifStatus
}
}
// --output
// --output-strip-ext
// enforcing writing of file (creating new file)
if opts.Output != "" || opts.OutputStripFileExt != "" {
writeBufferToFile = true
}
if writeBufferToFile {
output, status = writeContentToFile(fileitem, buffer)
} else {
output = fmt.Sprintf("%s no match", fileitem.Path)
}
return output, status, err
}
// Apply template to file
func applyTemplateToFile(fileitem fileitem, changesets []changeset) (string, bool, error) {
// try open file
buffer, err := os.ReadFile(fileitem.Path)
if err != nil {
return "", false, err
}
content := parseContentAsTemplate(string(buffer), changesets)
output, status := writeContentToFile(fileitem, content)
return output, status, err
}
func applyChangesetsToLine(line string, changesets []changeset) (string, bool, bool) {
changed := false
skipLine := false
for i, changeset := range changesets {
// --once, only do changeset once if already applied to file
if opts.Once != "" && changeset.MatchFound {
// --once=unique, skip matching lines
if opts.Once == "unique" && searchMatch(line, changeset) {
// matching line, not writing to buffer as requested
skipLine = true
changed = true
break
}
} else {
// search and replace
if searchMatch(line, changeset) {
// --mode=line or --mode=lineinfile
if opts.ModeIsReplaceLine || opts.ModeIsLineInFile {
if opts.RegexBackref {
// get match
line = string(changeset.Search.Find([]byte(line)))
// replace regex backrefs in match
line = changeset.Search.ReplaceAllString(line, changeset.Replace)
} else {
// replace whole line with replace term
line = changeset.Replace
}
} else {
// replace only term inside line
line = replaceText(line, changeset)
}
changesets[i].MatchFound = true
changed = true
}
}
}
return line, changed, skipLine
}
// Build search term
// Compiles regexp if regexp is used
func buildSearchTerm(term string) *regexp.Regexp {
var ret *regexp.Regexp
var regex string
// --regex
if opts.Regex {
// use search term as regex
regex = term
} else {
// use search term as normal string, escape it for regex usage
regex = regexp.QuoteMeta(term)
}
// --ignore-case
if opts.CaseInsensitive {
regex = "(?i:" + regex + ")"
}
// --verbose
if opts.Verbose {
logMessage(fmt.Sprintf("Using regular expression: %s", regex))
}
// --regex-posix
if opts.RegexPosix {
ret = regexp.MustCompilePOSIX(regex)
} else {
ret = regexp.MustCompile(regex)
}
return ret
}
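// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original tool): how buildSearchTerm
// behaves for the two main flag combinations.  It mutates the global opts the
// same way the argument parser would; the terms are made-up examples.
func exampleBuildSearchTerm() {
	opts.Regex = false
	opts.CaseInsensitive = true
	re := buildSearchTerm("foo.*")
	fmt.Println(re.MatchString("FOO.*")) // true: term was QuoteMeta-escaped, matched case-insensitively
	opts.Regex = true
	re = buildSearchTerm("foo.*")
	fmt.Println(re.MatchString("FOObar")) // true: term is now treated as a real regular expression
}
// ---------------------------------------------------------------------------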
// handle special cli options
// eg. --help
//
// --version
// --path
// --mode=...
func handleSpecialCliOptions(args []string) {
// --dumpversion
if opts.ShowOnlyVersion {
fmt.Println(gitTag)
os.Exit(0)
}
// --version
if opts.ShowVersion {
fmt.Printf("go-replace version %s (%s)\n", gitTag, gitCommit)
fmt.Printf("Copyright (C) 2022 %s\n", Author)
os.Exit(0)
}
// --help
if opts.ShowHelp {
argparser.WriteHelp(os.Stdout)
os.Exit(0)
}
// --mode
switch mode := opts.Mode; mode {
case "replace":
opts.ModeIsReplaceMatch = true
opts.ModeIsReplaceLine = false
opts.ModeIsLineInFile = false
opts.ModeIsTemplate = false
case "line":
opts.ModeIsReplaceMatch = false
opts.ModeIsReplaceLine = true
opts.ModeIsLineInFile = false
opts.ModeIsTemplate = false
case "lineinfile":
opts.ModeIsReplaceMatch = false
opts.ModeIsReplaceLine = false
opts.ModeIsLineInFile = true
opts.ModeIsTemplate = false
case "template":
opts.ModeIsReplaceMatch = false
opts.ModeIsReplaceLine = false
opts.ModeIsLineInFile = false
opts.ModeIsTemplate = true
}
// --output
if opts.Output != "" && len(args) > 1 {
logFatalErrorAndExit(errors.New("Only one file is allowed when using --output"), 1)
}
if opts.LineinfileBefore != "" || opts.LineinfileAfter != "" {
if !opts.ModeIsLineInFile {
logFatalErrorAndExit(errors.New("--lineinfile-after and --lineinfile-before only valid in --mode=lineinfile"), 1)
}
if opts.LineinfileBefore != "" && opts.LineinfileAfter != "" {
logFatalErrorAndExit(errors.New("Only --lineinfile-after or --lineinfile-before is allowed in --mode=lineinfile"), 1)
}
}
}
func actionProcessStdinReplace(changesets []changeset) int {
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
line := scanner.Text()
newLine, _, skipLine := applyChangesetsToLine(line, changesets)
if !skipLine {
fmt.Println(newLine)
}
}
return 0
}
func | (changesets []changeset) int {
var buffer bytes.Buffer
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
buffer.WriteString(scanner.Text() + "\n")
}
content := parseContentAsTemplate(buffer.String(), changesets)
fmt.Print(content.String())
return 0
}
func actionProcessFiles(changesets []changeset, fileitems []fileitem) int {
// check if there is at least one file to process
if len(fileitems | actionProcessStdinTemplate | identifier_name |
main.go | mode)"`
OutputStripFileExt string ` long:"output-strip-ext" description:"strip file extension from written files (also available in multi file mode)"`
Once string ` long:"once" description:"replace search term only once in a file, keep duplicates (keep, default) or remove them (unique)" optional:"true" optional-value:"keep" choice:"keep" choice:"unique"`
Regex bool ` long:"regex" description:"treat pattern as regex"`
RegexBackref bool ` long:"regex-backrefs" description:"enable backreferences in replace term"`
RegexPosix bool ` long:"regex-posix" description:"parse regex term as POSIX regex"`
Path string ` long:"path" description:"use files in this path"`
PathPattern string ` long:"path-pattern" description:"file pattern (* for wildcard, only basename of file)"`
PathRegex string ` long:"path-regex" description:"file pattern (regex, full path)"`
IgnoreEmpty bool ` long:"ignore-empty" description:"ignore empty file list, otherwise this will result in an error"`
Verbose bool `short:"v" long:"verbose" description:"verbose mode"`
DryRun bool ` long:"dry-run" description:"dry run mode"`
ShowVersion bool `short:"V" long:"version" description:"show version and exit"`
ShowOnlyVersion bool ` long:"dumpversion" description:"show only version number and exit"`
ShowHelp bool `short:"h" long:"help" description:"show this help message"`
}
var pathFilterDirectories = []string{"autom4te.cache", "blib", "_build", ".bzr", ".cdv", "cover_db", "CVS", "_darcs", "~.dep", "~.dot", ".git", ".hg", "~.nib", ".pc", "~.plst", "RCS", "SCCS", "_sgbak", ".svn", "_obj", ".idea"}
// Apply changesets to file
func applyChangesetsToFile(fileitem fileitem, changesets []changeset) (string, bool, error) {
var (
output string = ""
status bool = true
)
// try open file
file, err := os.Open(fileitem.Path)
if err != nil {
return output, false, err
}
writeBufferToFile := false
var buffer bytes.Buffer
r := bufio.NewReader(file)
line, e := Readln(r)
for e == nil {
newLine, lineChanged, skipLine := applyChangesetsToLine(line, changesets)
if lineChanged || skipLine {
writeBufferToFile = true
}
if !skipLine {
buffer.WriteString(newLine + "\n")
}
line, e = Readln(r)
}
file.Close()
// --mode=lineinfile
if opts.ModeIsLineInFile {
lifBuffer, lifStatus := handleLineInFile(changesets, buffer)
if lifStatus {
buffer.Reset()
buffer.WriteString(lifBuffer.String())
writeBufferToFile = lifStatus
}
}
// --output
// --output-strip-ext
// enforcing writing of file (creating new file)
if opts.Output != "" || opts.OutputStripFileExt != "" {
writeBufferToFile = true
}
if writeBufferToFile {
output, status = writeContentToFile(fileitem, buffer)
} else {
output = fmt.Sprintf("%s no match", fileitem.Path)
}
return output, status, err
}
// Apply template to file
func applyTemplateToFile(fileitem fileitem, changesets []changeset) (string, bool, error) {
// try open file
buffer, err := os.ReadFile(fileitem.Path)
if err != nil |
content := parseContentAsTemplate(string(buffer), changesets)
output, status := writeContentToFile(fileitem, content)
return output, status, err
}
func applyChangesetsToLine(line string, changesets []changeset) (string, bool, bool) {
changed := false
skipLine := false
for i, changeset := range changesets {
// --once, only do changeset once if already applied to file
if opts.Once != "" && changeset.MatchFound {
// --once=unique, skip matching lines
if opts.Once == "unique" && searchMatch(line, changeset) {
// matching line, not writing to buffer as requested
skipLine = true
changed = true
break
}
} else {
// search and replace
if searchMatch(line, changeset) {
// --mode=line or --mode=lineinfile
if opts.ModeIsReplaceLine || opts.ModeIsLineInFile {
if opts.RegexBackref {
// get match
line = string(changeset.Search.Find([]byte(line)))
// replace regex backrefs in match
line = changeset.Search.ReplaceAllString(line, changeset.Replace)
} else {
// replace whole line with replace term
line = changeset.Replace
}
} else {
// replace only term inside line
line = replaceText(line, changeset)
}
changesets[i].MatchFound = true
changed = true
}
}
}
return line, changed, skipLine
}
// Build search term
// Compiles regexp if regexp is used
func buildSearchTerm(term string) *regexp.Regexp {
var ret *regexp.Regexp
var regex string
// --regex
if opts.Regex {
// use search term as regex
regex = term
} else {
// use search term as normal string, escape it for regex usage
regex = regexp.QuoteMeta(term)
}
// --ignore-case
if opts.CaseInsensitive {
regex = "(?i:" + regex + ")"
}
// --verbose
if opts.Verbose {
logMessage(fmt.Sprintf("Using regular expression: %s", regex))
}
// --regex-posix
if opts.RegexPosix {
ret = regexp.MustCompilePOSIX(regex)
} else {
ret = regexp.MustCompile(regex)
}
return ret
}
// handle special cli options
// eg. --help
//
// --version
// --path
// --mode=...
func handleSpecialCliOptions(args []string) {
// --dumpversion
if opts.ShowOnlyVersion {
fmt.Println(gitTag)
os.Exit(0)
}
// --version
if opts.ShowVersion {
fmt.Printf("go-replace version %s (%s)\n", gitTag, gitCommit)
fmt.Printf("Copyright (C) 2022 %s\n", Author)
os.Exit(0)
}
// --help
if opts.ShowHelp {
argparser.WriteHelp(os.Stdout)
os.Exit(0)
}
// --mode
switch mode := opts.Mode; mode {
case "replace":
opts.ModeIsReplaceMatch = true
opts.ModeIsReplaceLine = false
opts.ModeIsLineInFile = false
opts.ModeIsTemplate = false
case "line":
opts.ModeIsReplaceMatch = false
opts.ModeIsReplaceLine = true
opts.ModeIsLineInFile = false
opts.ModeIsTemplate = false
case "lineinfile":
opts.ModeIsReplaceMatch = false
opts.ModeIsReplaceLine = false
opts.ModeIsLineInFile = true
opts.ModeIsTemplate = false
case "template":
opts.ModeIsReplaceMatch = false
opts.ModeIsReplaceLine = false
opts.ModeIsLineInFile = false
opts.ModeIsTemplate = true
}
// --output
if opts.Output != "" && len(args) > 1 {
logFatalErrorAndExit(errors.New("Only one file is allowed when using --output"), 1)
}
if opts.LineinfileBefore != "" || opts.LineinfileAfter != "" {
if !opts.ModeIsLineInFile {
logFatalErrorAndExit(errors.New("--lineinfile-after and --lineinfile-before only valid in --mode=lineinfile"), 1)
}
if opts.LineinfileBefore != "" && opts.LineinfileAfter != "" {
logFatalErrorAndExit(errors.New("Only --lineinfile-after or --lineinfile-before is allowed in --mode=lineinfile"), 1)
}
}
}
func actionProcessStdinReplace(changesets []changeset) int {
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
line := scanner.Text()
newLine, _, skipLine := applyChangesetsToLine(line, changesets)
if !skipLine {
fmt.Println(newLine)
}
}
return 0
}
func actionProcessStdinTemplate(changesets []changeset) int {
var buffer bytes.Buffer
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
buffer.WriteString(scanner.Text() + "\n")
}
content := parseContentAsTemplate(buffer.String(), changesets)
fmt.Print(content.String())
return 0
}
func actionProcessFiles(changesets []changeset, fileitems []fileitem) int {
// check if there is at least one file to process
if len(file | {
return "", false, err
} | conditional_block |
main.go | file mode)"`
OutputStripFileExt string ` long:"output-strip-ext" description:"strip file extension from written files (also available in multi file mode)"`
Once string ` long:"once" description:"replace search term only once in a file, keep duplicates (keep, default) or remove them (unique)" optional:"true" optional-value:"keep" choice:"keep" choice:"unique"`
Regex bool ` long:"regex" description:"treat pattern as regex"`
RegexBackref bool ` long:"regex-backrefs" description:"enable backreferences in replace term"`
RegexPosix bool ` long:"regex-posix" description:"parse regex term as POSIX regex"`
Path string ` long:"path" description:"use files in this path"`
PathPattern string ` long:"path-pattern" description:"file pattern (* for wildcard, only basename of file)"`
PathRegex string ` long:"path-regex" description:"file pattern (regex, full path)"`
IgnoreEmpty bool ` long:"ignore-empty" description:"ignore empty file list, otherwise this will result in an error"`
Verbose bool `short:"v" long:"verbose" description:"verbose mode"`
DryRun bool ` long:"dry-run" description:"dry run mode"`
ShowVersion bool `short:"V" long:"version" description:"show version and exit"`
ShowOnlyVersion bool ` long:"dumpversion" description:"show only version number and exit"`
ShowHelp bool `short:"h" long:"help" description:"show this help message"`
}
var pathFilterDirectories = []string{"autom4te.cache", "blib", "_build", ".bzr", ".cdv", "cover_db", "CVS", "_darcs", "~.dep", "~.dot", ".git", ".hg", "~.nib", ".pc", "~.plst", "RCS", "SCCS", "_sgbak", ".svn", "_obj", ".idea"}
// Apply changesets to file
func applyChangesetsToFile(fileitem fileitem, changesets []changeset) (string, bool, error) {
var (
output string = ""
status bool = true
)
// try open file
file, err := os.Open(fileitem.Path)
if err != nil {
return output, false, err
}
writeBufferToFile := false
var buffer bytes.Buffer
r := bufio.NewReader(file)
line, e := Readln(r)
for e == nil {
newLine, lineChanged, skipLine := applyChangesetsToLine(line, changesets)
if lineChanged || skipLine {
writeBufferToFile = true
}
if !skipLine {
buffer.WriteString(newLine + "\n")
}
line, e = Readln(r)
}
file.Close()
// --mode=lineinfile
if opts.ModeIsLineInFile {
lifBuffer, lifStatus := handleLineInFile(changesets, buffer)
if lifStatus {
buffer.Reset()
buffer.WriteString(lifBuffer.String())
writeBufferToFile = lifStatus
}
}
// --output
// --output-strip-ext
// enforcing writing of file (creating new file)
if opts.Output != "" || opts.OutputStripFileExt != "" {
writeBufferToFile = true
}
if writeBufferToFile {
output, status = writeContentToFile(fileitem, buffer)
} else {
output = fmt.Sprintf("%s no match", fileitem.Path)
}
return output, status, err
}
// Apply template to file
func applyTemplateToFile(fileitem fileitem, changesets []changeset) (string, bool, error) {
// try open file
buffer, err := os.ReadFile(fileitem.Path)
if err != nil {
return "", false, err
}
content := parseContentAsTemplate(string(buffer), changesets)
output, status := writeContentToFile(fileitem, content)
return output, status, err
}
func applyChangesetsToLine(line string, changesets []changeset) (string, bool, bool) {
changed := false
skipLine := false
for i, changeset := range changesets {
// --once, only do changeset once if already applied to file
if opts.Once != "" && changeset.MatchFound {
// --once=unique, skip matching lines
if opts.Once == "unique" && searchMatch(line, changeset) {
// matching line, not writing to buffer as requested
skipLine = true
changed = true
break
}
} else {
// search and replace
if searchMatch(line, changeset) {
// --mode=line or --mode=lineinfile
if opts.ModeIsReplaceLine || opts.ModeIsLineInFile {
if opts.RegexBackref {
// get match
line = string(changeset.Search.Find([]byte(line)))
// replace regex backrefs in match
line = changeset.Search.ReplaceAllString(line, changeset.Replace)
} else {
// replace whole line with replace term
line = changeset.Replace
}
} else {
// replace only term inside line
line = replaceText(line, changeset)
}
changesets[i].MatchFound = true
changed = true
}
}
}
return line, changed, skipLine
}
// Build search term
// Compiles regexp if regexp is used
func buildSearchTerm(term string) *regexp.Regexp {
var ret *regexp.Regexp
var regex string
// --regex
if opts.Regex {
// use search term as regex
regex = term
} else {
// use search term as normal string, escape it for regex usage
regex = regexp.QuoteMeta(term)
}
// --ignore-case | regex = "(?i:" + regex + ")"
}
// --verbose
if opts.Verbose {
logMessage(fmt.Sprintf("Using regular expression: %s", regex))
}
// --regex-posix
if opts.RegexPosix {
ret = regexp.MustCompilePOSIX(regex)
} else {
ret = regexp.MustCompile(regex)
}
return ret
}
// handle special cli options
// eg. --help
//
// --version
// --path
// --mode=...
func handleSpecialCliOptions(args []string) {
// --dumpversion
if opts.ShowOnlyVersion {
fmt.Println(gitTag)
os.Exit(0)
}
// --version
if opts.ShowVersion {
fmt.Printf("go-replace version %s (%s)\n", gitTag, gitCommit)
fmt.Printf("Copyright (C) 2022 %s\n", Author)
os.Exit(0)
}
// --help
if opts.ShowHelp {
argparser.WriteHelp(os.Stdout)
os.Exit(0)
}
// --mode
switch mode := opts.Mode; mode {
case "replace":
opts.ModeIsReplaceMatch = true
opts.ModeIsReplaceLine = false
opts.ModeIsLineInFile = false
opts.ModeIsTemplate = false
case "line":
opts.ModeIsReplaceMatch = false
opts.ModeIsReplaceLine = true
opts.ModeIsLineInFile = false
opts.ModeIsTemplate = false
case "lineinfile":
opts.ModeIsReplaceMatch = false
opts.ModeIsReplaceLine = false
opts.ModeIsLineInFile = true
opts.ModeIsTemplate = false
case "template":
opts.ModeIsReplaceMatch = false
opts.ModeIsReplaceLine = false
opts.ModeIsLineInFile = false
opts.ModeIsTemplate = true
}
// --output
if opts.Output != "" && len(args) > 1 {
logFatalErrorAndExit(errors.New("Only one file is allowed when using --output"), 1)
}
if opts.LineinfileBefore != "" || opts.LineinfileAfter != "" {
if !opts.ModeIsLineInFile {
logFatalErrorAndExit(errors.New("--lineinfile-after and --lineinfile-before only valid in --mode=lineinfile"), 1)
}
if opts.LineinfileBefore != "" && opts.LineinfileAfter != "" {
logFatalErrorAndExit(errors.New("Only --lineinfile-after or --lineinfile-before is allowed in --mode=lineinfile"), 1)
}
}
}
func actionProcessStdinReplace(changesets []changeset) int {
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
line := scanner.Text()
newLine, _, skipLine := applyChangesetsToLine(line, changesets)
if !skipLine {
fmt.Println(newLine)
}
}
return 0
}
func actionProcessStdinTemplate(changesets []changeset) int {
var buffer bytes.Buffer
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
buffer.WriteString(scanner.Text() + "\n")
}
content := parseContentAsTemplate(buffer.String(), changesets)
fmt.Print(content.String())
return 0
}
func actionProcessFiles(changesets []changeset, fileitems []fileitem) int {
// check if there is at least one file to process
if len(fileitems) | if opts.CaseInsensitive { | random_line_split |
main.go | mode)"`
OutputStripFileExt string ` long:"output-strip-ext" description:"strip file extension from written files (also available in multi file mode)"`
Once string ` long:"once" description:"replace search term only once in a file, keep duplicates (keep, default) or remove them (unique)" optional:"true" optional-value:"keep" choice:"keep" choice:"unique"`
Regex bool ` long:"regex" description:"treat pattern as regex"`
RegexBackref bool ` long:"regex-backrefs" description:"enable backreferences in replace term"`
RegexPosix bool ` long:"regex-posix" description:"parse regex term as POSIX regex"`
Path string ` long:"path" description:"use files in this path"`
PathPattern string ` long:"path-pattern" description:"file pattern (* for wildcard, only basename of file)"`
PathRegex string ` long:"path-regex" description:"file pattern (regex, full path)"`
IgnoreEmpty bool ` long:"ignore-empty" description:"ignore empty file list, otherwise this will result in an error"`
Verbose bool `short:"v" long:"verbose" description:"verbose mode"`
DryRun bool ` long:"dry-run" description:"dry run mode"`
ShowVersion bool `short:"V" long:"version" description:"show version and exit"`
ShowOnlyVersion bool ` long:"dumpversion" description:"show only version number and exit"`
ShowHelp bool `short:"h" long:"help" description:"show this help message"`
}
var pathFilterDirectories = []string{"autom4te.cache", "blib", "_build", ".bzr", ".cdv", "cover_db", "CVS", "_darcs", "~.dep", "~.dot", ".git", ".hg", "~.nib", ".pc", "~.plst", "RCS", "SCCS", "_sgbak", ".svn", "_obj", ".idea"}
// Apply changesets to file
func applyChangesetsToFile(fileitem fileitem, changesets []changeset) (string, bool, error) {
var (
output string = ""
status bool = true
)
// try open file
file, err := os.Open(fileitem.Path)
if err != nil {
return output, false, err
}
writeBufferToFile := false
var buffer bytes.Buffer
r := bufio.NewReader(file)
line, e := Readln(r)
for e == nil {
newLine, lineChanged, skipLine := applyChangesetsToLine(line, changesets)
if lineChanged || skipLine {
writeBufferToFile = true
}
if !skipLine {
buffer.WriteString(newLine + "\n")
}
line, e = Readln(r)
}
file.Close()
// --mode=lineinfile
if opts.ModeIsLineInFile {
lifBuffer, lifStatus := handleLineInFile(changesets, buffer)
if lifStatus {
buffer.Reset()
buffer.WriteString(lifBuffer.String())
writeBufferToFile = lifStatus
}
}
// --output
// --output-strip-ext
// enforcing writing of file (creating new file)
if opts.Output != "" || opts.OutputStripFileExt != "" {
writeBufferToFile = true
}
if writeBufferToFile {
output, status = writeContentToFile(fileitem, buffer)
} else {
output = fmt.Sprintf("%s no match", fileitem.Path)
}
return output, status, err
}
// Apply template to file
func applyTemplateToFile(fileitem fileitem, changesets []changeset) (string, bool, error) |
func applyChangesetsToLine(line string, changesets []changeset) (string, bool, bool) {
changed := false
skipLine := false
for i, changeset := range changesets {
// --once, only do changeset once if already applied to file
if opts.Once != "" && changeset.MatchFound {
// --once=unique, skip matching lines
if opts.Once == "unique" && searchMatch(line, changeset) {
// matching line, not writing to buffer as requested
skipLine = true
changed = true
break
}
} else {
// search and replace
if searchMatch(line, changeset) {
// --mode=line or --mode=lineinfile
if opts.ModeIsReplaceLine || opts.ModeIsLineInFile {
if opts.RegexBackref {
// get match
line = string(changeset.Search.Find([]byte(line)))
// replace regex backrefs in match
line = changeset.Search.ReplaceAllString(line, changeset.Replace)
} else {
// replace whole line with replace term
line = changeset.Replace
}
} else {
// replace only term inside line
line = replaceText(line, changeset)
}
changesets[i].MatchFound = true
changed = true
}
}
}
return line, changed, skipLine
}
// Build search term
// Compiles regexp if regexp is used
func buildSearchTerm(term string) *regexp.Regexp {
var ret *regexp.Regexp
var regex string
// --regex
if opts.Regex {
// use search term as regex
regex = term
} else {
// use search term as normal string, escape it for regex usage
regex = regexp.QuoteMeta(term)
}
// --ignore-case
if opts.CaseInsensitive {
regex = "(?i:" + regex + ")"
}
// --verbose
if opts.Verbose {
logMessage(fmt.Sprintf("Using regular expression: %s", regex))
}
// --regex-posix
if opts.RegexPosix {
ret = regexp.MustCompilePOSIX(regex)
} else {
ret = regexp.MustCompile(regex)
}
return ret
}
// handle special cli options
// eg. --help
//
// --version
// --path
// --mode=...
func handleSpecialCliOptions(args []string) {
// --dumpversion
if opts.ShowOnlyVersion {
fmt.Println(gitTag)
os.Exit(0)
}
// --version
if opts.ShowVersion {
fmt.Printf("go-replace version %s (%s)\n", gitTag, gitCommit)
fmt.Printf("Copyright (C) 2022 %s\n", Author)
os.Exit(0)
}
// --help
if opts.ShowHelp {
argparser.WriteHelp(os.Stdout)
os.Exit(0)
}
// --mode
switch mode := opts.Mode; mode {
case "replace":
opts.ModeIsReplaceMatch = true
opts.ModeIsReplaceLine = false
opts.ModeIsLineInFile = false
opts.ModeIsTemplate = false
case "line":
opts.ModeIsReplaceMatch = false
opts.ModeIsReplaceLine = true
opts.ModeIsLineInFile = false
opts.ModeIsTemplate = false
case "lineinfile":
opts.ModeIsReplaceMatch = false
opts.ModeIsReplaceLine = false
opts.ModeIsLineInFile = true
opts.ModeIsTemplate = false
case "template":
opts.ModeIsReplaceMatch = false
opts.ModeIsReplaceLine = false
opts.ModeIsLineInFile = false
opts.ModeIsTemplate = true
}
// --output
if opts.Output != "" && len(args) > 1 {
logFatalErrorAndExit(errors.New("Only one file is allowed when using --output"), 1)
}
if opts.LineinfileBefore != "" || opts.LineinfileAfter != "" {
if !opts.ModeIsLineInFile {
logFatalErrorAndExit(errors.New("--lineinfile-after and --lineinfile-before only valid in --mode=lineinfile"), 1)
}
if opts.LineinfileBefore != "" && opts.LineinfileAfter != "" {
logFatalErrorAndExit(errors.New("Only --lineinfile-after or --lineinfile-before is allowed in --mode=lineinfile"), 1)
}
}
}
func actionProcessStdinReplace(changesets []changeset) int {
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
line := scanner.Text()
newLine, _, skipLine := applyChangesetsToLine(line, changesets)
if !skipLine {
fmt.Println(newLine)
}
}
return 0
}
func actionProcessStdinTemplate(changesets []changeset) int {
var buffer bytes.Buffer
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
buffer.WriteString(scanner.Text() + "\n")
}
content := parseContentAsTemplate(buffer.String(), changesets)
fmt.Print(content.String())
return 0
}
func actionProcessFiles(changesets []changeset, fileitems []fileitem) int {
// check if there is at least one file to process
if len(file | {
// try open file
buffer, err := os.ReadFile(fileitem.Path)
if err != nil {
return "", false, err
}
content := parseContentAsTemplate(string(buffer), changesets)
output, status := writeContentToFile(fileitem, content)
return output, status, err
} | identifier_body |
patterns.rs | ES,
SE,
SS,
EE,
Psk,
}
pub type MessagePattern = &'static [Token];
/// Handshake protocol specification.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct HandshakeTokens {
pub(crate) name: &'static str,
pub(crate) initiator: MessagePattern,
pub(crate) responder: MessagePattern,
pub(crate) handshake: &'static [MessagePattern],
}
/// Pattern error.
#[derive(Debug, Fail)]
pub enum PatternError {
#[fail(display = "Unsupported handshake type")]
UnsupportedHandshakeType,
#[fail(display = "Unsupported modifier")]
UnsupportedModifier,
#[fail(display = "Invalid psk")]
InvalidPsk,
}
/// The basic handshake patterns.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum HandshakePattern {
// 7.4 One-way handshake patterns
N,
K,
X,
// 7.5. Interactive handshake patterns (fundamental)
NN,
NK,
NX,
KN,
KK,
KX,
XN,
XK,
XX,
IN,
IK,
IX,
// 7.6. Interactive handshake patterns (deferred)
// TODO
}
impl HandshakePattern {
/// Whether the protocol is one-way only.
pub fn is_oneway(&self) -> bool {
match self {
N | X | K => true,
_ => false,
}
}
/// Whether this pattern requires a long-term static key.
pub fn needs_local_static_key(&self, role: Role) -> bool {
match role {
Role::Initiator => match self {
N | NN | NK | NX => false,
_ => true,
},
Role::Responder => match self {
NN | XN | KN | IN => false,
_ => true,
},
}
}
/// Whether this pattern demands a remote public key pre-message.
pub fn needs_known_remote_pubkey(&self, role: Role) -> bool {
match role {
Role::Initiator => match self {
N | K | X | NK | XK | KK | IK => true,
_ => false,
},
Role::Responder => match self {
K | KN | KK | KX => true,
_ => false,
},
}
}
/// Returns the tokens of a handshake.
pub fn tokens(&self) -> HandshakeTokens {
match self {
N => tokens::N,
K => tokens::K,
X => tokens::X,
NN => tokens::NN,
NK => tokens::NK,
NX => tokens::NX,
XN => tokens::XN,
XK => tokens::XK,
XX => tokens::XX,
KN => tokens::KN,
KK => tokens::KK,
KX => tokens::KX,
IN => tokens::IN,
IK => tokens::IK,
IX => tokens::IX,
}
}
}
impl FromStr for HandshakePattern {
type Err = PatternError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"N" => Ok(N),
"K" => Ok(K),
"X" => Ok(X),
"NN" => Ok(NN),
"NK" => Ok(NK),
"NX" => Ok(NX),
"XN" => Ok(XN),
"XK" => Ok(XK),
"XX" => Ok(XX),
"KN" => Ok(KN),
"KK" => Ok(KK),
"KX" => Ok(KX),
"IN" => Ok(IN),
"IK" => Ok(IK),
"IX" => Ok(IX),
_ => Err(PatternError::UnsupportedHandshakeType),
}
}
}
/// A modifier applied to the base pattern as defined in the Noise spec.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum HandshakeModifier {
/// Insert a PSK to mix at the associated position.
Psk(u8),
/// Modify the base pattern to its "fallback" form.
Fallback,
}
impl FromStr for HandshakeModifier {
type Err = PatternError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s.starts_with("psk") {
let n: u8 = s[3..].parse().map_err(|_| PatternError::InvalidPsk)?;
Ok(Self::Psk(n))
} else if s == "fallback" {
Ok(Self::Fallback)
} else {
Err(PatternError::UnsupportedModifier)
}
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
struct HandshakeModifierList(Vec<HandshakeModifier>);
impl FromStr for HandshakeModifierList {
type Err = PatternError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s.is_empty() {
Ok(Self(vec![]))
} else {
let modifier_names = s.split('+');
let mut modifiers = vec![];
for modifier_name in modifier_names {
modifiers.push(modifier_name.parse()?);
}
Ok(Self(modifiers))
}
}
}
/// The pattern/modifier combination choice (no primitives specified) for a
/// full noise protocol definition.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Handshake {
name: String,
pattern: HandshakePattern,
modifiers: HandshakeModifierList,
}
impl Handshake {
/// Returns the name of the handshake.
pub fn name(&self) -> &str {
&self.name
}
/// Parse and split a base HandshakePattern from its optional modifiers.
fn parse_pattern_and_modifier(s: &str) -> Result<(HandshakePattern, &str), PatternError> {
for i in (1..=4).rev() {
if s.len() > i - 1 && s.is_char_boundary(i) {
if let Ok(p) = s[..i].parse() {
return Ok((p, &s[i..]));
}
}
}
Err(PatternError::UnsupportedHandshakeType)
}
/// Returns the base pattern of the handshake.
pub fn pattern(&self) -> &HandshakePattern {
&self.pattern
}
/// Returns the number of psks used in the handshake.
pub fn number_of_psks(&self) -> usize {
self.modifiers
.0
.iter()
.filter(|modifier| {
if let HandshakeModifier::Psk(_) = modifier {
return true;
}
false
})
.count()
}
/// Whether the pattern has a fallback modifier.
#[allow(unused)]
pub fn is_fallback(&self) -> bool {
self.modifiers
.0
.iter()
.find(|modifier| {
if let HandshakeModifier::Fallback = modifier {
return true;
}
false
})
.is_some()
}
/// Returns the tokens of a handshake pattern.
pub fn tokens(&self) -> (&'static [Token], &'static [Token], Vec<Vec<Token>>) {
let base = self.pattern.tokens();
let mut handshake: Vec<Vec<Token>> = base.handshake.iter().map(|p| p.to_vec()).collect();
for modifier in self.modifiers.0.iter() {
if let HandshakeModifier::Psk(n) = modifier {
if *n == 0 {
handshake[0 as usize].insert(0, Token::Psk);
} else {
handshake[*n as usize - 1].push(Token::Psk);
}
}
}
(base.initiator, base.responder, handshake)
}
}
impl FromStr for Handshake {
type Err = PatternError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let (pattern, remainder) = Self::parse_pattern_and_modifier(s)?;
let modifiers = remainder.parse()?;
let name = s.to_string();
Ok(Self {
name,
pattern,
modifiers,
})
}
}
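// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original module): parsing a pattern
// name with a psk modifier and inspecting the result.  "XXpsk3" is only an
// example protocol name; the assertions restate what the code above does.
#[cfg(test)]
mod handshake_parse_example {
    use super::*;

    #[test]
    fn parse_xx_psk3() {
        let hs: Handshake = "XXpsk3".parse().unwrap();
        assert_eq!(hs.name(), "XXpsk3");
        assert_eq!(hs.number_of_psks(), 1);
        assert!(!hs.pattern().is_oneway());
        let (_init_pre, _resp_pre, msgs) = hs.tokens();
        // XX is a three-message pattern; psk3 appends a Psk token to message 3.
        assert_eq!(msgs.len(), 3);
        match msgs[2].last() {
            Some(&Token::Psk) => {}
            _ => panic!("expected a trailing Psk token"),
        }
    }
}
// ---------------------------------------------------------------------------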
macro_rules! pattern {
($name:ident {
$initiator:expr,
$responder:expr,
...
$($handshake:expr,)*
}) => {
pattern!($name, stringify!($name), $initiator, $responder, $($handshake,)*);
};
($const_name:ident, $name:expr, $initiator:expr, $responder:expr, $($handshake:expr,)*) => {
pub const $const_name: HandshakeTokens = HandshakeTokens {
name: $name,
initiator: &$initiator,
responder: &$responder,
handshake: &[$(&$handshake,)*],
};
};
}
mod tokens {
use super::{HandshakeTokens, Token::*};
// 7.2 - One-way Patterns
pattern!(N {
[],
[S],
...
[E, ES],
});
pattern!(K {
[S],
[S],
...
[E, ES, SS],
});
pattern!(X {
[],
[S],
...
[E, ES, S, SS],
});
// 7.3 - Interactive patterns (fundamental)
pattern!(NN {
[],
[],
...
[E],
[E, EE], |
pattern!( | }); | random_line_split |
patterns.rs | ES,
SE,
SS,
EE,
Psk,
}
pub type MessagePattern = &'static [Token];
/// Handshake protocol specification.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct HandshakeTokens {
pub(crate) name: &'static str,
pub(crate) initiator: MessagePattern,
pub(crate) responder: MessagePattern,
pub(crate) handshake: &'static [MessagePattern],
}
/// Pattern error.
#[derive(Debug, Fail)]
pub enum PatternError {
#[fail(display = "Unsupported handshake type")]
UnsupportedHandshakeType,
#[fail(display = "Unsupported modifier")]
UnsupportedModifier,
#[fail(display = "Invalid psk")]
InvalidPsk,
}
/// The basic handshake patterns.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum HandshakePattern {
// 7.4 One-way handshake patterns
N,
K,
X,
// 7.5. Interactive handshake patterns (fundamental)
NN,
NK,
NX,
KN,
KK,
KX,
XN,
XK,
XX,
IN,
IK,
IX,
// 7.6. Interactive handshake patterns (deferred)
// TODO
}
impl HandshakePattern {
/// Whether the protocol is one-way only.
pub fn is_oneway(&self) -> bool {
match self {
N | X | K => true,
_ => false,
}
}
/// Whether this pattern requires a long-term static key.
pub fn needs_local_static_key(&self, role: Role) -> bool {
match role {
Role::Initiator => match self {
N | NN | NK | NX => false,
_ => true,
},
Role::Responder => match self {
NN | XN | KN | IN => false,
_ => true,
},
}
}
/// Whether this pattern demands a remote public key pre-message.
pub fn needs_known_remote_pubkey(&self, role: Role) -> bool {
match role {
Role::Initiator => match self {
N | K | X | NK | XK | KK | IK => true,
_ => false,
},
Role::Responder => match self {
K | KN | KK | KX => true,
_ => false,
},
}
}
/// Returns the tokens of a handshake.
pub fn tokens(&self) -> HandshakeTokens {
match self {
N => tokens::N,
K => tokens::K,
X => tokens::X,
NN => tokens::NN,
NK => tokens::NK,
NX => tokens::NX,
XN => tokens::XN,
XK => tokens::XK,
XX => tokens::XX,
KN => tokens::KN,
KK => tokens::KK,
KX => tokens::KX,
IN => tokens::IN,
IK => tokens::IK,
IX => tokens::IX,
}
}
}
impl FromStr for HandshakePattern {
type Err = PatternError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"N" => Ok(N),
"K" => Ok(K),
"X" => Ok(X),
"NN" => Ok(NN),
"NK" => Ok(NK),
"NX" => Ok(NX),
"XN" => Ok(XN),
"XK" => Ok(XK),
"XX" => Ok(XX),
"KN" => Ok(KN),
"KK" => Ok(KK),
"KX" => Ok(KX),
"IN" => Ok(IN),
"IK" => Ok(IK),
"IX" => Ok(IX),
_ => Err(PatternError::UnsupportedHandshakeType),
}
}
}
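// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original module): what the predicate
// methods above report for the IK pattern.  Role is the type imported by this
// module; the pattern name is only an example.
#[allow(dead_code)]
fn example_ik_requirements() {
    let p: HandshakePattern = "IK".parse().unwrap();
    // the initiator sends its own static key and must already know the responder's
    assert!(p.needs_local_static_key(Role::Initiator));
    assert!(p.needs_known_remote_pubkey(Role::Initiator));
    // IK is an interactive pattern, not a one-way one
    assert!(!p.is_oneway());
}
// ---------------------------------------------------------------------------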
/// A modifier applied to the base pattern as defined in the Noise spec.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum HandshakeModifier {
/// Insert a PSK to mix at the associated position.
Psk(u8),
/// Modify the base pattern to its "fallback" form.
Fallback,
}
impl FromStr for HandshakeModifier {
type Err = PatternError;
fn | (s: &str) -> Result<Self, Self::Err> {
if s.starts_with("psk") {
let n: u8 = s[3..].parse().map_err(|_| PatternError::InvalidPsk)?;
Ok(Self::Psk(n))
} else if s == "fallback" {
Ok(Self::Fallback)
} else {
Err(PatternError::UnsupportedModifier)
}
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
struct HandshakeModifierList(Vec<HandshakeModifier>);
impl FromStr for HandshakeModifierList {
type Err = PatternError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s.is_empty() {
Ok(Self(vec![]))
} else {
let modifier_names = s.split('+');
let mut modifiers = vec![];
for modifier_name in modifier_names {
modifiers.push(modifier_name.parse()?);
}
Ok(Self(modifiers))
}
}
}
/// The pattern/modifier combination choice (no primitives specified) for a
/// full noise protocol definition.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Handshake {
name: String,
pattern: HandshakePattern,
modifiers: HandshakeModifierList,
}
impl Handshake {
/// Returns the name of the handshake.
pub fn name(&self) -> &str {
&self.name
}
/// Parse and split a base HandshakePattern from its optional modifiers.
fn parse_pattern_and_modifier(s: &str) -> Result<(HandshakePattern, &str), PatternError> {
for i in (1..=4).rev() {
if s.len() > i - 1 && s.is_char_boundary(i) {
if let Ok(p) = s[..i].parse() {
return Ok((p, &s[i..]));
}
}
}
Err(PatternError::UnsupportedHandshakeType)
}
/// Returns the base pattern of the handshake.
pub fn pattern(&self) -> &HandshakePattern {
&self.pattern
}
/// Returns the number of psks used in the handshake.
pub fn number_of_psks(&self) -> usize {
self.modifiers
.0
.iter()
.filter(|modifier| {
if let HandshakeModifier::Psk(_) = modifier {
return true;
}
false
})
.count()
}
/// Whether the pattern has a fallback modifier.
#[allow(unused)]
pub fn is_fallback(&self) -> bool {
self.modifiers
.0
.iter()
.find(|modifier| {
if let HandshakeModifier::Fallback = modifier {
return true;
}
false
})
.is_some()
}
/// Returns the tokens of a handshake pattern.
pub fn tokens(&self) -> (&'static [Token], &'static [Token], Vec<Vec<Token>>) {
let base = self.pattern.tokens();
let mut handshake: Vec<Vec<Token>> = base.handshake.iter().map(|p| p.to_vec()).collect();
for modifier in self.modifiers.0.iter() {
if let HandshakeModifier::Psk(n) = modifier {
if *n == 0 {
handshake[0 as usize].insert(0, Token::Psk);
} else {
handshake[*n as usize - 1].push(Token::Psk);
}
}
}
(base.initiator, base.responder, handshake)
}
}
impl FromStr for Handshake {
type Err = PatternError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let (pattern, remainder) = Self::parse_pattern_and_modifier(s)?;
let modifiers = remainder.parse()?;
let name = s.to_string();
Ok(Self {
name,
pattern,
modifiers,
})
}
}
macro_rules! pattern {
($name:ident {
$initiator:expr,
$responder:expr,
...
$($handshake:expr,)*
}) => {
pattern!($name, stringify!($name), $initiator, $responder, $($handshake,)*);
};
($const_name:ident, $name:expr, $initiator:expr, $responder:expr, $($handshake:expr,)*) => {
pub const $const_name: HandshakeTokens = HandshakeTokens {
name: $name,
initiator: &$initiator,
responder: &$responder,
handshake: &[$(&$handshake,)*],
};
};
}
mod tokens {
use super::{HandshakeTokens, Token::*};
// 7.2 - One-way Patterns
pattern!(N {
[],
[S],
...
[E, ES],
});
pattern!(K {
[S],
[S],
...
[E, ES, SS],
});
pattern!(X {
[],
[S],
...
[E, ES, S, SS],
});
// 7.3 - Interactive patterns (fundamental)
pattern!(NN {
[],
[],
...
[E],
[E, EE],
});
pattern!( | from_str | identifier_name |
patterns.rs | ES,
SE,
SS,
EE,
Psk,
}
pub type MessagePattern = &'static [Token];
/// Handshake protocol specification.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct HandshakeTokens {
pub(crate) name: &'static str,
pub(crate) initiator: MessagePattern,
pub(crate) responder: MessagePattern,
pub(crate) handshake: &'static [MessagePattern],
}
/// Pattern error.
#[derive(Debug, Fail)]
pub enum PatternError {
#[fail(display = "Unsupported handshake type")]
UnsupportedHandshakeType,
#[fail(display = "Unsupported modifier")]
UnsupportedModifier,
#[fail(display = "Invalid psk")]
InvalidPsk,
}
/// The basic handshake patterns.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum HandshakePattern {
// 7.4 One-way handshake patterns
N,
K,
X,
// 7.5. Interactive handshake patterns (fundamental)
NN,
NK,
NX,
KN,
KK,
KX,
XN,
XK,
XX,
IN,
IK,
IX,
// 7.6. Interactive handshake patterns (deferred)
// TODO
}
impl HandshakePattern {
/// Whether the protocol is one-way only.
pub fn is_oneway(&self) -> bool {
match self {
N | X | K => true,
_ => false,
}
}
/// Whether this pattern requires a long-term static key.
pub fn needs_local_static_key(&self, role: Role) -> bool {
match role {
Role::Initiator => match self {
N | NN | NK | NX => false,
_ => true,
},
Role::Responder => match self {
NN | XN | KN | IN => false,
_ => true,
},
}
}
/// Whether this pattern demands a remote public key pre-message.
pub fn needs_known_remote_pubkey(&self, role: Role) -> bool {
match role {
Role::Initiator => match self {
N | K | X | NK | XK | KK | IK => true,
_ => false,
},
Role::Responder => match self {
K | KN | KK | KX => true,
_ => false,
},
}
}
/// Returns the tokens of a handshake.
pub fn tokens(&self) -> HandshakeTokens {
match self {
N => tokens::N,
K => tokens::K,
X => tokens::X,
NN => tokens::NN,
NK => tokens::NK,
NX => tokens::NX,
XN => tokens::XN,
XK => tokens::XK,
XX => tokens::XX,
KN => tokens::KN,
KK => tokens::KK,
KX => tokens::KX,
IN => tokens::IN,
IK => tokens::IK,
IX => tokens::IX,
}
}
}
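// Worked example (illustrative, follows the match arms above): an XX initiator
// does not need the responder's static key before the handshake, whereas an IK
// initiator must already know it:
//
//     HandshakePattern::XX.needs_known_remote_pubkey(Role::Initiator) == false
//     HandshakePattern::IK.needs_known_remote_pubkey(Role::Initiator) == true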
impl FromStr for HandshakePattern {
type Err = PatternError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"N" => Ok(N),
"K" => Ok(K),
"X" => Ok(X),
"NN" => Ok(NN),
"NK" => Ok(NK),
"NX" => Ok(NX),
"XN" => Ok(XN),
"XK" => Ok(XK),
"XX" => Ok(XX),
"KN" => Ok(KN),
"KK" => Ok(KK),
"KX" => Ok(KX),
"IN" => Ok(IN),
"IK" => Ok(IK),
"IX" => Ok(IX),
_ => Err(PatternError::UnsupportedHandshakeType),
}
}
}
/// A modifier applied to the base pattern as defined in the Noise spec.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum HandshakeModifier {
/// Insert a PSK to mix at the associated position.
Psk(u8),
/// Modify the base pattern to its "fallback" form.
Fallback,
}
impl FromStr for HandshakeModifier {
type Err = PatternError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s.starts_with("psk") {
let n: u8 = s[3..].parse().map_err(|_| PatternError::InvalidPsk)?;
Ok(Self::Psk(n))
} else if s == "fallback" {
Ok(Self::Fallback)
} else {
Err(PatternError::UnsupportedModifier)
}
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
struct HandshakeModifierList(Vec<HandshakeModifier>);
impl FromStr for HandshakeModifierList {
type Err = PatternError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s.is_empty() {
Ok(Self(vec![]))
} else {
let modifier_names = s.split('+');
let mut modifiers = vec![];
for modifier_name in modifier_names {
modifiers.push(modifier_name.parse()?);
}
Ok(Self(modifiers))
}
}
}
/// The pattern/modifier combination choice (no primitives specified) for a
/// full noise protocol definition.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Handshake {
name: String,
pattern: HandshakePattern,
modifiers: HandshakeModifierList,
}
impl Handshake {
/// Returns the name of the handshake.
pub fn name(&self) -> &str {
&self.name
}
/// Parse and split a base HandshakePattern from its optional modifiers.
fn parse_pattern_and_modifier(s: &str) -> Result<(HandshakePattern, &str), PatternError> {
for i in (1..=4).rev() {
if s.len() > i - 1 && s.is_char_boundary(i) {
if let Ok(p) = s[..i].parse() {
return Ok((p, &s[i..]));
}
}
}
Err(PatternError::UnsupportedHandshakeType)
}
/// Returns the base pattern of the handshake.
pub fn pattern(&self) -> &HandshakePattern |
/// Returns the number of psks used in the handshake.
pub fn number_of_psks(&self) -> usize {
self.modifiers
.0
.iter()
.filter(|modifier| {
if let HandshakeModifier::Psk(_) = modifier {
return true;
}
false
})
.count()
}
/// Whether the pattern has a fallback modifier.
#[allow(unused)]
pub fn is_fallback(&self) -> bool {
self.modifiers
.0
.iter()
.find(|modifier| {
if let HandshakeModifier::Fallback = modifier {
return true;
}
false
})
.is_some()
}
/// Returns the tokens of a handshake pattern.
pub fn tokens(&self) -> (&'static [Token], &'static [Token], Vec<Vec<Token>>) {
let base = self.pattern.tokens();
let mut handshake: Vec<Vec<Token>> = base.handshake.iter().map(|p| p.to_vec()).collect();
for modifier in self.modifiers.0.iter() {
if let HandshakeModifier::Psk(n) = modifier {
if *n == 0 {
handshake[0].insert(0, Token::Psk);
} else {
handshake[*n as usize - 1].push(Token::Psk);
}
}
}
(base.initiator, base.responder, handshake)
}
}
impl FromStr for Handshake {
type Err = PatternError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let (pattern, remainder) = Self::parse_pattern_and_modifier(s)?;
let modifiers = remainder.parse()?;
let name = s.to_string();
Ok(Self {
name,
pattern,
modifiers,
})
}
}
macro_rules! pattern {
($name:ident {
$initiator:expr,
$responder:expr,
...
$($handshake:expr,)*
}) => {
pattern!($name, stringify!($name), $initiator, $responder, $($handshake,)*);
};
($const_name:ident, $name:expr, $initiator:expr, $responder:expr, $($handshake:expr,)*) => {
pub const $const_name: HandshakeTokens = HandshakeTokens {
name: $name,
initiator: &$initiator,
responder: &$responder,
handshake: &[$(&$handshake,)*],
};
};
}
mod tokens {
use super::{HandshakeTokens, Token::*};
// 7.2 - One-way Patterns
pattern!(N {
[],
[S],
...
[E, ES],
});
pattern!(K {
[S],
[S],
...
[E, ES, SS],
});
pattern!(X {
[],
[S],
...
[E, ES, S, SS],
});
// 7.3 - Interactive patterns (fundamental)
pattern!(NN {
[],
[],
...
[E],
[E, EE],
});
pattern!( | {
&self.pattern
} | identifier_body |
lib.rs | .cmp(b.0));
// Initialize empty huffman tree.
let mut tree = HuffmanTree {
list: Vec::new(),
};
//
let mut old_weight = 0;
let mut counter = 0;
for (weight, value) in self.list {
number_of_bits(max_bits, weight);
}
// Return the created tree
tree
}*/
}
struct HuffmanTree {
// List of bits, bit sequence, value.
list: Vec<(u8, u8, u8)>,
}
// Get the number of bits for a weight.
fn number_of_bits(max_bits: u8, weight: u8) -> u8 {
if weight > 0 {
max_bits + 1 - weight
} else {
0
}
}
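// Worked example (illustrative): with max_bits = 4, number_of_bits(4, 4) == 1
// and number_of_bits(4, 1) == 4, while a weight of 0 (number_of_bits(4, 0) == 0)
// means the symbol does not appear in the table at all.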
fn huffman_stream(stream: &[u8]) {
let mut stream = stream.iter();
while let Some(byte) = stream.next_back() {
todo!()
}
}
// ZStd magic number.
const MAGIC_NUMBER: u32 = 0xFD2FB528;
#[derive(PartialEq)]
enum BlockType {
RawBlock,
RleBlock,
ZstdBlock,
}
#[derive(PartialEq)]
enum LiteralType {
Raw,
Rle,
HuffmanTree,
HuffmanTreeless,
}
/// Decoder Error.
#[derive(Debug)]
enum DecError {
MagicNumber,
FrameHeaderDesc,
WindowSize,
NoBlocks,
InvalidBlockType,
}
impl Display for DecError {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
use DecError::*;
let message = match self {
MagicNumber => "Magic number does not match",
FrameHeaderDesc => "Invalid values in the frame header descriptor.",
WindowSize => "Window size is too large or too small.",
NoBlocks => "There were no blocks in the frame.",
InvalidBlockType => "Block type is invalid (reserved value used).",
};
write!(f, "{}", message)
}
}
impl Error for DecError {
}
impl From<DecError> for IoErr {
fn from(dec_error: DecError) -> IoErr {
IoErr::new(Kind::InvalidInput, dec_error)
}
}
#[derive(Default)]
struct Frame {
data: Vec<u8>,
}
impl Frame {
fn encode<W: Write>(&mut self, writer: &mut W) -> Result<(), IoErr> {
///////////////////// Magic_Number ////////////////////
let data = &self.data[..];
writer.write_all(&MAGIC_NUMBER.to_le_bytes())?;
///////////////////// Frame_Header ////////////////////
// Encode frame header descriptor.
let mut frame_head_desc = 0b0110_0000;
// 16 bit Frame Content Size
// Single segment
// No Checksum
// No Dictionary
writer.write_all(&[frame_head_desc])?;
///////////////////// Data_Block(s) ////////////////////
todo!();
///////////////////// Content_Checksum ////////////////////
todo!();
self.data.clear();
Ok(())
}
fn decode<R: Read>(&mut self, reader: &mut R) -> Result<(), IoErr> {
let mut dec = LeDecoder::new(reader);
///////////////////// Magic_Number ////////////////////
if dec.u32()? != MAGIC_NUMBER {
Err(DecError::MagicNumber)?
}
///////////////////// Frame_Header ////////////////////
// Decode the frame header descriptor.
let frame_head_desc = dec.u8()?;
let frame_content_size_flag = frame_head_desc & 0b1100_0000;
let single_segment_flag = frame_head_desc & 0b0010_0000;
let unused_reserved_bits = frame_head_desc & 0b0001_1000;
let content_checksum_flag = frame_head_desc & 0b0000_0100;
let dictionary_id_flag = frame_head_desc & 0b0000_0011;
// Interpret frame header descriptor.
let fcs_field_size = match frame_content_size_flag {
0b0000_0000 => single_segment_flag >> 5,
0b0100_0000 => 2,
0b1000_0000 => 4,
0b1100_0000 => 8,
_ => unreachable!(),
};
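// Worked example (illustrative): the 0b0110_0000 descriptor written by
// `encode` above has frame_content_size_flag == 0b0100_0000 (a 2-byte frame
// content size field), the single-segment flag set, and no checksum or dictionary.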
if unused_reserved_bits != 0 {
Err(DecError::FrameHeaderDesc)?
}
let content_checksum = content_checksum_flag != 0;
// Check for window descriptor if it exists.
let window_size: Option<u64> = if single_segment_flag == 0 {
let window_descriptor: u64 = dec.u8()?.into();
let exponent = (window_descriptor & 0b1111_1000) >> 3;
let mantissa = window_descriptor & 0b0000_0111;
let window_log = 10 + exponent;
let window_base = 1 << window_log;
let window_add = (window_base / 8) * mantissa;
Some(window_base + window_add)
} else {
None
};
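// Worked example (illustrative): a descriptor byte of 0b0101_1000 gives
// exponent = 11 and mantissa = 0, so window_log = 21 and the window is
// 1 << 21 = 2 MiB; a descriptor of 0 yields the 1 KiB minimum (1 << 10).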
// Check dictionary ID field.
let dictionary_id: Option<u32> = match dictionary_id_flag {
0 => None,
1 => {
let did = dec.u8()?.into();
Some(did)
},
2 => {
let did = dec.u16()?.into(); | 3 => {
let did = dec.u32()?;
Some(did)
},
_ => unreachable!(),
};
// Check frame content size.
let window_size: u64 = if let Some(window_size) = window_size {
window_size
} else {
let window_size: u64 = match fcs_field_size {
1 => dec.u8()?.into(),
2 => dec.u16()?.into(),
4 => dec.u32()?.into(),
8 => dec.u64()?,
_ => unreachable!(),
};
window_size
};
// Support From 1KB to 8MB
if window_size > 8_000_000 || window_size < 1_000 {
Err(DecError::WindowSize)?
}
// Resize buffer (to be overwritten)
self.data.resize(window_size.try_into().unwrap(), 0);
///////////////////// Data_Block(s) ////////////////////
// FIXME:
let block_header = dec.u24()?;
let mut last_block = (block_header & 1) != 0;
let mut block_type = match block_header & 0b0110 {
0b000 => BlockType::RawBlock,
0b010 => BlockType::RleBlock,
0b100 => BlockType::ZstdBlock,
_ => Err(DecError::InvalidBlockType)?,
};
if last_block {
Err(DecError::NoBlocks)?
}
let mut block_size = ((block_header >> 3) as usize).min(128_000);
let mut buf = &mut self.data[..];
loop {
// Decode this block.
match block_type {
BlockType::RawBlock => {
// No decompression necessary
dec.bytes(&mut buf[..block_size])?;
buf = &mut buf[block_size..];
}
BlockType::RleBlock => {
// Run length decompression of a single byte
let single_byte = dec.u8()?;
for i in &mut buf[..block_size] {
*i = single_byte;
}
buf = &mut buf[block_size..];
}
BlockType::ZstdBlock => {
// ZStandard decompression
//////////// Literals section //////////
// Literals Section header
let first_nibble = dec.u(4, 0)?;
let literal_type = match first_nibble & 0b0011 {
0b00 => LiteralType::Raw,
0b01 => LiteralType::Rle,
0b10 => LiteralType::HuffmanTree,
0b11 => LiteralType::HuffmanTreeless,
_ => unreachable!(),
};
use LiteralType::*;
let (regenerated_size, compressed_size, four_huffman_streams) = match literal_type {
Rle | Raw => {
// Size format uses 1 or 2 bits.
let rs = match first_nibble & 0b1100 {
// 1 Bit (Regenerated Size: u5)
0b0000 | 0b1000 => dec.u(5, 5)?,
// 2 Bit (Regenerated Size: u12)
0b0100 => dec.u(12, 4)?,
// 2 Bit (Regenerated Size: u20)
0b1100 => dec.u(20, 4)?,
_ => unreachable!(),
};
(rs, None, false)
}
HuffmanTree | HuffmanTreeless => {
// Size format always uses 2 bits.
let ( | Some(did)
}, | random_line_split |
lib.rs | () -> Self {
Self {
literal: 0,
list: Vec::new(),
}
}
/// Add a weight for the next literal.
pub fn weight(&mut self, weight: u8) {
if weight != 0 {
self.list.push((weight, self.literal));
}
self.literal += 1;
}
// FIXME https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#representation
/*/// Finish building the Huffman tree.
pub fn finish(self) -> HuffmanTree {
// Stable sort by weight, secondary sort by natural seq. order stays.
self.list.sort_by(|a, b| a.0.cmp(b.0));
// Initialize empty huffman tree.
let mut tree = HuffmanTree {
list: Vec::new(),
};
//
let mut old_weight = 0;
let mut counter = 0;
for (weight, value) in self.list {
number_of_bits(max_bits, weight);
}
// Return the created tree
tree
}*/
}
struct HuffmanTree {
// List of bits, bit sequence, value.
list: Vec<(u8, u8, u8)>,
}
// Get the number of bits for a weight.
fn number_of_bits(max_bits: u8, weight: u8) -> u8 {
if weight > 0 {
max_bits + 1 - weight
} else {
0
}
}
fn huffman_stream(stream: &[u8]) {
let mut stream = stream.iter();
while let Some(byte) = stream.next_back() {
todo!()
}
}
// ZStd magic number.
const MAGIC_NUMBER: u32 = 0xFD2FB528;
#[derive(PartialEq)]
enum BlockType {
RawBlock,
RleBlock,
ZstdBlock,
}
#[derive(PartialEq)]
enum LiteralType {
Raw,
Rle,
HuffmanTree,
HuffmanTreeless,
}
/// Decoder Error.
#[derive(Debug)]
enum DecError {
MagicNumber,
FrameHeaderDesc,
WindowSize,
NoBlocks,
InvalidBlockType,
}
impl Display for DecError {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
use DecError::*;
let message = match self {
MagicNumber => "Magic number does not match",
FrameHeaderDesc => "Invalid values in the frame header descriptor.",
WindowSize => "Window size is too large or too small.",
NoBlocks => "There were no blocks in the frame.",
InvalidBlockType => "Block type is invalid (reserved value used).",
};
write!(f, "{}", message)
}
}
impl Error for DecError {
}
impl From<DecError> for IoErr {
fn from(dec_error: DecError) -> IoErr {
IoErr::new(Kind::InvalidInput, dec_error)
}
}
#[derive(Default)]
struct Frame {
data: Vec<u8>,
}
impl Frame {
fn encode<W: Write>(&mut self, writer: &mut W) -> Result<(), IoErr> {
///////////////////// Magic_Number ////////////////////
let data = &self.data[..];
writer.write_all(&MAGIC_NUMBER.to_le_bytes())?;
///////////////////// Frame_Header ////////////////////
// Encode frame header descriptor.
let mut frame_head_desc = 0b0110_0000;
// 16 bit Frame Content Size
// Single segment
// No Checksum
// No Dictionary
writer.write_all(&[frame_head_desc])?;
///////////////////// Data_Block(s) ////////////////////
todo!();
///////////////////// Content_Checksum ////////////////////
todo!();
self.data.clear();
Ok(())
}
fn decode<R: Read>(&mut self, reader: &mut R) -> Result<(), IoErr> {
let mut dec = LeDecoder::new(reader);
///////////////////// Magic_Number ////////////////////
if dec.u32()? != MAGIC_NUMBER {
Err(DecError::MagicNumber)?
}
///////////////////// Frame_Header ////////////////////
// Decode the frame header descriptor.
let frame_head_desc = dec.u8()?;
let frame_content_size_flag = frame_head_desc & 0b1100_0000;
let single_segment_flag = frame_head_desc & 0b0010_0000;
let unused_reserved_bits = frame_head_desc & 0b0001_1000;
let content_checksum_flag = frame_head_desc & 0b0000_0100;
let dictionary_id_flag = frame_head_desc & 0b0000_0011;
// Interpret frame header descriptor.
let fcs_field_size = match frame_content_size_flag {
0b0000_0000 => single_segment_flag >> 5,
0b0100_0000 => 2,
0b1000_0000 => 4,
0b1100_0000 => 8,
_ => unreachable!(),
};
if unused_reserved_bits != 0 {
Err(DecError::FrameHeaderDesc)?
}
let content_checksum = content_checksum_flag != 0;
// Check for window descriptor if it exists.
let window_size: Option<u64> = if single_segment_flag == 0 {
let window_descriptor: u64 = dec.u8()?.into();
let exponent = (window_descriptor & 0b1111_1000) >> 3;
let mantissa = window_descriptor & 0b0000_0111;
let window_log = 10 + exponent;
let window_base = 1 << window_log;
let window_add = (window_base / 8) * mantissa;
Some(window_base + window_add)
} else {
None
};
// Check dictionary ID field.
let dictionary_id: Option<u32> = match dictionary_id_flag {
0 => None,
1 => {
let did = dec.u8()?.into();
Some(did)
},
2 => {
let did = dec.u16()?.into();
Some(did)
},
3 => {
let did = dec.u32()?;
Some(did)
},
_ => unreachable!(),
};
// Check frame content size.
let window_size: u64 = if let Some(window_size) = window_size {
window_size
} else {
let window_size: u64 = match fcs_field_size {
1 => dec.u8()?.into(),
2 => dec.u16()?.into(),
4 => dec.u32()?.into(),
8 => dec.u64()?,
_ => unreachable!(),
};
window_size
};
// Support From 1KB to 8MB
if window_size > 8_000_000 || window_size < 1_000 {
Err(DecError::WindowSize)?
}
// Resize buffer (to be overwritten)
self.data.resize(window_size.try_into().unwrap(), 0);
///////////////////// Data_Block(s) ////////////////////
// FIXME:
let block_header = dec.u24()?;
let mut last_block = (block_header & 1) != 0;
let mut block_type = match block_header & 0b0110 {
0b000 => BlockType::RawBlock,
0b010 => BlockType::RleBlock,
0b100 => BlockType::ZstdBlock,
_ => Err(DecError::InvalidBlockType)?,
};
if last_block {
Err(DecError::NoBlocks)?
}
let mut block_size = ((block_header >> 3) as usize).min(128_000);
let mut buf = &mut self.data[..];
loop {
// Decode this block.
match block_type {
BlockType::RawBlock => {
// No decompression necessary
dec.bytes(&mut buf[..block_size])?;
buf = &mut buf[block_size..];
}
BlockType::RleBlock => {
// Run length decompression of a single byte
let single_byte = dec.u8()?;
for i in &mut buf[..block_size] {
*i = single_byte;
}
buf = &mut buf[block_size..];
}
BlockType::ZstdBlock => {
// ZStandard decompression
//////////// Literals section //////////
// Literals Section header
let first_nibble = dec.u(4, 0)?;
let literal_type = match first_nibble & 0b0011 {
0b00 => LiteralType::Raw,
0b01 => LiteralType::Rle,
0b10 => LiteralType::HuffmanTree,
0b11 => LiteralType::HuffmanTreeless,
_ => unreachable!(),
};
use LiteralType::*;
let (regenerated_size, compressed_size, four_huffman_streams) = match literal_type {
Rle | Raw => {
// Size format uses 1 or 2 bits.
let rs = match first_nibble & 0b110 | new | identifier_name |
|
gulpfile.js | )
.on(
'error',
notify.onError({
message: '<%= error.message %>',
title: 'PUG Error!',
})
)
.pipe(gulp.dest(outputDir))
.pipe(browserSync.stream({ once: true }));
});
gulp.task('sass', function () {
return gulp
.src([assetsDir + 'sass/**/*.scss', '!' + assetsDir + 'sass/**/_*.scss'])
.pipe(plumber())
.pipe(sourcemaps.init())
.pipe(
sass().on(
'error',
notify.onError({
message: '<%= error.message %>',
title: 'Sass Error!',
})
)
)
.pipe(inlineimage())
.pipe(prefix('last 3 versions'))
.pipe(
postcss([
assets({
basePath: outputDir,
loadPaths: ['i/'],
}),
])
)
.pipe(sourcemaps.write())
.pipe(gulp.dest(outputDir + 'styles/'))
.pipe(browserSync.stream({ match: '**/*.css' }));
});
gulp.task('jsConcatLibs', function () {
return gulp
.src(assetsDir + 'js/libs/**/*.js')
.pipe(concat('libs.js', { newLine: ';' }))
.pipe(gulp.dest(outputDir + 'js/'))
.pipe(browserSync.stream({ once: true }));
});
gulp.task('jsConcatComponents', function () {
return gulp
.src(assetsDir + 'js/components/**/*.js')
.pipe(concat('components.js', { newLine: ';' }))
.pipe(gulp.dest(outputDir + 'js/'))
.pipe(browserSync.stream({ once: true }));
});
gulp.task('fontsConvert', function () {
return gulp
.src([assetsDir + 'fonts/*.woff', assetsDir + 'fonts/*.woff2'])
.pipe(cssfont64())
.pipe(gulp.dest(outputDir + 'styles/'))
.pipe(browserSync.stream({ once: true }));
});
//----------------------------------------------------Compiling###
//-------------------------------------------------Synchronization
gulp.task('imageSync', function () {
return gulp
.src(assetsDir + 'i/**/*')
.pipe(plumber())
.pipe(gulp.dest(outputDir + 'i/'))
.pipe(browserSync.stream({ once: true }));
});
gulp.task('fontsSync', function () {
return gulp
.src(assetsDir + 'fonts/**/*')
.pipe(plumber())
.pipe(gulp.dest(outputDir + 'fonts/'))
.pipe(browserSync.stream({ once: true }));
});
gulp.task('jsSync', function () {
return gulp
.src(assetsDir + 'js/*.js')
.pipe(plumber())
.pipe(gulp.dest(outputDir + 'js/'))
.pipe(browserSync.stream({ once: true }));
});
//-------------------------------------------------Synchronization###
//watching files and run tasks
gulp.task('watch', function () {
gulp.watch(assetsDir + 'pug/**/*.pug', gulp.series('pug'));
gulp.watch(assetsDir + 'sass/**/*.scss', gulp.series('sass'));
gulp.watch(assetsDir + 'js/**/*.js', gulp.series('jsSync'));
gulp.watch(assetsDir + 'js/libs/**/*.js', gulp.series('jsConcatLibs'));
gulp.watch(
assetsDir + 'js/components/**/*.js',
gulp.series('jsConcatComponents')
);
gulp.watch(assetsDir + 'i/**/*', gulp.series('imageSync'));
gulp.watch(assetsDir + 'i/**/*', gulp.series('imgWebp'));
gulp.watch(
assetsDir + 'fonts/**/*',
gulp.series('fontsSync', 'fontsConvert')
);
});
//livereload and open project in browser
var plugins = {
browserSync: {
options: {
port: 1337,
server: {
baseDir: outputDir,
},
},
},
};
gulp.task('browser-sync', function () {
return browserSync.init(plugins.browserSync.options);
});
gulp.task('bs-reload', function (cb) {
browserSync.reload();
cb();
});
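// Illustrative only: the granular tasks above are meant to be composed with
// gulp.series/gulp.parallel. A hypothetical "serve" entry point (the name is
// an assumption, it is not defined in this file) could look like:
// gulp.task('serve', gulp.series('sass', 'pug', gulp.parallel('watch', 'browser-sync')));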
//---------------------------------building final project folder
//clean build folder
gulp.task('cleanBuildDir', function (cb) {
return rimraf(buildDir, cb);
});
//minify images
gulp.task('imgBuild', function () {
return gulp
.src([outputDir + 'i/**/*', '!' + outputDir + 'i/sprite/**/*'])
.pipe(
image({
pngquant: true,
optipng: false,
zopflipng: true,
jpegRecompress: false,
mozjpeg: true,
gifsicle: true,
svgo: false,
concurrent: 10,
quiet: false, // defaults to false
})
)
.pipe(gulp.dest(buildDir + 'i/'));
});
//copy sprite.svg
gulp.task('copySprite', function () {
return gulp
.src(outputDir + 'i/sprite/sprite.svg')
.pipe(plumber())
.pipe(gulp.dest(buildDir + 'i/sprite/'));
});
//copy fonts
gulp.task('fontsBuild', function () {
return gulp
.src(outputDir + 'fonts/**/*')
.pipe(gulp.dest(buildDir + 'fonts/'));
});
//copy html
gulp.task('htmlBuild', function () {
return gulp.src(outputDir + '**/*.html').pipe(gulp.dest(buildDir));
});
//copy and minify js
gulp.task('jsBuild', function () {
return gulp
.src(outputDir + 'js/**/*')
.pipe(terser())
.pipe(gulp.dest(buildDir + 'js/'));
});
//copy, minify css
gulp.task('cssBuild', function () {
return gulp
.src(outputDir + 'styles/**/*')
.pipe(csso())
.pipe(gulp.dest(buildDir + 'styles/'));
});
//// --------------------------------------------If you need iconfont
// var iconfont = require('gulp-iconfont'),
// iconfontCss = require('gulp-iconfont-css'),
// fontName = 'iconfont';
// gulp.task('iconfont', function () {
// gulp.src([assetsDir + 'i/icons/*.svg'])
// .pipe(iconfontCss({
// path: 'assets/sass/templates/_icons_template.scss',
// fontName: fontName,
// targetPath: '../../sass/_icons.scss',
// fontPath: '../fonts/icons/',
// svg: true
// }))
// .pipe(iconfont({
// fontName: fontName,
// svg: true,
// formats: ['svg','eot','woff','ttf']
// }))
// .pipe(gulp.dest('assets/fonts/icons'));
// });
// --------------------------------------------If you need svg sprite
var svgSprite = require('gulp-svg-sprite'),
svgmin = require('gulp-svgmin'),
cheerio = require('gulp-cheerio'),
replace = require('gulp-replace');
gulp.task('svgSpriteBuild', function () {
return (
gulp
.src(assetsDir + 'i/icons/*.svg')
// minify svg
.pipe(
svgmin({
js2svg: {
pretty: true,
},
})
)
// remove all fill and style declarations in out shapes
.pipe(
cheerio({
run: function ($) {
$('[fill]').removeAttr('fill');
$('[stroke]').removeAttr('stroke');
$('[style]').removeAttr('style');
},
parserOptions: { xmlMode: true },
})
)
// cheerio plugin creates the unnecessary string '&gt;', so replace it.
.pipe(replace('&gt;', '>'))
// build svg sprite
.pipe(
svgSprite({
mode: {
symbol: {
sprite: '../sprite.svg',
render: {
scss: {
dest: '../../../sass/_sprite.scss',
template: assetsDir + 'sass/templates/_sprite_template.scss',
},
},
example: true,
},
},
})
)
.pipe(gulp.dest(assetsDir + 'i/sprite/')) | gulp.task('cssLint', function () {
return gulp
.src([
assetsDir + 'sass/**/*.scss',
'!' + assetsDir + 'sass/templates/*.scss',
])
.pipe(
postcss([stylelint(), reporter({ clearMessages: true })], {
syntax: postcss_scss,
})
);
});
gulp.task('set-dev-node-env', function(done) {
productionStatus = 'development';
done();
});
gulp.task('set-prod-node-env', function(done) {
productionStatus = 'production';
done();
});
let taskArray = {
development: gulp.series(
'set-dev-node-env',
gulp.parallel(
'pug',
'sass',
'imgWebp',
'imageSync',
'fontsSync',
'fontsConvert',
'jsConcatLibs',
'jsConcatComponents',
'jsSync',
'watch',
'browser-sync'
)
),
production: gulp.series(
'cleanBuildDir',
'set-prod-node-env',
'pug',
gulp.parallel(
'imgBuild',
| );
});
| random_line_split |
vr.rs | -> VrMoment {
{
let mut disp = self.disp.borrow_mut();
disp.sync_poses();
}
let mut new_controllers = Vec::new();
for event in self.vrsm.poll_events() {
match event {
VREvent::Display(VRDisplayEvent::Pause(_)) => self.paused = true,
VREvent::Display(VRDisplayEvent::Resume(_)) => self.paused = false,
VREvent::Display(VRDisplayEvent::Exit(_)) => self.exit = true,
VREvent::Gamepad(VRGamepadEvent::Connect(_, state)) =>
new_controllers.push(ControllerRef::Indexed(state.gamepad_id)),
_ => (),
}
}
let mut moment = VrMoment {
cont: FnvHashMap::default(),
hmd: None,
primary: None,
secondary: None,
tertiary: None,
layer: self.layer.clone(),
stage: na::one(),
inverse_stage: na::one(),
exit: self.exit,
paused: self.paused,
new_controllers: new_controllers,
timestamp: 0.,
};
{
let disp = self.disp.borrow();
let data = disp.data();
let state = disp.synced_frame_data(self.near, self.far);
let (w, h) = size_from_data(&data);
moment.timestamp = state.timestamp / 1000.;
moment.inverse_stage = data.stage_parameters
.map(|stage| Matrix4::upgrade(stage.sitting_to_standing_transform))
.and_then(|stage| na::try_convert(stage))
.unwrap_or(Similarity3::identity());
moment.stage = moment.inverse_stage.inverse();
let left_view = Transform3::upgrade(state.left_view_matrix);
let right_view = Transform3::upgrade(state.right_view_matrix);
let left_projection = Transform3::upgrade(state.left_projection_matrix);
let right_projection = Transform3::upgrade(state.right_projection_matrix);
if let (Some(pose), true) = (pose_transform(&state.pose, &moment.inverse_stage), data.connected) {
moment.hmd = Some(HmdMoment {
name: data.display_name.clone(),
size: (w, h),
pose: pose,
left: EyeParams {
eye: moment.inverse_stage * left_view.try_inverse().unwrap() * Point3::origin(),
view: left_view * moment.stage,
proj: left_projection,
clip_offset: -0.5,
clip: Rect {
x: 0,
y: 0,
w: data.left_eye_parameters.render_width as u16,
h: h as u16,
},
},
right: EyeParams {
eye: moment.inverse_stage * right_view.try_inverse().unwrap() * Point3::origin(),
view: right_view * moment.stage,
proj: right_projection,
clip_offset: 0.5,
clip: Rect {
x: data.left_eye_parameters.render_width as u16,
y: 0,
w: data.right_eye_parameters.render_width as u16,
h: h as u16,
},
},
});
}
}
let gamepads = self.vrsm.get_gamepads();
{
let mut gpiter = gamepads.iter().filter_map(|gp| {
let gp = gp.borrow();
if gp.state().connected { Some(gp.id()) } else { None }
});
moment.primary = gpiter.next();
moment.secondary = gpiter.next();
moment.tertiary = gpiter.next();
}
for gp in gamepads {
let gp = gp.borrow();
let data = gp.data();
let state = gp.state();
if let (Some(pose), true) = (pose_transform(&state.pose, &moment.inverse_stage), state.connected) {
moment.cont.insert(state.gamepad_id, ControllerMoment {
id: state.gamepad_id,
name: data.name.clone(),
pose: pose,
axes: state.axes.clone(),
buttons: state.buttons.clone(),
});
}
}
moment
}
}
/// Instantaneous information about the VR system retrieved from `VrContext::sync()`.
/// This can be used directly or to update some persistent state.
pub struct VrMoment {
cont: FnvHashMap<u32, ControllerMoment>,
hmd: Option<HmdMoment>,
primary: Option<u32>,
secondary: Option<u32>,
tertiary: Option<u32>,
layer: VRLayer,
/// The stage transform (moves the origin to the center of the room)
pub stage: Similarity3<f32>,
/// The inverse stage transform (moves the center of the room to the origin)
pub inverse_stage: Similarity3<f32>,
/// Has the VR system requested the application to exit
pub exit: bool,
/// Has the VR system requested the application to pause movement (should still sync and submit frames)
pub paused: bool,
/// References to controllers that have connected since the last sync
pub new_controllers: Vec<ControllerRef>,
/// Relative time of this moment (seconds)
pub timestamp: f64,
}
impl VrMoment {
/// Get a controller by reference if such a controller is connected.
pub fn controller(&self, role: ControllerRef) -> Option<&ControllerMoment> {
if let Some(ref i) = role.index(self) { self.cont.get(i) } else { None }
}
/// Iterate over all connected controllers.
pub fn controllers<'a>(&'a self) -> ControllerIter<'a> {
self.cont.values()
}
/// Get instantaneous information about the HMD if it is connected.
pub fn hmd(&self) -> Option<&HmdMoment> {
self.hmd.as_ref()
}
/// Submit the rendered scene. This ends the applicability
/// of this information, since it only applies to the
/// state of the VR system at the last sync.
pub fn submit(self, ctx: &mut VrContext) {
let mut d = ctx.disp.borrow_mut();
d.render_layer(&self.layer);
d.submit_frame();
}
}
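// Illustrative sketch (not part of the original crate): a minimal per-frame
// loop over the API above. Rendering is elided; a real application would draw
// both eyes between `sync()` and `submit()`.
#[allow(dead_code)]
fn example_frame_loop(ctx: &mut VrContext) {
    loop {
        let moment = ctx.sync();
        if moment.exit {
            break;
        }
        if let Some(hmd) = moment.hmd() {
            // `hmd.left` / `hmd.right` carry the per-eye view, projection and viewport.
            let _ = (&hmd.left, &hmd.right);
        }
        // Hand the frame back to the VR runtime; this consumes `moment`.
        moment.submit(ctx);
    }
}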
/// Iterator over momentary controller information.
pub type ControllerIter<'a> = ::std::collections::hash_map::Values<'a, u32, ControllerMoment>;
/// Used to persistently identify a controller, either by internal
/// id or by role. Note that roles can refer to different physical devices
/// at different times, while the internal id will remain locked
/// to a particular device.
#[derive(Copy, Clone, Debug)]
pub enum ControllerRef {
Primary,
Secondary,
Tertiary,
Indexed(u32),
}
impl ControllerRef {
/// Get the internal id of the controller at a particular moment.
fn index(&self, moment: &VrMoment) -> Option<u32> {
use self::ControllerRef::*;
match *self {
Primary => moment.primary,
Secondary => moment.secondary,
Tertiary => moment.tertiary,
Indexed(i) => Some(i),
}
}
/// Make this reference specific to a device (internal id)
/// rather than dynamically updating (role).
pub fn fixed(&self, moment: &VrMoment) -> ControllerRef {
match self.index(moment) {
Some(i) => ControllerRef::Indexed(i),
None => *self,
}
}
}
/// Create a reference to the primary controller.
pub fn primary() -> ControllerRef {
ControllerRef::Primary
}
/// Create a reference to the secondary controller.
pub fn secondary() -> ControllerRef {
ControllerRef::Secondary
}
/// Create a reference to the tertiary controller.
pub fn tertiary() -> ControllerRef {
ControllerRef::Tertiary
}
/// Instantaneous information about a button.
pub type ButtonMoment = VRGamepadButton;
/// A device that provides instantaneous position and orientation information.
pub trait Trackable {
/// Get the location and orientation of the device.
fn pose(&self) -> Isometry3<f32>;
/// Get the direction of the device's x axis.
fn x_dir(&self) -> Vector3<f32> { self.pose() * Vector3::x() }
/// Get the direction of the device's y axis.
fn y_dir(&self) -> Vector3<f32> { self.pose() * Vector3::y() }
/// Get the direction of the device's z axis.
fn z_dir(&self) -> Vector3<f32> { self.pose() * Vector3::z() }
/// Get the location of the device's origin.
fn origin(&self) -> Point3<f32> { self.pose() * Point3::origin() }
/// Get the direction the device is pointing.
fn pointing(&self) -> Vector3<f32> { -self.z_dir() }
}
/// Instantaneous information about the HMD. This can be used directly
/// or to update some persistent state.
#[derive(Clone)]
pub struct HmdMoment {
/// The textual name of the HMD
pub name: String,
/// The resolution of the HMD
pub size: (u32, u32),
/// The location and orientation of the HMD
pub pose: Isometry3<f32>,
/// The drawing parameters for the left eye
pub left: EyeParams,
/// The drawing parameters for the right eye
pub right: EyeParams,
}
impl Trackable for HmdMoment {
fn | pose | identifier_name |
|
vr.rs | hardware API.
pub fn retrieve_size(&mut self) -> (u32, u32) {
size_from_data(&self.disp.borrow().data())
}
/// Synchronize with the hardware, returning transient details about the VR
/// system at the specific moment in time. This data can be used directly or
/// to update state variables.
pub fn sync(&mut self) -> VrMoment {
{
let mut disp = self.disp.borrow_mut();
disp.sync_poses();
}
let mut new_controllers = Vec::new();
for event in self.vrsm.poll_events() {
match event {
VREvent::Display(VRDisplayEvent::Pause(_)) => self.paused = true,
VREvent::Display(VRDisplayEvent::Resume(_)) => self.paused = false,
VREvent::Display(VRDisplayEvent::Exit(_)) => self.exit = true,
VREvent::Gamepad(VRGamepadEvent::Connect(_, state)) =>
new_controllers.push(ControllerRef::Indexed(state.gamepad_id)),
_ => (),
}
}
let mut moment = VrMoment {
cont: FnvHashMap::default(),
hmd: None,
primary: None,
secondary: None,
tertiary: None,
layer: self.layer.clone(),
stage: na::one(),
inverse_stage: na::one(),
exit: self.exit,
paused: self.paused,
new_controllers: new_controllers,
timestamp: 0.,
};
{
let disp = self.disp.borrow();
let data = disp.data();
let state = disp.synced_frame_data(self.near, self.far);
let (w, h) = size_from_data(&data);
moment.timestamp = state.timestamp / 1000.;
moment.inverse_stage = data.stage_parameters
.map(|stage| Matrix4::upgrade(stage.sitting_to_standing_transform))
.and_then(|stage| na::try_convert(stage))
.unwrap_or(Similarity3::identity());
moment.stage = moment.inverse_stage.inverse();
let left_view = Transform3::upgrade(state.left_view_matrix);
let right_view = Transform3::upgrade(state.right_view_matrix);
let left_projection = Transform3::upgrade(state.left_projection_matrix);
let right_projection = Transform3::upgrade(state.right_projection_matrix);
if let (Some(pose), true) = (pose_transform(&state.pose, &moment.inverse_stage), data.connected) {
moment.hmd = Some(HmdMoment {
name: data.display_name.clone(),
size: (w, h),
pose: pose,
left: EyeParams {
eye: moment.inverse_stage * left_view.try_inverse().unwrap() * Point3::origin(),
view: left_view * moment.stage,
proj: left_projection,
clip_offset: -0.5,
clip: Rect {
x: 0,
y: 0,
w: data.left_eye_parameters.render_width as u16,
h: h as u16,
},
}, | proj: right_projection,
clip_offset: 0.5,
clip: Rect {
x: data.left_eye_parameters.render_width as u16,
y: 0,
w: data.right_eye_parameters.render_width as u16,
h: h as u16,
},
},
});
}
}
let gamepads = self.vrsm.get_gamepads();
{
let mut gpiter = gamepads.iter().filter_map(|gp| {
let gp = gp.borrow();
if gp.state().connected { Some(gp.id()) } else { None }
});
moment.primary = gpiter.next();
moment.secondary = gpiter.next();
moment.tertiary = gpiter.next();
}
for gp in gamepads {
let gp = gp.borrow();
let data = gp.data();
let state = gp.state();
if let (Some(pose), true) = (pose_transform(&state.pose, &moment.inverse_stage), state.connected) {
moment.cont.insert(state.gamepad_id, ControllerMoment {
id: state.gamepad_id,
name: data.name.clone(),
pose: pose,
axes: state.axes.clone(),
buttons: state.buttons.clone(),
});
}
}
moment
}
}
/// Instantaneous information about the VR system retrieved from `VrContext::sync()`.
/// This can be used directly or to update some persistent state.
pub struct VrMoment {
cont: FnvHashMap<u32, ControllerMoment>,
hmd: Option<HmdMoment>,
primary: Option<u32>,
secondary: Option<u32>,
tertiary: Option<u32>,
layer: VRLayer,
/// The stage transform (moves the origin to the center of the room)
pub stage: Similarity3<f32>,
/// The inverse stage transform (moves the center of the room to the origin)
pub inverse_stage: Similarity3<f32>,
/// Has the VR system requested the application to exit
pub exit: bool,
/// Has the VR system requested the application to pause movement (should still sync and submit frames)
pub paused: bool,
/// References to controllers that have connected since the last sync
pub new_controllers: Vec<ControllerRef>,
/// Relative time of this moment (seconds)
pub timestamp: f64,
}
impl VrMoment {
/// Get a controller by reference if such a controller is connected.
pub fn controller(&self, role: ControllerRef) -> Option<&ControllerMoment> {
if let Some(ref i) = role.index(self) { self.cont.get(i) } else { None }
}
/// Iterate over all connected controllers.
pub fn controllers<'a>(&'a self) -> ControllerIter<'a> {
self.cont.values()
}
/// Get instantaneous information about the HMD if it is connected.
pub fn hmd(&self) -> Option<&HmdMoment> {
self.hmd.as_ref()
}
/// Submit the rendered scene. This ends the applicability
/// of this information, since it only applies to the
/// state of the VR system at the last sync.
pub fn submit(self, ctx: &mut VrContext) {
let mut d = ctx.disp.borrow_mut();
d.render_layer(&self.layer);
d.submit_frame();
}
}
/// Iterator over momentary controller information.
pub type ControllerIter<'a> = ::std::collections::hash_map::Values<'a, u32, ControllerMoment>;
/// Used to persistently identify a controller, either by internal
/// id or by role. Note that roles can refer to different physical devices
/// at different times, while the internal id will remain locked
/// to a particular device.
#[derive(Copy, Clone, Debug)]
pub enum ControllerRef {
Primary,
Secondary,
Tertiary,
Indexed(u32),
}
impl ControllerRef {
/// Get the internal id of the controller at a particular moment.
fn index(&self, moment: &VrMoment) -> Option<u32> {
use self::ControllerRef::*;
match *self {
Primary => moment.primary,
Secondary => moment.secondary,
Tertiary => moment.tertiary,
Indexed(i) => Some(i),
}
}
/// Make this reference specific to a device (internal id)
/// rather than dynamically updating (role).
pub fn fixed(&self, moment: &VrMoment) -> ControllerRef {
match self.index(moment) {
Some(i) => ControllerRef::Indexed(i),
None => *self,
}
}
}
/// Create a reference to the primary controller.
pub fn primary() -> ControllerRef {
ControllerRef::Primary
}
/// Create a reference to the secondary controller.
pub fn secondary() -> ControllerRef {
ControllerRef::Secondary
}
/// Create a reference to the tertiary controller.
pub fn tertiary() -> ControllerRef {
ControllerRef::Tertiary
}
/// Instantaneous information about a button.
pub type ButtonMoment = VRGamepadButton;
/// A device that provides instantaneous position and orientation information.
pub trait Trackable {
/// Get the location and orientation of the device.
fn pose(&self) -> Isometry3<f32>;
/// Get the direction of the device's x axis.
fn x_dir(&self) -> Vector3<f32> { self.pose() * Vector3::x() }
/// Get the direction of the device's y axis.
fn y_dir(&self) -> Vector3<f32> { self.pose() * Vector3::y() }
/// Get the direction of the device's z axis.
fn z_dir(&self) -> Vector3<f32> { self.pose() * Vector3::z() }
/// Get the location of the device's origin.
fn origin(&self) -> Point3<f32> { self.pose() * Point3::origin() }
/// Get the direction the device is pointing.
fn pointing(&self) -> Vector3<f32> { -self.z_dir() }
}
/// Instantaneous information about the HMD. This can be used directly
/// or to update some persistent state.
#[derive(Clone)]
pub struct HmdMoment {
/// The textual name of the HMD
pub name: String,
/// The resolution of the | right: EyeParams {
eye: moment.inverse_stage * right_view.try_inverse().unwrap() * Point3::origin(),
view: right_view * moment.stage, | random_line_split |
vr.rs | _stage: na::one(),
exit: self.exit,
paused: self.paused,
new_controllers: new_controllers,
timestamp: 0.,
};
{
let disp = self.disp.borrow();
let data = disp.data();
let state = disp.synced_frame_data(self.near, self.far);
let (w, h) = size_from_data(&data);
moment.timestamp = state.timestamp / 1000.;
moment.inverse_stage = data.stage_parameters
.map(|stage| Matrix4::upgrade(stage.sitting_to_standing_transform))
.and_then(|stage| na::try_convert(stage))
.unwrap_or(Similarity3::identity());
moment.stage = moment.inverse_stage.inverse();
let left_view = Transform3::upgrade(state.left_view_matrix);
let right_view = Transform3::upgrade(state.right_view_matrix);
let left_projection = Transform3::upgrade(state.left_projection_matrix);
let right_projection = Transform3::upgrade(state.right_projection_matrix);
if let (Some(pose), true) = (pose_transform(&state.pose, &moment.inverse_stage), data.connected) {
moment.hmd = Some(HmdMoment {
name: data.display_name.clone(),
size: (w, h),
pose: pose,
left: EyeParams {
eye: moment.inverse_stage * left_view.try_inverse().unwrap() * Point3::origin(),
view: left_view * moment.stage,
proj: left_projection,
clip_offset: -0.5,
clip: Rect {
x: 0,
y: 0,
w: data.left_eye_parameters.render_width as u16,
h: h as u16,
},
},
right: EyeParams {
eye: moment.inverse_stage * right_view.try_inverse().unwrap() * Point3::origin(),
view: right_view * moment.stage,
proj: right_projection,
clip_offset: 0.5,
clip: Rect {
x: data.left_eye_parameters.render_width as u16,
y: 0,
w: data.right_eye_parameters.render_width as u16,
h: h as u16,
},
},
});
}
}
let gamepads = self.vrsm.get_gamepads();
{
let mut gpiter = gamepads.iter().filter_map(|gp| {
let gp = gp.borrow();
if gp.state().connected { Some(gp.id()) } else { None }
});
moment.primary = gpiter.next();
moment.secondary = gpiter.next();
moment.tertiary = gpiter.next();
}
for gp in gamepads {
let gp = gp.borrow();
let data = gp.data();
let state = gp.state();
if let (Some(pose), true) = (pose_transform(&state.pose, &moment.inverse_stage), state.connected) {
moment.cont.insert(state.gamepad_id, ControllerMoment {
id: state.gamepad_id,
name: data.name.clone(),
pose: pose,
axes: state.axes.clone(),
buttons: state.buttons.clone(),
});
}
}
moment
}
}
/// Instantaneous information about the VR system retrieved from `VrContext::sync()`.
/// This can be used directly or to update some persistent state.
pub struct VrMoment {
cont: FnvHashMap<u32, ControllerMoment>,
hmd: Option<HmdMoment>,
primary: Option<u32>,
secondary: Option<u32>,
tertiary: Option<u32>,
layer: VRLayer,
/// The stage transform (moves the origin to the center of the room)
pub stage: Similarity3<f32>,
/// The inverse stage transform (moves the center of the room to the origin)
pub inverse_stage: Similarity3<f32>,
/// Has the VR system requested the application to exit
pub exit: bool,
/// Has the VR system requested the application to pause movement (should still sync and submit frames)
pub paused: bool,
/// References to controllers that have connected since the last sync
pub new_controllers: Vec<ControllerRef>,
/// Relative time of this moment (seconds)
pub timestamp: f64,
}
impl VrMoment {
/// Get a controller by reference if such a controller is connected.
pub fn controller(&self, role: ControllerRef) -> Option<&ControllerMoment> {
if let Some(ref i) = role.index(self) { self.cont.get(i) } else { None }
}
/// Iterate over all connected controllers.
pub fn controllers<'a>(&'a self) -> ControllerIter<'a> {
self.cont.values()
}
/// Get instantaneous information about the HMD if it is connected.
pub fn hmd(&self) -> Option<&HmdMoment> {
self.hmd.as_ref()
}
/// Submit the rendered scene. This ends the applicability
/// of this information, since it only applies to the
/// state of the VR system at the last sync.
pub fn submit(self, ctx: &mut VrContext) {
let mut d = ctx.disp.borrow_mut();
d.render_layer(&self.layer);
d.submit_frame();
}
}
/// Iterator over momentary controller information.
pub type ControllerIter<'a> = ::std::collections::hash_map::Values<'a, u32, ControllerMoment>;
/// Used to persistently identify a controller, either by internal
/// id or by role. Note that roles can refer to different physical devices
/// at different times, while the internal id will remain locked
/// to a particular device.
#[derive(Copy, Clone, Debug)]
pub enum ControllerRef {
Primary,
Secondary,
Tertiary,
Indexed(u32),
}
impl ControllerRef {
/// Get the internal id of the controller at a particular moment.
fn index(&self, moment: &VrMoment) -> Option<u32> {
use self::ControllerRef::*;
match *self {
Primary => moment.primary,
Secondary => moment.secondary,
Tertiary => moment.tertiary,
Indexed(i) => Some(i),
}
}
/// Make this reference specific to a device (internal id)
/// rather than dynamically updating (role).
pub fn fixed(&self, moment: &VrMoment) -> ControllerRef {
match self.index(moment) {
Some(i) => ControllerRef::Indexed(i),
None => *self,
}
}
}
/// Create a reference to the primary controller.
pub fn primary() -> ControllerRef {
ControllerRef::Primary
}
/// Create a reference to the secondary controller.
pub fn secondary() -> ControllerRef {
ControllerRef::Secondary
}
/// Create a reference to the tertiary controller.
pub fn tertiary() -> ControllerRef {
ControllerRef::Tertiary
}
/// Instantaneous information about a button.
pub type ButtonMoment = VRGamepadButton;
/// A device that provides instantaneous position and orientation information.
pub trait Trackable {
/// Get the location and orientation of the device.
fn pose(&self) -> Isometry3<f32>;
/// Get the direction of the device's x axis.
fn x_dir(&self) -> Vector3<f32> { self.pose() * Vector3::x() }
/// Get the direction of the device's y axis.
fn y_dir(&self) -> Vector3<f32> { self.pose() * Vector3::y() }
/// Get the direction of the device's z axis.
fn z_dir(&self) -> Vector3<f32> { self.pose() * Vector3::z() }
/// Get the location of the device's origin.
fn origin(&self) -> Point3<f32> { self.pose() * Point3::origin() }
/// Get the direction the device is pointing.
fn pointing(&self) -> Vector3<f32> { -self.z_dir() }
}
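// Design note (illustrative): `pointing()` is defined as the negative Z axis,
// i.e. the common "-Z is forward" convention used by WebVR-style APIs.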
/// Instantaneous information about the HMD. This can be used directly
/// or to update some persistent state.
#[derive(Clone)]
pub struct HmdMoment {
/// The textual name of the HMD
pub name: String,
/// The resolution of the HMD
pub size: (u32, u32),
/// The location and orientation of the HMD
pub pose: Isometry3<f32>,
/// The drawing parameters for the left eye
pub left: EyeParams,
/// The drawing parameters for the right eye
pub right: EyeParams,
}
impl Trackable for HmdMoment {
fn pose(&self) -> Isometry3<f32> {
self.pose
}
}
/// Instantaneous information about a controller. This can be used directly
/// or to update some persistent state.
#[derive(Clone, Debug)]
pub struct ControllerMoment {
id: u32,
/// The textual name of the controller
pub name: String,
/// The location and orientation of the controller
pub pose: Isometry3<f32>,
/// The state of the floating point inputs on the controller
pub axes: Vec<f64>,
/// The state of the button inputs on the controller
pub buttons: Vec<ButtonMoment>,
}
impl ControllerMoment {
/// Create a reference to this particular hardware device (not to its role).
pub fn reference(&self) -> ControllerRef {
ControllerRef::Indexed(self.id)
}
}
impl Trackable for ControllerMoment {
fn pose(&self) -> Isometry3<f32> | {
self.pose
} | identifier_body |
|
mod.rs | ;
//! extern crate signal_hook;
//!
//! use std::io::Error;
//!
//! use signal_hook::consts::signal::*;
//! use signal_hook::iterator::Signals;
//!
//! fn main() -> Result<(), Error> {
//! let mut signals = Signals::new(&[
//! SIGHUP,
//! SIGTERM,
//! SIGINT,
//! SIGQUIT,
//! # SIGUSR1,
//! ])?;
//! # // A trick to terminate the example when run as doc-test. Not part of the real code.
//! # signal_hook::low_level::raise(SIGUSR1).unwrap();
//! 'outer: loop {
//! // Pick up signals that arrived since last time
//! for signal in signals.pending() {
//! match signal as libc::c_int {
//! SIGHUP => {
//! // Reload configuration
//! // Reopen the log file
//! }
//! SIGTERM | SIGINT | SIGQUIT => {
//! break 'outer;
//! },
//! # SIGUSR1 => return Ok(()),
//! _ => unreachable!(),
//! }
//! }
//! // Do some bit of work ‒ something with upper limit on waiting, so we don't block
//! // forever with a SIGTERM already waiting.
//! }
//! println!("Terminating. Bye bye");
//! Ok(())
//! }
//! ```
pub mod backend;
pub mod exfiltrator;
use std::borrow::Borrow;
use std::fmt::{Debug, Formatter, Result as FmtResult};
use std::io::{Error, ErrorKind, Read};
use std::os::unix::net::UnixStream;
use libc::{self, c_int};
pub use self::backend::{Handle, Pending};
use self::backend::{PollResult, RefSignalIterator, SignalDelivery};
use self::exfiltrator::{Exfiltrator, SignalOnly};
/// The main structure of the module, representing interest in some signals.
///
/// Unlike the helpers in other modules, this registers the signals when created and unregisters
/// them on drop. It provides the pending signals during its lifetime, either in batches or as an
/// infinite iterator.
///
/// Most users will want to use it through the [`Signals`] type alias for simplicity.
///
/// # Multiple threads
///
/// Instances of this struct can be [sent][std::marker::Send] to other threads. In a multithreaded
/// application this can be used to dedicate a separate thread for signal handling. In this case
/// you should get a [`Handle`] using the [`handle`][Signals::handle] method before sending the
/// `Signals` instance to a background thread. With the handle you will be able to shut down the
/// background thread later, or to operatively add more signals.
///
/// The controller handle can be shared between as many threads as you like using its
/// [`clone`][Handle::clone] method.
///
/// # Exfiltrators
///
/// The [`SignalOnly`] exfiltrator provides only the signal number. There are further exfiltrators available in
/// the [`exfiltrator`] module. Note that some of them are behind feature flags that need to be
/// enabled.
///
/// # Examples
///
/// ```rust
/// # extern crate signal_hook;
/// #
/// # use std::io::Error;
/// # use std::thread;
/// use signal_hook::consts::signal::*;
/// use signal_hook::iterator::Signals;
///
/// #
/// # fn main() -> Result<(), Error> {
/// let mut signals = Signals::new(&[SIGUSR1, SIGUSR2])?;
/// let handle = signals.handle();
/// let thread = thread::spawn(move || {
/// for signal in &mut signals {
/// match signal {
/// SIGUSR1 => {},
/// SIGUSR2 => {},
/// _ => unreachable!(),
/// }
/// }
/// });
///
/// // Some time later...
/// handle.close();
/// thread.join().unwrap();
/// # Ok(())
/// # }
/// ```
pub struct SignalsInfo<E: Exfiltrator = SignalOnly>(SignalDelivery<UnixStream, E>);
impl<E: Exfiltrator> SignalsInfo<E> {
/// Creates the `Signals` structure.
///
/// This registers all the signals listed. The same restrictions (panics, errors) apply as
/// for the [`Handle::add_signal`] method.
pub fn new<I, S>(signals: I) -> Result<Self, Error>
where
I: IntoIterator<Item = S>,
S: Borrow<c_int>,
E: Default,
{
Self::with_exfiltrator(signals, E::default())
}
/// An advanced constructor with explicit [`Exfiltrator`].
pub fn with_exfiltrator<I, S>(signals: I, exfiltrator: E) -> Result<Self, Error>
where
I: IntoIterator<Item = S>,
S: Borrow<c_int>,
{
let (read, write) = UnixStream::pair()?;
Ok(SignalsInfo(SignalDelivery::with_pipe(
read,
write,
exfiltrator,
signals,
)?))
}
/// Registers another signal to the set watched by this [`Signals`] instance.
///
/// The same restrictions (panics, errors) apply as for the [`Handle::add_signal`]
/// method.
pub fn add_signal(&self, signal: c_int) -> Result<(), Error> {
self.handle().add_signal(signal)
}
/// Returns an iterator of already received signals.
///
/// This returns an iterator over all the signal numbers of the signals received since last
/// time they were read (out of the set registered by this `Signals` instance). Note that they
/// are returned in arbitrary order and a signal instance may be returned only once even if it was
/// received multiple times.
///
/// This method returns immediately (does not block) and may produce an empty iterator if there
/// are no signals ready.
pub fn pending(&mut self) -> Pending<E> {
self.0.pending()
}
/// Block until the stream contains some bytes.
///
/// Returns true if it was possible to read a byte and false otherwise.
fn has_signals(read: &mut UnixStream) -> Result<bool, Error> {
loop {
match read.read(&mut [0u8]) {
Ok(num_read) => break Ok(num_read > 0),
// If we get an EINTR error it is fine to retry reading from the stream.
// Otherwise we should pass on the error to the caller.
Err(error) => {
if error.kind() != ErrorKind::Interrupted {
break Err(error);
}
}
}
}
}
/// Waits for some signals to be available and returns an iterator.
///
/// This is similar to [`pending`][SignalsInfo::pending]. If there are no signals available, it
/// tries to wait for some to arrive. However, due to implementation details, this still can
/// produce an empty iterator.
///
/// This can block for an arbitrarily long time. If the [`Handle::close`] method is used in
/// another thread this method will return immediately.
///
/// Note that the blocking is done in this method, not in the iterator.
pub fn wait(&mut self) -> Pending<E> {
match self.0.poll_pending(&mut Self::has_signals) {
Ok(Some(pending)) => pending,
// Because of the blocking has_signals method the poll_pending method
// only returns None if the instance is closed. But we want to return
// a possibly empty pending object anyway.
Ok(None) => self.pending(),
// Users can't manipulate the internal file descriptors and the way we use them
// shouldn't produce any errors. So it is OK to panic.
Err(error) => panic!("Unexpected error: {}", error),
}
}
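// Illustrative sketch: a blocking loop built on `wait()`, as an alternative to
// the infinite `forever()` iterator described below:
//
//     while !signals.is_closed() {
//         for signal in signals.wait() {
//             // dispatch on `signal` here
//         }
//     }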
/// Is it closed?
///
/// See [`close`][Handle::close].
pub fn is_closed(&self) -> bool {
self.handle().is_closed()
}
/// Get an infinite iterator over arriving signals.
///
/// The iterator's `next()` blocks as necessary to wait for signals to arrive. This is adequate
/// if you want to designate a thread solely to handling signals. If multiple signals come at
/// the same time (between two values produced by the iterator), they will be returned in
/// arbitrary order. Multiple instances of the same signal may be collated.
///
/// This is also the iterator returned by `IntoIterator` implementation on `&mut Signals`.
///
/// This iterator terminates only if explicitly [closed][Handle::close].
///
/// # Examples
///
/// ```rust
/// # extern crate libc;
/// # extern crate signal_hook;
/// #
/// # use std::io::Error;
/// # use std::thread;
/// #
/// use signal_hook::consts::signal::*; | /// let handle = signals.handle();
/// thread::spawn(move || {
/// for signal in signals.forever() {
/// match signal {
/// SIGUSR1 => {},
/// SIGUSR2 => {},
/// _ => unreachable!(),
/// }
/// }
/// });
/// handle.close();
/// # Ok(())
/// # }
/// ```
pub | /// use signal_hook::iterator::Signals;
///
/// # fn main() -> Result<(), Error> {
/// let mut signals = Signals::new(&[SIGUSR1, SIGUSR2])?; | random_line_split |
mod.rs | ;
//! extern crate signal_hook;
//!
//! use std::io::Error;
//!
//! use signal_hook::consts::signal::*;
//! use signal_hook::iterator::Signals;
//!
//! fn main() -> Result<(), Error> {
//! let mut signals = Signals::new(&[
//! SIGHUP,
//! SIGTERM,
//! SIGINT,
//! SIGQUIT,
//! # SIGUSR1,
//! ])?;
//! # // A trick to terminate the example when run as doc-test. Not part of the real code.
//! # signal_hook::low_level::raise(SIGUSR1).unwrap();
//! 'outer: loop {
//! // Pick up signals that arrived since last time
//! for signal in signals.pending() {
//! match signal as libc::c_int {
//! SIGHUP => {
//! // Reload configuration
//! // Reopen the log file
//! }
//! SIGTERM | SIGINT | SIGQUIT => {
//! break 'outer;
//! },
//! # SIGUSR1 => return Ok(()),
//! _ => unreachable!(),
//! }
//! }
//! // Do some bit of work ‒ something with upper limit on waiting, so we don't block
//! // forever with a SIGTERM already waiting.
//! }
//! println!("Terminating. Bye bye");
//! Ok(())
//! }
//! ```
pub mod backend;
pub mod exfiltrator;
use std::borrow::Borrow;
use std::fmt::{Debug, Formatter, Result as FmtResult};
use std::io::{Error, ErrorKind, Read};
use std::os::unix::net::UnixStream;
use libc::{self, c_int};
pub use self::backend::{Handle, Pending};
use self::backend::{PollResult, RefSignalIterator, SignalDelivery};
use self::exfiltrator::{Exfiltrator, SignalOnly};
/// The main structure of the module, representing interest in some signals.
///
/// Unlike the helpers in other modules, this registers the signals when created and unregisters
/// them on drop. It provides the pending signals during its lifetime, either in batches or as an
/// infinite iterator.
///
/// Most users will want to use it through the [`Signals`] type alias for simplicity.
///
/// # Multiple threads
///
/// Instances of this struct can be [sent][std::marker::Send] to other threads. In a multithreaded
/// application this can be used to dedicate a separate thread for signal handling. In this case
/// you should get a [`Handle`] using the [`handle`][Signals::handle] method before sending the
/// `Signals` instance to a background thread. With the handle you will be able to shut down the
/// background thread later, or to operatively add more signals.
///
/// The controller handle can be shared between as many threads as you like using its
/// [`clone`][Handle::clone] method.
///
/// # Exfiltrators
///
/// The [`SignalOnly`] provides only the signal number. There are further exfiltrators available in
/// the [`exfiltrator`] module. Note that some of them are behind feature flags that need to be
/// enabled.
///
/// # Examples
///
/// ```rust
/// # extern crate signal_hook;
/// #
/// # use std::io::Error;
/// # use std::thread;
/// use signal_hook::consts::signal::*;
/// use signal_hook::iterator::Signals;
///
/// #
/// # fn main() -> Result<(), Error> {
/// let mut signals = Signals::new(&[SIGUSR1, SIGUSR2])?;
/// let handle = signals.handle();
/// let thread = thread::spawn(move || {
/// for signal in &mut signals {
/// match signal {
/// SIGUSR1 => {},
/// SIGUSR2 => {},
/// _ => unreachable!(),
/// }
/// }
/// });
///
/// // Some time later...
/// handle.close();
/// thread.join().unwrap();
/// # Ok(())
/// # }
/// ```
pub struct SignalsInfo<E: Exfiltrator = SignalOnly>(SignalDelivery<UnixStream, E>);
impl<E: Exfiltrator> SignalsInfo<E> {
/// Creates the `Signals` structure.
///
/// This registers all the signals listed. The same restrictions (panics, errors) apply as
/// for the [`Handle::add_signal`] method.
pub fn new<I, S>(signals: I) -> Result<Self, Error>
where
I: IntoIterator<Item = S>,
S: Borrow<c_int>,
E: Default,
{
Self::with_exfiltrator(signals, E::default())
}
/// An advanced constructor with explicit [`Exfiltrator`].
pub fn with_exfiltrator<I, S>(signals: I, exfiltrator: E) -> Result<Self, Error>
where
I: IntoIterator<Item = S>,
S: Borrow<c_int>,
{
let (read, write) = UnixStream::pair()?;
Ok(SignalsInfo(SignalDelivery::with_pipe(
read,
write,
exfiltrator,
signals,
)?))
}
/// Registers another signal to the set watched by this [`Signals`] instance.
///
/// The same restrictions (panics, errors) apply as for the [`Handle::add_signal`]
/// method.
pub fn add_signal(&self, signal: c_int) -> Result<(), Error> {
self.handle().add_signal(signal)
}
/// Returns an iterator of already received signals.
///
/// This returns an iterator over all the signal numbers of the signals received since last
/// time they were read (out of the set registered by this `Signals` instance). Note that they
/// are returned in arbitrary order and a signal instance may be returned only once even if it was
/// received multiple times.
///
/// This method returns immediately (does not block) and may produce an empty iterator if there
/// are no signals ready.
pub fn pending(&mut self) -> Pending<E> {
self.0.pending()
}
/// Block until the stream contains some bytes.
///
/// Returns true if it was possible to read a byte and false otherwise.
fn has_signals(read: &mut UnixStream) -> Result<bool, Error> {
| /// Waits for some signals to be available and returns an iterator.
///
/// This is similar to [`pending`][SignalsInfo::pending]. If there are no signals available, it
/// tries to wait for some to arrive. However, due to implementation details, this still can
/// produce an empty iterator.
///
/// This can block for an arbitrarily long time. If the [`Handle::close`] method is used in
/// another thread, this method will return immediately.
///
/// Note that the blocking is done in this method, not in the iterator.
pub fn wait(&mut self) -> Pending<E> {
match self.0.poll_pending(&mut Self::has_signals) {
Ok(Some(pending)) => pending,
// Because of the blocking has_signals method the poll_pending method
// only returns None if the instance is closed. But we want to return
// a possibly empty pending object anyway.
Ok(None) => self.pending(),
// Users can't manipulate the internal file descriptors and the way we use them
// shouldn't produce any errors. So it is OK to panic.
Err(error) => panic!("Unexpected error: {}", error),
}
}
/// Is it closed?
///
/// See [`close`][Handle::close].
pub fn is_closed(&self) -> bool {
self.handle().is_closed()
}
/// Get an infinite iterator over arriving signals.
///
/// The iterator's `next()` blocks as necessary to wait for signals to arrive. This is adequate
/// if you want to designate a thread solely to handling signals. If multiple signals come at
/// the same time (between two values produced by the iterator), they will be returned in
/// arbitrary order. Multiple instances of the same signal may be collated.
///
/// This is also the iterator returned by the `IntoIterator` implementation on `&mut Signals`.
///
/// This iterator terminates only if explicitly [closed][Handle::close].
///
/// # Examples
///
/// ```rust
/// # extern crate libc;
/// # extern crate signal_hook;
/// #
/// # use std::io::Error;
/// # use std::thread;
/// #
/// use signal_hook::consts::signal::*;
/// use signal_hook::iterator::Signals;
///
/// # fn main() -> Result<(), Error> {
/// let mut signals = Signals::new(&[SIGUSR1, SIGUSR2])?;
/// let handle = signals.handle();
/// thread::spawn(move || {
/// for signal in signals.forever() {
/// match signal {
/// SIGUSR1 => {},
/// SIGUSR2 => {},
/// _ => unreachable!(),
/// }
/// }
/// });
/// handle.close();
/// # Ok(())
/// # }
/// ```
pub | loop {
match read.read(&mut [0u8]) {
Ok(num_read) => break Ok(num_read > 0),
// If we get an EINTR error it is fine to retry reading from the stream.
// Otherwise we should pass on the error to the caller.
Err(error) => {
if error.kind() != ErrorKind::Interrupted {
break Err(error);
}
}
}
}
}
| identifier_body |
mod.rs | signal_hook::low_level::raise(SIGUSR1).unwrap();
//! 'outer: loop {
//! // Pick up signals that arrived since last time
//! for signal in signals.pending() {
//! match signal as libc::c_int {
//! SIGHUP => {
//! // Reload configuration
//! // Reopen the log file
//! }
//! SIGTERM | SIGINT | SIGQUIT => {
//! break 'outer;
//! },
//! # SIGUSR1 => return Ok(()),
//! _ => unreachable!(),
//! }
//! }
//! // Do some bit of work ‒ something with upper limit on waiting, so we don't block
//! // forever with a SIGTERM already waiting.
//! }
//! println!("Terminating. Bye bye");
//! Ok(())
//! }
//! ```
pub mod backend;
pub mod exfiltrator;
use std::borrow::Borrow;
use std::fmt::{Debug, Formatter, Result as FmtResult};
use std::io::{Error, ErrorKind, Read};
use std::os::unix::net::UnixStream;
use libc::{self, c_int};
pub use self::backend::{Handle, Pending};
use self::backend::{PollResult, RefSignalIterator, SignalDelivery};
use self::exfiltrator::{Exfiltrator, SignalOnly};
/// The main structure of the module, representing interest in some signals.
///
/// Unlike the helpers in other modules, this registers the signals when created and unregisters
/// them on drop. It provides the pending signals during its lifetime, either in batches or as an
/// infinite iterator.
///
/// Most users will want to use it through the [`Signals`] type alias for simplicity.
///
/// # Multiple threads
///
/// Instances of this struct can be [sent][std::marker::Send] to other threads. In a multithreaded
/// application this can be used to dedicate a separate thread for signal handling. In this case
/// you should get a [`Handle`] using the [`handle`][Signals::handle] method before sending the
/// `Signals` instance to a background thread. With the handle you will be able to shut down the
/// background thread later, or to operatively add more signals.
///
/// The controller handle can be shared between as many threads as you like using its
/// [`clone`][Handle::clone] method.
///
/// # Exfiltrators
///
/// The [`SignalOnly`] provides only the signal number. There are further exfiltrators available in
/// the [`exfiltrator`] module. Note that some of them are behind feature flags that need to be
/// enabled.
///
/// # Examples
///
/// ```rust
/// # extern crate signal_hook;
/// #
/// # use std::io::Error;
/// # use std::thread;
/// use signal_hook::consts::signal::*;
/// use signal_hook::iterator::Signals;
///
/// #
/// # fn main() -> Result<(), Error> {
/// let mut signals = Signals::new(&[SIGUSR1, SIGUSR2])?;
/// let handle = signals.handle();
/// let thread = thread::spawn(move || {
/// for signal in &mut signals {
/// match signal {
/// SIGUSR1 => {},
/// SIGUSR2 => {},
/// _ => unreachable!(),
/// }
/// }
/// });
///
/// // Some time later...
/// handle.close();
/// thread.join().unwrap();
/// # Ok(())
/// # }
/// ```
pub struct SignalsInfo<E: Exfiltrator = SignalOnly>(SignalDelivery<UnixStream, E>);
impl<E: Exfiltrator> SignalsInfo<E> {
/// Creates the `Signals` structure.
///
/// This registers all the signals listed. The same restrictions (panics, errors) apply as
/// for the [`Handle::add_signal`] method.
pub fn new<I, S>(signals: I) -> Result<Self, Error>
where
I: IntoIterator<Item = S>,
S: Borrow<c_int>,
E: Default,
{
Self::with_exfiltrator(signals, E::default())
}
/// An advanced constructor with explicit [`Exfiltrator`].
pub fn with_exfiltrator<I, S>(signals: I, exfiltrator: E) -> Result<Self, Error>
where
I: IntoIterator<Item = S>,
S: Borrow<c_int>,
{
let (read, write) = UnixStream::pair()?;
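// The socket pair is the classic self-pipe pattern: the write end goes to the
// signal-handling backend (a write is async-signal-safe), while the read end is
// what `wait()` and the iterators block on until a byte announces that a signal
// has arrived.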
Ok(SignalsInfo(SignalDelivery::with_pipe(
read,
write,
exfiltrator,
signals,
)?))
}
/// Registers another signal to the set watched by this [`Signals`] instance.
///
/// The same restrictions (panics, errors) apply as for the [`Handle::add_signal`]
/// method.
pub fn add_signal(&self, signal: c_int) -> Result<(), Error> {
self.handle().add_signal(signal)
}
/// Returns an iterator of already received signals.
///
/// This returns an iterator over all the signal numbers of the signals received since last
/// time they were read (out of the set registered by this `Signals` instance). Note that they
/// are returned in arbitrary order and a signal instance may be returned only once even if it was
/// received multiple times.
///
/// This method returns immediately (does not block) and may produce an empty iterator if there
/// are no signals ready.
pub fn pending(&mut self) -> Pending<E> {
self.0.pending()
}
/// Block until the stream contains some bytes.
///
/// Returns true if it was possible to read a byte and false otherwise.
fn has_signals(read: &mut UnixStream) -> Result<bool, Error> {
loop {
match read.read(&mut [0u8]) {
Ok(num_read) => break Ok(num_read > 0),
// If we get an EINTR error it is fine to retry reading from the stream.
// Otherwise we should pass on the error to the caller.
Err(error) => {
if error.kind() != ErrorKind::Interrupted {
break Err(error);
}
}
}
}
}
/// Waits for some signals to be available and returns an iterator.
///
/// This is similar to [`pending`][SignalsInfo::pending]. If there are no signals available, it
/// tries to wait for some to arrive. However, due to implementation details, this still can
/// produce an empty iterator.
///
/// This can block for an arbitrarily long time. If the [`Handle::close`] method is used in
/// another thread, this method will return immediately.
///
/// Note that the blocking is done in this method, not in the iterator.
pub fn wait(&mut self) -> Pending<E> {
match self.0.poll_pending(&mut Self::has_signals) {
Ok(Some(pending)) => pending,
// Because of the blocking has_signals method the poll_pending method
// only returns None if the instance is closed. But we want to return
// a possibly empty pending object anyway.
Ok(None) => self.pending(),
// Users can't manipulate the internal file descriptors and the way we use them
// shouldn't produce any errors. So it is OK to panic.
Err(error) => panic!("Unexpected error: {}", error),
}
}
/// Is it closed?
///
/// See [`close`][Handle::close].
pub fn is_closed(&self) -> bool {
self.handle().is_closed()
}
/// Get an infinite iterator over arriving signals.
///
/// The iterator's `next()` blocks as necessary to wait for signals to arrive. This is adequate
/// if you want to designate a thread solely to handling signals. If multiple signals come at
/// the same time (between two values produced by the iterator), they will be returned in
/// arbitrary order. Multiple instances of the same signal may be collated.
///
/// This is also the iterator returned by the `IntoIterator` implementation on `&mut Signals`.
///
/// This iterator terminates only if explicitly [closed][Handle::close].
///
/// # Examples
///
/// ```rust
/// # extern crate libc;
/// # extern crate signal_hook;
/// #
/// # use std::io::Error;
/// # use std::thread;
/// #
/// use signal_hook::consts::signal::*;
/// use signal_hook::iterator::Signals;
///
/// # fn main() -> Result<(), Error> {
/// let mut signals = Signals::new(&[SIGUSR1, SIGUSR2])?;
/// let handle = signals.handle();
/// thread::spawn(move || {
/// for signal in signals.forever() {
/// match signal {
/// SIGUSR1 => {},
/// SIGUSR2 => {},
/// _ => unreachable!(),
/// }
/// }
/// });
/// handle.close();
/// # Ok(())
/// # }
/// ```
pub fn forever(&mut self) -> Forever<E> {
Forever(RefSignalIterator::new(&mut self.0))
}
/// Get a shareable handle to a [`Handle`] for this instance.
///
/// This can be used to add further signals or close the [`Signals`] instance.
pub fn handle(&self) -> Handle {
self.0.handle()
}
}
impl<E> Debug for SignalsInfo<E>
where
E: Debug + Exfiltrator,
E::Storage: Debug,
{
fn fmt( | &se | identifier_name |
|
ssd_train.py | 20
"""
import argparse
import datetime
import logging
import os
import time
from pathlib import Path
import torch
from draugr.numpy_utilities import SplitEnum
from draugr.torch_utilities import (
TorchCacheSession,
TorchEvalSession,
TorchTrainSession,
WarmupMultiStepLR,
)
from torch.nn import Module
from torch.utils.data import DataLoader
from warg import NOD, ensure_existence
from warg.arguments import str2bool
from neodroidvision import PROJECT_APP_PATH
from neodroidvision.detection.single_stage.ssd import (
MultiBoxLoss,
SingleShotDetectionNms,
do_ssd_evaluation,
object_detection_data_loaders,
)
from neodroidvision.utilities import (
CheckPointer,
MetricLogger,
global_distribution_rank,
reduce_loss_dict,
set_benchmark_device_dist,
setup_distributed_logger,
write_metrics_recursive,
)
def inner_train_ssd(
*,
data_root: Path,
cfg: NOD,
model: Module,
data_loader: DataLoader,
optimiser: torch.optim.Optimizer,
scheduler: WarmupMultiStepLR,
check_pointer: callable,
device: callable,
arguments: callable,
kws: NOD,
) -> Module:
"""
:param data_root:
:type data_root:
:param cfg:
:type cfg:
:param model:
:type model:
:param data_loader:
:type data_loader:
:param optimiser:
:type optimiser:
:param scheduler:
:type scheduler:
:param check_pointer:
:type check_pointer:
:param device:
:type device:
:param arguments:
:type arguments:
:param kws:
:type kws:
:return:
:rtype:"""
logger = logging.getLogger("SSD.trainer")
logger.info("Start training ...")
meters = MetricLogger()
with TorchTrainSession(model):
save_to_disk = global_distribution_rank() == 0
if kws.use_tensorboard and save_to_disk:
import tensorboardX
writer = tensorboardX.SummaryWriter(
log_dir=str(PROJECT_APP_PATH.user_data / "results" / "tf_logs")
)
else:
writer = None
max_iter = len(data_loader)
start_iter = arguments["iteration"]
start_training_time = time.time()
end = time.time()
for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
arguments["iteration"] = iteration
images = images.to(device)
targets = targets.to(device)
loss_instance = MultiBoxLoss(neg_pos_ratio=cfg.model.neg_pos_ratio)
cls_logits, bbox_pred = model(images)
reg_loss, cls_loss = loss_instance(
cls_logits, bbox_pred, targets.labels, targets.boxes
)
loss_dict = dict(reg_loss=reg_loss, cls_loss=cls_loss)
loss = sum(loss for loss in loss_dict.values())
loss_dict_reduced = reduce_loss_dict(
loss_dict
) # reduce losses over all GPUs for logging purposes
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
meters.update(total_loss=losses_reduced, **loss_dict_reduced)
optimiser.zero_grad()
loss.backward()
optimiser.step()
scheduler.step()
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time)
if iteration % kws.log_step == 0:
eta_seconds = meters.time.global_avg * (max_iter - iteration)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
logger.info(
meters.delimiter.join(
[
f"iter: {iteration:06d}",
f"lr: {optimiser.param_groups[0]['lr']:.5f}",
f"{str(meters)}",
f"eta: {eta_string}",
f"mem: {round(torch.cuda.max_memory_allocated() / 1024.0 / 1024.0)}M",
]
)
)
if writer:
global_step = iteration
writer.add_scalar(
"losses/total_loss", losses_reduced, global_step=global_step
)
for loss_name, loss_item in loss_dict_reduced.items():
writer.add_scalar(
f"losses/{loss_name}", loss_item, global_step=global_step
) | writer.add_scalar(
"lr", optimiser.param_groups[0]["lr"], global_step=global_step
)
if iteration % kws.save_step == 0:
check_pointer.save(f"model_{iteration:06d}", **arguments)
if (
kws.eval_step > 0
and iteration % kws.eval_step == 0
and not iteration == max_iter
):
with TorchEvalSession(model):
eval_results = do_ssd_evaluation(
data_root,
cfg,
model,
distributed=kws.distributed,
iteration=iteration,
)
if global_distribution_rank() == 0 and writer:
for eval_result, dataset in zip(
eval_results, cfg.datasets.test
):
write_metrics_recursive(
eval_result["metrics"],
"metrics/" + dataset,
writer,
iteration,
)
check_pointer.save("model_final", **arguments)
total_training_time = int(
time.time() - start_training_time
) # compute training time
logger.info(
f"Total training time: {datetime.timedelta(seconds = total_training_time)} ("
f"{total_training_time / max_iter:.4f} s / it)"
)
return model
def train_ssd(data_root: Path, cfg, solver_cfg: NOD, kws: NOD) -> Module:
"""
Args:
data_root:
cfg:
solver_cfg:
kws:
Returns:
"""
logger = logging.getLogger("SSD.trainer")
model = SingleShotDetectionNms(cfg)
device = torch.device(cfg.model.device)
if kws.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[kws.local_rank], output_device=kws.local_rank
)
lr = solver_cfg.lr * kws.num_gpus # scale by num gpus
lr = solver_cfg.base_lr if lr is None else lr
optimiser = torch.optim.SGD(
model.parameters(),
lr=lr,
momentum=solver_cfg.momentum,
weight_decay=solver_cfg.weight_decay,
)
milestones = [step // kws.num_gpus for step in solver_cfg.lr_steps]
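# Linear-scaling heuristic: the learning rate above grows with the number of
# GPUs while the decay milestones shrink by the same factor, because a larger
# world size covers the dataset in proportionally fewer iterations. As an
# illustration only: base lr 0.01 with 4 GPUs gives lr 0.04, and lr_steps of
# [80000, 100000] become milestones [20000, 25000].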
scheduler = WarmupMultiStepLR(
optimiser=optimiser,
milestones=solver_cfg.lr_steps if milestones is None else milestones,
gamma=solver_cfg.gamma,
warmup_factor=solver_cfg.warmup_factor,
warmup_iters=solver_cfg.warmup_iters,
)
arguments = {"iteration": 0}
save_to_disk = global_distribution_rank() == 0
checkpointer = CheckPointer(
model, optimiser, scheduler, cfg.output_dir, save_to_disk, logger
)
arguments.update(checkpointer.load())
model.post_init()
model.to(device)
model = inner_train_ssd(
data_root=data_root,
cfg=cfg,
model=model,
data_loader=object_detection_data_loaders(
data_root=data_root,
cfg=cfg,
split=SplitEnum.training,
distributed=kws.distributed,
max_iter=solver_cfg.max_iter // kws.num_gpus,
start_iter=arguments["iteration"],
),
optimiser=optimiser,
scheduler=scheduler,
check_pointer=checkpointer,
device=device,
arguments=arguments,
kws=kws,
)
return model
def main():
"""description"""
from configs.mobilenet_v2_ssd320_voc0712 import base_cfg
# from configs.efficient_net_b3_ssd300_voc0712 import base_cfg
# from configs.vgg_ssd300_voc0712 import base_cfg
parser = argparse.ArgumentParser(
description="Single Shot MultiBox Detector Training With PyTorch"
)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument(
"--log_step", default=10, type=int, help="Print logs every log_step"
)
parser.add_argument(
"--save_step", default=2500, type=int, help="Save checkpoint every save_step"
)
parser.add_argument(
"--eval_step",
default=2500,
type=int,
help="Evaluate dataset every eval_step, disabled when eval_step < 0",
)
parser.add_argument("--use_tensorboard", default=True, type=str2bool)
parser.add_argument(
"--skip-test",
dest="skip_test",
help="Do not test the final model",
action="store_true",
)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
args.distributed = num_gpus > 1
args.num_gpus = num_gpus
set_benchmark_device_dist(args.distributed, args.local_rank)
logger = setup_distributed_logger(
"SSD",
global_distribution_rank(),
ensure_existence(PROJECT_APP_PATH | random_line_split |
|
ssd_train.py | 20
"""
import argparse
import datetime
import logging
import os
import time
from pathlib import Path
import torch
from draugr.numpy_utilities import SplitEnum
from draugr.torch_utilities import (
TorchCacheSession,
TorchEvalSession,
TorchTrainSession,
WarmupMultiStepLR,
)
from torch.nn import Module
from torch.utils.data import DataLoader
from warg import NOD, ensure_existence
from warg.arguments import str2bool
from neodroidvision import PROJECT_APP_PATH
from neodroidvision.detection.single_stage.ssd import (
MultiBoxLoss,
SingleShotDetectionNms,
do_ssd_evaluation,
object_detection_data_loaders,
)
from neodroidvision.utilities import (
CheckPointer,
MetricLogger,
global_distribution_rank,
reduce_loss_dict,
set_benchmark_device_dist,
setup_distributed_logger,
write_metrics_recursive,
)
def inner_train_ssd(
*,
data_root: Path,
cfg: NOD,
model: Module,
data_loader: DataLoader,
optimiser: torch.optim.Optimizer,
scheduler: WarmupMultiStepLR,
check_pointer: callable,
device: callable,
arguments: callable,
kws: NOD,
) -> Module:
"""
:param data_root:
:type data_root:
:param cfg:
:type cfg:
:param model:
:type model:
:param data_loader:
:type data_loader:
:param optimiser:
:type optimiser:
:param scheduler:
:type scheduler:
:param check_pointer:
:type check_pointer:
:param device:
:type device:
:param arguments:
:type arguments:
:param kws:
:type kws:
:return:
:rtype:"""
logger = logging.getLogger("SSD.trainer")
logger.info("Start training ...")
meters = MetricLogger()
with TorchTrainSession(model):
save_to_disk = global_distribution_rank() == 0
if kws.use_tensorboard and save_to_disk:
import tensorboardX
writer = tensorboardX.SummaryWriter(
log_dir=str(PROJECT_APP_PATH.user_data / "results" / "tf_logs")
)
else:
writer = None
max_iter = len(data_loader)
start_iter = arguments["iteration"]
start_training_time = time.time()
end = time.time()
for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
arguments["iteration"] = iteration
images = images.to(device)
targets = targets.to(device)
loss_instance = MultiBoxLoss(neg_pos_ratio=cfg.model.neg_pos_ratio)
cls_logits, bbox_pred = model(images)
reg_loss, cls_loss = loss_instance(
cls_logits, bbox_pred, targets.labels, targets.boxes
)
loss_dict = dict(reg_loss=reg_loss, cls_loss=cls_loss)
loss = sum(loss for loss in loss_dict.values())
loss_dict_reduced = reduce_loss_dict(
loss_dict
) # reduce losses over all GPUs for logging purposes
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
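# The reduced values are used purely for logging/metrics below; the backward
# pass still runs on the local, un-reduced loss, and gradient averaging across
# processes is handled by the DistributedDataParallel wrapper when distributed
# training is enabled.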
meters.update(total_loss=losses_reduced, **loss_dict_reduced)
optimiser.zero_grad()
loss.backward()
optimiser.step()
scheduler.step()
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time)
if iteration % kws.log_step == 0:
eta_seconds = meters.time.global_avg * (max_iter - iteration)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
logger.info(
meters.delimiter.join(
[
f"iter: {iteration:06d}",
f"lr: {optimiser.param_groups[0]['lr']:.5f}",
f"{str(meters)}",
f"eta: {eta_string}",
f"mem: {round(torch.cuda.max_memory_allocated() / 1024.0 / 1024.0)}M",
]
)
)
if writer:
global_step = iteration
writer.add_scalar(
"losses/total_loss", losses_reduced, global_step=global_step
)
for loss_name, loss_item in loss_dict_reduced.items():
writer.add_scalar(
f"losses/{loss_name}", loss_item, global_step=global_step
)
writer.add_scalar(
"lr", optimiser.param_groups[0]["lr"], global_step=global_step
)
if iteration % kws.save_step == 0:
check_pointer.save(f"model_{iteration:06d}", **arguments)
if (
kws.eval_step > 0
and iteration % kws.eval_step == 0
and not iteration == max_iter
):
with TorchEvalSession(model):
eval_results = do_ssd_evaluation(
data_root,
cfg,
model,
distributed=kws.distributed,
iteration=iteration,
)
if global_distribution_rank() == 0 and writer:
for eval_result, dataset in zip(
eval_results, cfg.datasets.test
):
write_metrics_recursive(
eval_result["metrics"],
"metrics/" + dataset,
writer,
iteration,
)
check_pointer.save("model_final", **arguments)
total_training_time = int(
time.time() - start_training_time
) # compute training time
logger.info(
f"Total training time: {datetime.timedelta(seconds = total_training_time)} ("
f"{total_training_time / max_iter:.4f} s / it)"
)
return model
def train_ssd(data_root: Path, cfg, solver_cfg: NOD, kws: NOD) -> Module:
"""
Args:
data_root:
cfg:
solver_cfg:
kws:
Returns:
"""
logger = logging.getLogger("SSD.trainer")
model = SingleShotDetectionNms(cfg)
device = torch.device(cfg.model.device)
if kws.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[kws.local_rank], output_device=kws.local_rank
)
lr = solver_cfg.lr * kws.num_gpus # scale by num gpus
lr = solver_cfg.base_lr if lr is None else lr
optimiser = torch.optim.SGD(
model.parameters(),
lr=lr,
momentum=solver_cfg.momentum,
weight_decay=solver_cfg.weight_decay,
)
milestones = [step // kws.num_gpus for step in solver_cfg.lr_steps]
scheduler = WarmupMultiStepLR(
optimiser=optimiser,
milestones=solver_cfg.lr_steps if milestones is None else milestones,
gamma=solver_cfg.gamma,
warmup_factor=solver_cfg.warmup_factor,
warmup_iters=solver_cfg.warmup_iters,
)
arguments = {"iteration": 0}
save_to_disk = global_distribution_rank() == 0
checkpointer = CheckPointer(
model, optimiser, scheduler, cfg.output_dir, save_to_disk, logger
)
arguments.update(checkpointer.load())
model.post_init()
model.to(device)
model = inner_train_ssd(
data_root=data_root,
cfg=cfg,
model=model,
data_loader=object_detection_data_loaders(
data_root=data_root,
cfg=cfg,
split=SplitEnum.training,
distributed=kws.distributed,
max_iter=solver_cfg.max_iter // kws.num_gpus,
start_iter=arguments["iteration"],
),
optimiser=optimiser,
scheduler=scheduler,
check_pointer=checkpointer,
device=device,
arguments=arguments,
kws=kws,
)
return model
def main():
| help="Evaluate dataset every eval_step, disabled when eval_step < 0",
)
parser.add_argument("--use_tensorboard", default=True, type=str2bool)
parser.add_argument(
"--skip-test",
dest="skip_test",
help="Do not test the final model",
action="store_true",
)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
args.distributed = num_gpus > 1
args.num_gpus = num_gpus
set_benchmark_device_dist(args.distributed, args.local_rank)
logger = setup_distributed_logger(
"SSD",
global_distribution_rank(),
ensure_existence(PROJECT_APP_PATH | """description"""
from configs.mobilenet_v2_ssd320_voc0712 import base_cfg
# from configs.efficient_net_b3_ssd300_voc0712 import base_cfg
# from configs.vgg_ssd300_voc0712 import base_cfg
parser = argparse.ArgumentParser(
description="Single Shot MultiBox Detector Training With PyTorch"
)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument(
"--log_step", default=10, type=int, help="Print logs every log_step"
)
parser.add_argument(
"--save_step", default=2500, type=int, help="Save checkpoint every save_step"
)
parser.add_argument(
"--eval_step",
default=2500,
type=int, | identifier_body |
ssd_train.py | 20
"""
import argparse
import datetime
import logging
import os
import time
from pathlib import Path
import torch
from draugr.numpy_utilities import SplitEnum
from draugr.torch_utilities import (
TorchCacheSession,
TorchEvalSession,
TorchTrainSession,
WarmupMultiStepLR,
)
from torch.nn import Module
from torch.utils.data import DataLoader
from warg import NOD, ensure_existence
from warg.arguments import str2bool
from neodroidvision import PROJECT_APP_PATH
from neodroidvision.detection.single_stage.ssd import (
MultiBoxLoss,
SingleShotDetectionNms,
do_ssd_evaluation,
object_detection_data_loaders,
)
from neodroidvision.utilities import (
CheckPointer,
MetricLogger,
global_distribution_rank,
reduce_loss_dict,
set_benchmark_device_dist,
setup_distributed_logger,
write_metrics_recursive,
)
def inner_train_ssd(
*,
data_root: Path,
cfg: NOD,
model: Module,
data_loader: DataLoader,
optimiser: torch.optim.Optimizer,
scheduler: WarmupMultiStepLR,
check_pointer: callable,
device: callable,
arguments: callable,
kws: NOD,
) -> Module:
"""
:param data_root:
:type data_root:
:param cfg:
:type cfg:
:param model:
:type model:
:param data_loader:
:type data_loader:
:param optimiser:
:type optimiser:
:param scheduler:
:type scheduler:
:param check_pointer:
:type check_pointer:
:param device:
:type device:
:param arguments:
:type arguments:
:param kws:
:type kws:
:return:
:rtype:"""
logger = logging.getLogger("SSD.trainer")
logger.info("Start training ...")
meters = MetricLogger()
with TorchTrainSession(model):
save_to_disk = global_distribution_rank() == 0
if kws.use_tensorboard and save_to_disk:
import tensorboardX
writer = tensorboardX.SummaryWriter(
log_dir=str(PROJECT_APP_PATH.user_data / "results" / "tf_logs")
)
else:
writer = None
max_iter = len(data_loader)
start_iter = arguments["iteration"]
start_training_time = time.time()
end = time.time()
for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
arguments["iteration"] = iteration
images = images.to(device)
targets = targets.to(device)
loss_instance = MultiBoxLoss(neg_pos_ratio=cfg.model.neg_pos_ratio)
cls_logits, bbox_pred = model(images)
reg_loss, cls_loss = loss_instance(
cls_logits, bbox_pred, targets.labels, targets.boxes
)
loss_dict = dict(reg_loss=reg_loss, cls_loss=cls_loss)
loss = sum(loss for loss in loss_dict.values())
loss_dict_reduced = reduce_loss_dict(
loss_dict
) # reduce losses over all GPUs for logging purposes
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
meters.update(total_loss=losses_reduced, **loss_dict_reduced)
optimiser.zero_grad()
loss.backward()
optimiser.step()
scheduler.step()
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time)
if iteration % kws.log_step == 0:
eta_seconds = meters.time.global_avg * (max_iter - iteration)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
logger.info(
meters.delimiter.join(
[
f"iter: {iteration:06d}",
f"lr: {optimiser.param_groups[0]['lr']:.5f}",
f"{str(meters)}",
f"eta: {eta_string}",
f"mem: {round(torch.cuda.max_memory_allocated() / 1024.0 / 1024.0)}M",
]
)
)
if writer:
global_step = iteration
writer.add_scalar(
"losses/total_loss", losses_reduced, global_step=global_step
)
for loss_name, loss_item in loss_dict_reduced.items():
writer.add_scalar(
f"losses/{loss_name}", loss_item, global_step=global_step
)
writer.add_scalar(
"lr", optimiser.param_groups[0]["lr"], global_step=global_step
)
if iteration % kws.save_step == 0:
check_pointer.save(f"model_{iteration:06d}", **arguments)
if (
kws.eval_step > 0
and iteration % kws.eval_step == 0
and not iteration == max_iter
):
with TorchEvalSession(model):
eval_results = do_ssd_evaluation(
data_root,
cfg,
model,
distributed=kws.distributed,
iteration=iteration,
)
if global_distribution_rank() == 0 and writer:
for eval_result, dataset in zip(
eval_results, cfg.datasets.test
):
write_metrics_recursive(
eval_result["metrics"],
"metrics/" + dataset,
writer,
iteration,
)
check_pointer.save("model_final", **arguments)
total_training_time = int(
time.time() - start_training_time
) # compute training time
logger.info(
f"Total training time: {datetime.timedelta(seconds = total_training_time)} ("
f"{total_training_time / max_iter:.4f} s / it)"
)
return model
def train_ssd(data_root: Path, cfg, solver_cfg: NOD, kws: NOD) -> Module:
"""
Args:
data_root:
cfg:
solver_cfg:
kws:
Returns:
"""
logger = logging.getLogger("SSD.trainer")
model = SingleShotDetectionNms(cfg)
device = torch.device(cfg.model.device)
if kws.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[kws.local_rank], output_device=kws.local_rank
)
lr = solver_cfg.lr * kws.num_gpus # scale by num gpus
lr = solver_cfg.base_lr if lr is None else lr
optimiser = torch.optim.SGD(
model.parameters(),
lr=lr,
momentum=solver_cfg.momentum,
weight_decay=solver_cfg.weight_decay,
)
milestones = [step // kws.num_gpus for step in solver_cfg.lr_steps]
scheduler = WarmupMultiStepLR(
optimiser=optimiser,
milestones=solver_cfg.lr_steps if milestones is None else milestones,
gamma=solver_cfg.gamma,
warmup_factor=solver_cfg.warmup_factor,
warmup_iters=solver_cfg.warmup_iters,
)
arguments = {"iteration": 0}
save_to_disk = global_distribution_rank() == 0
checkpointer = CheckPointer(
model, optimiser, scheduler, cfg.output_dir, save_to_disk, logger
)
arguments.update(checkpointer.load())
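# When a checkpoint exists, load() restores model/optimiser/scheduler state and
# returns the saved bookkeeping (including the iteration counter), so training
# resumes where it stopped rather than starting over.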
model.post_init()
model.to(device)
model = inner_train_ssd(
data_root=data_root,
cfg=cfg,
model=model,
data_loader=object_detection_data_loaders(
data_root=data_root,
cfg=cfg,
split=SplitEnum.training,
distributed=kws.distributed,
max_iter=solver_cfg.max_iter // kws.num_gpus,
start_iter=arguments["iteration"],
),
optimiser=optimiser,
scheduler=scheduler,
check_pointer=checkpointer,
device=device,
arguments=arguments,
kws=kws,
)
return model
def | ():
"""description"""
from configs.mobilenet_v2_ssd320_voc0712 import base_cfg
# from configs.efficient_net_b3_ssd300_voc0712 import base_cfg
# from configs.vgg_ssd300_voc0712 import base_cfg
parser = argparse.ArgumentParser(
description="Single Shot MultiBox Detector Training With PyTorch"
)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument(
"--log_step", default=10, type=int, help="Print logs every log_step"
)
parser.add_argument(
"--save_step", default=2500, type=int, help="Save checkpoint every save_step"
)
parser.add_argument(
"--eval_step",
default=2500,
type=int,
help="Evaluate dataset every eval_step, disabled when eval_step < 0",
)
parser.add_argument("--use_tensorboard", default=True, type=str2bool)
parser.add_argument(
"--skip-test",
dest="skip_test",
help="Do not test the final model",
action="store_true",
)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
args.distributed = num_gpus > 1
args.num_gpus = num_gpus
set_benchmark_device_dist(args.distributed, args.local_rank)
logger = setup_distributed_logger(
"SSD",
global_distribution_rank(),
ensure_existence(PROJECT_APP | main | identifier_name |
ssd_train.py | 20
"""
import argparse
import datetime
import logging
import os
import time
from pathlib import Path
import torch
from draugr.numpy_utilities import SplitEnum
from draugr.torch_utilities import (
TorchCacheSession,
TorchEvalSession,
TorchTrainSession,
WarmupMultiStepLR,
)
from torch.nn import Module
from torch.utils.data import DataLoader
from warg import NOD, ensure_existence
from warg.arguments import str2bool
from neodroidvision import PROJECT_APP_PATH
from neodroidvision.detection.single_stage.ssd import (
MultiBoxLoss,
SingleShotDetectionNms,
do_ssd_evaluation,
object_detection_data_loaders,
)
from neodroidvision.utilities import (
CheckPointer,
MetricLogger,
global_distribution_rank,
reduce_loss_dict,
set_benchmark_device_dist,
setup_distributed_logger,
write_metrics_recursive,
)
def inner_train_ssd(
*,
data_root: Path,
cfg: NOD,
model: Module,
data_loader: DataLoader,
optimiser: torch.optim.Optimizer,
scheduler: WarmupMultiStepLR,
check_pointer: callable,
device: callable,
arguments: callable,
kws: NOD,
) -> Module:
"""
:param data_root:
:type data_root:
:param cfg:
:type cfg:
:param model:
:type model:
:param data_loader:
:type data_loader:
:param optimiser:
:type optimiser:
:param scheduler:
:type scheduler:
:param check_pointer:
:type check_pointer:
:param device:
:type device:
:param arguments:
:type arguments:
:param kws:
:type kws:
:return:
:rtype:"""
logger = logging.getLogger("SSD.trainer")
logger.info("Start training ...")
meters = MetricLogger()
with TorchTrainSession(model):
save_to_disk = global_distribution_rank() == 0
if kws.use_tensorboard and save_to_disk:
import tensorboardX
writer = tensorboardX.SummaryWriter(
log_dir=str(PROJECT_APP_PATH.user_data / "results" / "tf_logs")
)
else:
writer = None
max_iter = len(data_loader)
start_iter = arguments["iteration"]
start_training_time = time.time()
end = time.time()
for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
arguments["iteration"] = iteration
images = images.to(device)
targets = targets.to(device)
loss_instance = MultiBoxLoss(neg_pos_ratio=cfg.model.neg_pos_ratio)
cls_logits, bbox_pred = model(images)
reg_loss, cls_loss = loss_instance(
cls_logits, bbox_pred, targets.labels, targets.boxes
)
loss_dict = dict(reg_loss=reg_loss, cls_loss=cls_loss)
loss = sum(loss for loss in loss_dict.values())
loss_dict_reduced = reduce_loss_dict(
loss_dict
) # reduce losses over all GPUs for logging purposes
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
meters.update(total_loss=losses_reduced, **loss_dict_reduced)
optimiser.zero_grad()
loss.backward()
optimiser.step()
scheduler.step()
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time)
if iteration % kws.log_step == 0:
eta_seconds = meters.time.global_avg * (max_iter - iteration)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
logger.info(
meters.delimiter.join(
[
f"iter: {iteration:06d}",
f"lr: {optimiser.param_groups[0]['lr']:.5f}",
f"{str(meters)}",
f"eta: {eta_string}",
f"mem: {round(torch.cuda.max_memory_allocated() / 1024.0 / 1024.0)}M",
]
)
)
if writer:
global_step = iteration
writer.add_scalar(
"losses/total_loss", losses_reduced, global_step=global_step
)
for loss_name, loss_item in loss_dict_reduced.items():
writer.add_scalar(
f"losses/{loss_name}", loss_item, global_step=global_step
)
writer.add_scalar(
"lr", optimiser.param_groups[0]["lr"], global_step=global_step
)
if iteration % kws.save_step == 0:
check_pointer.save(f"model_{iteration:06d}", **arguments)
if (
kws.eval_step > 0
and iteration % kws.eval_step == 0
and not iteration == max_iter
):
with TorchEvalSession(model):
eval_results = do_ssd_evaluation(
data_root,
cfg,
model,
distributed=kws.distributed,
iteration=iteration,
)
if global_distribution_rank() == 0 and writer:
for eval_result, dataset in zip(
eval_results, cfg.datasets.test
):
|
check_pointer.save("model_final", **arguments)
total_training_time = int(
time.time() - start_training_time
) # compute training time
logger.info(
f"Total training time: {datetime.timedelta(seconds = total_training_time)} ("
f"{total_training_time / max_iter:.4f} s / it)"
)
return model
def train_ssd(data_root: Path, cfg, solver_cfg: NOD, kws: NOD) -> Module:
"""
Args:
data_root:
cfg:
solver_cfg:
kws:
Returns:
"""
logger = logging.getLogger("SSD.trainer")
model = SingleShotDetectionNms(cfg)
device = torch.device(cfg.model.device)
if kws.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[kws.local_rank], output_device=kws.local_rank
)
lr = solver_cfg.lr * kws.num_gpus # scale by num gpus
lr = solver_cfg.base_lr if lr is None else lr
optimiser = torch.optim.SGD(
model.parameters(),
lr=lr,
momentum=solver_cfg.momentum,
weight_decay=solver_cfg.weight_decay,
)
milestones = [step // kws.num_gpus for step in solver_cfg.lr_steps]
scheduler = WarmupMultiStepLR(
optimiser=optimiser,
milestones=solver_cfg.lr_steps if milestones is None else milestones,
gamma=solver_cfg.gamma,
warmup_factor=solver_cfg.warmup_factor,
warmup_iters=solver_cfg.warmup_iters,
)
arguments = {"iteration": 0}
save_to_disk = global_distribution_rank() == 0
checkpointer = CheckPointer(
model, optimiser, scheduler, cfg.output_dir, save_to_disk, logger
)
arguments.update(checkpointer.load())
model.post_init()
model.to(device)
model = inner_train_ssd(
data_root=data_root,
cfg=cfg,
model=model,
data_loader=object_detection_data_loaders(
data_root=data_root,
cfg=cfg,
split=SplitEnum.training,
distributed=kws.distributed,
max_iter=solver_cfg.max_iter // kws.num_gpus,
start_iter=arguments["iteration"],
),
optimiser=optimiser,
scheduler=scheduler,
check_pointer=checkpointer,
device=device,
arguments=arguments,
kws=kws,
)
return model
def main():
"""description"""
from configs.mobilenet_v2_ssd320_voc0712 import base_cfg
# from configs.efficient_net_b3_ssd300_voc0712 import base_cfg
# from configs.vgg_ssd300_voc0712 import base_cfg
parser = argparse.ArgumentParser(
description="Single Shot MultiBox Detector Training With PyTorch"
)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument(
"--log_step", default=10, type=int, help="Print logs every log_step"
)
parser.add_argument(
"--save_step", default=2500, type=int, help="Save checkpoint every save_step"
)
parser.add_argument(
"--eval_step",
default=2500,
type=int,
help="Evaluate dataset every eval_step, disabled when eval_step < 0",
)
parser.add_argument("--use_tensorboard", default=True, type=str2bool)
parser.add_argument(
"--skip-test",
dest="skip_test",
help="Do not test the final model",
action="store_true",
)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
args.distributed = num_gpus > 1
args.num_gpus = num_gpus
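# WORLD_SIZE is exported by the distributed launcher (torch.distributed.launch
# or torchrun), so distributed mode is inferred automatically when the script is
# started that way and falls back to a single process otherwise.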
set_benchmark_device_dist(args.distributed, args.local_rank)
logger = setup_distributed_logger(
"SSD",
global_distribution_rank(),
ensure_existence(PROJECT_APP | write_metrics_recursive(
eval_result["metrics"],
"metrics/" + dataset,
writer,
iteration,
) | conditional_block |
controller.go | // Check for and handle deletion of cluster. Return early if it is being
// deleted or there was an error.
if result, err := r.handleDelete(ctx, cluster); err != nil {
span.RecordError(err)
log.Error(err, "deleting")
return reconcile.Result{}, err
} else if result != nil {
if log := log.V(1); log.Enabled() {
if result.RequeueAfter > 0 {
// RequeueAfter implies Requeue, but set both to make the next
// log message more clear.
result.Requeue = true
}
log.Info("deleting", "result", fmt.Sprintf("%+v", *result))
}
return *result, nil
}
// Perform initial validation on a cluster
// TODO: Move this to a defaulting (mutating admission) webhook
// to leverage regular validation.
// verify all needed image values are defined
if err := config.VerifyImageValues(cluster); err != nil {
// warning event with missing image information
r.Recorder.Event(cluster, corev1.EventTypeWarning, "MissingRequiredImage",
err.Error())
// specifically allow reconciliation if the cluster is shutdown to
// facilitate upgrades, otherwise return
if cluster.Spec.Shutdown == nil ||
(cluster.Spec.Shutdown != nil && !*cluster.Spec.Shutdown) {
return result, err
}
}
if cluster.Spec.Standby != nil &&
cluster.Spec.Standby.Enabled &&
cluster.Spec.Standby.Host == "" &&
cluster.Spec.Standby.RepoName == "" {
// When a standby cluster is requested but a repoName or host is not provided
// the cluster will be created as a non-standby. Reject any clusters with
// this configuration and provide an event
path := field.NewPath("spec", "standby")
err := field.Invalid(path, cluster.Name, "Standby requires a host or repoName to be enabled")
r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidStandbyConfiguration",
err.Error())
return result, err
}
var (
clusterConfigMap *corev1.ConfigMap
clusterReplicationSecret *corev1.Secret
clusterPodService *corev1.Service
clusterVolumes []corev1.PersistentVolumeClaim
instanceServiceAccount *corev1.ServiceAccount
instances *observedInstances
patroniLeaderService *corev1.Service
primaryCertificate *corev1.SecretProjection
primaryService *corev1.Service
replicaService *corev1.Service
rootCA *pki.RootCertificateAuthority
monitoringSecret *corev1.Secret
exporterWebConfig *corev1.ConfigMap
err error
)
// Define the function for updating the PostgresCluster status. Returns any error that
// occurs while attempting to patch the status, while otherwise simply returning the
// Result and error variables that are populated while reconciling the PostgresCluster.
patchClusterStatus := func() (reconcile.Result, error) {
if !equality.Semantic.DeepEqual(before.Status, cluster.Status) {
// NOTE(cbandy): Kubernetes prior to v1.16.10 and v1.17.6 does not track
// managed fields on the status subresource: https://issue.k8s.io/88901
if err := errors.WithStack(r.Client.Status().Patch(
ctx, cluster, client.MergeFrom(before), r.Owner)); err != nil {
log.Error(err, "patching cluster status")
return result, err
}
log.V(1).Info("patched cluster status")
}
return result, err
}
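// client.MergeFrom(before) computes the patch from the snapshot taken above, so
// the PATCH sent to the API server only carries the status fields this reconcile
// changed; the DeepEqual check skips the request entirely when nothing changed.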
// if the cluster is paused, set a condition and return
if cluster.Spec.Paused != nil && *cluster.Spec.Paused {
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: v1beta1.PostgresClusterProgressing,
Status: metav1.ConditionFalse,
Reason: "Paused",
Message: "No spec changes will be applied and no other statuses will be updated.",
ObservedGeneration: cluster.GetGeneration(),
})
return patchClusterStatus()
} else {
meta.RemoveStatusCondition(&cluster.Status.Conditions, v1beta1.PostgresClusterProgressing)
}
pgHBAs := postgres.NewHBAs()
pgmonitor.PostgreSQLHBAs(cluster, &pgHBAs)
pgbouncer.PostgreSQL(cluster, &pgHBAs)
pgParameters := postgres.NewParameters()
pgaudit.PostgreSQLParameters(&pgParameters)
pgbackrest.PostgreSQL(cluster, &pgParameters)
pgmonitor.PostgreSQLParameters(cluster, &pgParameters)
// Set huge_pages = try if a hugepages resource limit > 0, otherwise set "off"
postgres.SetHugePages(cluster, &pgParameters)
if err == nil {
rootCA, err = r.reconcileRootCertificate(ctx, cluster)
}
if err == nil {
// Since any existing data directories must be moved prior to bootstrapping the
// cluster, further reconciliation will not occur until the directory move Jobs
// (if configured) have completed. Func reconcileDirMoveJobs() will therefore
// return a bool indicating that the controller should return early while any
// required Jobs are running, after which it will indicate that an early
// return is no longer needed, and reconciliation can proceed normally.
var returnEarly bool
returnEarly, err = r.reconcileDirMoveJobs(ctx, cluster)
if err != nil || returnEarly {
return patchClusterStatus()
}
}
if err == nil {
clusterVolumes, err = r.observePersistentVolumeClaims(ctx, cluster)
}
if err == nil {
clusterVolumes, err = r.configureExistingPVCs(ctx, cluster, clusterVolumes)
}
if err == nil {
instances, err = r.observeInstances(ctx, cluster)
}
if err == nil {
err = updateResult(r.reconcilePatroniStatus(ctx, cluster, instances))
}
if err == nil {
err = r.reconcilePatroniSwitchover(ctx, cluster, instances)
}
// reconcile the Pod service before reconciling any data source in case it is necessary
// to start Pods during data source reconciliation that require network connections (e.g.
// if it is necessary to start a dedicated repo host to bootstrap a new cluster using its
// own existing backups).
if err == nil {
clusterPodService, err = r.reconcileClusterPodService(ctx, cluster)
}
// reconcile the RBAC resources before reconciling any data source in case
// restore/move Job pods require the ServiceAccount to access any data source.
// e.g., we are restoring from an S3 source using an IAM for access
// - https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-technical-overview.html
if err == nil {
instanceServiceAccount, err = r.reconcileRBACResources(ctx, cluster)
}
// First handle reconciling any data source configured for the PostgresCluster. This includes
// reconciling the data source defined to bootstrap a new cluster, as well as a reconciling
// a data source to perform restore in-place and re-bootstrap the cluster.
if err == nil {
// Since the PostgreSQL data source needs to be populated prior to bootstrapping the
// cluster, further reconciliation will not occur until the data source (if configured) is
// initialized. Func reconcileDataSource() will therefore return a bool indicating that
// the controller should return early while data initialization is in progress, after
// which it will indicate that an early return is no longer needed, and reconciliation
// can proceed normally.
var returnEarly bool
returnEarly, err = r.reconcileDataSource(ctx, cluster, instances, clusterVolumes, rootCA)
if err != nil || returnEarly {
return patchClusterStatus()
}
}
if err == nil {
clusterConfigMap, err = r.reconcileClusterConfigMap(ctx, cluster, pgHBAs, pgParameters)
}
if err == nil {
clusterReplicationSecret, err = r.reconcileReplicationSecret(ctx, cluster, rootCA)
}
if err == nil {
patroniLeaderService, err = r.reconcilePatroniLeaderLease(ctx, cluster)
}
if err == nil {
primaryService, err = r.reconcileClusterPrimaryService(ctx, cluster, patroniLeaderService)
}
if err == nil {
replicaService, err = r.reconcileClusterReplicaService(ctx, cluster)
}
if err == nil {
primaryCertificate, err = r.reconcileClusterCertificate(ctx, rootCA, cluster, primaryService, replicaService)
}
if err == nil {
err = r.reconcilePatroniDistributedConfiguration(ctx, cluster)
}
if err == nil {
err = r.reconcilePatroniDynamicConfiguration(ctx, cluster, instances, pgHBAs, pgParameters)
}
if err == nil {
| monitoringSecret, err = r.reconcileMonitoringSecret(ctx, cluster)
}
| conditional_block |
|
controller.go | // ControllerName is the name of the PostgresCluster controller
ControllerName = "postgrescluster-controller"
)
// Reconciler holds resources for the PostgresCluster reconciler
type Reconciler struct {
Client client.Client
Owner client.FieldOwner
Recorder record.EventRecorder
Tracer trace.Tracer
IsOpenShift bool
PodExec func(
namespace, pod, container string,
stdin io.Reader, stdout, stderr io.Writer, command ...string,
) error
}
// +kubebuilder:rbac:groups="",resources="events",verbs={create,patch}
// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters",verbs={get,list,watch}
// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters/status",verbs={patch}
// Reconcile reconciles a ConfigMap in a namespace managed by the PostgreSQL Operator
func (r *Reconciler) Reconcile(
ctx context.Context, request reconcile.Request) (reconcile.Result, error,
) {
ctx, span := r.Tracer.Start(ctx, "Reconcile")
log := logging.FromContext(ctx)
defer span.End()
// create the result that will be updated following a call to each reconciler
result := reconcile.Result{}
updateResult := func(next reconcile.Result, err error) error {
if err == nil {
result = updateReconcileResult(result, next)
}
return err
}
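// Every step that can ask for a requeue funnels its Result through updateResult,
// so the value finally returned from Reconcile reflects the combined requeue
// requests rather than only the last step's.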
// get the postgrescluster from the cache
cluster := &v1beta1.PostgresCluster{}
if err := r.Client.Get(ctx, request.NamespacedName, cluster); err != nil {
// NotFound cannot be fixed by requeuing so ignore it. During background
// deletion, we receive delete events from cluster's dependents after
// cluster is deleted.
if err = client.IgnoreNotFound(err); err != nil {
log.Error(err, "unable to fetch PostgresCluster")
span.RecordError(err)
}
return result, err
}
// Set any defaults that may not have been stored in the API. No DeepCopy
// is necessary because controller-runtime makes a copy before returning
// from its cache.
cluster.Default()
if cluster.Spec.OpenShift == nil {
cluster.Spec.OpenShift = &r.IsOpenShift
}
// Keep a copy of cluster prior to any manipulations.
before := cluster.DeepCopy()
// NOTE(cbandy): When a namespace is deleted, objects owned by a
// PostgresCluster may be deleted before the PostgresCluster is deleted.
// When this happens, any attempt to reconcile those objects is rejected
// as Forbidden: "unable to create new content in namespace … because it is
// being terminated".
// Check for and handle deletion of cluster. Return early if it is being
// deleted or there was an error.
if result, err := r.handleDelete(ctx, cluster); err != nil {
span.RecordError(err)
log.Error(err, "deleting")
return reconcile.Result{}, err
} else if result != nil {
if log := log.V(1); log.Enabled() {
if result.RequeueAfter > 0 {
// RequeueAfter implies Requeue, but set both to make the next
// log message more clear.
result.Requeue = true
}
log.Info("deleting", "result", fmt.Sprintf("%+v", *result))
}
return *result, nil
}
// Perform initial validation on a cluster
// TODO: Move this to a defaulting (mutating admission) webhook
// to leverage regular validation.
// verify all needed image values are defined
if err := config.VerifyImageValues(cluster); err != nil {
// warning event with missing image information
r.Recorder.Event(cluster, corev1.EventTypeWarning, "MissingRequiredImage",
err.Error())
// specifically allow reconciliation if the cluster is shutdown to
// facilitate upgrades, otherwise return
if cluster.Spec.Shutdown == nil ||
(cluster.Spec.Shutdown != nil && !*cluster.Spec.Shutdown) {
return result, err
}
}
if cluster.Spec.Standby != nil &&
cluster.Spec.Standby.Enabled &&
cluster.Spec.Standby.Host == "" &&
cluster.Spec.Standby.RepoName == "" {
// When a standby cluster is requested but a repoName or host is not provided
// the cluster will be created as a non-standby. Reject any clusters with
// this configuration and provide an event
path := field.NewPath("spec", "standby")
err := field.Invalid(path, cluster.Name, "Standby requires a host or repoName to be enabled")
r.Recorder.Event(cluster, corev1.EventTypeWarning, "InvalidStandbyConfiguration",
err.Error())
return result, err
}
var (
clusterConfigMap *corev1.ConfigMap
clusterReplicationSecret *corev1.Secret
clusterPodService *corev1.Service
clusterVolumes []corev1.PersistentVolumeClaim
instanceServiceAccount *corev1.ServiceAccount
instances *observedInstances
patroniLeaderService *corev1.Service
primaryCertificate *corev1.SecretProjection
primaryService *corev1.Service
replicaService *corev1.Service
rootCA *pki.RootCertificateAuthority
monitoringSecret *corev1.Secret
exporterWebConfig *corev1.ConfigMap
err error
)
// Define the function for updating the PostgresCluster status. Returns any error that
// occurs while attempting to patch the status, while otherwise simply returning the
// Result and error variables that are populated while reconciling the PostgresCluster.
patchClusterStatus := func() (reconcile.Result, error) {
if !equality.Semantic.DeepEqual(before.Status, cluster.Status) {
// NOTE(cbandy): Kubernetes prior to v1.16.10 and v1.17.6 does not track
// managed fields on the status subresource: https://issue.k8s.io/88901
if err := errors.WithStack(r.Client.Status().Patch(
ctx, cluster, client.MergeFrom(before), r.Owner)); err != nil {
log.Error(err, "patching cluster status")
return result, err
}
log.V(1).Info("patched cluster status")
}
return result, err
}
// if the cluster is paused, set a condition and return
if cluster.Spec.Paused != nil && *cluster.Spec.Paused {
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: v1beta1.PostgresClusterProgressing,
Status: metav1.ConditionFalse,
Reason: "Paused",
Message: "No spec changes will be applied and no other statuses will be updated.",
ObservedGeneration: cluster.GetGeneration(),
})
return patchClusterStatus()
} else {
meta.RemoveStatusCondition(&cluster.Status.Conditions, v1beta1.PostgresClusterProgressing)
}
pgHBAs := postgres.NewHBAs()
pgmonitor.PostgreSQLHBAs(cluster, &pgHBAs)
pgbouncer.PostgreSQL(cluster, &pgHBAs)
pgParameters := postgres.NewParameters()
pgaudit.PostgreSQLParameters(&pgParameters)
pgbackrest.PostgreSQL(cluster, &pgParameters)
pgmonitor.PostgreSQLParameters(cluster, &pgParameters)
// Set huge_pages = try if a hugepages resource limit > 0, otherwise set "off"
postgres.SetHugePages(cluster, &pgParameters)
if err == nil {
rootCA, err = r.reconcileRootCertificate(ctx, cluster)
}
if err == nil {
// Since any existing data directories must be moved prior to bootstrapping the
// cluster, further reconciliation will not occur until the directory move Jobs
// (if configured) have completed. Func reconcileDirMoveJobs() will therefore
// return a bool indicating that the controller should return early while any
// required Jobs are running, after which it will indicate that an early
// return is no longer needed, and reconciliation can proceed normally.
var returnEarly bool
returnEarly, err = r.reconcileDirMoveJobs(ctx, cluster)
if err != nil || returnEarly {
return patchClusterStatus()
}
}
if err == nil {
clusterVolumes, err = r.observePersistentVolumeClaims(ctx, cluster)
}
if err == nil {
clusterVolumes, err = r.configureExistingPVCs(ctx, cluster, clusterVolumes)
}
if err == nil {
instances, err = r.observeInstances(ctx, cluster)
}
if err == nil {
err = updateResult(r.reconcilePatroniStatus(ctx, cluster, instances))
}
if err == nil {
err = r.reconcilePatroniSwitchover(ctx, cluster, instances)
}
// reconcile the Pod service before reconciling any data source in case it is necessary
// to start Pods during data source reconciliation that require network connections (e.g.
// if it is necessary to start a dedicated repo host to bootstrap a new cluster |
const ( | random_line_split |
|
controller.go | clusterConfigMap *corev1.ConfigMap
clusterReplicationSecret *corev1.Secret
clusterPodService *corev1.Service
clusterVolumes []corev1.PersistentVolumeClaim
instanceServiceAccount *corev1.ServiceAccount
instances *observedInstances
patroniLeaderService *corev1.Service
primaryCertificate *corev1.SecretProjection
primaryService *corev1.Service
replicaService *corev1.Service
rootCA *pki.RootCertificateAuthority
monitoringSecret *corev1.Secret
exporterWebConfig *corev1.ConfigMap
err error
)
// Define the function for updating the PostgresCluster status. It returns any error that
// occurs while attempting to patch the status; otherwise it simply returns the
// Result and error variables that are populated while reconciling the PostgresCluster.
patchClusterStatus := func() (reconcile.Result, error) {
if !equality.Semantic.DeepEqual(before.Status, cluster.Status) {
// NOTE(cbandy): Kubernetes prior to v1.16.10 and v1.17.6 does not track
// managed fields on the status subresource: https://issue.k8s.io/88901
if err := errors.WithStack(r.Client.Status().Patch(
ctx, cluster, client.MergeFrom(before), r.Owner)); err != nil {
log.Error(err, "patching cluster status")
return result, err
}
log.V(1).Info("patched cluster status")
}
return result, err
}
// if the cluster is paused, set a condition and return
if cluster.Spec.Paused != nil && *cluster.Spec.Paused {
meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
Type: v1beta1.PostgresClusterProgressing,
Status: metav1.ConditionFalse,
Reason: "Paused",
Message: "No spec changes will be applied and no other statuses will be updated.",
ObservedGeneration: cluster.GetGeneration(),
})
return patchClusterStatus()
} else {
meta.RemoveStatusCondition(&cluster.Status.Conditions, v1beta1.PostgresClusterProgressing)
}
pgHBAs := postgres.NewHBAs()
pgmonitor.PostgreSQLHBAs(cluster, &pgHBAs)
pgbouncer.PostgreSQL(cluster, &pgHBAs)
pgParameters := postgres.NewParameters()
pgaudit.PostgreSQLParameters(&pgParameters)
pgbackrest.PostgreSQL(cluster, &pgParameters)
pgmonitor.PostgreSQLParameters(cluster, &pgParameters)
// Set huge_pages = try if a hugepages resource limit > 0, otherwise set "off"
postgres.SetHugePages(cluster, &pgParameters)
if err == nil {
rootCA, err = r.reconcileRootCertificate(ctx, cluster)
}
if err == nil {
// Since any existing data directories must be moved prior to bootstrapping the
// cluster, further reconciliation will not occur until the directory move Jobs
// (if configured) have completed. Func reconcileDirMoveJobs() will therefore
// return a bool indicating that the controller should return early while any
// required Jobs are running, after which it will indicate that an early
// return is no longer needed, and reconciliation can proceed normally.
var returnEarly bool
returnEarly, err = r.reconcileDirMoveJobs(ctx, cluster)
if err != nil || returnEarly {
return patchClusterStatus()
}
}
if err == nil {
clusterVolumes, err = r.observePersistentVolumeClaims(ctx, cluster)
}
if err == nil {
clusterVolumes, err = r.configureExistingPVCs(ctx, cluster, clusterVolumes)
}
if err == nil {
instances, err = r.observeInstances(ctx, cluster)
}
if err == nil {
err = updateResult(r.reconcilePatroniStatus(ctx, cluster, instances))
}
if err == nil {
err = r.reconcilePatroniSwitchover(ctx, cluster, instances)
}
// reconcile the Pod service before reconciling any data source in case it is necessary
// to start Pods during data source reconciliation that require network connections (e.g.
// if it is necessary to start a dedicated repo host to bootstrap a new cluster using its
// own existing backups).
if err == nil {
clusterPodService, err = r.reconcileClusterPodService(ctx, cluster)
}
// reconcile the RBAC resources before reconciling any data source in case
// restore/move Job pods require the ServiceAccount to access any data source.
// e.g., we are restoring from an S3 source using an IAM role for access
// - https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-technical-overview.html
if err == nil {
instanceServiceAccount, err = r.reconcileRBACResources(ctx, cluster)
}
// First handle reconciling any data source configured for the PostgresCluster. This includes
// reconciling the data source defined to bootstrap a new cluster, as well as reconciling
// a data source to perform an in-place restore and re-bootstrap the cluster.
if err == nil {
// Since the PostgreSQL data source needs to be populated prior to bootstrapping the
// cluster, further reconciliation will not occur until the data source (if configured) is
// initialized. Func reconcileDataSource() will therefore return a bool indicating that
// the controller should return early while data initialization is in progress, after
// which it will indicate that an early return is no longer needed, and reconciliation
// can proceed normally.
var returnEarly bool
returnEarly, err = r.reconcileDataSource(ctx, cluster, instances, clusterVolumes, rootCA)
if err != nil || returnEarly {
return patchClusterStatus()
}
}
if err == nil {
clusterConfigMap, err = r.reconcileClusterConfigMap(ctx, cluster, pgHBAs, pgParameters)
}
if err == nil {
clusterReplicationSecret, err = r.reconcileReplicationSecret(ctx, cluster, rootCA)
}
if err == nil {
patroniLeaderService, err = r.reconcilePatroniLeaderLease(ctx, cluster)
}
if err == nil {
primaryService, err = r.reconcileClusterPrimaryService(ctx, cluster, patroniLeaderService)
}
if err == nil {
replicaService, err = r.reconcileClusterReplicaService(ctx, cluster)
}
if err == nil {
primaryCertificate, err = r.reconcileClusterCertificate(ctx, rootCA, cluster, primaryService, replicaService)
}
if err == nil {
err = r.reconcilePatroniDistributedConfiguration(ctx, cluster)
}
if err == nil {
err = r.reconcilePatroniDynamicConfiguration(ctx, cluster, instances, pgHBAs, pgParameters)
}
if err == nil {
monitoringSecret, err = r.reconcileMonitoringSecret(ctx, cluster)
}
if err == nil {
exporterWebConfig, err = r.reconcileExporterWebConfig(ctx, cluster)
}
if err == nil {
err = r.reconcileInstanceSets(
ctx, cluster, clusterConfigMap, clusterReplicationSecret,
rootCA, clusterPodService, instanceServiceAccount, instances,
patroniLeaderService, primaryCertificate, clusterVolumes, exporterWebConfig)
}
if err == nil {
err = r.reconcilePostgresDatabases(ctx, cluster, instances)
}
if err == nil {
err = r.reconcilePostgresUsers(ctx, cluster, instances)
}
if err == nil {
err = updateResult(r.reconcilePGBackRest(ctx, cluster, instances, rootCA))
}
if err == nil {
err = r.reconcilePGBouncer(ctx, cluster, instances, primaryCertificate, rootCA)
}
if err == nil {
err = r.reconcilePGMonitor(ctx, cluster, instances, monitoringSecret)
}
if err == nil {
err = r.reconcileDatabaseInitSQL(ctx, cluster, instances)
}
if err == nil {
err = r.reconcilePGAdmin(ctx, cluster)
}
if err == nil {
// This is after [Reconciler.rolloutInstances] to ensure that recreating
// Pods takes precedence.
err = r.handlePatroniRestarts(ctx, cluster, instances)
}
// at this point everything reconciled successfully, and we can update the
// observedGeneration
cluster.Status.ObservedGeneration = cluster.GetGeneration()
log.V(1).Info("reconciled cluster")
return patchClusterStatus()
}
// deleteControlled safely deletes object when it is controlled by cluster.
func (r *Reconciler) deleteControlled(
ctx context.Context, cluster *v1beta1.PostgresCluster, object client.Object,
) error {
| if metav1.IsControlledBy(object, cluster) {
uid := object.GetUID()
version := object.GetResourceVersion()
exactly := client.Preconditions{UID: &uid, ResourceVersion: &version}
return r.Client.Delete(ctx, object, exactly)
}
return nil
}
| identifier_body |
|
controller.go | (ctx, cluster)
if err != nil || returnEarly {
return patchClusterStatus()
}
}
if err == nil {
clusterVolumes, err = r.observePersistentVolumeClaims(ctx, cluster)
}
if err == nil {
clusterVolumes, err = r.configureExistingPVCs(ctx, cluster, clusterVolumes)
}
if err == nil {
instances, err = r.observeInstances(ctx, cluster)
}
if err == nil {
err = updateResult(r.reconcilePatroniStatus(ctx, cluster, instances))
}
if err == nil {
err = r.reconcilePatroniSwitchover(ctx, cluster, instances)
}
// reconcile the Pod service before reconciling any data source in case it is necessary
// to start Pods during data source reconciliation that require network connections (e.g.
// if it is necessary to start a dedicated repo host to bootstrap a new cluster using its
// own existing backups).
if err == nil {
clusterPodService, err = r.reconcileClusterPodService(ctx, cluster)
}
// reconcile the RBAC resources before reconciling any data source in case
// restore/move Job pods require the ServiceAccount to access any data source.
// e.g., we are restoring from an S3 source using an IAM role for access
// - https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-technical-overview.html
if err == nil {
instanceServiceAccount, err = r.reconcileRBACResources(ctx, cluster)
}
// First handle reconciling any data source configured for the PostgresCluster. This includes
// reconciling the data source defined to bootstrap a new cluster, as well as reconciling
// a data source to perform an in-place restore and re-bootstrap the cluster.
if err == nil {
// Since the PostgreSQL data source needs to be populated prior to bootstrapping the
// cluster, further reconciliation will not occur until the data source (if configured) is
// initialized. Func reconcileDataSource() will therefore return a bool indicating that
// the controller should return early while data initialization is in progress, after
// which it will indicate that an early return is no longer needed, and reconciliation
// can proceed normally.
var returnEarly bool
returnEarly, err = r.reconcileDataSource(ctx, cluster, instances, clusterVolumes, rootCA)
if err != nil || returnEarly {
return patchClusterStatus()
}
}
if err == nil {
clusterConfigMap, err = r.reconcileClusterConfigMap(ctx, cluster, pgHBAs, pgParameters)
}
if err == nil {
clusterReplicationSecret, err = r.reconcileReplicationSecret(ctx, cluster, rootCA)
}
if err == nil {
patroniLeaderService, err = r.reconcilePatroniLeaderLease(ctx, cluster)
}
if err == nil {
primaryService, err = r.reconcileClusterPrimaryService(ctx, cluster, patroniLeaderService)
}
if err == nil {
replicaService, err = r.reconcileClusterReplicaService(ctx, cluster)
}
if err == nil {
primaryCertificate, err = r.reconcileClusterCertificate(ctx, rootCA, cluster, primaryService, replicaService)
}
if err == nil {
err = r.reconcilePatroniDistributedConfiguration(ctx, cluster)
}
if err == nil {
err = r.reconcilePatroniDynamicConfiguration(ctx, cluster, instances, pgHBAs, pgParameters)
}
if err == nil {
monitoringSecret, err = r.reconcileMonitoringSecret(ctx, cluster)
}
if err == nil {
exporterWebConfig, err = r.reconcileExporterWebConfig(ctx, cluster)
}
if err == nil {
err = r.reconcileInstanceSets(
ctx, cluster, clusterConfigMap, clusterReplicationSecret,
rootCA, clusterPodService, instanceServiceAccount, instances,
patroniLeaderService, primaryCertificate, clusterVolumes, exporterWebConfig)
}
if err == nil {
err = r.reconcilePostgresDatabases(ctx, cluster, instances)
}
if err == nil {
err = r.reconcilePostgresUsers(ctx, cluster, instances)
}
if err == nil {
err = updateResult(r.reconcilePGBackRest(ctx, cluster, instances, rootCA))
}
if err == nil {
err = r.reconcilePGBouncer(ctx, cluster, instances, primaryCertificate, rootCA)
}
if err == nil {
err = r.reconcilePGMonitor(ctx, cluster, instances, monitoringSecret)
}
if err == nil {
err = r.reconcileDatabaseInitSQL(ctx, cluster, instances)
}
if err == nil {
err = r.reconcilePGAdmin(ctx, cluster)
}
if err == nil {
// This is after [Reconciler.rolloutInstances] to ensure that recreating
// Pods takes precedence.
err = r.handlePatroniRestarts(ctx, cluster, instances)
}
// at this point everything reconciled successfully, and we can update the
// observedGeneration
cluster.Status.ObservedGeneration = cluster.GetGeneration()
log.V(1).Info("reconciled cluster")
return patchClusterStatus()
}
// deleteControlled safely deletes object when it is controlled by cluster.
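// The UID and ResourceVersion preconditions ensure the delete only applies to
// the exact object revision that was observed, not a recreated or updated one.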
func (r *Reconciler) deleteControlled(
ctx context.Context, cluster *v1beta1.PostgresCluster, object client.Object,
) error {
if metav1.IsControlledBy(object, cluster) {
uid := object.GetUID()
version := object.GetResourceVersion()
exactly := client.Preconditions{UID: &uid, ResourceVersion: &version}
return r.Client.Delete(ctx, object, exactly)
}
return nil
}
// patch sends patch to object's endpoint in the Kubernetes API and updates
// object with any returned content. The fieldManager is set to r.Owner, but
// can be overridden in options.
// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers
func (r *Reconciler) patch(
ctx context.Context, object client.Object,
patch client.Patch, options ...client.PatchOption,
) error {
options = append([]client.PatchOption{r.Owner}, options...)
return r.Client.Patch(ctx, object, patch, options...)
}
// The owner reference created by controllerutil.SetControllerReference blocks
// deletion. The OwnerReferencesPermissionEnforcement plugin requires that the
// creator of such a reference have either "delete" permission on the owner or
// "update" permission on the owner's "finalizers" subresource.
// - https://docs.k8s.io/reference/access-authn-authz/admission-controllers/
// +kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="postgresclusters/finalizers",verbs={update}
// setControllerReference sets owner as a Controller OwnerReference on controlled.
// Only one OwnerReference can be a controller, so it returns an error if another
// is already set.
func (r *Reconciler) setControllerReference(
owner *v1beta1.PostgresCluster, controlled client.Object,
) error {
return controllerutil.SetControllerReference(owner, controlled, r.Client.Scheme())
}
// setOwnerReference sets an OwnerReference on the object without setting the
// owner as a controller. This allows for multiple OwnerReferences on an object.
func (r *Reconciler) setOwnerReference(
owner *v1beta1.PostgresCluster, controlled client.Object,
) error {
return controllerutil.SetOwnerReference(owner, controlled, r.Client.Scheme())
}
// +kubebuilder:rbac:groups="",resources="configmaps",verbs={get,list,watch}
// +kubebuilder:rbac:groups="",resources="endpoints",verbs={get,list,watch}
// +kubebuilder:rbac:groups="",resources="persistentvolumeclaims",verbs={get,list,watch}
// +kubebuilder:rbac:groups="",resources="secrets",verbs={get,list,watch}
// +kubebuilder:rbac:groups="",resources="services",verbs={get,list,watch}
// +kubebuilder:rbac:groups="",resources="serviceaccounts",verbs={get,list,watch}
// +kubebuilder:rbac:groups="apps",resources="deployments",verbs={get,list,watch}
// +kubebuilder:rbac:groups="apps",resources="statefulsets",verbs={get,list,watch}
// +kubebuilder:rbac:groups="batch",resources="jobs",verbs={get,list,watch}
// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="roles",verbs={get,list,watch}
// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources="rolebindings",verbs={get,list,watch}
// +kubebuilder:rbac:groups="batch",resources="cronjobs",verbs={get,list,watch}
// +kubebuilder:rbac:groups="policy",resources="poddisruptionbudgets",verbs={get,list,watch}
// SetupWithManager adds the PostgresCluster controller to the provided runtime manager
func (r *Reconciler) Se | tupWithManager(m | identifier_name |
|
TimelineFlameChartNetworkDataProvider.ts | () {
this.#minimumBoundaryInternal = 0;
this.#timeSpan = 0;
this.#events = [];
this.#maxLevel = 0;
this.#networkTrackAppender = null;
this.#traceEngineData = null;
}
setModel(traceEngineData: TraceEngine.Handlers.Migration.PartialTraceData|null): void {
this.#timelineDataInternal = null;
this.#traceEngineData = traceEngineData;
this.#events = traceEngineData?.NetworkRequests.byTime || [];
if (this.#traceEngineData) {
this.#setTimingBoundsData(this.#traceEngineData);
}
}
isEmpty(): boolean {
this.timelineData();
return !this.#events.length;
}
maxStackDepth(): number {
return this.#maxLevel;
}
timelineData(): PerfUI.FlameChart.FlameChartTimelineData {
if (this.#timelineDataInternal && this.#timelineDataInternal.entryLevels.length !== 0) {
// The flame chart data is built already, so return the cached data.
return this.#timelineDataInternal;
}
this.#timelineDataInternal = PerfUI.FlameChart.FlameChartTimelineData.createEmpty();
if (!this.#traceEngineData) {
return this.#timelineDataInternal;
}
this.#events = this.#traceEngineData.NetworkRequests.byTime;
this.#networkTrackAppender = new NetworkTrackAppender(this.#traceEngineData, this.#timelineDataInternal);
this.#maxLevel = this.#networkTrackAppender.appendTrackAtLevel(0);
return this.#timelineDataInternal;
}
minimumBoundary(): number {
return this.#minimumBoundaryInternal;
}
totalTime(): number {
return this.#timeSpan;
}
setWindowTimes(startTime: number, endTime: number): void {
this.#updateTimelineData(startTime, endTime);
}
createSelection(index: number): TimelineSelection|null {
if (index === -1) {
return null;
}
const event = this.#events[index];
this.#lastSelection = new Selection(TimelineSelection.fromTraceEvent(event), index);
return this.#lastSelection.timelineSelection;
}
entryIndexForSelection(selection: TimelineSelection|null): number {
if (!selection) {
return -1;
}
if (this.#lastSelection && this.#lastSelection.timelineSelection.object === selection.object) {
return this.#lastSelection.entryIndex;
}
if (!TimelineSelection.isSyntheticNetworkRequestDetailsEventSelection(selection.object)) {
return -1;
}
const index = this.#events.indexOf(selection.object);
if (index !== -1) {
this.#lastSelection = new Selection(TimelineSelection.fromTraceEvent(selection.object), index);
}
return index;
}
entryColor(index: number): string {
if (!this.#networkTrackAppender) {
throw new Error('networkTrackAppender should not be empty');
}
return this.#networkTrackAppender.colorForEvent(this.#events[index]);
}
textColor(_index: number): string {
return FlameChartStyle.textColor;
}
entryTitle(index: number): string|null {
const event = this.#events[index];
const parsedURL = new Common.ParsedURL.ParsedURL(event.args.data.url);
return parsedURL.isValid ? `${parsedURL.displayName} (${parsedURL.host})` : event.args.data.url || null;
}
entryFont(_index: number): string|null {
return this.#networkTrackAppender?.font() || null;
}
/**
* Returns the pixels needed to decorate the event.
* The pixel values are relative to the start of the earliest event of the request.
*
* Request.beginTime(), which is used in FlameChart to calculate the unclippedBarX
* v
* |----------------[ (URL text) waiting time | request ]--------|
* ^start ^sendStart ^headersEnd ^Finish ^end
* @param event
* @param unclippedBarX The start pixel of the request. It is calculated with request.beginTime() in FlameChart.
* @param timeToPixelRatio
* @returns the pixels used to draw the waiting time, the left and right whiskers, and the URL text
*/
getDecorationPixels(
event: TraceEngine.Types.TraceEvents.TraceEventSyntheticNetworkRequest, unclippedBarX: number,
timeToPixelRatio: number): {sendStart: number, headersEnd: number, finish: number, start: number, end: number} {
const beginTime = TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.ts);
const timeToPixel = (time: number): number => Math.floor(unclippedBarX + (time - beginTime) * timeToPixelRatio);
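// Illustrative example (numbers are hypothetical): with beginTime = 1000ms,
// unclippedBarX = 50px and timeToPixelRatio = 2px/ms, an event time of 1010ms
// maps to Math.floor(50 + (1010 - 1000) * 2) = 70px.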
const minBarWidthPx = 2;
const startTime = TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.ts);
const endTime = TraceEngine.Helpers.Timing.microSecondsToMilliseconds(
(event.ts + event.dur) as TraceEngine.Types.Timing.MicroSeconds);
const sendStartTime =
TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.args.data.syntheticData.sendStartTime);
const headersEndTime =
TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.args.data.syntheticData.downloadStart);
const sendStart = Math.max(timeToPixel(sendStartTime), unclippedBarX);
const headersEnd = Math.max(timeToPixel(headersEndTime), sendStart);
const finish = Math.max(
timeToPixel(TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.args.data.syntheticData.finishTime)),
headersEnd + minBarWidthPx);
const start = timeToPixel(startTime);
const end = Math.max(timeToPixel(endTime), finish);
return {sendStart, headersEnd, finish, start, end};
}
/**
* Decorates the entry:
* Draw the waiting time between |sendStart| and |headersEnd|
* by adding an extra transparent white layer.
* Draw a whisker between |start| and |sendStart|
* Draw a whisker between |finish| and |end|
* by drawing another layer of background color to "clear" the area,
* then drawing the whisker.
* Draw the URL after |sendStart|.
*
* |----------------[ (URL text) waiting time | request ]--------|
* ^start ^sendStart ^headersEnd ^Finish ^end
* @param index
* @param context
* @param barX The x pixel of the visible part request
* @param barY The y pixel of the visible part request
* @param barWidth The width of the visible part request
* @param barHeight The height of the visible part request
* @param unclippedBarX The start pixel of the request compared to the visible area. It is calculated with request.beginTime() in FlameChart.
* @param timeToPixelRatio
* @returns whether the entry needs to be decorated, which is always true if the request has a "timing" field
*/
decorateEntry(
index: number, context: CanvasRenderingContext2D, _text: string|null, barX: number, barY: number,
barWidth: number, barHeight: number, unclippedBarX: number, timeToPixelRatio: number): boolean {
const event = this.#events[index];
const {sendStart, headersEnd, finish, start, end} =
this.getDecorationPixels(event, unclippedBarX, timeToPixelRatio);
// Draw waiting time.
context.fillStyle = 'hsla(0, 100%, 100%, 0.8)';
context.fillRect(sendStart + 0.5, barY + 0.5, headersEnd - sendStart - 0.5, barHeight - 2);
// Clear portions of initial rect to prepare for the ticks.
context.fillStyle = ThemeSupport.ThemeSupport.instance().getComputedValue('--color-background');
context.fillRect(barX, barY - 0.5, sendStart - barX, barHeight);
context.fillRect(finish, barY - 0.5, barX + barWidth - finish, barHeight);
// Draws left and right whiskers
function drawTick(begin: number, end: number, y: number): void {
const /** @const */ tickHeightPx = 6;
context.moveTo(begin, y - tickHeightPx / 2);
context.lineTo(begin, y + tickHeightPx / 2);
context.moveTo(begin, y);
context.lineTo(end, y);
}
context.beginPath();
context.lineWidth = 1;
context.strokeStyle = '#ccc';
const lineY = Math.floor(barY + barHeight / 2) + 0.5;
const leftTick = start + 0.5;
const rightTick = end - 0.5;
drawTick(leftTick, sendStart, lineY);
drawTick(rightTick, finish, lineY);
context.stroke();
const color = this.#colorForPriority(event.args.data.priority);
if (color) {
context.fillStyle = color;
context.fillRect(sendStart + 0.5, barY + 0.5, 3.5, 3.5);
}
// Draw request URL as text
const textStart = Math.max(sendStart, 0);
const textWidth = finish - textStart;
| constructor | identifier_name |
|
TimelineFlameChartNetworkDataProvider.ts | this.timelineData();
return !this.#events.length;
}
maxStackDepth(): number {
return this.#maxLevel;
}
timelineData(): PerfUI.FlameChart.FlameChartTimelineData {
if (this.#timelineDataInternal && this.#timelineDataInternal.entryLevels.length !== 0) {
// The flame chart data is built already, so return the cached data.
return this.#timelineDataInternal;
}
this.#timelineDataInternal = PerfUI.FlameChart.FlameChartTimelineData.createEmpty();
if (!this.#traceEngineData) {
return this.#timelineDataInternal;
}
this.#events = this.#traceEngineData.NetworkRequests.byTime;
this.#networkTrackAppender = new NetworkTrackAppender(this.#traceEngineData, this.#timelineDataInternal);
this.#maxLevel = this.#networkTrackAppender.appendTrackAtLevel(0);
return this.#timelineDataInternal;
}
minimumBoundary(): number {
return this.#minimumBoundaryInternal;
}
totalTime(): number {
return this.#timeSpan;
}
setWindowTimes(startTime: number, endTime: number): void {
this.#updateTimelineData(startTime, endTime);
}
createSelection(index: number): TimelineSelection|null {
if (index === -1) {
return null;
}
const event = this.#events[index];
this.#lastSelection = new Selection(TimelineSelection.fromTraceEvent(event), index);
return this.#lastSelection.timelineSelection;
}
entryIndexForSelection(selection: TimelineSelection|null): number {
if (!selection) {
return -1;
}
if (this.#lastSelection && this.#lastSelection.timelineSelection.object === selection.object) {
return this.#lastSelection.entryIndex;
}
if (!TimelineSelection.isSyntheticNetworkRequestDetailsEventSelection(selection.object)) {
return -1;
}
const index = this.#events.indexOf(selection.object);
if (index !== -1) {
this.#lastSelection = new Selection(TimelineSelection.fromTraceEvent(selection.object), index);
}
return index;
}
entryColor(index: number): string {
if (!this.#networkTrackAppender) {
throw new Error('networkTrackAppender should not be empty');
}
return this.#networkTrackAppender.colorForEvent(this.#events[index]);
}
textColor(_index: number): string {
return FlameChartStyle.textColor;
}
entryTitle(index: number): string|null {
const event = this.#events[index];
const parsedURL = new Common.ParsedURL.ParsedURL(event.args.data.url);
return parsedURL.isValid ? `${parsedURL.displayName} (${parsedURL.host})` : event.args.data.url || null;
}
entryFont(_index: number): string|null {
return this.#networkTrackAppender?.font() || null;
}
/**
* Returns the pixels needed to decorate the event.
* The pixel values are relative to the start of the earliest event of the request.
*
* Request.beginTime(), which is used in FlameChart to calculate the unclippedBarX
* v
* |----------------[ (URL text) waiting time | request ]--------|
* ^start ^sendStart ^headersEnd ^Finish ^end
* @param event
* @param unclippedBarX The start pixel of the request. It is calculated with request.beginTime() in FlameChart.
* @param timeToPixelRatio
* @returns the pixels used to draw the waiting time, the left and right whiskers, and the URL text
*/
getDecorationPixels(
event: TraceEngine.Types.TraceEvents.TraceEventSyntheticNetworkRequest, unclippedBarX: number,
timeToPixelRatio: number): {sendStart: number, headersEnd: number, finish: number, start: number, end: number} {
const beginTime = TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.ts);
const timeToPixel = (time: number): number => Math.floor(unclippedBarX + (time - beginTime) * timeToPixelRatio);
const minBarWidthPx = 2;
const startTime = TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.ts);
const endTime = TraceEngine.Helpers.Timing.microSecondsToMilliseconds(
(event.ts + event.dur) as TraceEngine.Types.Timing.MicroSeconds);
const sendStartTime =
TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.args.data.syntheticData.sendStartTime);
const headersEndTime =
TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.args.data.syntheticData.downloadStart);
const sendStart = Math.max(timeToPixel(sendStartTime), unclippedBarX);
const headersEnd = Math.max(timeToPixel(headersEndTime), sendStart);
const finish = Math.max(
timeToPixel(TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.args.data.syntheticData.finishTime)),
headersEnd + minBarWidthPx);
const start = timeToPixel(startTime); | const end = Math.max(timeToPixel(endTime), finish);
return {sendStart, headersEnd, finish, start, end};
}
/**
* Decorates the entry:
* Draw the waiting time between |sendStart| and |headersEnd|
* by adding an extra transparent white layer.
* Draw a whisker between |start| and |sendStart|
* Draw a whisker between |finish| and |end|
* by drawing another layer of background color to "clear" the area,
* then drawing the whisker.
* Draw the URL after |sendStart|.
*
* |----------------[ (URL text) waiting time | request ]--------|
* ^start ^sendStart ^headersEnd ^Finish ^end
* @param index
* @param context
* @param barX The x pixel of the visible part request
* @param barY The y pixel of the visible part request
* @param barWidth The width of the visible part request
* @param barHeight The height of the visible part request
* @param unclippedBarX The start pixel of the request compared to the visible area. It is calculated with request.beginTime() in FlameChart.
* @param timeToPixelRatio
* @returns whether the entry needs to be decorated, which is always true if the request has a "timing" field
*/
decorateEntry(
index: number, context: CanvasRenderingContext2D, _text: string|null, barX: number, barY: number,
barWidth: number, barHeight: number, unclippedBarX: number, timeToPixelRatio: number): boolean {
const event = this.#events[index];
const {sendStart, headersEnd, finish, start, end} =
this.getDecorationPixels(event, unclippedBarX, timeToPixelRatio);
// Draw waiting time.
context.fillStyle = 'hsla(0, 100%, 100%, 0.8)';
context.fillRect(sendStart + 0.5, barY + 0.5, headersEnd - sendStart - 0.5, barHeight - 2);
// Clear portions of initial rect to prepare for the ticks.
context.fillStyle = ThemeSupport.ThemeSupport.instance().getComputedValue('--color-background');
context.fillRect(barX, barY - 0.5, sendStart - barX, barHeight);
context.fillRect(finish, barY - 0.5, barX + barWidth - finish, barHeight);
// Draws left and right whiskers
function drawTick(begin: number, end: number, y: number): void {
const /** @const */ tickHeightPx = 6;
context.moveTo(begin, y - tickHeightPx / 2);
context.lineTo(begin, y + tickHeightPx / 2);
context.moveTo(begin, y);
context.lineTo(end, y);
}
context.beginPath();
context.lineWidth = 1;
context.strokeStyle = '#ccc';
const lineY = Math.floor(barY + barHeight / 2) + 0.5;
const leftTick = start + 0.5;
const rightTick = end - 0.5;
drawTick(leftTick, sendStart, lineY);
drawTick(rightTick, finish, lineY);
context.stroke();
const color = this.#colorForPriority(event.args.data.priority);
if (color) {
context.fillStyle = color;
context.fillRect(sendStart + 0.5, barY + 0.5, 3.5, 3.5);
}
// Draw request URL as text
const textStart = Math.max(sendStart, 0);
const textWidth = finish - textStart;
const /** @const */ minTextWidthPx = 20;
if (textWidth >= minTextWidthPx) {
let title = this.entryTitle(index) || '';
if (event.args.data.fromServiceWorker) {
title = '⚙ ' + title;
}
if (title) {
const /** @const */ textPadding = 4;
const /** @const */ textBaseline = 5;
const textBaseHeight = barHeight - textBaseline;
const trimmedText = UI.UIUtils.trimTextEnd(context, title, textWidth - 2 * textPadding);
context.fillStyle = '#333';
context.fillText(trimmedText, textStart + textPadding | random_line_split |
|
TimelineFlameChartNetworkDataProvider.ts | this.timelineData();
return !this.#events.length;
}
maxStackDepth(): number {
return this.#maxLevel;
}
timelineData(): PerfUI.FlameChart.FlameChartTimelineData {
if (this.#timelineDataInternal && this.#timelineDataInternal.entryLevels.length !== 0) {
// The flame chart data is built already, so return the cached data.
return this.#timelineDataInternal;
}
this.#timelineDataInternal = PerfUI.FlameChart.FlameChartTimelineData.createEmpty();
if (!this.#traceEngineData) {
return this.#timelineDataInternal;
}
this.#events = this.#traceEngineData.NetworkRequests.byTime;
this.#networkTrackAppender = new NetworkTrackAppender(this.#traceEngineData, this.#timelineDataInternal);
this.#maxLevel = this.#networkTrackAppender.appendTrackAtLevel(0);
return this.#timelineDataInternal;
}
minimumBoundary(): number {
return this.#minimumBoundaryInternal;
}
totalTime(): number {
return this.#timeSpan;
}
setWindowTimes(startTime: number, endTime: number): void {
this.#updateTimelineData(startTime, endTime);
}
createSelection(index: number): TimelineSelection|null {
if (index === -1) {
return null;
}
const event = this.#events[index];
this.#lastSelection = new Selection(TimelineSelection.fromTraceEvent(event), index);
return this.#lastSelection.timelineSelection;
}
entryIndexForSelection(selection: TimelineSelection|null): number {
if (!selection) |
if (this.#lastSelection && this.#lastSelection.timelineSelection.object === selection.object) {
return this.#lastSelection.entryIndex;
}
if (!TimelineSelection.isSyntheticNetworkRequestDetailsEventSelection(selection.object)) {
return -1;
}
const index = this.#events.indexOf(selection.object);
if (index !== -1) {
this.#lastSelection = new Selection(TimelineSelection.fromTraceEvent(selection.object), index);
}
return index;
}
entryColor(index: number): string {
if (!this.#networkTrackAppender) {
throw new Error('networkTrackAppender should not be empty');
}
return this.#networkTrackAppender.colorForEvent(this.#events[index]);
}
textColor(_index: number): string {
return FlameChartStyle.textColor;
}
entryTitle(index: number): string|null {
const event = this.#events[index];
const parsedURL = new Common.ParsedURL.ParsedURL(event.args.data.url);
return parsedURL.isValid ? `${parsedURL.displayName} (${parsedURL.host})` : event.args.data.url || null;
}
entryFont(_index: number): string|null {
return this.#networkTrackAppender?.font() || null;
}
/**
* Returns the pixels needed to decorate the event.
* The pixel values are relative to the start of the earliest event of the request.
*
* Request.beginTime(), which is used in FlameChart to calculate the unclippedBarX
* v
* |----------------[ (URL text) waiting time | request ]--------|
* ^start ^sendStart ^headersEnd ^Finish ^end
* @param event
* @param unclippedBarX The start pixel of the request. It is calculated with request.beginTime() in FlameChart.
* @param timeToPixelRatio
* @returns the pixels used to draw the waiting time, the left and right whiskers, and the URL text
*/
getDecorationPixels(
event: TraceEngine.Types.TraceEvents.TraceEventSyntheticNetworkRequest, unclippedBarX: number,
timeToPixelRatio: number): {sendStart: number, headersEnd: number, finish: number, start: number, end: number} {
const beginTime = TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.ts);
const timeToPixel = (time: number): number => Math.floor(unclippedBarX + (time - beginTime) * timeToPixelRatio);
const minBarWidthPx = 2;
const startTime = TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.ts);
const endTime = TraceEngine.Helpers.Timing.microSecondsToMilliseconds(
(event.ts + event.dur) as TraceEngine.Types.Timing.MicroSeconds);
const sendStartTime =
TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.args.data.syntheticData.sendStartTime);
const headersEndTime =
TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.args.data.syntheticData.downloadStart);
const sendStart = Math.max(timeToPixel(sendStartTime), unclippedBarX);
const headersEnd = Math.max(timeToPixel(headersEndTime), sendStart);
const finish = Math.max(
timeToPixel(TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.args.data.syntheticData.finishTime)),
headersEnd + minBarWidthPx);
const start = timeToPixel(startTime);
const end = Math.max(timeToPixel(endTime), finish);
return {sendStart, headersEnd, finish, start, end};
}
/**
* Decorates the entry:
* Draw the waiting time between |sendStart| and |headersEnd|
* by adding an extra transparent white layer.
* Draw a whisker between |start| and |sendStart|
* Draw a whisker between |finish| and |end|
* by drawing another layer of background color to "clear" the area,
* then drawing the whisker.
* Draw the URL after |sendStart|.
*
* |----------------[ (URL text) waiting time | request ]--------|
* ^start ^sendStart ^headersEnd ^Finish ^end
* @param index
* @param context
* @param barX The x pixel of the visible part request
* @param barY The y pixel of the visible part request
* @param barWidth The width of the visible part request
* @param barHeight The height of the visible part request
* @param unclippedBarX The start pixel of the request compared to the visible area. It is calculated with request.beginTime() in FlameChart.
* @param timeToPixelRatio
* @returns whether the entry needs to be decorated, which is always true if the request has a "timing" field
*/
decorateEntry(
index: number, context: CanvasRenderingContext2D, _text: string|null, barX: number, barY: number,
barWidth: number, barHeight: number, unclippedBarX: number, timeToPixelRatio: number): boolean {
const event = this.#events[index];
const {sendStart, headersEnd, finish, start, end} =
this.getDecorationPixels(event, unclippedBarX, timeToPixelRatio);
// Draw waiting time.
context.fillStyle = 'hsla(0, 100%, 100%, 0.8)';
context.fillRect(sendStart + 0.5, barY + 0.5, headersEnd - sendStart - 0.5, barHeight - 2);
// Clear portions of initial rect to prepare for the ticks.
context.fillStyle = ThemeSupport.ThemeSupport.instance().getComputedValue('--color-background');
context.fillRect(barX, barY - 0.5, sendStart - barX, barHeight);
context.fillRect(finish, barY - 0.5, barX + barWidth - finish, barHeight);
// Draws left and right whiskers
function drawTick(begin: number, end: number, y: number): void {
const /** @const */ tickHeightPx = 6;
context.moveTo(begin, y - tickHeightPx / 2);
context.lineTo(begin, y + tickHeightPx / 2);
context.moveTo(begin, y);
context.lineTo(end, y);
}
context.beginPath();
context.lineWidth = 1;
context.strokeStyle = '#ccc';
const lineY = Math.floor(barY + barHeight / 2) + 0.5;
const leftTick = start + 0.5;
const rightTick = end - 0.5;
drawTick(leftTick, sendStart, lineY);
drawTick(rightTick, finish, lineY);
context.stroke();
const color = this.#colorForPriority(event.args.data.priority);
if (color) {
context.fillStyle = color;
context.fillRect(sendStart + 0.5, barY + 0.5, 3.5, 3.5);
}
// Draw request URL as text
const textStart = Math.max(sendStart, 0);
const textWidth = finish - textStart;
const /** @const */ minTextWidthPx = 20;
if (textWidth >= minTextWidthPx) {
let title = this.entryTitle(index) || '';
if (event.args.data.fromServiceWorker) {
title = '⚙ ' + title;
}
if (title) {
const /** @const */ textPadding = 4;
const /** @const */ textBaseline = 5;
const textBaseHeight = barHeight - textBaseline;
const trimmedText = UI.UIUtils.trimTextEnd(context, title, textWidth - 2 * textPadding);
context.fillStyle = '#333';
context.fillText(trimmedText, textStart + | {
return -1;
} | conditional_block |
TimelineFlameChartNetworkDataProvider.ts | Selection.fromTraceEvent(selection.object), index);
}
return index;
}
entryColor(index: number): string {
if (!this.#networkTrackAppender) {
throw new Error('networkTrackAppender should not be empty');
}
return this.#networkTrackAppender.colorForEvent(this.#events[index]);
}
textColor(_index: number): string {
return FlameChartStyle.textColor;
}
entryTitle(index: number): string|null {
const event = this.#events[index];
const parsedURL = new Common.ParsedURL.ParsedURL(event.args.data.url);
return parsedURL.isValid ? `${parsedURL.displayName} (${parsedURL.host})` : event.args.data.url || null;
}
entryFont(_index: number): string|null {
return this.#networkTrackAppender?.font() || null;
}
/**
* Returns the pixels needed to decorate the event.
* The pixel values are relative to the start of the earliest event of the request.
*
* Request.beginTime(), which is used in FlameChart to calculate the unclippedBarX
* v
* |----------------[ (URL text) waiting time | request ]--------|
* ^start ^sendStart ^headersEnd ^Finish ^end
* @param event
* @param unclippedBarX The start pixel of the request. It is calculated with request.beginTime() in FlameChart.
* @param timeToPixelRatio
* @returns the pixels used to draw the waiting time, the left and right whiskers, and the URL text
*/
getDecorationPixels(
event: TraceEngine.Types.TraceEvents.TraceEventSyntheticNetworkRequest, unclippedBarX: number,
timeToPixelRatio: number): {sendStart: number, headersEnd: number, finish: number, start: number, end: number} {
const beginTime = TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.ts);
const timeToPixel = (time: number): number => Math.floor(unclippedBarX + (time - beginTime) * timeToPixelRatio);
const minBarWidthPx = 2;
const startTime = TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.ts);
const endTime = TraceEngine.Helpers.Timing.microSecondsToMilliseconds(
(event.ts + event.dur) as TraceEngine.Types.Timing.MicroSeconds);
const sendStartTime =
TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.args.data.syntheticData.sendStartTime);
const headersEndTime =
TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.args.data.syntheticData.downloadStart);
const sendStart = Math.max(timeToPixel(sendStartTime), unclippedBarX);
const headersEnd = Math.max(timeToPixel(headersEndTime), sendStart);
const finish = Math.max(
timeToPixel(TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.args.data.syntheticData.finishTime)),
headersEnd + minBarWidthPx);
const start = timeToPixel(startTime);
const end = Math.max(timeToPixel(endTime), finish);
return {sendStart, headersEnd, finish, start, end};
}
/**
* Decorates the entry:
* Draw the waiting time between |sendStart| and |headersEnd|
* by adding an extra transparent white layer.
* Draw a whisker between |start| and |sendStart|
* Draw a whisker between |finish| and |end|
* by drawing another layer of background color to "clear" the area,
* then drawing the whisker.
* Draw the URL after |sendStart|.
*
* |----------------[ (URL text) waiting time | request ]--------|
* ^start ^sendStart ^headersEnd ^Finish ^end
* @param index
* @param context
* @param barX The x pixel of the visible part request
* @param barY The y pixel of the visible part request
* @param barWidth The width of the visible part request
* @param barHeight The height of the visible part request
* @param unclippedBarX The start pixel of the request compared to the visible area. It is calculated with request.beginTime() in FlameChart.
* @param timeToPixelRatio
* @returns whether the entry needs to be decorated, which is always true if the request has a "timing" field
*/
decorateEntry(
index: number, context: CanvasRenderingContext2D, _text: string|null, barX: number, barY: number,
barWidth: number, barHeight: number, unclippedBarX: number, timeToPixelRatio: number): boolean {
const event = this.#events[index];
const {sendStart, headersEnd, finish, start, end} =
this.getDecorationPixels(event, unclippedBarX, timeToPixelRatio);
// Draw waiting time.
context.fillStyle = 'hsla(0, 100%, 100%, 0.8)';
context.fillRect(sendStart + 0.5, barY + 0.5, headersEnd - sendStart - 0.5, barHeight - 2);
// Clear portions of initial rect to prepare for the ticks.
context.fillStyle = ThemeSupport.ThemeSupport.instance().getComputedValue('--color-background');
context.fillRect(barX, barY - 0.5, sendStart - barX, barHeight);
context.fillRect(finish, barY - 0.5, barX + barWidth - finish, barHeight);
// Draws left and right whiskers
function drawTick(begin: number, end: number, y: number): void {
const /** @const */ tickHeightPx = 6;
context.moveTo(begin, y - tickHeightPx / 2);
context.lineTo(begin, y + tickHeightPx / 2);
context.moveTo(begin, y);
context.lineTo(end, y);
}
context.beginPath();
context.lineWidth = 1;
context.strokeStyle = '#ccc';
const lineY = Math.floor(barY + barHeight / 2) + 0.5;
const leftTick = start + 0.5;
const rightTick = end - 0.5;
drawTick(leftTick, sendStart, lineY);
drawTick(rightTick, finish, lineY);
context.stroke();
const color = this.#colorForPriority(event.args.data.priority);
if (color) {
context.fillStyle = color;
context.fillRect(sendStart + 0.5, barY + 0.5, 3.5, 3.5);
}
// Draw request URL as text
const textStart = Math.max(sendStart, 0);
const textWidth = finish - textStart;
const /** @const */ minTextWidthPx = 20;
if (textWidth >= minTextWidthPx) {
let title = this.entryTitle(index) || '';
if (event.args.data.fromServiceWorker) {
title = '⚙ ' + title;
}
if (title) {
const /** @const */ textPadding = 4;
const /** @const */ textBaseline = 5;
const textBaseHeight = barHeight - textBaseline;
const trimmedText = UI.UIUtils.trimTextEnd(context, title, textWidth - 2 * textPadding);
context.fillStyle = '#333';
context.fillText(trimmedText, textStart + textPadding, barY + textBaseHeight);
}
}
return true;
}
forceDecoration(_index: number): boolean {
return true;
}
prepareHighlightedEntryInfo(index: number): Element|null {
const /** @const */ maxURLChars = 80;
const event = this.#events[index];
const element = document.createElement('div');
const root = UI.Utils.createShadowRootWithCoreStyles(element, {
cssFile: [timelineFlamechartPopoverStyles],
delegatesFocus: undefined,
});
const contents = root.createChild('div', 'timeline-flamechart-popover');
const startTime = TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.ts);
const duration = TraceEngine.Helpers.Timing.microSecondsToMilliseconds(event.dur);
if (startTime && isFinite(duration)) {
contents.createChild('span', 'timeline-info-network-time').textContent =
i18n.TimeUtilities.millisToString(duration, true);
}
const div = (contents.createChild('span') as HTMLElement);
div.textContent = PerfUI.NetworkPriorities.uiLabelForNetworkPriority(
(event.args.data.priority as Protocol.Network.ResourcePriority));
div.style.color = this.#colorForPriority(event.args.data.priority) || 'black';
contents.createChild('span').textContent = Platform.StringUtilities.trimMiddle(event.args.data.url, maxURLChars);
return element;
}
#colorForPriority(priority: string): string|null {
| if (!this.#priorityToValue) {
this.#priorityToValue = new Map([
[Protocol.Network.ResourcePriority.VeryLow, 1],
[Protocol.Network.ResourcePriority.Low, 2],
[Protocol.Network.ResourcePriority.Medium, 3],
[Protocol.Network.ResourcePriority.High, 4],
[Protocol.Network.ResourcePriority.VeryHigh, 5],
]);
}
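// value / 5 scales the alpha: VeryLow renders as hsla(214, 80%, 50%, 0.2),
// VeryHigh as hsla(214, 80%, 50%, 1); unknown priorities return null.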
const value = this.#priorityToValue.get(priority);
return value ? `hsla(214, 80%, 50%, ${value / 5})` : null;
}
| identifier_body |
|
windows.rs | _registry_key(protocol: &str) -> String {
format!(r"SOFTWARE\Classes\{}", protocol)
}
fn get_configuration_registry_key(protocol: &str) -> String {
format!(r"Software\bitSpatter\Hermes\Protocols\{}", protocol)
}
/// Register associations with Windows to handle our protocol, and the command we'll invoke
fn register_command(
protocol: &str,
#[allow(clippy::ptr_arg)] commandline: &Vec<String>,
extra_args: Option<&str>,
) -> io::Result<()> {
use std::env::current_exe;
let exe_path = current_exe()?;
let exe_path = exe_path.to_str().unwrap_or_default().to_owned();
let icon_path = format!("\"{}\",0", exe_path);
let open_command = if let Some(extra_args) = extra_args {
format!("\"{}\" {} open \"%1\"", exe_path, extra_args)
} else {
format!("\"{}\" open \"%1\"", exe_path)
};
let hkcu = RegKey::predef(HKEY_CURRENT_USER);
// Configure our ProgID to point to the right command
let protocol_path = get_protocol_registry_key(protocol);
let (progid_class, _) = hkcu.create_subkey(&protocol_path)?;
progid_class.set_value("", &format!("URL:{} Protocol", protocol))?;
// Indicates that this class defines a protocol handler
progid_class.set_value("URL Protocol", &"")?;
let (progid_class_defaulticon, _) = progid_class.create_subkey("DefaultIcon")?;
progid_class_defaulticon.set_value("", &icon_path)?;
debug!(
r"set HKEY_CURRENT_USER\{}\DefaultIcon to '{}'",
protocol_path, icon_path
);
let (progid_class_shell_open_command, _) = progid_class.create_subkey(r"shell\open\command")?;
progid_class_shell_open_command.set_value("", &open_command)?;
debug!(
r"set HKEY_CURRENT_USER\{}\shell\open\command to '{}'",
protocol_path, open_command
);
info!("registering command for {}://", protocol);
let config_path = get_configuration_registry_key(&protocol);
let (config, _) = hkcu.create_subkey(&config_path)?;
config.set_value("command", commandline)?;
debug!(
r"set HKEY_CURRENT_USER\{}\command to {:?}",
config_path, commandline
);
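// For a protocol such as "myapp" (hypothetical), the keys written above end up
// looking roughly like:
//   HKEY_CURRENT_USER\SOFTWARE\Classes\myapp
//     (Default)    = "URL:myapp Protocol"
//     URL Protocol = ""
//     DefaultIcon\(Default)        = "C:\path\to\this.exe",0
//     shell\open\command\(Default) = "C:\path\to\this.exe" open "%1"
//   HKEY_CURRENT_USER\Software\bitSpatter\Hermes\Protocols\myapp
//     command = <the commandline passed in>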
Ok(())
}
/// Remove all the registry keys that we've set up for a protocol
fn unregister_protocol(protocol: &str) {
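// Failures while deleting are only logged as warnings: unregistering a protocol
// that was never registered (or was already partially removed) is not an error.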
let hkcu = RegKey::predef(HKEY_CURRENT_USER);
let protocol_path = get_protocol_registry_key(protocol);
trace!("querying protocol registration at {}", protocol_path);
if let Ok(protocol_registry_key) =
hkcu.open_subkey_with_flags(&protocol_path, ENUMERATE_AND_DELETE_FLAGS)
{
info!("removing protocol registration for {}://", protocol);
let result = protocol_registry_key.delete_subkey_all("");
if let Err(error) = result {
warn!("unable to delete {}: {}", protocol_path, error);
}
} else {
trace!(
"could not open {}, assuming it doesn't exist",
protocol_path,
);
}
let _ = hkcu.delete_subkey(&protocol_path);
let configuration_path = get_configuration_registry_key(protocol);
trace!("querying configuration at {}", configuration_path);
if let Ok(configuration_registry_key) =
hkcu.open_subkey_with_flags(&configuration_path, ENUMERATE_AND_DELETE_FLAGS)
{
info!("removing configuration for {}://", protocol);
let result = configuration_registry_key.delete_subkey_all("");
if let Err(error) = result {
warn!("unable to delete {}: {}", configuration_path, error);
}
} else {
trace!(
"could not open {}, assuming it doesn't exist",
configuration_path,
);
}
let _ = hkcu.delete_subkey(&configuration_path);
}
/// Combine the path and query string from the given Url
fn get_path_and_extras(url: &url::Url) -> String {
let mut path = url.path().to_owned();
if let Some(query) = url.query() {
path += "?";
path += query;
}
path
}
/// Dispatch the given URL to the correct mailslot or launch the editor
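/// For example (hypothetical protocol and host), "myapp://project/open?file=Foo"
/// splits into protocol "myapp" and full_path "/project/open?file=Foo"; if a
/// running instance owns the mailslot bitSpatter\Hermes\myapp the path is sent
/// there, otherwise the registered command is spawned with "%1" replaced by it.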
fn open_url(url: &str) -> Result<()> {
let url = url::Url::parse(url)?;
let protocol = url.scheme();
let hostname = url
.host_str()
.ok_or_else(|| anyhow!("could not parse hostname from {}", url))?;
let path = get_path_and_extras(&url);
let full_path = format!("/{}{}", hostname, path);
trace!(
"split url {} into protocol={}, full_path={} (hostname={} + path={})",
url,
protocol,
full_path,
hostname,
path
);
let hkcu = RegKey::predef(HKEY_CURRENT_USER);
let config = hkcu
.open_subkey(get_configuration_registry_key(protocol))
.with_context(|| format!("no hostnames registered when trying to handle url {}", url))?; | .get_value("command")
.with_context(|| format!("command not registered when trying to handle url {}", url))?;
let could_send = {
let slot = MailslotName::local(&format!(r"bitSpatter\Hermes\{}", protocol));
trace!("Attempting to send URL to mailslot {}", slot.to_string());
if let Ok(mut client) = MailslotClient::new(&slot) {
if let Err(error) = client.send_message(full_path.as_bytes()) {
warn!("Could not send mail slot message to {}: {} -- assuming application is shutting down, starting a new one", slot.to_string(), error);
false
} else {
trace!("Delivered using Mailslot");
true
}
} else {
trace!("Could not connect to Mailslot, assuming application is not running");
false
}
};
if !could_send {
let (exe_name, args) = {
debug!(
"registered handler for {}: {:?}",
protocol, protocol_command
);
let mut protocol_command = protocol_command.into_iter();
let exe_name = protocol_command
.next()
.ok_or_else(|| anyhow!("empty command specified for hostname {}", hostname))?;
// TODO: Handle %%1 as an escape?
let args: Vec<_> = protocol_command
.map(|arg: String| arg.replace("%1", &full_path))
.collect();
(exe_name, args)
};
info!("executing {:?} with arguments {:?}", exe_name, args);
Command::new(&exe_name)
.args(&args)
.stdout(Stdio::null())
.stderr(Stdio::null())
.stdin(Stdio::null())
.spawn()
.with_context(|| format!("Failed to execute {:?} {:?}", exe_name, args))?;
}
Ok(())
}
/// Validate the scheme according to RFC3986 (https://datatracker.ietf.org/doc/html/rfc3986)
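/// For example, " My-App+1 " normalizes to "my-app+1", while "1app" (leading
/// digit) and "my app" (embedded space) are rejected.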
fn parse_scheme(src: &str) -> Result<String, anyhow::Error> {
let src = src.trim();
let mut chars = src.chars();
let first_char = chars
.next()
.ok_or_else(|| anyhow!("protocol needs to contain at least one character"))?;
if !first_char.is_ascii_alphabetic() {
bail!(
"protocol '{}' needs to start with an alphabetic character",
src
);
}
for char in chars {
if !char.is_ascii_alphanumeric() && char != '+' && char != '-' && char != '.' {
bail!("protocol '{}' can only contain the letters a-z, the numbers 0-9, '+', '-', and '.'", src);
}
}
Ok(src.to_lowercase())
}
// This is the definition of our command line options
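// Typical invocations (the executable name is illustrative):
//   hermes.exe register myapp "C:\Path\To\Editor.exe" "%1"
//   hermes.exe open "myapp://host/some/path"
//   hermes.exe unregister myapp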
#[derive(Debug, StructOpt)]
#[structopt(
name = DISPLAY_NAME,
about = DESCRIPTION
)]
struct CommandOptions {
/// Use verbose logging
#[structopt(short, long)]
verbose: bool,
/// Use debug logging, even more verbose than --verbose
#[structopt(long)]
debug: bool,
/// Choose the mode of operation
#[structopt(subcommand)]
mode: ExecutionMode,
}
#[derive(Debug, StructOpt)]
enum ExecutionMode {
/// Dispatch the given URL to Unreal Engine (or launch it, if needed)
Open {
/// URL to open
url: String,
},
/// Register this EXE as a URL protocol handler
Register {
/// The protocol this exe will be registered for
#[structopt(parse(try_from_str = parse_scheme))]
protocol: String,
/// Enable debug logging for this registration
#[structopt(long)]
register_with_debugging: bool,
/// The command line that will handle the registration if needed, where %1 is the placeholder for the path
commandline: Vec<String>,
},
/// Remove all registry entries for the URL protocol handler & hostname configuration
Unregister {
/// The protocol we will delete the registration for
#[structopt(parse(try_from_str = parse_scheme))]
protocol: String,
},
}
fn get_exe_relative_path(filename: &str) -> io::Result<PathBuf> {
let mut path = std::env::current_exe()?;
path.set_file_name(filename);
Ok(path)
}
| let protocol_command: Vec<_> = config | random_line_split |
windows.rs | _key(protocol: &str) -> String {
format!(r"SOFTWARE\Classes\{}", protocol)
}
fn get_configuration_registry_key(protocol: &str) -> String {
format!(r"Software\bitSpatter\Hermes\Protocols\{}", protocol)
}
/// Register associations with Windows to handle our protocol, and the command we'll invoke
fn register_command(
protocol: &str,
#[allow(clippy::ptr_arg)] commandline: &Vec<String>,
extra_args: Option<&str>,
) -> io::Result<()> {
use std::env::current_exe;
let exe_path = current_exe()?;
let exe_path = exe_path.to_str().unwrap_or_default().to_owned();
let icon_path = format!("\"{}\",0", exe_path);
let open_command = if let Some(extra_args) = extra_args {
format!("\"{}\" {} open \"%1\"", exe_path, extra_args)
} else {
format!("\"{}\" open \"%1\"", exe_path)
};
let hkcu = RegKey::predef(HKEY_CURRENT_USER);
// Configure our ProgID to point to the right command
let protocol_path = get_protocol_registry_key(protocol);
let (progid_class, _) = hkcu.create_subkey(&protocol_path)?;
progid_class.set_value("", &format!("URL:{} Protocol", protocol))?;
// Indicates that this class defines a protocol handler
progid_class.set_value("URL Protocol", &"")?;
let (progid_class_defaulticon, _) = progid_class.create_subkey("DefaultIcon")?;
progid_class_defaulticon.set_value("", &icon_path)?;
debug!(
r"set HKEY_CURRENT_USER\{}\DefaultIcon to '{}'",
protocol_path, icon_path
);
let (progid_class_shell_open_command, _) = progid_class.create_subkey(r"shell\open\command")?;
progid_class_shell_open_command.set_value("", &open_command)?;
debug!(
r"set HKEY_CURRENT_USER\{}\shell\open\command to '{}'",
protocol_path, open_command
);
info!("registering command for {}://", protocol);
let config_path = get_configuration_registry_key(&protocol);
let (config, _) = hkcu.create_subkey(&config_path)?;
config.set_value("command", commandline)?;
debug!(
r"set HKEY_CURRENT_USER\{}\command to {:?}",
config_path, commandline
);
Ok(())
}
/// Remove all the registry keys that we've set up for a protocol
fn unregister_protocol(protocol: &str) {
let hkcu = RegKey::predef(HKEY_CURRENT_USER);
let protocol_path = get_protocol_registry_key(protocol);
trace!("querying protocol registration at {}", protocol_path);
if let Ok(protocol_registry_key) =
hkcu.open_subkey_with_flags(&protocol_path, ENUMERATE_AND_DELETE_FLAGS)
{
info!("removing protocol registration for {}://", protocol);
let result = protocol_registry_key.delete_subkey_all("");
if let Err(error) = result {
warn!("unable to delete {}: {}", protocol_path, error);
}
} else {
trace!(
"could not open {}, assuming it doesn't exist",
protocol_path,
);
}
let _ = hkcu.delete_subkey(&protocol_path);
let configuration_path = get_configuration_registry_key(protocol);
trace!("querying configuration at {}", configuration_path);
if let Ok(configuration_registry_key) =
hkcu.open_subkey_with_flags(&configuration_path, ENUMERATE_AND_DELETE_FLAGS)
{
info!("removing configuration for {}://", protocol);
let result = configuration_registry_key.delete_subkey_all("");
if let Err(error) = result {
warn!("unable to delete {}: {}", configuration_path, error);
}
} else {
trace!(
"could not open {}, assuming it doesn't exist",
configuration_path,
);
}
let _ = hkcu.delete_subkey(&configuration_path);
}
/// Combine the path and query string from the given Url
fn ge | rl: &url::Url) -> String {
let mut path = url.path().to_owned();
if let Some(query) = url.query() {
path += "?";
path += query;
}
path
}
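// Illustrative sketch, not part of the original source: get_path_and_extras
// keeps only the path and query of a URL, dropping scheme, host, and fragment.
#[cfg(test)]
mod path_and_extras_tests {
    use super::*;

    #[test]
    fn keeps_path_and_query_only() {
        let url = url::Url::parse("hermes://host/branch/sync?changelist=1234#frag").unwrap();
        assert_eq!(get_path_and_extras(&url), "/branch/sync?changelist=1234");
    }
}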
/// Dispatch the given URL to the correct mailslot or launch the editor
fn open_url(url: &str) -> Result<()> {
let url = url::Url::parse(url)?;
let protocol = url.scheme();
let hostname = url
.host_str()
.ok_or_else(|| anyhow!("could not parse hostname from {}", url))?;
let path = get_path_and_extras(&url);
let full_path = format!("/{}{}", hostname, path);
trace!(
"split url {} into protocol={}, full_path={} (hostname={} + path={})",
url,
protocol,
full_path,
hostname,
path
);
let hkcu = RegKey::predef(HKEY_CURRENT_USER);
let config = hkcu
.open_subkey(get_configuration_registry_key(protocol))
.with_context(|| format!("no hostnames registered when trying to handle url {}", url))?;
let protocol_command: Vec<_> = config
.get_value("command")
.with_context(|| format!("command not registered when trying to handle url {}", url))?;
let could_send = {
let slot = MailslotName::local(&format!(r"bitSpatter\Hermes\{}", protocol));
trace!("Attempting to send URL to mailslot {}", slot.to_string());
if let Ok(mut client) = MailslotClient::new(&slot) {
if let Err(error) = client.send_message(full_path.as_bytes()) {
warn!("Could not send mail slot message to {}: {} -- assuming application is shutting down, starting a new one", slot.to_string(), error);
false
} else {
trace!("Delivered using Mailslot");
true
}
} else {
trace!("Could not connect to Mailslot, assuming application is not running");
false
}
};
if !could_send {
let (exe_name, args) = {
debug!(
"registered handler for {}: {:?}",
protocol, protocol_command
);
let mut protocol_command = protocol_command.into_iter();
let exe_name = protocol_command
.next()
.ok_or_else(|| anyhow!("empty command specified for hostname {}", hostname))?;
// TODO: Handle %%1 as an escape?
let args: Vec<_> = protocol_command
.map(|arg: String| arg.replace("%1", &full_path))
.collect();
(exe_name, args)
};
info!("executing {:?} with arguments {:?}", exe_name, args);
Command::new(&exe_name)
.args(&args)
.stdout(Stdio::null())
.stderr(Stdio::null())
.stdin(Stdio::null())
.spawn()
.with_context(|| format!("Failed to execute {:?} {:?}", exe_name, args))?;
}
Ok(())
}
/// Validate the scheme according to RFC3986 (https://datatracker.ietf.org/doc/html/rfc3986)
fn parse_scheme(src: &str) -> Result<String, anyhow::Error> {
let src = src.trim();
let mut chars = src.chars();
let first_char = chars
.next()
.ok_or_else(|| anyhow!("protocol needs to contain at least one character"))?;
if !first_char.is_ascii_alphabetic() {
bail!(
"protocol '{}' needs to start with an alphabetic character",
src
);
}
for char in chars {
if !char.is_ascii_alphanumeric() && char != '+' && char != '-' && char != '.' {
bail!("protocol '{}' can only contain the letters a-z, the numbers 0-9, '+', '-', and '.'", src);
}
}
Ok(src.to_lowercase())
}
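// Illustrative sketch, not part of the original source: parse_scheme accepts
// RFC 3986 scheme names (lowercasing them) and rejects everything else.
#[cfg(test)]
mod parse_scheme_tests {
    use super::*;

    #[test]
    fn accepts_and_normalizes_valid_schemes() {
        assert_eq!(parse_scheme("Hermes+P4").unwrap(), "hermes+p4");
    }

    #[test]
    fn rejects_invalid_schemes() {
        assert!(parse_scheme("4hermes").is_err()); // must start with a letter
        assert!(parse_scheme("her mes").is_err()); // no spaces allowed
        assert!(parse_scheme("").is_err()); // must not be empty
    }
}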
// This is the definition of our command line options
#[derive(Debug, StructOpt)]
#[structopt(
name = DISPLAY_NAME,
about = DESCRIPTION
)]
struct CommandOptions {
/// Use verbose logging
#[structopt(short, long)]
verbose: bool,
/// Use debug logging, even more verbose than --verbose
#[structopt(long)]
debug: bool,
/// Choose the mode of operation
#[structopt(subcommand)]
mode: ExecutionMode,
}
#[derive(Debug, StructOpt)]
enum ExecutionMode {
/// Dispatch the given URL to Unreal Engine (or launch it, if needed)
Open {
/// URL to open
url: String,
},
/// Register this EXE as a URL protocol handler
Register {
/// The protocol this exe will be registered for
#[structopt(parse(try_from_str = parse_scheme))]
protocol: String,
/// Enable debug logging for this registration
#[structopt(long)]
register_with_debugging: bool,
/// The command line that will be invoked to handle URLs if needed, where %1 is the placeholder for the path
commandline: Vec<String>,
},
/// Remove all registry entries for the URL protocol handler & hostname configuration
Unregister {
/// The protocol we will delete the registration for
#[structopt(parse(try_from_str = parse_scheme))]
protocol: String,
},
}
fn get_exe_relative_path(filename: &str) -> io::Result<PathBuf> {
let mut path = std::env::current_exe()?;
path.set_file_name(filename);
Ok(path | t_path_and_extras(u | identifier_name |
windows.rs | _key(protocol: &str) -> String {
format!(r"SOFTWARE\Classes\{}", protocol)
}
fn get_configuration_registry_key(protocol: &str) -> String {
format!(r"Software\bitSpatter\Hermes\Protocols\{}", protocol)
}
/// Register associations with Windows to handle our protocol, and the command we'll invoke
fn register_command(
protocol: &str,
#[allow(clippy::ptr_arg)] commandline: &Vec<String>,
extra_args: Option<&str>,
) -> io::Result<()> {
use std::env::current_exe;
let exe_path = current_exe()?;
let exe_path = exe_path.to_str().unwrap_or_default().to_owned();
let icon_path = format!("\"{}\",0", exe_path);
let open_command = if let Some(extra_args) = extra_args {
format!("\"{}\" {} open \"%1\"", exe_path, extra_args)
} else {
format!("\"{}\" open \"%1\"", exe_path)
};
let hkcu = RegKey::predef(HKEY_CURRENT_USER);
// Configure our ProgID to point to the right command
let protocol_path = get_protocol_registry_key(protocol);
let (progid_class, _) = hkcu.create_subkey(&protocol_path)?;
progid_class.set_value("", &format!("URL:{} Protocol", protocol))?;
// Indicates that this class defines a protocol handler
progid_class.set_value("URL Protocol", &"")?;
let (progid_class_defaulticon, _) = progid_class.create_subkey("DefaultIcon")?;
progid_class_defaulticon.set_value("", &icon_path)?;
debug!(
r"set HKEY_CURRENT_USER\{}\DefaultIcon to '{}'",
protocol_path, icon_path
);
let (progid_class_shell_open_command, _) = progid_class.create_subkey(r"shell\open\command")?;
progid_class_shell_open_command.set_value("", &open_command)?;
debug!(
r"set HKEY_CURRENT_USER\{}\shell\open\command to '{}'",
protocol_path, open_command
);
info!("registering command for {}://", protocol);
let config_path = get_configuration_registry_key(&protocol);
let (config, _) = hkcu.create_subkey(&config_path)?;
config.set_value("command", commandline)?;
debug!(
r"set HKEY_CURRENT_USER\{}\command to {:?}",
config_path, commandline
);
Ok(())
}
/// Remove all the registry keys that we've set up for a protocol
fn unregister_protocol(protocol: &str) {
let hkcu = RegKey::predef(HKEY_CURRENT_USER);
let protocol_path = get_protocol_registry_key(protocol);
trace!("querying protocol registration at {}", protocol_path);
if let Ok(protocol_registry_key) =
hkcu.open_subkey_with_flags(&protocol_path, ENUMERATE_AND_DELETE_FLAGS)
{
info!("removing protocol registration for {}://", protocol);
let result = protocol_registry_key.delete_subkey_all("");
if let Err(error) = result {
warn!("unable to delete {}: {}", protocol_path, error);
}
} else {
trace!(
"could not open {}, assuming it doesn't exist",
protocol_path,
);
}
let _ = hkcu.delete_subkey(&protocol_path);
let configuration_path = get_configuration_registry_key(protocol);
trace!("querying configuration at {}", configuration_path);
if let Ok(configuration_registry_key) =
hkcu.open_subkey_with_flags(&configuration_path, ENUMERATE_AND_DELETE_FLAGS)
{
info!("removing configuration for {}://", protocol);
let result = configuration_registry_key.delete_subkey_all("");
if let Err(error) = result {
warn!("unable to delete {}: {}", configuration_path, error);
}
} else {
trace!(
"could not open {}, assuming it doesn't exist",
configuration_path,
);
}
let _ = hkcu.delete_subkey(&configuration_path);
}
/// Combine the path and query string from the given Url
fn get_path_and_extras(url: &url::Url) -> String {
let mut path = url.path().to_owned();
if let Some(query) = url.query() {
path += "?";
path += query;
}
path
}
/// Dispatch the given URL to the correct mailslot or launch the editor
fn open_url(url: &str) -> Result<()> {
let url = url::Url::parse(url)?;
let protocol = url.scheme();
let hostname = url
.host_str()
.ok_or_else(|| anyhow!("could not parse hostname from {}", url))?;
let path = get_path_and_extras(&url);
let full_path = format!("/{}{}", hostname, path);
trace!(
"split url {} into protocol={}, full_path={} (hostname={} + path={})",
url,
protocol,
full_path,
hostname,
path
);
let hkcu = RegKey::predef(HKEY_CURRENT_USER);
let config = hkcu
.open_subkey(get_configuration_registry_key(protocol))
.with_context(|| format!("no hostnames registered when trying to handle url {}", url))?;
let protocol_command: Vec<_> = config
.get_value("command")
.with_context(|| format!("command not registered when trying to handle url {}", url))?;
let could_send = {
let slot = MailslotName::local(&format!(r"bitSpatter\Hermes\{}", protocol));
trace!("Attempting to send URL to mailslot {}", slot.to_string());
if let Ok(mut client) = MailslotClient::new(&slot) {
| lse {
trace!("Could not connect to Mailslot, assuming application is not running");
false
}
};
if !could_send {
let (exe_name, args) = {
debug!(
"registered handler for {}: {:?}",
protocol, protocol_command
);
let mut protocol_command = protocol_command.into_iter();
let exe_name = protocol_command
.next()
.ok_or_else(|| anyhow!("empty command specified for hostname {}", hostname))?;
// TODO: Handle %%1 as an escape?
let args: Vec<_> = protocol_command
.map(|arg: String| arg.replace("%1", &full_path))
.collect();
(exe_name, args)
};
info!("executing {:?} with arguments {:?}", exe_name, args);
Command::new(&exe_name)
.args(&args)
.stdout(Stdio::null())
.stderr(Stdio::null())
.stdin(Stdio::null())
.spawn()
.with_context(|| format!("Failed to execute {:?} {:?}", exe_name, args))?;
}
Ok(())
}
/// Validate the scheme according to RFC3986 (https://datatracker.ietf.org/doc/html/rfc3986)
fn parse_scheme(src: &str) -> Result<String, anyhow::Error> {
let src = src.trim();
let mut chars = src.chars();
let first_char = chars
.next()
.ok_or_else(|| anyhow!("protocol needs to contain at least one character"))?;
if !first_char.is_ascii_alphabetic() {
bail!(
"protocol '{}' needs to start with an alphabetic character",
src
);
}
for char in chars {
if !char.is_ascii_alphanumeric() && char != '+' && char != '-' && char != '.' {
bail!("protocol '{}' can only contain the letters a-z, the numbers 0-9, '+', '-', and '.'", src);
}
}
Ok(src.to_lowercase())
}
// This is the definition of our command line options
#[derive(Debug, StructOpt)]
#[structopt(
name = DISPLAY_NAME,
about = DESCRIPTION
)]
struct CommandOptions {
/// Use verbose logging
#[structopt(short, long)]
verbose: bool,
/// Use debug logging, even more verbose than --verbose
#[structopt(long)]
debug: bool,
/// Choose the mode of operation
#[structopt(subcommand)]
mode: ExecutionMode,
}
#[derive(Debug, StructOpt)]
enum ExecutionMode {
/// Dispatch the given URL to Unreal Engine (or launch it, if needed)
Open {
/// URL to open
url: String,
},
/// Register this EXE as a URL protocol handler
Register {
/// The protocol this exe will be registered for
#[structopt(parse(try_from_str = parse_scheme))]
protocol: String,
/// Enable debug logging for this registration
#[structopt(long)]
register_with_debugging: bool,
/// The command line that will be invoked to handle URLs if needed, where %1 is the placeholder for the path
commandline: Vec<String>,
},
/// Remove all registry entries for the URL protocol handler & hostname configuration
Unregister {
/// The protocol we will delete the registration for
#[structopt(parse(try_from_str = parse_scheme))]
protocol: String,
},
}
fn get_exe_relative_path(filename: &str) -> io::Result<PathBuf> {
let mut path = std::env::current_exe()?;
path.set_file_name(filename);
Ok(path)
| if let Err(error) = client.send_message(full_path.as_bytes()) {
warn!("Could not send mail slot message to {}: {} -- assuming application is shutting down, starting a new one", slot.to_string(), error);
false
} else {
trace!("Delivered using Mailslot");
true
}
} e | conditional_block |
lib.rs | 0,
// TSC = 0x40,
TSE = 0x41,
// TSW = 0x42,
// TSR = 0x43,
CDI = 0x50,
// LPD = 0x51,
TCON = 0x60,
TRES = 0x61,
DAM = 0x65,
// REV = 0x70,
// FLG = 0x71,
// AMV = 0x80,
// VV = 0x81,
// VDCS = 0x82,
PWS = 0xE3,
// TSSET = 0xE5,
}
/// An instance of a display which is governed by a particular `uc8159` controller.
#[derive(Debug)]
pub struct Display<SPI, TIMER, RESET, BUSY, DC, ERR = convert::Infallible>
where
SPI: embedded_hal::blocking::spi::Write<u8>,
TIMER: embedded_hal::blocking::delay::DelayMs<u16>,
RESET: embedded_hal::digital::v2::OutputPin,
BUSY: embedded_hal::digital::v2::InputPin,
DC: embedded_hal::digital::v2::OutputPin,
ERR: From<SPI::Error> + From<RESET::Error> + From<BUSY::Error> + From<DC::Error>,
{
spi: SPI,
delay: TIMER,
reset: RESET,
busy: BUSY,
dc: DC,
config: Config,
buffer: [u8; WIDTH / 2 * HEIGHT],
phantom: marker::PhantomData<ERR>,
}
impl<SPI, DELAY, RESET, BUSY, DC, ERR> Display<SPI, DELAY, RESET, BUSY, DC, ERR>
where
SPI: embedded_hal::blocking::spi::Write<u8>,
DELAY: embedded_hal::blocking::delay::DelayMs<u16>,
RESET: embedded_hal::digital::v2::OutputPin,
BUSY: embedded_hal::digital::v2::InputPin,
DC: embedded_hal::digital::v2::OutputPin,
ERR: From<SPI::Error> + From<RESET::Error> + From<BUSY::Error> + From<DC::Error>,
{
/// Creates a new display instance.
///
/// The provided `spi` bus will be used for most of the communication. The `delay` instance
/// is used when waiting for reset and drawing operations to complete. The `reset` pin can be
/// provided to make sure the device is reset before each new draw command. The `busy` pin is
/// used to poll to see when draw operations are complete. The `dc` pin is used to signal
/// whether the sent `spi` signal is a command (low) or data (high).
pub fn new(spi: SPI, delay: DELAY, reset: RESET, busy: BUSY, dc: DC, config: Config) -> Self {
let phantom = marker::PhantomData;
let buffer = [0; WIDTH / 2 * HEIGHT];
Self {
spi,
delay,
reset,
busy,
dc,
config,
buffer,
phantom,
}
}
/// The width of the display.
pub fn width(&self) -> usize {
WIDTH
}
/// The height of the display.
pub fn height(&self) -> usize {
HEIGHT
}
/// Fills the entire display using a single color.
///
/// This is a pretty fast operation compared to e.g. calling `set_pixel`.
pub fn fill(&mut self, color: Color) {
self.buffer = [((color as u8) << 4) | color as u8; WIDTH / 2 * HEIGHT];
}
/// Copies data from another source in bulk.
///
/// The color data must contain exactly `width() * height()` elements and be in row-major order.
pub fn copy_from(&mut self, color: &[Color]) {
for (idx, cell) in color.chunks(2).enumerate() {
self.buffer[idx] = ((cell[0] as u8) << 4) | cell[1] as u8;
}
}
/// Sets a specific pixel color.
pub fn set_pixel(&mut self, x: usize, y: usize, color: Color) {
let cell = &mut self.buffer[y * WIDTH / 2 + x / 2];
if (x & 1) == 0 {
*cell = (*cell & 0b00001111) | ((color as u8) << 4);
} else |
}
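// Illustrative sketch, not part of the original source: two 4-bit pixels share
// one buffer byte, so (x, y) addresses byte y * WIDTH / 2 + x / 2; an even x
// lands in the high nibble and an odd x in the low nibble. Assuming the crate
// defines Color variants such as White and Black:
//
// display.set_pixel(0, 0, Color::White); // high nibble of buffer[0]
// display.set_pixel(1, 0, Color::Black); // low nibble of buffer[0]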
/// Displays the contents of the internal buffer to the screen.
///
/// This operation blocks until the contents are completely shown.
pub fn show(&mut self) -> Result<(), ERR> {
self.setup()?;
let ptr = &self.buffer as *const _ as *const u8;
let len = mem::size_of_val(&self.buffer);
let data = unsafe { slice::from_raw_parts(ptr, len) };
Self::send_command(&mut self.spi, &mut self.dc, Command::DTM1, data)?;
self.busy_wait()?;
Self::send_command(&mut self.spi, &mut self.dc, Command::PON, &[])?;
self.busy_wait()?;
Self::send_command(&mut self.spi, &mut self.dc, Command::DRF, &[])?;
self.busy_wait()?;
Self::send_command(&mut self.spi, &mut self.dc, Command::POF, &[])?;
self.busy_wait()?;
Ok(())
}
fn setup(&mut self) -> Result<(), ERR> {
self.reset.set_low()?;
self.delay.delay_ms(100);
self.reset.set_high()?;
self.delay.delay_ms(100);
self.busy_wait()?;
let width_bytes = (WIDTH as u16).to_be_bytes();
let height_bytes = (HEIGHT as u16).to_be_bytes();
Self::send_command(
&mut self.spi,
&mut self.dc,
Command::TRES,
&[
width_bytes[0],
width_bytes[1],
height_bytes[0],
height_bytes[1],
],
)?;
// Panel Setting
// 0b11000000 = Resolution select, 0b00 = 640x480, our panel is 0b11 = 600x448
// 0b00100000 = LUT selection, 0 = ext flash, 1 = registers, we use ext flash
// 0b00010000 = Ignore
// 0b00001000 = Gate scan direction, 0 = down, 1 = up (default)
// 0b00000100 = Source shift direction, 0 = left, 1 = right (default)
// 0b00000010 = DC-DC converter, 0 = off, 1 = on
// 0b00000001 = Soft reset, 0 = Reset, 1 = Normal (Default)
Self::send_command(
&mut self.spi,
&mut self.dc,
Command::PSR,
&[
0b11101111, // See above for more magic numbers
0x08, // display_colours == UC8159_7C
],
)?;
Self::send_command(
&mut self.spi,
&mut self.dc,
Command::PWR,
&[
(0x06 << 3) | // ??? - not documented in UC8159 datasheet
(0x01 << 2) | // SOURCE_INTERNAL_DC_DC
(0x01 << 1) | // GATE_INTERNAL_DC_DC
(0x01), // LV_SOURCE_INTERNAL_DC_DC
0x00, // VGx_20V
0x23, // UC8159_7C
0x23, // UC8159_7C
],
)?;
// Set the PLL clock frequency to 50Hz
// 0b11000000 = Ignore
// 0b00111000 = M
// 0b00000111 = N
// PLL = 2MHz * (M / N)
// PLL = 2MHz * (7 / 4)
// PLL = 3,500,000 (2 MHz * 7 / 4)
Self::send_command(&mut self.spi, &mut self.dc, Command::PLL, &[0x3C])?;
Self::send_command(&mut self.spi, &mut self.dc, Command::TSE, &[0x00 | {
*cell = (*cell & 0b11110000) | color as u8;
} | conditional_block |
lib.rs | ///
/// The provided `spi` bus will be used for most of the communication. The `delay` instance
/// is used when waiting for reset and drawing operations to complete. The `reset` pin can be
/// provided to make sure the device is reset before each new draw command. The `busy` pin is
/// used to poll to see when draw operations are complete. The `dc` pin is used to signal
/// whether the sent `spi` signal is a command (low) or data (high).
pub fn new(spi: SPI, delay: DELAY, reset: RESET, busy: BUSY, dc: DC, config: Config) -> Self {
let phantom = marker::PhantomData;
let buffer = [0; WIDTH / 2 * HEIGHT];
Self {
spi,
delay,
reset,
busy,
dc,
config,
buffer,
phantom,
}
}
/// The width of the display.
pub fn width(&self) -> usize {
WIDTH
}
/// The height of the display.
pub fn height(&self) -> usize {
HEIGHT
}
/// Fills the entire display using a single color.
///
/// This is a pretty fast operation compared to e.g. calling `set_pixel`.
pub fn fill(&mut self, color: Color) {
self.buffer = [((color as u8) << 4) | color as u8; WIDTH / 2 * HEIGHT];
}
/// Copies data from another source in bulk.
///
/// The color data must contain exactly `width() * height()` elements and be in row-major order.
pub fn copy_from(&mut self, color: &[Color]) {
for (idx, cell) in color.chunks(2).enumerate() {
self.buffer[idx] = ((cell[0] as u8) << 4) | cell[1] as u8;
}
}
/// Sets a specific pixel color.
pub fn set_pixel(&mut self, x: usize, y: usize, color: Color) {
let cell = &mut self.buffer[y * WIDTH / 2 + x / 2];
if (x & 1) == 0 {
*cell = (*cell & 0b00001111) | ((color as u8) << 4);
} else {
*cell = (*cell & 0b11110000) | color as u8;
}
}
/// Displays the contents of the internal buffer to the screen.
///
/// This operation blocks until the contents are completely shown.
pub fn show(&mut self) -> Result<(), ERR> {
self.setup()?;
let ptr = &self.buffer as *const _ as *const u8;
let len = mem::size_of_val(&self.buffer);
let data = unsafe { slice::from_raw_parts(ptr, len) };
Self::send_command(&mut self.spi, &mut self.dc, Command::DTM1, data)?;
self.busy_wait()?;
Self::send_command(&mut self.spi, &mut self.dc, Command::PON, &[])?;
self.busy_wait()?;
Self::send_command(&mut self.spi, &mut self.dc, Command::DRF, &[])?;
self.busy_wait()?;
Self::send_command(&mut self.spi, &mut self.dc, Command::POF, &[])?;
self.busy_wait()?;
Ok(())
}
fn setup(&mut self) -> Result<(), ERR> {
self.reset.set_low()?;
self.delay.delay_ms(100);
self.reset.set_high()?;
self.delay.delay_ms(100);
self.busy_wait()?;
let width_bytes = (WIDTH as u16).to_be_bytes();
let height_bytes = (HEIGHT as u16).to_be_bytes();
Self::send_command(
&mut self.spi,
&mut self.dc,
Command::TRES,
&[
width_bytes[0],
width_bytes[1],
height_bytes[0],
height_bytes[1],
],
)?;
// Panel Setting
// 0b11000000 = Resolution select, 0b00 = 640x480, our panel is 0b11 = 600x448
// 0b00100000 = LUT selection, 0 = ext flash, 1 = registers, we use ext flash
// 0b00010000 = Ignore
// 0b00001000 = Gate scan direction, 0 = down, 1 = up (default)
// 0b00000100 = Source shift direction, 0 = left, 1 = right (default)
// 0b00000010 = DC-DC converter, 0 = off, 1 = on
// 0b00000001 = Soft reset, 0 = Reset, 1 = Normal (Default)
Self::send_command(
&mut self.spi,
&mut self.dc,
Command::PSR,
&[
0b11101111, // See above for more magic numbers
0x08, // display_colours == UC8159_7C
],
)?;
Self::send_command(
&mut self.spi,
&mut self.dc,
Command::PWR,
&[
(0x06 << 3) | // ??? - not documented in UC8159 datasheet
(0x01 << 2) | // SOURCE_INTERNAL_DC_DC
(0x01 << 1) | // GATE_INTERNAL_DC_DC
(0x01), // LV_SOURCE_INTERNAL_DC_DC
0x00, // VGx_20V
0x23, // UC8159_7C
0x23, // UC8159_7C
],
)?;
// Set the PLL clock frequency to 50Hz
// 0b11000000 = Ignore
// 0b00111000 = M
// 0b00000111 = N
// PLL = 2MHz * (M / N)
// PLL = 2MHz * (7 / 4)
// PLL = 3,500,000 (2 MHz * 7 / 4)
Self::send_command(&mut self.spi, &mut self.dc, Command::PLL, &[0x3C])?;
Self::send_command(&mut self.spi, &mut self.dc, Command::TSE, &[0x00])?;
// VCOM and Data Interval setting
// 0b11100000 = Vborder control (0b001 = LUTB voltage)
// 0b00010000 = Data polarity
// 0b00001111 = Vcom and data interval (0b0111 = 10, default)
Self::send_command(
&mut self.spi,
&mut self.dc,
Command::CDI,
&[((self.config.border_color as u8) << 5) | 0x17],
)?;
// Gate/Source non-overlap period
// 0b11110000 = Source to Gate (0b0010 = 12nS, default)
// 0b00001111 = Gate to Source
Self::send_command(&mut self.spi, &mut self.dc, Command::TCON, &[0x22])?;
// Disable external flash
Self::send_command(&mut self.spi, &mut self.dc, Command::DAM, &[0b00000000])?;
// UC8159_7C
Self::send_command(&mut self.spi, &mut self.dc, Command::PWS, &[0xAA])?;
// Power off sequence
// 0b00110000 = power off sequence of VDH and VDL, 0b00 = 1 frame (default)
// All other bits ignored?
Self::send_command(
&mut self.spi,
&mut self.dc,
Command::PFS,
&[0b00000000], // PFS_1_FRAME
)?;
Ok(())
}
fn busy_wait(&mut self) -> Result<(), ERR> {
while self.busy.is_low()? {
self.delay.delay_ms(10);
}
Ok(())
}
fn send_command(spi: &mut SPI, dc: &mut DC, command: Command, data: &[u8]) -> Result<(), ERR> | {
dc.set_low()?;
spi.write(&[command as u8])?;
if !data.is_empty() {
dc.set_high()?;
for chunk in data.chunks(SPI_CHUNK_SIZE) {
spi.write(chunk)?;
}
}
Ok(())
} | identifier_body |
|
lib.rs | 0,
// TSC = 0x40,
TSE = 0x41,
// TSW = 0x42,
// TSR = 0x43,
CDI = 0x50,
// LPD = 0x51,
TCON = 0x60,
TRES = 0x61,
DAM = 0x65,
// REV = 0x70,
// FLG = 0x71,
// AMV = 0x80,
// VV = 0x81,
// VDCS = 0x82,
PWS = 0xE3,
// TSSET = 0xE5,
}
/// An instance of a display which is governed by a particular `uc8159` controller.
#[derive(Debug)]
pub struct Display<SPI, TIMER, RESET, BUSY, DC, ERR = convert::Infallible>
where
SPI: embedded_hal::blocking::spi::Write<u8>,
TIMER: embedded_hal::blocking::delay::DelayMs<u16>,
RESET: embedded_hal::digital::v2::OutputPin,
BUSY: embedded_hal::digital::v2::InputPin,
DC: embedded_hal::digital::v2::OutputPin,
ERR: From<SPI::Error> + From<RESET::Error> + From<BUSY::Error> + From<DC::Error>,
{
spi: SPI,
delay: TIMER,
reset: RESET,
busy: BUSY,
dc: DC,
config: Config,
buffer: [u8; WIDTH / 2 * HEIGHT],
phantom: marker::PhantomData<ERR>,
}
impl<SPI, DELAY, RESET, BUSY, DC, ERR> Display<SPI, DELAY, RESET, BUSY, DC, ERR>
where
SPI: embedded_hal::blocking::spi::Write<u8>,
DELAY: embedded_hal::blocking::delay::DelayMs<u16>,
RESET: embedded_hal::digital::v2::OutputPin,
BUSY: embedded_hal::digital::v2::InputPin,
DC: embedded_hal::digital::v2::OutputPin, | {
/// Creates a new display instance.
///
/// The provided `spi` bus will be used for most of the communication. The `delay` instance
/// is used when waiting for reset and drawing operations to complete. The `reset` pin can be
/// provided to make sure the device is reset before each new draw command. The `busy` pin is
/// used to poll to see when draw operations are complete. The `dc` pin is used to signal
/// whether the sent `spi` signal is a command (low) or data (high).
pub fn new(spi: SPI, delay: DELAY, reset: RESET, busy: BUSY, dc: DC, config: Config) -> Self {
let phantom = marker::PhantomData;
let buffer = [0; WIDTH / 2 * HEIGHT];
Self {
spi,
delay,
reset,
busy,
dc,
config,
buffer,
phantom,
}
}
/// The width of the display.
pub fn width(&self) -> usize {
WIDTH
}
/// The height of the display.
pub fn height(&self) -> usize {
HEIGHT
}
/// Fills the entire display using a single color.
///
/// This is a pretty fast operation compared to e.g. calling `set_pixel`.
pub fn fill(&mut self, color: Color) {
self.buffer = [((color as u8) << 4) | color as u8; WIDTH / 2 * HEIGHT];
}
/// Copies data from another source in bulk.
///
/// The color data must contain exactly `width() * height()` elements and be in row-major order.
pub fn copy_from(&mut self, color: &[Color]) {
for (idx, cell) in color.chunks(2).enumerate() {
self.buffer[idx] = ((cell[0] as u8) << 4) | cell[1] as u8;
}
}
/// Sets a specific pixel color.
pub fn set_pixel(&mut self, x: usize, y: usize, color: Color) {
let cell = &mut self.buffer[y * WIDTH / 2 + x / 2];
if (x & 1) == 0 {
*cell = (*cell & 0b00001111) | ((color as u8) << 4);
} else {
*cell = (*cell & 0b11110000) | color as u8;
}
}
/// Displays the contents of the internal buffer to the screen.
///
/// This operation blocks until the contents are completely shown.
pub fn show(&mut self) -> Result<(), ERR> {
self.setup()?;
let ptr = &self.buffer as *const _ as *const u8;
let len = mem::size_of_val(&self.buffer);
let data = unsafe { slice::from_raw_parts(ptr, len) };
Self::send_command(&mut self.spi, &mut self.dc, Command::DTM1, data)?;
self.busy_wait()?;
Self::send_command(&mut self.spi, &mut self.dc, Command::PON, &[])?;
self.busy_wait()?;
Self::send_command(&mut self.spi, &mut self.dc, Command::DRF, &[])?;
self.busy_wait()?;
Self::send_command(&mut self.spi, &mut self.dc, Command::POF, &[])?;
self.busy_wait()?;
Ok(())
}
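// Illustrative sketch, not part of the original source: a typical draw cycle.
// The concrete SPI, delay, and pin values depend on the target HAL and are
// assumptions here, as are the Color variants and Config construction.
//
// let mut display = Display::new(spi, delay, reset_pin, busy_pin, dc_pin, config);
// display.fill(Color::White);
// display.set_pixel(10, 20, Color::Black);
// display.show()?; // blocks until the panel refresh completes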
fn setup(&mut self) -> Result<(), ERR> {
self.reset.set_low()?;
self.delay.delay_ms(100);
self.reset.set_high()?;
self.delay.delay_ms(100);
self.busy_wait()?;
let width_bytes = (WIDTH as u16).to_be_bytes();
let height_bytes = (HEIGHT as u16).to_be_bytes();
Self::send_command(
&mut self.spi,
&mut self.dc,
Command::TRES,
&[
width_bytes[0],
width_bytes[1],
height_bytes[0],
height_bytes[1],
],
)?;
// Panel Setting
// 0b11000000 = Resolution select, 0b00 = 640x480, our panel is 0b11 = 600x448
// 0b00100000 = LUT selection, 0 = ext flash, 1 = registers, we use ext flash
// 0b00010000 = Ignore
// 0b00001000 = Gate scan direction, 0 = down, 1 = up (default)
// 0b00000100 = Source shift direction, 0 = left, 1 = right (default)
// 0b00000010 = DC-DC converter, 0 = off, 1 = on
// 0b00000001 = Soft reset, 0 = Reset, 1 = Normal (Default)
Self::send_command(
&mut self.spi,
&mut self.dc,
Command::PSR,
&[
0b11101111, // See above for more magic numbers
0x08, // display_colours == UC8159_7C
],
)?;
Self::send_command(
&mut self.spi,
&mut self.dc,
Command::PWR,
&[
(0x06 << 3) | // ??? - not documented in UC8159 datasheet
(0x01 << 2) | // SOURCE_INTERNAL_DC_DC
(0x01 << 1) | // GATE_INTERNAL_DC_DC
(0x01), // LV_SOURCE_INTERNAL_DC_DC
0x00, // VGx_20V
0x23, // UC8159_7C
0x23, // UC8159_7C
],
)?;
// Set the PLL clock frequency to 50Hz
// 0b11000000 = Ignore
// 0b00111000 = M
// 0b00000111 = N
// PLL = 2MHz * (M / N)
// PLL = 2MHz * (7 / 4)
// PLL = 3,500,000 (2 MHz * 7 / 4)
Self::send_command(&mut self.spi, &mut self.dc, Command::PLL, &[0x3C])?;
Self::send_command(&mut self.spi, &mut self.dc, Command::TSE, &[0x00])?;
| ERR: From<SPI::Error> + From<RESET::Error> + From<BUSY::Error> + From<DC::Error>, | random_line_split |
lib.rs | 0,
// TSC = 0x40,
TSE = 0x41,
// TSW = 0x42,
// TSR = 0x43,
CDI = 0x50,
// LPD = 0x51,
TCON = 0x60,
TRES = 0x61,
DAM = 0x65,
// REV = 0x70,
// FLG = 0x71,
// AMV = 0x80,
// VV = 0x81,
// VDCS = 0x82,
PWS = 0xE3,
// TSSET = 0xE5,
}
/// An instance of a display which is governed by a particular `uc8159` controller.
#[derive(Debug)]
pub struct Display<SPI, TIMER, RESET, BUSY, DC, ERR = convert::Infallible>
where
SPI: embedded_hal::blocking::spi::Write<u8>,
TIMER: embedded_hal::blocking::delay::DelayMs<u16>,
RESET: embedded_hal::digital::v2::OutputPin,
BUSY: embedded_hal::digital::v2::InputPin,
DC: embedded_hal::digital::v2::OutputPin,
ERR: From<SPI::Error> + From<RESET::Error> + From<BUSY::Error> + From<DC::Error>,
{
spi: SPI,
delay: TIMER,
reset: RESET,
busy: BUSY,
dc: DC,
config: Config,
buffer: [u8; WIDTH / 2 * HEIGHT],
phantom: marker::PhantomData<ERR>,
}
impl<SPI, DELAY, RESET, BUSY, DC, ERR> Display<SPI, DELAY, RESET, BUSY, DC, ERR>
where
SPI: embedded_hal::blocking::spi::Write<u8>,
DELAY: embedded_hal::blocking::delay::DelayMs<u16>,
RESET: embedded_hal::digital::v2::OutputPin,
BUSY: embedded_hal::digital::v2::InputPin,
DC: embedded_hal::digital::v2::OutputPin,
ERR: From<SPI::Error> + From<RESET::Error> + From<BUSY::Error> + From<DC::Error>,
{
/// Creates a new display instance.
///
/// The provided `spi` bus will be used for most of the communication. The `delay` instance
/// is used when waiting for reset and drawing operations to complete. The `reset` pin can be
/// provided to make sure the device is reset before each new draw command. The `busy` pin is
/// used to poll to see when draw operations are complete. The `dc` pin is used to signal
/// whether the sent `spi` signal is a command (low) or data (high).
pub fn new(spi: SPI, delay: DELAY, reset: RESET, busy: BUSY, dc: DC, config: Config) -> Self {
let phantom = marker::PhantomData;
let buffer = [0; WIDTH / 2 * HEIGHT];
Self {
spi,
delay,
reset,
busy,
dc,
config,
buffer,
phantom,
}
}
/// The width of the display.
pub fn width(&self) -> usize {
WIDTH
}
/// The height of the display.
pub fn height(&self) -> usize {
HEIGHT
}
/// Fills the entire display using a single color.
///
/// This is a pretty fast operation compared to e.g. calling `set_pixel`.
pub fn fill(&mut self, color: Color) {
self.buffer = [((color as u8) << 4) | color as u8; WIDTH / 2 * HEIGHT];
}
/// Copies data from another source in bulk.
///
/// The color data must contain exactly `width() * height()` elements and be in row-major order.
pub fn | (&mut self, color: &[Color]) {
for (idx, cell) in color.chunks(2).enumerate() {
self.buffer[idx] = ((cell[0] as u8) << 4) | cell[1] as u8;
}
}
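// Illustrative sketch, not part of the original source: the bulk copy packs
// each row-major pair of pixels into one byte, high nibble first. Assuming a
// Color::Red variant exists:
//
// let frame = vec![Color::Red; display.width() * display.height()];
// display.copy_from(&frame);
// // every buffer byte now equals ((Color::Red as u8) << 4) | Color::Red as u8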
/// Sets a specific pixel color.
pub fn set_pixel(&mut self, x: usize, y: usize, color: Color) {
let cell = &mut self.buffer[y * WIDTH / 2 + x / 2];
if (x & 1) == 0 {
*cell = (*cell & 0b00001111) | ((color as u8) << 4);
} else {
*cell = (*cell & 0b11110000) | color as u8;
}
}
/// Displays the contents of the internal buffer to the screen.
///
/// This operation blocks until the contents are completely shown.
pub fn show(&mut self) -> Result<(), ERR> {
self.setup()?;
let ptr = &self.buffer as *const _ as *const u8;
let len = mem::size_of_val(&self.buffer);
let data = unsafe { slice::from_raw_parts(ptr, len) };
Self::send_command(&mut self.spi, &mut self.dc, Command::DTM1, data)?;
self.busy_wait()?;
Self::send_command(&mut self.spi, &mut self.dc, Command::PON, &[])?;
self.busy_wait()?;
Self::send_command(&mut self.spi, &mut self.dc, Command::DRF, &[])?;
self.busy_wait()?;
Self::send_command(&mut self.spi, &mut self.dc, Command::POF, &[])?;
self.busy_wait()?;
Ok(())
}
fn setup(&mut self) -> Result<(), ERR> {
self.reset.set_low()?;
self.delay.delay_ms(100);
self.reset.set_high()?;
self.delay.delay_ms(100);
self.busy_wait()?;
let width_bytes = (WIDTH as u16).to_be_bytes();
let height_bytes = (HEIGHT as u16).to_be_bytes();
Self::send_command(
&mut self.spi,
&mut self.dc,
Command::TRES,
&[
width_bytes[0],
width_bytes[1],
height_bytes[0],
height_bytes[1],
],
)?;
// Panel Setting
// 0b11000000 = Resolution select, 0b00 = 640x480, our panel is 0b11 = 600x448
// 0b00100000 = LUT selection, 0 = ext flash, 1 = registers, we use ext flash
// 0b00010000 = Ignore
// 0b00001000 = Gate scan direction, 0 = down, 1 = up (default)
// 0b00000100 = Source shift direction, 0 = left, 1 = right (default)
// 0b00000010 = DC-DC converter, 0 = off, 1 = on
// 0b00000001 = Soft reset, 0 = Reset, 1 = Normal (Default)
Self::send_command(
&mut self.spi,
&mut self.dc,
Command::PSR,
&[
0b11101111, // See above for more magic numbers
0x08, // display_colours == UC8159_7C
],
)?;
Self::send_command(
&mut self.spi,
&mut self.dc,
Command::PWR,
&[
(0x06 << 3) | // ??? - not documented in UC8159 datasheet
(0x01 << 2) | // SOURCE_INTERNAL_DC_DC
(0x01 << 1) | // GATE_INTERNAL_DC_DC
(0x01), // LV_SOURCE_INTERNAL_DC_DC
0x00, // VGx_20V
0x23, // UC8159_7C
0x23, // UC8159_7C
],
)?;
// Set the PLL clock frequency to 50Hz
// 0b11000000 = Ignore
// 0b00111000 = M
// 0b00000111 = N
// PLL = 2MHz * (M / N)
// PLL = 2MHz * (7 / 4)
// PLL = 3,500,000 (2 MHz * 7 / 4)
Self::send_command(&mut self.spi, &mut self.dc, Command::PLL, &[0x3C])?;
Self::send_command(&mut self.spi, &mut self.dc, Command::TSE, &[0x00]) | copy_from | identifier_name |
proxy.go | , p.Handler()); err != nil {
log.WithError(err).Error("HTTP server shut down due to error")
}
log.Info("Stopped HTTP server")
graceful.Shutdown()
}
// gRPCServe starts the gRPC server and block until an error is encountered,
// or the server is shutdown.
//
// TODO this doesn't handle SIGUSR2 and SIGHUP on its own, unlike HTTPServe
// As long as both are running this is actually fine, as Serve will stop
// the gRPC server when the HTTP one exits. When running just gRPC however,
// the signal handling won't work.
func (p *Proxy) gRPCServe() {
entry := log.WithField("address", p.grpcListenAddress)
entry.Info("Starting gRPC server")
if err := p.grpcServer.Serve(p.grpcListenAddress); err != nil {
entry.WithError(err).Error("gRPC server was not shut down cleanly")
}
entry.Info("Stopped gRPC server")
}
// Try to perform a graceful stop of the gRPC server. If it takes more than
// 10 seconds, timeout and force-stop.
func (p *Proxy) gRPCStop() {
if p.grpcServer == nil {
return
}
done := make(chan struct{})
go func() {
p.grpcServer.GracefulStop()
close(done)
}()
select {
case <-done:
return
case <-time.After(10 * time.Second):
log.Info("Force-stopping the gRPC server after waiting for a graceful shutdown")
p.grpcServer.Stop()
}
}
// RefreshDestinations updates the server's list of valid destinations
// for flushing. This should be called periodically to ensure we have
// the latest data.
func (p *Proxy) RefreshDestinations(serviceName string, ring *consistent.Consistent, mtx *sync.Mutex) {
samples := &ssf.Samples{}
defer metrics.Report(p.TraceClient, samples)
srvTags := map[string]string{"service": serviceName}
start := time.Now()
destinations, err := p.Discoverer.GetDestinationsForService(serviceName)
samples.Add(ssf.Timing("discoverer.update_duration_ns", time.Since(start), time.Nanosecond, srvTags))
log.WithFields(logrus.Fields{
"destinations": destinations,
"service": serviceName,
}).Debug("Got destinations")
samples.Add(ssf.Timing("discoverer.update_duration_ns", time.Since(start), time.Nanosecond, srvTags))
if err != nil || len(destinations) == 0 {
log.WithError(err).WithFields(logrus.Fields{
"service": serviceName,
"errorType": reflect.TypeOf(err),
"numDestinations": len(destinations),
}).Error("Discoverer found zero destinations and/or returned an error. Destinations may be stale!")
samples.Add(ssf.Count("discoverer.errors", 1, srvTags))
// Return since we got no hosts. We don't want to zero out the list. This
// should result in us leaving the "last good" values in the ring.
return
}
mtx.Lock()
ring.Set(destinations)
mtx.Unlock()
samples.Add(ssf.Gauge("discoverer.destination_number", float32(len(destinations)), srvTags))
}
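// Illustrative sketch, not part of the original source: the ring updated above
// is what maps a key to a destination host elsewhere in this file. Hostnames
// here are made up; the API is the same consistent-hash package used above.
//
// ring := consistent.New()
// ring.Set([]string{"veneur-global-0:8127", "veneur-global-1:8127"})
// dest, _ := ring.Get("some.metric.name") // the same key always picks the same host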
// Handler returns the Handler responsible for routing request processing.
func (p *Proxy) Handler() http.Handler {
mux := goji.NewMux()
mux.HandleFuncC(pat.Get("/healthcheck"), func(c context.Context, w http.ResponseWriter, r *http.Request) {
w.Write([]byte("ok\n"))
})
mux.Handle(pat.Post("/import"), handleProxy(p))
mux.Handle(pat.Get("/debug/pprof/cmdline"), http.HandlerFunc(pprof.Cmdline))
mux.Handle(pat.Get("/debug/pprof/profile"), http.HandlerFunc(pprof.Profile))
mux.Handle(pat.Get("/debug/pprof/symbol"), http.HandlerFunc(pprof.Symbol))
mux.Handle(pat.Get("/debug/pprof/trace"), http.HandlerFunc(pprof.Trace))
// TODO match without trailing slash as well
mux.Handle(pat.Get("/debug/pprof/*"), http.HandlerFunc(pprof.Index))
return mux
}
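// Illustrative sketch, not part of the original source: the mux above can be
// exercised directly with net/http/httptest, e.g. for the healthcheck route.
//
// rec := httptest.NewRecorder()
// req := httptest.NewRequest(http.MethodGet, "/healthcheck", nil)
// p.Handler().ServeHTTP(rec, req)
// // rec.Code == http.StatusOK and rec.Body.String() == "ok\n"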
func (p *Proxy) ProxyTraces(ctx context.Context, traces []DatadogTraceSpan) {
span, _ := trace.StartSpanFromContext(ctx, "veneur.opentracing.proxy.proxy_traces")
defer span.ClientFinish(p.TraceClient)
if p.ForwardTimeout > 0 {
var cancel func()
ctx, cancel = context.WithTimeout(ctx, p.ForwardTimeout)
defer cancel()
}
tracesByDestination := make(map[string][]*DatadogTraceSpan)
for _, h := range p.TraceDestinations.Members() {
tracesByDestination[h] = make([]*DatadogTraceSpan, 0)
}
for _, t := range traces {
t := t // copy the range variable so each appended pointer refers to a distinct span
dest, _ := p.TraceDestinations.Get(strconv.FormatInt(t.TraceID, 10))
tracesByDestination[dest] = append(tracesByDestination[dest], &t)
}
for dest, batch := range tracesByDestination {
if len(batch) != 0 {
// this endpoint is not documented to take an array... but it does
// another curious constraint of this endpoint is that it does not
// support "Content-Encoding: deflate"
err := vhttp.PostHelper(span.Attach(ctx), p.HTTPClient, p.TraceClient, http.MethodPost, fmt.Sprintf("%s/spans", dest), batch, "flush_traces", false, nil, log)
if err == nil {
log.WithFields(logrus.Fields{
"traces": len(batch),
"destination": dest,
}).Debug("Completed flushing traces to Datadog")
} else {
log.WithFields(
logrus.Fields{
"traces": len(batch),
logrus.ErrorKey: err}).Error("Error flushing traces to Datadog")
}
} else {
log.WithField("destination", dest).Info("No traces to flush, skipping.")
}
}
}
// ProxyMetrics takes a slice of JSONMetrics and breaks them up into
// multiple HTTP requests by MetricKey using the hash ring.
func (p *Proxy) ProxyMetrics(ctx context.Context, jsonMetrics []samplers.JSONMetric, origin string) {
span, _ := trace.StartSpanFromContext(ctx, "veneur.opentracing.proxy.proxy_metrics")
defer span.ClientFinish(p.TraceClient)
if p.ForwardTimeout > 0 {
var cancel func()
ctx, cancel = context.WithTimeout(ctx, p.ForwardTimeout)
defer cancel()
}
metricCount := len(jsonMetrics)
span.Add(ssf.RandomlySample(0.1,
ssf.Count("import.metrics_total", float32(metricCount), map[string]string{
"remote_addr": origin,
"veneurglobalonly": "",
}),
)...)
jsonMetricsByDestination := make(map[string][]samplers.JSONMetric)
for _, h := range p.ForwardDestinations.Members() {
jsonMetricsByDestination[h] = make([]samplers.JSONMetric, 0)
}
for _, jm := range jsonMetrics {
dest, _ := p.ForwardDestinations.Get(jm.MetricKey.String())
jsonMetricsByDestination[dest] = append(jsonMetricsByDestination[dest], jm)
}
// nb The response has already been returned at this point, because we forward in the background rather than as part of the original request.
wg := sync.WaitGroup{}
wg.Add(len(jsonMetricsByDestination)) // Make our waitgroup the size of our destinations
for dest, batch := range jsonMetricsByDestination {
go p.doPost(ctx, &wg, dest, batch)
}
wg.Wait() // Wait for all the above goroutines to complete
log.WithField("count", metricCount).Debug("Completed forward")
span.Add(ssf.RandomlySample(0.1,
ssf.Timing("proxy.duration_ns", time.Since(span.Start), time.Nanosecond, nil),
ssf.Count("proxy.proxied_metrics_total", float32(len(jsonMetrics)), nil),
)...)
}
func (p *Proxy) doPost(ctx context.Context, wg *sync.WaitGroup, destination string, batch []samplers.JSONMetric) {
defer wg.Done()
samples := &ssf.Samples{}
defer metrics.Report(p.TraceClient, samples)
batchSize := len(batch)
if batchSize < 1 {
return
}
// Make sure the destination always has a valid 'http' prefix.
if !strings.HasPrefix(destination, "http") {
u := url.URL{Scheme: "http", Host: destination}
destination = u.String()
}
endpoint := fmt.Sprintf("%s/import", destination)
err := vhttp.PostHelper(ctx, p.HTTPClient, p.TraceClient, http.MethodPost, endpoint, batch, "forward", true, nil, log)
if err == nil {
log.WithField("metrics", batchSize).Debug("Completed forward to Veneur")
} else {
samples.Add(ssf.Count("forward.error_total", 1, map[string]string{"cause": "post"}))
log.WithError(err).WithFields(logrus.Fields{
"endpoint": endpoint,
"batchSize": batchSize,
}).Warn("Failed to POST metrics to destination")
}
samples.Add(ssf.RandomlySample(0.1,
ssf.Count("metrics_by_destination", float32(batchSize), map[string]string{"destination": destination, "protocol": "http"}),
)...)
}
func (p *Proxy) | ReportRuntimeMetrics | identifier_name |
|
proxy.go | //TODO don't overload this
if conf.ConsulForwardServiceName != "" {
p.AcceptingForwards = true
}
}
p.ForwardDestinations = consistent.New()
p.TraceDestinations = consistent.New()
p.ForwardGRPCDestinations = consistent.New()
if conf.ForwardTimeout != "" {
p.ForwardTimeout, err = time.ParseDuration(conf.ForwardTimeout)
if err != nil {
logger.WithError(err).
WithField("value", conf.ForwardTimeout).
Error("Could not parse forward timeout")
return
}
}
// We got a static forward address, stick it in the destination!
if p.ConsulForwardService == "" && conf.ForwardAddress != "" {
p.ForwardDestinations.Add(conf.ForwardAddress)
}
if p.ConsulTraceService == "" && conf.TraceAddress != "" {
p.TraceDestinations.Add(conf.TraceAddress)
}
if p.ConsulForwardGRPCService == "" && conf.GrpcForwardAddress != "" {
p.ForwardGRPCDestinations.Add(conf.GrpcForwardAddress)
}
if !p.AcceptingForwards && !p.AcceptingTraces && !p.AcceptingGRPCForwards {
err = errors.New("refusing to start with no Consul service names or static addresses in config")
logger.WithError(err).WithFields(logrus.Fields{
"consul_forward_service_name": p.ConsulForwardService,
"consul_trace_service_name": p.ConsulTraceService,
"consul_forward_grpc_service_name": p.ConsulForwardGRPCService,
"forward_address": conf.ForwardAddress,
"trace_address": conf.TraceAddress,
}).Error("Oops")
return
}
if p.usingConsul {
p.ConsulInterval, err = time.ParseDuration(conf.ConsulRefreshInterval)
if err != nil {
logger.WithError(err).Error("Error parsing Consul refresh interval")
return
}
logger.WithField("interval", conf.ConsulRefreshInterval).Info("Will use Consul for service discovery")
}
p.MetricsInterval = time.Second * 10
if conf.RuntimeMetricsInterval != "" {
p.MetricsInterval, err = time.ParseDuration(conf.RuntimeMetricsInterval)
if err != nil {
logger.WithError(err).Error("Error parsing metric refresh interval")
return
}
}
p.TraceClient = trace.DefaultClient
if conf.SsfDestinationAddress != "" {
stats, err := statsd.NewBuffered(conf.StatsAddress, 4096)
if err != nil {
return p, err
}
stats.Namespace = "veneur_proxy."
format := "ssf_format:packet"
if strings.HasPrefix(conf.SsfDestinationAddress, "unix://") {
format = "ssf_format:framed"
}
traceFlushInterval, err := time.ParseDuration(conf.TracingClientFlushInterval)
if err != nil {
logger.WithError(err).Error("Error parsing tracing flush interval")
return p, err
}
traceMetricsInterval, err := time.ParseDuration(conf.TracingClientMetricsInterval)
if err != nil {
logger.WithError(err).Error("Error parsing tracing metrics interval")
return p, err
}
p.TraceClient, err = trace.NewClient(conf.SsfDestinationAddress,
trace.Buffered,
trace.Capacity(uint(conf.TracingClientCapacity)),
trace.FlushInterval(traceFlushInterval),
trace.ReportStatistics(stats, traceMetricsInterval, []string{format}),
)
if err != nil {
logger.WithField("ssf_destination_address", conf.SsfDestinationAddress).
WithError(err).
Fatal("Error using SSF destination address")
}
}
if conf.GrpcAddress != "" {
p.grpcListenAddress = conf.GrpcAddress
p.grpcServer, err = proxysrv.New(p.ForwardGRPCDestinations,
proxysrv.WithForwardTimeout(p.ForwardTimeout),
proxysrv.WithLog(logrus.NewEntry(log)),
proxysrv.WithTraceClient(p.TraceClient),
)
if err != nil {
logger.WithError(err).Fatal("Failed to initialize the gRPC server")
}
}
// TODO Size of replicas in config?
//ret.ForwardDestinations.NumberOfReplicas = ???
if conf.Debug {
logger.SetLevel(logrus.DebugLevel)
}
logger.WithField("config", conf).Debug("Initialized server")
return
}
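// Illustrative sketch, not part of the original source: with static forwarding
// (no Consul), only a handful of the fields read above need to be set. Field
// values are made up; fields not shown keep their zero values.
//
// conf.ForwardAddress = "http://veneur-global:8127"
// conf.RuntimeMetricsInterval = "10s"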
// Start fires up the various goroutines that run on behalf of the server.
// This is separated from the constructor for testing convenience.
func (p *Proxy) Start() {
log.WithField("version", VERSION).Info("Starting server")
config := api.DefaultConfig()
// Use the same HTTP Client we're using for other things, so we can leverage
// it for testing.
config.HttpClient = p.HTTPClient
if p.usingKubernetes {
disc, err := NewKubernetesDiscoverer()
if err != nil {
log.WithError(err).Error("Error creating KubernetesDiscoverer")
return
}
p.Discoverer = disc
log.Info("Set Kubernetes discoverer")
} else if p.usingConsul {
disc, consulErr := NewConsul(config)
if consulErr != nil {
log.WithError(consulErr).Error("Error creating Consul discoverer")
return
}
p.Discoverer = disc
log.Info("Set Consul discoverer")
}
if p.AcceptingForwards && p.ConsulForwardService != "" {
p.RefreshDestinations(p.ConsulForwardService, p.ForwardDestinations, &p.ForwardDestinationsMtx)
if len(p.ForwardDestinations.Members()) == 0 {
log.WithField("serviceName", p.ConsulForwardService).Fatal("Refusing to start with zero destinations for forwarding.")
}
}
if p.AcceptingTraces && p.ConsulTraceService != "" {
p.RefreshDestinations(p.ConsulTraceService, p.TraceDestinations, &p.TraceDestinationsMtx)
if len(p.TraceDestinations.Members()) == 0 {
log.WithField("serviceName", p.ConsulTraceService).Fatal("Refusing to start with zero destinations for tracing.")
}
}
if p.AcceptingGRPCForwards && p.ConsulForwardGRPCService != "" {
p.RefreshDestinations(p.ConsulForwardGRPCService, p.ForwardGRPCDestinations, &p.ForwardGRPCDestinationsMtx)
if len(p.ForwardGRPCDestinations.Members()) == 0 {
log.WithField("serviceName", p.ConsulForwardGRPCService).Fatal("Refusing to start with zero destinations for forwarding over gRPC.")
}
p.grpcServer.SetDestinations(p.ForwardGRPCDestinations)
}
if p.usingConsul || p.usingKubernetes {
log.Info("Creating service discovery goroutine")
go func() {
defer func() {
ConsumePanic(p.Sentry, p.TraceClient, p.Hostname, recover())
}()
ticker := time.NewTicker(p.ConsulInterval)
for range ticker.C {
log.WithFields(logrus.Fields{
"acceptingForwards": p.AcceptingForwards,
"consulForwardService": p.ConsulForwardService,
"consulTraceService": p.ConsulTraceService,
"consulForwardGRPCService": p.ConsulForwardGRPCService,
}).Debug("About to refresh destinations")
if p.AcceptingForwards && p.ConsulForwardService != "" |
if p.AcceptingTraces && p.ConsulTraceService != "" {
p.RefreshDestinations(p.ConsulTraceService, p.TraceDestinations, &p.TraceDestinationsMtx)
}
if p.AcceptingGRPCForwards && p.ConsulForwardGRPCService != "" {
p.RefreshDestinations(p.ConsulForwardGRPCService, p.ForwardGRPCDestinations, &p.ForwardGRPCDestinationsMtx)
p.grpcServer.SetDestinations(p.ForwardGRPCDestinations)
}
}
}()
}
go func() {
hostname, _ := os.Hostname()
defer func() {
ConsumePanic(p.Sentry, p.TraceClient, hostname, recover())
}()
ticker := time.NewTicker(p.MetricsInterval)
for {
select {
case <-p.shutdown:
// stop flushing on graceful shutdown
ticker.Stop()
return
case <-ticker.C:
p.ReportRuntimeMetrics()
}
}
}()
}
// Start all of the configured servers (gRPC or HTTP) and block until
// one of them exits. At that point, stop them both.
func (p *Proxy) Serve() {
done := make(chan struct{}, 2)
go func() {
p.HTTPServe()
done <- struct{}{}
}()
if p.grpcListenAddress != "" {
go func() {
p.gRPCServe()
done <- struct{}{}
}()
}
// wait until at least one of the servers has shut down
<-done
graceful.Shutdown()
p.gRPCStop()
}
// HTTPServe starts the HTTP server and listens perpetually until it encounters an unrecoverable error.
func | {
p.RefreshDestinations(p.ConsulForwardService, p.ForwardDestinations, &p.ForwardDestinationsMtx)
} | conditional_block |
proxy.go |
p.HTTPClient = &http.Client{
Transport: transport,
}
p.numListeningHTTP = new(int32)
p.enableProfiling = conf.EnableProfiling
p.ConsulForwardService = conf.ConsulForwardServiceName
p.ConsulTraceService = conf.ConsulTraceServiceName
p.ConsulForwardGRPCService = conf.ConsulForwardGrpcServiceName
if p.ConsulForwardService != "" || conf.ForwardAddress != "" {
p.AcceptingForwards = true
}
if p.ConsulTraceService != "" || conf.TraceAddress != "" {
p.AcceptingTraces = true
}
if p.ConsulForwardGRPCService != "" || conf.GrpcForwardAddress != "" {
p.AcceptingGRPCForwards = true
}
// We need a convenient way to know if we're even using Consul later
if p.ConsulForwardService != "" || p.ConsulTraceService != "" || p.ConsulForwardGRPCService != "" {
log.WithFields(logrus.Fields{
"consulForwardService": p.ConsulForwardService,
"consulTraceService": p.ConsulTraceService,
"consulGRPCForwardService": p.ConsulForwardGRPCService,
}).Info("Using consul for service discovery")
p.usingConsul = true
}
// check if we are running on Kubernetes
if _, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount"); !os.IsNotExist(err) {
log.Info("Using Kubernetes for service discovery")
p.usingKubernetes = true
//TODO don't overload this
if conf.ConsulForwardServiceName != "" {
p.AcceptingForwards = true
}
}
p.ForwardDestinations = consistent.New()
p.TraceDestinations = consistent.New()
p.ForwardGRPCDestinations = consistent.New()
if conf.ForwardTimeout != "" {
p.ForwardTimeout, err = time.ParseDuration(conf.ForwardTimeout)
if err != nil {
logger.WithError(err).
WithField("value", conf.ForwardTimeout).
Error("Could not parse forward timeout")
return
}
}
// We got a static forward address, stick it in the destination!
if p.ConsulForwardService == "" && conf.ForwardAddress != "" {
p.ForwardDestinations.Add(conf.ForwardAddress)
}
if p.ConsulTraceService == "" && conf.TraceAddress != "" {
p.TraceDestinations.Add(conf.TraceAddress)
}
if p.ConsulForwardGRPCService == "" && conf.GrpcForwardAddress != "" {
p.ForwardGRPCDestinations.Add(conf.GrpcForwardAddress)
}
if !p.AcceptingForwards && !p.AcceptingTraces && !p.AcceptingGRPCForwards {
err = errors.New("refusing to start with no Consul service names or static addresses in config")
logger.WithError(err).WithFields(logrus.Fields{
"consul_forward_service_name": p.ConsulForwardService,
"consul_trace_service_name": p.ConsulTraceService,
"consul_forward_grpc_service_name": p.ConsulForwardGRPCService,
"forward_address": conf.ForwardAddress,
"trace_address": conf.TraceAddress,
}).Error("Oops")
return
}
if p.usingConsul {
p.ConsulInterval, err = time.ParseDuration(conf.ConsulRefreshInterval)
if err != nil {
logger.WithError(err).Error("Error parsing Consul refresh interval")
return
}
logger.WithField("interval", conf.ConsulRefreshInterval).Info("Will use Consul for service discovery")
}
p.MetricsInterval = time.Second * 10
if conf.RuntimeMetricsInterval != "" {
p.MetricsInterval, err = time.ParseDuration(conf.RuntimeMetricsInterval)
if err != nil {
logger.WithError(err).Error("Error parsing metric refresh interval")
return
}
}
p.TraceClient = trace.DefaultClient
if conf.SsfDestinationAddress != "" {
stats, err := statsd.NewBuffered(conf.StatsAddress, 4096)
if err != nil {
return p, err
}
stats.Namespace = "veneur_proxy."
format := "ssf_format:packet"
if strings.HasPrefix(conf.SsfDestinationAddress, "unix://") {
format = "ssf_format:framed"
}
traceFlushInterval, err := time.ParseDuration(conf.TracingClientFlushInterval)
if err != nil {
logger.WithError(err).Error("Error parsing tracing flush interval")
return p, err
}
traceMetricsInterval, err := time.ParseDuration(conf.TracingClientMetricsInterval)
if err != nil {
logger.WithError(err).Error("Error parsing tracing metrics interval")
return p, err
}
p.TraceClient, err = trace.NewClient(conf.SsfDestinationAddress,
trace.Buffered,
trace.Capacity(uint(conf.TracingClientCapacity)),
trace.FlushInterval(traceFlushInterval),
trace.ReportStatistics(stats, traceMetricsInterval, []string{format}),
)
if err != nil {
logger.WithField("ssf_destination_address", conf.SsfDestinationAddress).
WithError(err).
Fatal("Error using SSF destination address")
}
}
if conf.GrpcAddress != "" {
p.grpcListenAddress = conf.GrpcAddress
p.grpcServer, err = proxysrv.New(p.ForwardGRPCDestinations,
proxysrv.WithForwardTimeout(p.ForwardTimeout),
proxysrv.WithLog(logrus.NewEntry(log)),
proxysrv.WithTraceClient(p.TraceClient),
)
if err != nil {
logger.WithError(err).Fatal("Failed to initialize the gRPC server")
}
}
// TODO Size of replicas in config?
//ret.ForwardDestinations.NumberOfReplicas = ???
if conf.Debug {
logger.SetLevel(logrus.DebugLevel)
}
logger.WithField("config", conf).Debug("Initialized server")
return
}
// Start fires up the various goroutines that run on behalf of the server.
// This is separated from the constructor for testing convenience.
func (p *Proxy) Start() {
log.WithField("version", VERSION).Info("Starting server")
config := api.DefaultConfig()
// Use the same HTTP Client we're using for other things, so we can leverage
// it for testing.
config.HttpClient = p.HTTPClient
if p.usingKubernetes {
disc, err := NewKubernetesDiscoverer()
if err != nil {
log.WithError(err).Error("Error creating KubernetesDiscoverer")
return
}
p.Discoverer = disc
log.Info("Set Kubernetes discoverer")
} else if p.usingConsul {
disc, consulErr := NewConsul(config)
if consulErr != nil {
log.WithError(consulErr).Error("Error creating Consul discoverer")
return
}
p.Discoverer = disc
log.Info("Set Consul discoverer")
}
if p.AcceptingForwards && p.ConsulForwardService != "" {
p.RefreshDestinations(p.ConsulForwardService, p.ForwardDestinations, &p.ForwardDestinationsMtx)
if len(p.ForwardDestinations.Members()) == 0 {
log.WithField("serviceName", p.ConsulForwardService).Fatal("Refusing to start with zero destinations for forwarding.")
}
}
if p.AcceptingTraces && p.ConsulTraceService != "" {
p.RefreshDestinations(p.ConsulTraceService, p.TraceDestinations, &p.TraceDestinationsMtx)
if len(p.TraceDestinations.Members()) == 0 {
log.WithField("serviceName", p.ConsulTraceService).Fatal("Refusing to start with zero destinations for tracing.")
}
}
if p.AcceptingGRPCForwards && p.ConsulForwardGRPCService != "" {
p.RefreshDestinations(p.ConsulForwardGRPCService, p.ForwardGRPCDestinations, &p.ForwardGRPCDestinationsMtx)
if len(p.ForwardGRPCDestinations.Members()) == 0 {
log.WithField("serviceName", p.ConsulForwardGRPCService).Fatal("Refusing to start with zero destinations for forwarding over gRPC.")
}
p.grpcServer.SetDestinations(p.ForwardGRPCDestinations)
}
if p.usingConsul || p.usingKubernetes {
log.Info("Creating service discovery goroutine")
go func() {
defer func() {
ConsumePanic(p.Sentry, p.TraceClient, p.Hostname, recover())
}()
ticker := time.NewTicker(p.ConsulInterval)
for range ticker.C {
log.WithFields(logrus.Fields{
"acceptingForwards": p.AcceptingForwards,
"consulForwardService": p.ConsulForwardService,
"consulTraceService": p.ConsulTraceService,
"consulForwardGRPCService": p.ConsulForwardGRPCService,
}).Debug("About to refresh destinations")
if p.AcceptingForwards && p.ConsulForwardService != "" {
p.RefreshDestinations(p.ConsulForwardService, p | // zero values as of Go 0.10.3
MaxIdleConns: conf.MaxIdleConns,
MaxIdleConnsPerHost: conf.MaxIdleConnsPerHost,
} | random_line_split |
|
proxy.go | //TODO don't overload this
if conf.ConsulForwardServiceName != "" {
p.AcceptingForwards = true
}
}
p.ForwardDestinations = consistent.New()
p.TraceDestinations = consistent.New()
p.ForwardGRPCDestinations = consistent.New()
if conf.ForwardTimeout != "" {
p.ForwardTimeout, err = time.ParseDuration(conf.ForwardTimeout)
if err != nil {
logger.WithError(err).
WithField("value", conf.ForwardTimeout).
Error("Could not parse forward timeout")
return
}
}
// We got a static forward address, stick it in the destination!
if p.ConsulForwardService == "" && conf.ForwardAddress != "" {
p.ForwardDestinations.Add(conf.ForwardAddress)
}
if p.ConsulTraceService == "" && conf.TraceAddress != "" {
p.TraceDestinations.Add(conf.TraceAddress)
}
if p.ConsulForwardGRPCService == "" && conf.GrpcForwardAddress != "" {
p.ForwardGRPCDestinations.Add(conf.GrpcForwardAddress)
}
if !p.AcceptingForwards && !p.AcceptingTraces && !p.AcceptingGRPCForwards {
err = errors.New("refusing to start with no Consul service names or static addresses in config")
logger.WithError(err).WithFields(logrus.Fields{
"consul_forward_service_name": p.ConsulForwardService,
"consul_trace_service_name": p.ConsulTraceService,
"consul_forward_grpc_service_name": p.ConsulForwardGRPCService,
"forward_address": conf.ForwardAddress,
"trace_address": conf.TraceAddress,
}).Error("Oops")
return
}
if p.usingConsul {
p.ConsulInterval, err = time.ParseDuration(conf.ConsulRefreshInterval)
if err != nil {
logger.WithError(err).Error("Error parsing Consul refresh interval")
return
}
logger.WithField("interval", conf.ConsulRefreshInterval).Info("Will use Consul for service discovery")
}
p.MetricsInterval = time.Second * 10
if conf.RuntimeMetricsInterval != "" {
p.MetricsInterval, err = time.ParseDuration(conf.RuntimeMetricsInterval)
if err != nil {
logger.WithError(err).Error("Error parsing metric refresh interval")
return
}
}
p.TraceClient = trace.DefaultClient
if conf.SsfDestinationAddress != "" {
stats, err := statsd.NewBuffered(conf.StatsAddress, 4096)
if err != nil {
return p, err
}
stats.Namespace = "veneur_proxy."
format := "ssf_format:packet"
if strings.HasPrefix(conf.SsfDestinationAddress, "unix://") {
format = "ssf_format:framed"
}
traceFlushInterval, err := time.ParseDuration(conf.TracingClientFlushInterval)
if err != nil {
logger.WithError(err).Error("Error parsing tracing flush interval")
return p, err
}
traceMetricsInterval, err := time.ParseDuration(conf.TracingClientMetricsInterval)
if err != nil {
logger.WithError(err).Error("Error parsing tracing metrics interval")
return p, err
}
p.TraceClient, err = trace.NewClient(conf.SsfDestinationAddress,
trace.Buffered,
trace.Capacity(uint(conf.TracingClientCapacity)),
trace.FlushInterval(traceFlushInterval),
trace.ReportStatistics(stats, traceMetricsInterval, []string{format}),
)
if err != nil {
logger.WithField("ssf_destination_address", conf.SsfDestinationAddress).
WithError(err).
Fatal("Error using SSF destination address")
}
}
if conf.GrpcAddress != "" {
p.grpcListenAddress = conf.GrpcAddress
p.grpcServer, err = proxysrv.New(p.ForwardGRPCDestinations,
proxysrv.WithForwardTimeout(p.ForwardTimeout),
proxysrv.WithLog(logrus.NewEntry(log)),
proxysrv.WithTraceClient(p.TraceClient),
)
if err != nil {
logger.WithError(err).Fatal("Failed to initialize the gRPC server")
}
}
// TODO Size of replicas in config?
//ret.ForwardDestinations.NumberOfReplicas = ???
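	// Illustrative sketch (added note, not from the original source): if the replica
	// count were made configurable, it could be applied here, since the hash rings
	// already exist on p. The conf.ForwardReplicas field below is hypothetical.
	//
	//	if conf.ForwardReplicas > 0 {
	//		p.ForwardDestinations.NumberOfReplicas = conf.ForwardReplicas
	//	}
	//
	// NumberOfReplicas is an exported field on the consistent-hashing ring (the
	// package default is 20), so tuning it only changes how evenly destinations are
	// spread across the ring.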
if conf.Debug {
logger.SetLevel(logrus.DebugLevel)
}
logger.WithField("config", conf).Debug("Initialized server")
return
}
// Start fires up the various goroutines that run on behalf of the server.
// This is separated from the constructor for testing convenience.
func (p *Proxy) Start() {
log.WithField("version", VERSION).Info("Starting server")
config := api.DefaultConfig()
// Use the same HTTP Client we're using for other things, so we can leverage
// it for testing.
config.HttpClient = p.HTTPClient
if p.usingKubernetes {
disc, err := NewKubernetesDiscoverer()
if err != nil {
log.WithError(err).Error("Error creating KubernetesDiscoverer")
return
}
p.Discoverer = disc
log.Info("Set Kubernetes discoverer")
} else if p.usingConsul {
disc, consulErr := NewConsul(config)
if consulErr != nil {
log.WithError(consulErr).Error("Error creating Consul discoverer")
return
}
p.Discoverer = disc
log.Info("Set Consul discoverer")
}
if p.AcceptingForwards && p.ConsulForwardService != "" {
p.RefreshDestinations(p.ConsulForwardService, p.ForwardDestinations, &p.ForwardDestinationsMtx)
if len(p.ForwardDestinations.Members()) == 0 {
log.WithField("serviceName", p.ConsulForwardService).Fatal("Refusing to start with zero destinations for forwarding.")
}
}
if p.AcceptingTraces && p.ConsulTraceService != "" {
p.RefreshDestinations(p.ConsulTraceService, p.TraceDestinations, &p.TraceDestinationsMtx)
		if len(p.TraceDestinations.Members()) == 0 {
log.WithField("serviceName", p.ConsulTraceService).Fatal("Refusing to start with zero destinations for tracing.")
}
}
if p.AcceptingGRPCForwards && p.ConsulForwardGRPCService != "" {
p.RefreshDestinations(p.ConsulForwardGRPCService, p.ForwardGRPCDestinations, &p.ForwardGRPCDestinationsMtx)
if len(p.ForwardGRPCDestinations.Members()) == 0 {
log.WithField("serviceName", p.ConsulForwardGRPCService).Fatal("Refusing to start with zero destinations for forwarding over gRPC.")
}
p.grpcServer.SetDestinations(p.ForwardGRPCDestinations)
}
if p.usingConsul || p.usingKubernetes {
log.Info("Creating service discovery goroutine")
go func() {
defer func() {
ConsumePanic(p.Sentry, p.TraceClient, p.Hostname, recover())
}()
ticker := time.NewTicker(p.ConsulInterval)
for range ticker.C {
log.WithFields(logrus.Fields{
"acceptingForwards": p.AcceptingForwards,
"consulForwardService": p.ConsulForwardService,
"consulTraceService": p.ConsulTraceService,
"consulForwardGRPCService": p.ConsulForwardGRPCService,
}).Debug("About to refresh destinations")
if p.AcceptingForwards && p.ConsulForwardService != "" {
p.RefreshDestinations(p.ConsulForwardService, p.ForwardDestinations, &p.ForwardDestinationsMtx)
}
if p.AcceptingTraces && p.ConsulTraceService != "" {
p.RefreshDestinations(p.ConsulTraceService, p.TraceDestinations, &p.TraceDestinationsMtx)
}
if p.AcceptingGRPCForwards && p.ConsulForwardGRPCService != "" {
p.RefreshDestinations(p.ConsulForwardGRPCService, p.ForwardGRPCDestinations, &p.ForwardGRPCDestinationsMtx)
p.grpcServer.SetDestinations(p.ForwardGRPCDestinations)
}
}
}()
}
go func() {
hostname, _ := os.Hostname()
defer func() {
ConsumePanic(p.Sentry, p.TraceClient, hostname, recover())
}()
ticker := time.NewTicker(p.MetricsInterval)
for {
select {
case <-p.shutdown:
// stop flushing on graceful shutdown
ticker.Stop()
return
case <-ticker.C:
p.ReportRuntimeMetrics()
}
}
}()
}
// Start all of the configured servers (gRPC or HTTP) and block until
// one of them exits. At that point, stop them both.
func (p *Proxy) Serve() |
// HTTPServe starts the HTTP server and listens perpetually until it encounters an unrecoverable error.
func | {
done := make(chan struct{}, 2)
go func() {
p.HTTPServe()
done <- struct{}{}
}()
if p.grpcListenAddress != "" {
go func() {
p.gRPCServe()
done <- struct{}{}
}()
}
// wait until at least one of the servers has shut down
<-done
graceful.Shutdown()
p.gRPCStop()
} | identifier_body |
args_info.rs | input);
// Based on the type generate the appropriate code.
let mut output_tokens = match &input.data {
syn::Data::Struct(ds) => {
impl_arg_info_struct(errors, &input.ident, type_attrs, &input.generics, ds)
}
syn::Data::Enum(de) => {
impl_arg_info_enum(errors, &input.ident, type_attrs, &input.generics, de)
}
syn::Data::Union(_) => {
errors.err(input, "`#[derive(ArgsInfo)]` cannot be applied to unions");
TokenStream::new()
}
};
errors.to_tokens(&mut output_tokens);
output_tokens
}
/// Implement the ArgsInfo trait for a struct annotated with argh attributes.
fn impl_arg_info_struct(
errors: &Errors,
name: &syn::Ident,
type_attrs: &TypeAttrs,
generic_args: &syn::Generics,
ds: &syn::DataStruct,
) -> TokenStream {
// Collect the fields, skipping fields that are not supported.
let fields = match &ds.fields {
syn::Fields::Named(fields) => fields,
syn::Fields::Unnamed(_) => {
errors.err(
&ds.struct_token,
"`#![derive(ArgsInfo)]` is not currently supported on tuple structs",
);
return TokenStream::new();
}
syn::Fields::Unit => {
errors.err(&ds.struct_token, "#![derive(ArgsInfo)]` cannot be applied to unit structs");
return TokenStream::new();
}
};
// Map the fields into StructField objects.
let fields: Vec<_> = fields
.named
.iter()
.filter_map(|field| {
let attrs = FieldAttrs::parse(errors, field);
StructField::new(errors, field, attrs)
})
.collect();
let impl_span = Span::call_site();
// Generate the implementation of `get_args_info()` for this struct.
let args_info = impl_args_info_data(name, errors, type_attrs, &fields);
// Split out the generics info for the impl declaration.
let (impl_generics, ty_generics, where_clause) = generic_args.split_for_impl();
quote_spanned! { impl_span =>
#[automatically_derived]
impl #impl_generics argh::ArgsInfo for #name #ty_generics #where_clause {
fn get_args_info() -> argh::CommandInfoWithArgs {
#args_info
}
}
}
}
/// Implement ArgsInfo for an enum. The enum is a collection of subcommands.
fn impl_arg_info_enum(
errors: &Errors,
name: &syn::Ident,
type_attrs: &TypeAttrs,
generic_args: &syn::Generics,
de: &syn::DataEnum,
) -> TokenStream {
// Validate the enum is OK for argh.
check_enum_type_attrs(errors, type_attrs, &de.enum_token.span);
// Ensure that `#[argh(subcommand)]` is present.
if type_attrs.is_subcommand.is_none() {
errors.err_span(
de.enum_token.span,
concat!(
"`#![derive(ArgsInfo)]` on `enum`s can only be used to enumerate subcommands.\n",
"Consider adding `#[argh(subcommand)]` to the `enum` declaration.",
),
);
}
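    // Illustrative example (added note, not part of the macro source): the shape of
    // user code this branch expects. Attribute spellings follow the argh docs; treat
    // the exact names (MySubCommands, SubCommandOne, ...) as placeholders.
    //
    //     #[derive(FromArgs, ArgsInfo)]
    //     /// Top-level arguments.
    //     struct TopLevel {
    //         #[argh(subcommand)]
    //         nested: MySubCommands,
    //     }
    //
    //     #[derive(FromArgs, ArgsInfo)]
    //     #[argh(subcommand)]
    //     enum MySubCommands {
    //         One(SubCommandOne),
    //         Two(SubCommandTwo),
    //     }
    //
    // Without `#[argh(subcommand)]` on the enum, the err_span call above is what the
    // user sees at compile time.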
// One of the variants can be annotated as providing dynamic subcommands.
// We treat this differently since we need to call a function at runtime
// to determine the subcommands provided.
let mut dynamic_type_and_variant = None;
// An enum variant like `<name>(<ty>)`. This is used to collect
// the type of the variant for each subcommand.
struct ArgInfoVariant<'a> {
ty: &'a syn::Type,
}
let variants: Vec<ArgInfoVariant<'_>> = de
.variants
.iter()
.filter_map(|variant| {
let name = &variant.ident;
let ty = enum_only_single_field_unnamed_variants(errors, &variant.fields)?;
if VariantAttrs::parse(errors, variant).is_dynamic.is_some() {
if dynamic_type_and_variant.is_some() {
errors.err(variant, "Only one variant can have the `dynamic` attribute");
}
dynamic_type_and_variant = Some((ty, name));
None
} else {
Some(ArgInfoVariant { ty })
}
})
.collect();
let dynamic_subcommands = if let Some((dynamic_type, _)) = dynamic_type_and_variant {
quote! {
<#dynamic_type as argh::DynamicSubCommand>::commands().iter()
.map(|s|
SubCommandInfo {
name: s.name,
command: CommandInfoWithArgs {
name: s.name,
description: s.description,
..Default::default()
}
}).collect()
}
} else {
quote! { vec![]}
};
let variant_ty_info = variants.iter().map(|t| {
let ty = t.ty;
quote!(
argh::SubCommandInfo {
name: #ty::get_args_info().name,
command: #ty::get_args_info()
}
)
});
let cmd_name = if let Some(id) = &type_attrs.name {
id.clone()
} else {
LitStr::new("", Span::call_site())
};
let (impl_generics, ty_generics, where_clause) = generic_args.split_for_impl();
quote! {
#[automatically_derived]
impl #impl_generics argh::ArgsInfo for #name #ty_generics #where_clause {
fn get_args_info() -> argh::CommandInfoWithArgs {
let mut the_subcommands = vec![#(#variant_ty_info),*];
let mut dynamic_commands = #dynamic_subcommands;
the_subcommands.append(&mut dynamic_commands);
argh::CommandInfoWithArgs {
name: #cmd_name, | commands: the_subcommands,
..Default::default()
}
        } // end of get_args_info
} // end of impl ArgsInfo
}
}
fn impl_args_info_data<'a>(
name: &proc_macro2::Ident,
errors: &Errors,
type_attrs: &TypeAttrs,
fields: &'a [StructField<'a>],
) -> TokenStream {
let mut subcommands_iter =
fields.iter().filter(|field| field.kind == FieldKind::SubCommand).fuse();
let subcommand: Option<&StructField<'_>> = subcommands_iter.next();
for dup_subcommand in subcommands_iter {
errors.duplicate_attrs("subcommand", subcommand.unwrap().field, dup_subcommand.field);
}
let impl_span = Span::call_site();
let mut positionals = vec![];
let mut flags = vec![];
// Add the implicit --help flag
flags.push(quote! {
argh::FlagInfo {
short: None,
long: "--help",
description: "display usage information",
optionality: argh::Optionality::Optional,
kind: argh::FlagInfoKind::Switch,
hidden: false
}
});
for field in fields {
let optionality = match field.optionality {
Optionality::None => quote! { argh::Optionality::Required },
Optionality::Defaulted(_) => quote! { argh::Optionality::Optional },
Optionality::Optional => quote! { argh::Optionality::Optional },
Optionality::Repeating if field.attrs.greedy.is_some() => {
quote! { argh::Optionality::Greedy }
}
Optionality::Repeating => quote! { argh::Optionality::Repeating },
};
match field.kind {
FieldKind::Positional => {
let name = field.positional_arg_name();
let description = if let Some(desc) = &field.attrs.description {
desc.content.value().trim().to_owned()
} else {
String::new()
};
let hidden = field.attrs.hidden_help;
positionals.push(quote! {
argh::PositionalInfo {
name: #name,
description: #description,
optionality: #optionality,
hidden: #hidden,
}
});
}
FieldKind::Switch | FieldKind::Option => {
let short = if let Some(short) = &field.attrs.short {
quote! { Some(#short) }
} else {
quote! { None }
};
let long = field.long_name.as_ref().expect("missing long name for option");
let description = require_description(
errors,
field.name.span(),
&field.attrs.description,
"field",
);
let kind = if field.kind == FieldKind::Switch {
quote! {
argh::FlagInfoKind::Switch
}
} else {
let arg_name = if let Some(arg_name) = &field.attrs.arg_name {
quote! { #arg_name }
} else {
let arg_name = long.trim_start_matches("--");
quote! { #arg_name }
};
quote! {
argh::FlagInfoKind::Option {
arg_name: #arg_name,
}
}
};
let hidden = field.attrs.hidden_help;
flags.push(quote! {
argh::FlagInfo {
short: #short,
| /// A short description of the command's functionality.
description: " enum of subcommands", | random_line_split |
args_info.rs | input);
// Based on the type generate the appropriate code.
let mut output_tokens = match &input.data {
syn::Data::Struct(ds) => {
impl_arg_info_struct(errors, &input.ident, type_attrs, &input.generics, ds)
}
syn::Data::Enum(de) => {
impl_arg_info_enum(errors, &input.ident, type_attrs, &input.generics, de)
}
syn::Data::Union(_) => {
errors.err(input, "`#[derive(ArgsInfo)]` cannot be applied to unions");
TokenStream::new()
}
};
errors.to_tokens(&mut output_tokens);
output_tokens
}
/// Implement the ArgsInfo trait for a struct annotated with argh attributes.
fn impl_arg_info_struct(
errors: &Errors,
name: &syn::Ident,
type_attrs: &TypeAttrs,
generic_args: &syn::Generics,
ds: &syn::DataStruct,
) -> TokenStream {
// Collect the fields, skipping fields that are not supported.
let fields = match &ds.fields {
syn::Fields::Named(fields) => fields,
syn::Fields::Unnamed(_) => {
errors.err(
&ds.struct_token,
"`#![derive(ArgsInfo)]` is not currently supported on tuple structs",
);
return TokenStream::new();
}
syn::Fields::Unit => {
errors.err(&ds.struct_token, "#![derive(ArgsInfo)]` cannot be applied to unit structs");
return TokenStream::new();
}
};
// Map the fields into StructField objects.
let fields: Vec<_> = fields
.named
.iter()
.filter_map(|field| {
let attrs = FieldAttrs::parse(errors, field);
StructField::new(errors, field, attrs)
})
.collect();
let impl_span = Span::call_site();
// Generate the implementation of `get_args_info()` for this struct.
let args_info = impl_args_info_data(name, errors, type_attrs, &fields);
// Split out the generics info for the impl declaration.
let (impl_generics, ty_generics, where_clause) = generic_args.split_for_impl();
quote_spanned! { impl_span =>
#[automatically_derived]
impl #impl_generics argh::ArgsInfo for #name #ty_generics #where_clause {
fn get_args_info() -> argh::CommandInfoWithArgs {
#args_info
}
}
}
}
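// Rough usage sketch (added note, assumed rather than taken from this crate's docs):
// for a user struct such as
//
//     #[derive(FromArgs, ArgsInfo)]
//     /// Reach new heights.
//     struct GoUp {
//         /// how high to go
//         #[argh(option)]
//         height: usize,
//     }
//
// the impl generated above lets callers inspect the CLI surface at runtime, e.g.
//
//     let info = GoUp::get_args_info();
//     println!("{} takes {} flag(s)", info.name, info.flags.len());
//
// Field names like `flags` are inferred from how FlagInfo values are collected in
// impl_args_info_data below and may differ in detail.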
/// Implement ArgsInfo for an enum. The enum is a collection of subcommands.
fn impl_arg_info_enum(
errors: &Errors,
name: &syn::Ident,
type_attrs: &TypeAttrs,
generic_args: &syn::Generics,
de: &syn::DataEnum,
) -> TokenStream | // An enum variant like `<name>(<ty>)`. This is used to collect
// the type of the variant for each subcommand.
struct ArgInfoVariant<'a> {
ty: &'a syn::Type,
}
let variants: Vec<ArgInfoVariant<'_>> = de
.variants
.iter()
.filter_map(|variant| {
let name = &variant.ident;
let ty = enum_only_single_field_unnamed_variants(errors, &variant.fields)?;
if VariantAttrs::parse(errors, variant).is_dynamic.is_some() {
if dynamic_type_and_variant.is_some() {
errors.err(variant, "Only one variant can have the `dynamic` attribute");
}
dynamic_type_and_variant = Some((ty, name));
None
} else {
Some(ArgInfoVariant { ty })
}
})
.collect();
let dynamic_subcommands = if let Some((dynamic_type, _)) = dynamic_type_and_variant {
quote! {
<#dynamic_type as argh::DynamicSubCommand>::commands().iter()
.map(|s|
SubCommandInfo {
name: s.name,
command: CommandInfoWithArgs {
name: s.name,
description: s.description,
..Default::default()
}
}).collect()
}
} else {
quote! { vec![]}
};
let variant_ty_info = variants.iter().map(|t| {
let ty = t.ty;
quote!(
argh::SubCommandInfo {
name: #ty::get_args_info().name,
command: #ty::get_args_info()
}
)
});
let cmd_name = if let Some(id) = &type_attrs.name {
id.clone()
} else {
LitStr::new("", Span::call_site())
};
let (impl_generics, ty_generics, where_clause) = generic_args.split_for_impl();
quote! {
#[automatically_derived]
impl #impl_generics argh::ArgsInfo for #name #ty_generics #where_clause {
fn get_args_info() -> argh::CommandInfoWithArgs {
let mut the_subcommands = vec![#(#variant_ty_info),*];
let mut dynamic_commands = #dynamic_subcommands;
the_subcommands.append(&mut dynamic_commands);
argh::CommandInfoWithArgs {
name: #cmd_name,
/// A short description of the command's functionality.
description: " enum of subcommands",
commands: the_subcommands,
..Default::default()
}
        } // end of get_args_info
} // end of impl ArgsInfo
}
}
fn impl_args_info_data<'a>(
name: &proc_macro2::Ident,
errors: &Errors,
type_attrs: &TypeAttrs,
fields: &'a [StructField<'a>],
) -> TokenStream {
let mut subcommands_iter =
fields.iter().filter(|field| field.kind == FieldKind::SubCommand).fuse();
let subcommand: Option<&StructField<'_>> = subcommands_iter.next();
for dup_subcommand in subcommands_iter {
errors.duplicate_attrs("subcommand", subcommand.unwrap().field, dup_subcommand.field);
}
let impl_span = Span::call_site();
let mut positionals = vec![];
let mut flags = vec![];
// Add the implicit --help flag
flags.push(quote! {
argh::FlagInfo {
short: None,
long: "--help",
description: "display usage information",
optionality: argh::Optionality::Optional,
kind: argh::FlagInfoKind::Switch,
hidden: false
}
});
for field in fields {
let optionality = match field.optionality {
Optionality::None => quote! { argh::Optionality::Required },
Optionality::Defaulted(_) => quote! { argh::Optionality::Optional },
Optionality::Optional => quote! { argh::Optionality::Optional },
Optionality::Repeating if field.attrs.greedy.is_some() => {
quote! { argh::Optionality::Greedy }
}
Optionality::Repeating => quote! { argh::Optionality::Repeating },
};
match field.kind {
FieldKind::Positional => {
let name = field.positional_arg_name();
let description = if let Some(desc) = &field.attrs.description {
desc.content.value().trim().to_owned()
} else {
String::new()
};
let hidden = field.attrs.hidden_help;
positionals.push(quote! {
argh::PositionalInfo {
name: #name,
description: #description,
optionality: #optionality,
hidden: #hidden,
}
});
}
FieldKind::Switch | FieldKind::Option => {
let short = if let Some(short) = &field.attrs.short {
quote! { Some(#short) }
} else {
quote! { None }
};
let long = field.long_name.as_ref().expect("missing long name for option");
let description = require_description(
errors,
field.name.span(),
&field.attrs.description,
"field",
);
let kind = if field.kind == FieldKind::Switch {
quote! {
argh::FlagInfoKind::Switch
}
} else {
let arg_name = if let Some(arg_name) = &field.attrs.arg_name {
quote! { #arg_name }
} else {
let arg_name = long.trim_start_matches("--");
quote! { #arg_name }
};
quote! {
argh::FlagInfoKind::Option {
arg_name: #arg_name,
}
}
};
let hidden = field.attrs.hidden_help;
flags.push(quote! {
argh::FlagInfo {
short: #short,
| {
// Validate the enum is OK for argh.
check_enum_type_attrs(errors, type_attrs, &de.enum_token.span);
// Ensure that `#[argh(subcommand)]` is present.
if type_attrs.is_subcommand.is_none() {
errors.err_span(
de.enum_token.span,
concat!(
"`#![derive(ArgsInfo)]` on `enum`s can only be used to enumerate subcommands.\n",
"Consider adding `#[argh(subcommand)]` to the `enum` declaration.",
),
);
}
// One of the variants can be annotated as providing dynamic subcommands.
// We treat this differently since we need to call a function at runtime
// to determine the subcommands provided.
let mut dynamic_type_and_variant = None;
| identifier_body |
args_info.rs | );
// Based on the type generate the appropriate code.
let mut output_tokens = match &input.data {
syn::Data::Struct(ds) => {
impl_arg_info_struct(errors, &input.ident, type_attrs, &input.generics, ds)
}
syn::Data::Enum(de) => |
syn::Data::Union(_) => {
errors.err(input, "`#[derive(ArgsInfo)]` cannot be applied to unions");
TokenStream::new()
}
};
errors.to_tokens(&mut output_tokens);
output_tokens
}
/// Implement the ArgsInfo trait for a struct annotated with argh attributes.
fn impl_arg_info_struct(
errors: &Errors,
name: &syn::Ident,
type_attrs: &TypeAttrs,
generic_args: &syn::Generics,
ds: &syn::DataStruct,
) -> TokenStream {
// Collect the fields, skipping fields that are not supported.
let fields = match &ds.fields {
syn::Fields::Named(fields) => fields,
syn::Fields::Unnamed(_) => {
errors.err(
&ds.struct_token,
"`#![derive(ArgsInfo)]` is not currently supported on tuple structs",
);
return TokenStream::new();
}
syn::Fields::Unit => {
errors.err(&ds.struct_token, "#![derive(ArgsInfo)]` cannot be applied to unit structs");
return TokenStream::new();
}
};
// Map the fields into StructField objects.
let fields: Vec<_> = fields
.named
.iter()
.filter_map(|field| {
let attrs = FieldAttrs::parse(errors, field);
StructField::new(errors, field, attrs)
})
.collect();
let impl_span = Span::call_site();
// Generate the implementation of `get_args_info()` for this struct.
let args_info = impl_args_info_data(name, errors, type_attrs, &fields);
// Split out the generics info for the impl declaration.
let (impl_generics, ty_generics, where_clause) = generic_args.split_for_impl();
quote_spanned! { impl_span =>
#[automatically_derived]
impl #impl_generics argh::ArgsInfo for #name #ty_generics #where_clause {
fn get_args_info() -> argh::CommandInfoWithArgs {
#args_info
}
}
}
}
/// Implement ArgsInfo for an enum. The enum is a collection of subcommands.
fn impl_arg_info_enum(
errors: &Errors,
name: &syn::Ident,
type_attrs: &TypeAttrs,
generic_args: &syn::Generics,
de: &syn::DataEnum,
) -> TokenStream {
// Validate the enum is OK for argh.
check_enum_type_attrs(errors, type_attrs, &de.enum_token.span);
// Ensure that `#[argh(subcommand)]` is present.
if type_attrs.is_subcommand.is_none() {
errors.err_span(
de.enum_token.span,
concat!(
"`#![derive(ArgsInfo)]` on `enum`s can only be used to enumerate subcommands.\n",
"Consider adding `#[argh(subcommand)]` to the `enum` declaration.",
),
);
}
// One of the variants can be annotated as providing dynamic subcommands.
// We treat this differently since we need to call a function at runtime
// to determine the subcommands provided.
let mut dynamic_type_and_variant = None;
// An enum variant like `<name>(<ty>)`. This is used to collect
// the type of the variant for each subcommand.
struct ArgInfoVariant<'a> {
ty: &'a syn::Type,
}
let variants: Vec<ArgInfoVariant<'_>> = de
.variants
.iter()
.filter_map(|variant| {
let name = &variant.ident;
let ty = enum_only_single_field_unnamed_variants(errors, &variant.fields)?;
if VariantAttrs::parse(errors, variant).is_dynamic.is_some() {
if dynamic_type_and_variant.is_some() {
errors.err(variant, "Only one variant can have the `dynamic` attribute");
}
dynamic_type_and_variant = Some((ty, name));
None
} else {
Some(ArgInfoVariant { ty })
}
})
.collect();
let dynamic_subcommands = if let Some((dynamic_type, _)) = dynamic_type_and_variant {
quote! {
<#dynamic_type as argh::DynamicSubCommand>::commands().iter()
.map(|s|
SubCommandInfo {
name: s.name,
command: CommandInfoWithArgs {
name: s.name,
description: s.description,
..Default::default()
}
}).collect()
}
} else {
quote! { vec![]}
};
let variant_ty_info = variants.iter().map(|t| {
let ty = t.ty;
quote!(
argh::SubCommandInfo {
name: #ty::get_args_info().name,
command: #ty::get_args_info()
}
)
});
let cmd_name = if let Some(id) = &type_attrs.name {
id.clone()
} else {
LitStr::new("", Span::call_site())
};
let (impl_generics, ty_generics, where_clause) = generic_args.split_for_impl();
quote! {
#[automatically_derived]
impl #impl_generics argh::ArgsInfo for #name #ty_generics #where_clause {
fn get_args_info() -> argh::CommandInfoWithArgs {
let mut the_subcommands = vec![#(#variant_ty_info),*];
let mut dynamic_commands = #dynamic_subcommands;
the_subcommands.append(&mut dynamic_commands);
argh::CommandInfoWithArgs {
name: #cmd_name,
/// A short description of the command's functionality.
description: " enum of subcommands",
commands: the_subcommands,
..Default::default()
}
        } // end of get_args_info
} // end of impl ArgsInfo
}
}
fn impl_args_info_data<'a>(
name: &proc_macro2::Ident,
errors: &Errors,
type_attrs: &TypeAttrs,
fields: &'a [StructField<'a>],
) -> TokenStream {
let mut subcommands_iter =
fields.iter().filter(|field| field.kind == FieldKind::SubCommand).fuse();
let subcommand: Option<&StructField<'_>> = subcommands_iter.next();
for dup_subcommand in subcommands_iter {
errors.duplicate_attrs("subcommand", subcommand.unwrap().field, dup_subcommand.field);
}
let impl_span = Span::call_site();
let mut positionals = vec![];
let mut flags = vec![];
// Add the implicit --help flag
flags.push(quote! {
argh::FlagInfo {
short: None,
long: "--help",
description: "display usage information",
optionality: argh::Optionality::Optional,
kind: argh::FlagInfoKind::Switch,
hidden: false
}
});
for field in fields {
let optionality = match field.optionality {
Optionality::None => quote! { argh::Optionality::Required },
Optionality::Defaulted(_) => quote! { argh::Optionality::Optional },
Optionality::Optional => quote! { argh::Optionality::Optional },
Optionality::Repeating if field.attrs.greedy.is_some() => {
quote! { argh::Optionality::Greedy }
}
Optionality::Repeating => quote! { argh::Optionality::Repeating },
};
match field.kind {
FieldKind::Positional => {
let name = field.positional_arg_name();
let description = if let Some(desc) = &field.attrs.description {
desc.content.value().trim().to_owned()
} else {
String::new()
};
let hidden = field.attrs.hidden_help;
positionals.push(quote! {
argh::PositionalInfo {
name: #name,
description: #description,
optionality: #optionality,
hidden: #hidden,
}
});
}
FieldKind::Switch | FieldKind::Option => {
let short = if let Some(short) = &field.attrs.short {
quote! { Some(#short) }
} else {
quote! { None }
};
let long = field.long_name.as_ref().expect("missing long name for option");
let description = require_description(
errors,
field.name.span(),
&field.attrs.description,
"field",
);
let kind = if field.kind == FieldKind::Switch {
quote! {
argh::FlagInfoKind::Switch
}
} else {
let arg_name = if let Some(arg_name) = &field.attrs.arg_name {
quote! { #arg_name }
} else {
let arg_name = long.trim_start_matches("--");
quote! { #arg_name }
};
quote! {
argh::FlagInfoKind::Option {
arg_name: #arg_name,
}
}
};
let hidden = field.attrs.hidden_help;
flags.push(quote! {
argh::FlagInfo {
short: #short,
| {
impl_arg_info_enum(errors, &input.ident, type_attrs, &input.generics, de)
} | conditional_block |
args_info.rs | input);
// Based on the type generate the appropriate code.
let mut output_tokens = match &input.data {
syn::Data::Struct(ds) => {
impl_arg_info_struct(errors, &input.ident, type_attrs, &input.generics, ds)
}
syn::Data::Enum(de) => {
impl_arg_info_enum(errors, &input.ident, type_attrs, &input.generics, de)
}
syn::Data::Union(_) => {
errors.err(input, "`#[derive(ArgsInfo)]` cannot be applied to unions");
TokenStream::new()
}
};
errors.to_tokens(&mut output_tokens);
output_tokens
}
/// Implement the ArgsInfo trait for a struct annotated with argh attributes.
fn impl_arg_info_struct(
errors: &Errors,
name: &syn::Ident,
type_attrs: &TypeAttrs,
generic_args: &syn::Generics,
ds: &syn::DataStruct,
) -> TokenStream {
// Collect the fields, skipping fields that are not supported.
let fields = match &ds.fields {
syn::Fields::Named(fields) => fields,
syn::Fields::Unnamed(_) => {
errors.err(
&ds.struct_token,
"`#![derive(ArgsInfo)]` is not currently supported on tuple structs",
);
return TokenStream::new();
}
syn::Fields::Unit => {
errors.err(&ds.struct_token, "#![derive(ArgsInfo)]` cannot be applied to unit structs");
return TokenStream::new();
}
};
// Map the fields into StructField objects.
let fields: Vec<_> = fields
.named
.iter()
.filter_map(|field| {
let attrs = FieldAttrs::parse(errors, field);
StructField::new(errors, field, attrs)
})
.collect();
let impl_span = Span::call_site();
// Generate the implementation of `get_args_info()` for this struct.
let args_info = impl_args_info_data(name, errors, type_attrs, &fields);
// Split out the generics info for the impl declaration.
let (impl_generics, ty_generics, where_clause) = generic_args.split_for_impl();
quote_spanned! { impl_span =>
#[automatically_derived]
impl #impl_generics argh::ArgsInfo for #name #ty_generics #where_clause {
fn get_args_info() -> argh::CommandInfoWithArgs {
#args_info
}
}
}
}
/// Implement ArgsInfo for an enum. The enum is a collection of subcommands.
fn impl_arg_info_enum(
errors: &Errors,
name: &syn::Ident,
type_attrs: &TypeAttrs,
generic_args: &syn::Generics,
de: &syn::DataEnum,
) -> TokenStream {
// Validate the enum is OK for argh.
check_enum_type_attrs(errors, type_attrs, &de.enum_token.span);
// Ensure that `#[argh(subcommand)]` is present.
if type_attrs.is_subcommand.is_none() {
errors.err_span(
de.enum_token.span,
concat!(
"`#![derive(ArgsInfo)]` on `enum`s can only be used to enumerate subcommands.\n",
"Consider adding `#[argh(subcommand)]` to the `enum` declaration.",
),
);
}
// One of the variants can be annotated as providing dynamic subcommands.
// We treat this differently since we need to call a function at runtime
// to determine the subcommands provided.
let mut dynamic_type_and_variant = None;
// An enum variant like `<name>(<ty>)`. This is used to collect
// the type of the variant for each subcommand.
struct | <'a> {
ty: &'a syn::Type,
}
let variants: Vec<ArgInfoVariant<'_>> = de
.variants
.iter()
.filter_map(|variant| {
let name = &variant.ident;
let ty = enum_only_single_field_unnamed_variants(errors, &variant.fields)?;
if VariantAttrs::parse(errors, variant).is_dynamic.is_some() {
if dynamic_type_and_variant.is_some() {
errors.err(variant, "Only one variant can have the `dynamic` attribute");
}
dynamic_type_and_variant = Some((ty, name));
None
} else {
Some(ArgInfoVariant { ty })
}
})
.collect();
let dynamic_subcommands = if let Some((dynamic_type, _)) = dynamic_type_and_variant {
quote! {
<#dynamic_type as argh::DynamicSubCommand>::commands().iter()
.map(|s|
SubCommandInfo {
name: s.name,
command: CommandInfoWithArgs {
name: s.name,
description: s.description,
..Default::default()
}
}).collect()
}
} else {
quote! { vec![]}
};
let variant_ty_info = variants.iter().map(|t| {
let ty = t.ty;
quote!(
argh::SubCommandInfo {
name: #ty::get_args_info().name,
command: #ty::get_args_info()
}
)
});
let cmd_name = if let Some(id) = &type_attrs.name {
id.clone()
} else {
LitStr::new("", Span::call_site())
};
let (impl_generics, ty_generics, where_clause) = generic_args.split_for_impl();
quote! {
#[automatically_derived]
impl #impl_generics argh::ArgsInfo for #name #ty_generics #where_clause {
fn get_args_info() -> argh::CommandInfoWithArgs {
let mut the_subcommands = vec![#(#variant_ty_info),*];
let mut dynamic_commands = #dynamic_subcommands;
the_subcommands.append(&mut dynamic_commands);
argh::CommandInfoWithArgs {
name: #cmd_name,
/// A short description of the command's functionality.
description: " enum of subcommands",
commands: the_subcommands,
..Default::default()
}
        } // end of get_args_info
} // end of impl ArgsInfo
}
}
fn impl_args_info_data<'a>(
name: &proc_macro2::Ident,
errors: &Errors,
type_attrs: &TypeAttrs,
fields: &'a [StructField<'a>],
) -> TokenStream {
let mut subcommands_iter =
fields.iter().filter(|field| field.kind == FieldKind::SubCommand).fuse();
let subcommand: Option<&StructField<'_>> = subcommands_iter.next();
for dup_subcommand in subcommands_iter {
errors.duplicate_attrs("subcommand", subcommand.unwrap().field, dup_subcommand.field);
}
let impl_span = Span::call_site();
let mut positionals = vec![];
let mut flags = vec![];
// Add the implicit --help flag
flags.push(quote! {
argh::FlagInfo {
short: None,
long: "--help",
description: "display usage information",
optionality: argh::Optionality::Optional,
kind: argh::FlagInfoKind::Switch,
hidden: false
}
});
for field in fields {
let optionality = match field.optionality {
Optionality::None => quote! { argh::Optionality::Required },
Optionality::Defaulted(_) => quote! { argh::Optionality::Optional },
Optionality::Optional => quote! { argh::Optionality::Optional },
Optionality::Repeating if field.attrs.greedy.is_some() => {
quote! { argh::Optionality::Greedy }
}
Optionality::Repeating => quote! { argh::Optionality::Repeating },
};
match field.kind {
FieldKind::Positional => {
let name = field.positional_arg_name();
let description = if let Some(desc) = &field.attrs.description {
desc.content.value().trim().to_owned()
} else {
String::new()
};
let hidden = field.attrs.hidden_help;
positionals.push(quote! {
argh::PositionalInfo {
name: #name,
description: #description,
optionality: #optionality,
hidden: #hidden,
}
});
}
FieldKind::Switch | FieldKind::Option => {
let short = if let Some(short) = &field.attrs.short {
quote! { Some(#short) }
} else {
quote! { None }
};
let long = field.long_name.as_ref().expect("missing long name for option");
let description = require_description(
errors,
field.name.span(),
&field.attrs.description,
"field",
);
let kind = if field.kind == FieldKind::Switch {
quote! {
argh::FlagInfoKind::Switch
}
} else {
let arg_name = if let Some(arg_name) = &field.attrs.arg_name {
quote! { #arg_name }
} else {
let arg_name = long.trim_start_matches("--");
quote! { #arg_name }
};
quote! {
argh::FlagInfoKind::Option {
arg_name: #arg_name,
}
}
};
let hidden = field.attrs.hidden_help;
flags.push(quote! {
argh::FlagInfo {
short: #short,
| ArgInfoVariant | identifier_name |
Ch12_Astrocrash_Game.py | ship in the direction the ship is facing. Since there’s no friction, the ship keeps moving based on all of the thrust the player applies to it.
When the player engages the ship’s engine, the code changes the velocity of the ship based on the ship’s angle (and produces an appropriate sound effect, too)"""
def update(self):
#print(self.missile_wait)
super(Ship, self).update()
""" Rotate based on keys pressed. """
if games.keyboard.is_pressed(games.K_LEFT):
self.angle -= Ship.ROTATION_STEP # Subtract 3 degrees
if games.keyboard.is_pressed(games.K_RIGHT):
self.angle += Ship.ROTATION_STEP # Add 3 degrees
if games.keyboard.is_pressed(games.K_UP):
Ship.sound.play()
#when the player presses the up arrow key, we need to alter the ship’s velocity components(the Ship object’s dx and dy).
            # First convert degrees to radians
angle=self.angle*math.pi/180
# Use sin=Perp/Hyp and cos=Base/Hyp
self.dx+=Ship.VELOCITY_STEP * math.sin(angle) # x is horizontal so use sin() to find dx(base); Ship's last position is retained to find out next position
self.dy+=Ship.VELOCITY_STEP * -math. cos(angle) # y is perp so use cos() to find dy (perp)
# cap velocity in each direction- I cap the ship’s speed to avoid several potential problems, including the ship running into its own missiles.
self.dx = min(max(self.dx, -Ship.VELOCITY_MAX), Ship.VELOCITY_MAX)
self.dy = min(max(self.dy, -Ship.VELOCITY_MAX), Ship.VELOCITY_MAX)
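            # Illustrative worked example (added note, not in the book's listing):
            # with VELOCITY_STEP = 0.03 and the ship rotated to self.angle = 90
            # (nose pointing right), angle = 90 * pi/180 ~= 1.571 rad, so
            # sin(angle) ~= 1.0 and cos(angle) ~= 0.0, giving
            #     self.dx += 0.03 * 1.0     (thrust to the right)
            #     self.dy += 0.03 * -0.0    (no vertical change)
            # At self.angle = 0 (nose up), sin = 0 and cos = 1, so dy drops by 0.03
            # per frame -- negative dy moves the ship up, because screen y grows
            # downward.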
if self.missile_wait>0:
self.missile_wait-=1
"""Firing Missiles- allows the player to fire missiles by pressing the spacebar, the code below limits the missile fire rate by creating a countdown that forces a delay between missile firings. Once the countdown ends, the player is able to fire another missile."""
if games.keyboard.is_pressed(games.K_SPACE) and self.missile_wait==0:
new_missile=Missile(self.x, self.y, self.angle) # New Missile will have a x and y and an angle to be given to its constructor to calculate velocity etc.
games.screen.add(new_missile)
self.missile_wait=Ship.MISSILE_DELAY
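            # Added note (not in the original listing): MISSILE_DELAY = 25 means the
            # counter set here must tick down through 25 calls to update() before the
            # spacebar fires again; at the 50 frames per second the book's screen
            # runs at, that works out to roughly two shots per second at most.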
def die(self):
""" Destroy ship and end the game. """
self.game.end()
super(Ship, self).die()
class Missile(Collider):
# Load the image and sound file
image=games.load_image("missile.bmp")
sound=games.load_sound("missile.wav")
BUFFER=40 # Missile should have a constant buffer(distance from the ship that a new missile is created (so that the missile isn’t created on top of the ship)
VEOCITY_FACTOR=7 # VELOCITY_FACTOR affects how fast the missile travels
LIFETIME=40 # LIFETIME represents how long the missile exists before it disappears (so that a missile won’t float around the screen forever)
def __init__(self,ship_x,ship_y,ship_angle): # Everytime a missile gets created pass on the ship's positionn to find out where would be the missile
Missile.sound.play() # Play the sound as soon as a missile gets created
"""Where the missile is created depends upon where the ship is located, and how the missile travels depends upon the angle of the ship"""
angle=ship_angle*math.pi/180 # angle is in radians
buffer_x=Missile.BUFFER * math.sin(angle)
buffer_y=Missile.BUFFER * math.cos(angle)
x=ship_x+buffer_x
y=ship_y+buffer_y
# Calculate velocity of missile
dx=Missile.VEOCITY_FACTOR * math. sin(angle)
dy=Missile.VEOCITY_FACTOR * -math.cos(angle)
super(Missile,self).__init__(image=Missile.image, # class variable needs class name
x=x,y=y, # function variable in the same func do not need class name
dx=dx,dy=dy)
self.lifetime=Missile.LIFETIME
# Method for movement of missile
def update(self):
super(Missile, self).update()
# If the missile's lifetime finishes then destroy the missile
self.lifetime-=1 # decrease the count of lifetime
if self.lifetime==0:
self.destroy()
class Explosion(games.Animation):
""" Explosion animation. """
sound = games.load_sound("explosion.wav")
images = ["explosion1.bmp",
"explosion2.bmp",
"explosion3.bmp",
"explosion4.bmp",
"explosion5.bmp",
"explosion6.bmp",
"explosion7.bmp",
"explosion8.bmp",
"explosion9.bmp"]
def __init__(self, x, y):
super(Explosion, self).__init__(images = Explosion.images,
x = x,
y = y,
repeat_interval = 4,
n_repeats = 1,
is_collideable = False)
# I pass is_collideable the value False so that the explosion animation doesn’t count as a collision for other sprites that might happen to overlap it.
Explosion.sound.play()
class Game(object): #The Game itself could certainly be an object with methods like play() to start the game, advance() to move the game to the next level, and end() to end the game.
""" The game itself. """
def __init__(self):
""" Initialize Game object.
level is an attribute for the current game level number. sound is an attribute for the leveladvance sound effect. score is an attribute for the game score—it’s a Text object that appears in the upper-right corner of the screen. The object’s is_collideable property is False, which means that the score won’t register in any collisions—so the player’s ship won’t “crash into” the score and explode! Finally, ship is an attribute for the player’s ship."""
# set level
self.level = 0
# load sound for level advance
self.sound = games.load_sound("level.wav")
# create score
self.score = games.Text(value = 0,
size = 30,
color = color.white,
top = 5,
right = games.screen.width - 10,
is_collideable = False)
games.screen.add(self.score)
# create player's ship
self.ship = Ship(game = self,
x = games.screen.width/2,
y = games.screen.height/2)
games.screen.add(self.ship)
def play(self):
""" Play the game. """
# begin theme music
games.music.load("theme.mid")
games.music.play(-1) # -1 : Forever
# load and set background
nebula_image = games.load_image("nebula.jpg")
games.screen.background = nebula_image
# advance to level 1
self.advance()
# start play
games.screen.mainloop()
def advance(self):
""" Advance to the next game level. """
self.level += 1
"""Creating the new wave of asteroids. Each level starts with the number of asteroids equal to the level number. So, the first level starts with only one asteroid, the second with two, and so on. Now, creating a bunch of asteroids is easy, but I need to make sure that no new asteroid is created right on top of the ship. Otherwise, the ship will explode just as the new level begins."""
# amount of space around ship to preserve when creating asteroids
BUFFER = 150 #BUFFER is a constant for the amount of safe space needed around the ship. BUFFER=x_min+y_min
# create new asteroids
for i in range(self.level):
# calculate an x and y at least BUFFER distance from the ship
# choose minimum distance along x-axis and y-axis
x_min = random.randrange(BUFFER)# x_min is the minimum distance the new asteroid should be from the ship along the x-axis,
y_min = BUFFER - x_min # y_min is the minimum distance that the new asteroid should be from the ship alongthe y-axis
# choose distance along x-axis and y-axis based on minimum distance
x_distance = random.randrange(x_min, games.screen.width - x_min) # x_distance is the distance from the ship for the new asteroid along the x-axis, It is a randomly
#selected number that ensures that the new asteroid will be at least x_min distance from the ship
y_distance = random.randrange(y_min, games.screen.height - y_min) # y_distance is the distance from the ship for the new asteroid along the y-axis. It is a randomly #selected number that ensures that the new asteroid will be at least y_min distance from the ship
# calculate location based on distance
x = self.ship.x + x_distance #x is the x-coordinate for the new asteroid
y = self.ship.y + y_distance #y is the y-coordinate for the new asteroid | # wrap around screen, if necessary | random_line_split |
|
Ch12_Astrocrash_Game.py | ):
super(Collider, self).update()
if self.overlapping_sprites:
for sprite in self.overlapping_sprites:
sprite.die() # See its code above
self.die() # See its definition below
# Creating a die() method for the class, since all Collider objects will do the same thing when they die—create an explosion and destroy themselves
def die(self):
""" Destroy self and leave explosion behind. """
new_explosion = Explosion(x = self.x, y = self.y)
games.screen.add(new_explosion)
self.destroy()
class Asteroid(Wrapper):
#class CONSTANTS
SMALL=1
MEDIUM=2
LARGE=3
images={SMALL:games.load_image("asteroid_small.bmp"),
MEDIUM:games.load_image("asteroid_med.bmp"),
LARGE:games.load_image("asteroid_big.bmp")}
SPEED=2
SPAWN=2 #SPAWN is the number of new asteroids that an asteroid spawns when it’s destroyed
POINTS = 30 #The constant will act as a base value for the number of points an asteroid is worth. The actual point value will be modified according to the size of the #asteroid—smaller asteroids will be worth more than larger ones
total=0 # In order to change levels, the program needs to know when all of the asteroids on the current level are destroyed so keep track of the total number of asteroids with #a new class variable 'total'
# Create a dictionary for images
# Constructor of Asteroid should initialize x, y and size of asteroids
def __init__(self,game, x,y,size):
Asteroid.total += 1 # Increase total count of asteroids in a level
"""Based on size, the correct image for the new asteroid is retrieved and then passed along to Sprite’s constructor (since
Sprite is the superclass of Asteroid). The x and y values passed to Asteroid for the location of
the new space rock are also passed on to Sprite’s constructor."""
super(Asteroid,self).__init__(image=Asteroid.images[size],
x=x,
y=y,
dx=random.choice([1,-1])*Asteroid.SPEED*random.random()/size,
                    # velocity = random direction (+1 or -1) * SPEED * a random fraction / size, so larger asteroids drift more slowly
dy=random.choice([1,-1])*Asteroid.SPEED*random.random()/size)
self.game=game # An asteroid should be able to send the Game object a message, so I give each Asteroid object a reference to the Game object. I accept the Game object in #the Asteroid constructor by creating a new parameter
self.size=size # This is not in games.Sprite
def die(self):
Asteroid.total -= 1
self.game.score.value += int(Asteroid.POINTS / self.size)
self.game.score.right = games.screen.width - 10
if self.size!=Asteroid.SMALL:
for i in range(Asteroid.SPAWN):
new_asteroid=Asteroid(game=self.game,x=self.x,
y=self.y,
                                       size=self.size-1) # a large asteroid breaks into two medium ones, and a medium one into two small ones
games.screen.add(new_asteroid)
""" Toward the end of Asteroid’s die() method, I test Asteroid.total to see if all the asteroids have been destroyed. If so, the final asteroid invokes the Game object’s advance() method, which advances the game to the next level and creates a new group of asteroids."""
if Asteroid.total == 0:
self.game.advance()
super(Asteroid,self).die() # if size is small, medium or large, destroy each but for large and medium astroids two new medium and small asteroids are created
class Ship(Collider):
# Load the ship's Image and thrust sound file
image = games.load_image("ship.bmp")
sound = games.load_sound("thrust.wav")
ROTATION_STEP = 3
VELOCITY_STEP=.03 # Higher Number makes ship accelerate faster and lower number makes it accelerate slower
MISSILE_DELAY = 25 # represents the delay a player must wait between missile firings
VELOCITY_MAX = 3
# Constructor of the Ship
def __init__(self,game, x, y):
super(Ship,self).__init__(image=Ship.image,x=x, y=y)
self.game=game
self.missile_wait=0 # First time the ship will can fire missile without waiting but next time onwards it has a wait time=delay
# Move the ship
""" The player can press the up arrow key to engage the ship’s engine. This applies thrust to the ship in the direction the ship is facing. Since there’s no friction, the ship keeps moving based on all of the thrust the player applies to it.
When the player engages the ship’s engine, the code changes the velocity of the ship based on the ship’s angle (and produces an appropriate sound effect, too)"""
def update(self):
#print(self.missile_wait)
super(Ship, self).update()
""" Rotate based on keys pressed. """
if games.keyboard.is_pressed(games.K_LEFT):
self.angle -= Ship.ROTATION_STEP # Subtract 3 degrees
if games.keyboard.is_pressed(games.K_RIGHT):
self.angle += Ship.ROTATION_STEP # Add 3 degrees
if games.keyboard.is_pressed(games.K_UP):
Ship.sound.play()
#when the player presses the up arrow key, we need to alter the ship’s velocity components(the Ship object’s dx and dy).
            # First convert degrees to radians
angle=self.angle*math.pi/180
# Use sin=Perp/Hyp and cos=Base/Hyp
self.dx+=Ship.VELOCITY_STEP * math.sin(angle) # x is horizontal so use sin() to find dx(base); Ship's last position is retained to find out next position
self.dy+=Ship.VELOCITY_STEP * -math. cos(angle) # y is perp so use cos() to find dy (perp)
# cap velocity in each direction- I cap the ship’s speed to avoid several potential problems, including the ship running into its own missiles.
self.dx = min(max(self.dx, -Ship.VELOCITY_MAX), Ship.VELOCITY_MAX)
self.dy = min(max(self.dy, -Ship.VELOCITY_MAX), Ship.VELOCITY_MAX)
if self.missile_wait>0:
self.missile_wait-=1
"""Firing Missiles- allows the player to fire missiles by pressing the spacebar, the code below limits the missile fire rate by creating a countdown that forces a delay between missile firings. Once the countdown ends, the player is able to fire another missile."""
if games.keyboard.is_pressed(games.K_SPACE) and self.missile_wait==0:
new_missile=Missile(self.x, self.y, self.angle) # New Missile will have a x and y and an angle to be given to its constructor to calculate velocity etc.
games.screen.add(new_missile)
self.missile_wait=Ship.MISSILE_DELAY
def die(self):
""" Destroy ship and end the game. """
self.game.end()
super(Ship, self).die()
class Missile(Collider):
# Load the image and sound file
image=games.load_image("missile.bmp")
sound=games.load_sound("missile.wav")
BUFFER=40 # Missile should have a constant buffer(distance from the ship that a new missile is created (so that the missile isn’t created on top of the ship)
VEOCITY_FACTOR=7 # VELOCITY_FACTOR affects how fast the missile travels
LIFETIME=40 # LIFETIME represents how long the missile exists before it disappears (so that a missile won’t float around the screen forever)
def __init__(self,ship_x,ship_y,ship_angle): # Everytime a missile gets created pass on the ship's positionn to find out where would be the missile
Missile.sound.play() # Play the sound | def update(self):
super(Missile, self).update()
# If the missile's lifetime finishes then destroy the missile
self.lifetime-=1 # decrease the count of lifetime
if self.lifetime==0:
self.destroy()
class Explosion(games.Animation):
| as soon as a missile gets created
"""Where the missile is created depends upon where the ship is located, and how the missile travels depends upon the angle of the ship"""
angle=ship_angle*math.pi/180 # angle is in radians
buffer_x=Missile.BUFFER * math.sin(angle)
buffer_y=Missile.BUFFER * math.cos(angle)
x=ship_x+buffer_x
y=ship_y+buffer_y
# Calculate velocity of missile
dx=Missile.VEOCITY_FACTOR * math. sin(angle)
dy=Missile.VEOCITY_FACTOR * -math.cos(angle)
super(Missile,self).__init__(image=Missile.image, # class variable needs class name
x=x,y=y, # function variable in the same func do not need class name
dx=dx,dy=dy)
self.lifetime=Missile.LIFETIME
# Method for movement of missile
| identifier_body |
Ch12_Astrocrash_Game.py | ’s velocity components(the Ship object’s dx and dy).
            # First convert degrees to radians
angle=self.angle*math.pi/180
# Use sin=Perp/Hyp and cos=Base/Hyp
self.dx+=Ship.VELOCITY_STEP * math.sin(angle) # x is horizontal so use sin() to find dx(base); Ship's last position is retained to find out next position
self.dy+=Ship.VELOCITY_STEP * -math. cos(angle) # y is perp so use cos() to find dy (perp)
# cap velocity in each direction- I cap the ship’s speed to avoid several potential problems, including the ship running into its own missiles.
self.dx = min(max(self.dx, -Ship.VELOCITY_MAX), Ship.VELOCITY_MAX)
self.dy = min(max(self.dy, -Ship.VELOCITY_MAX), Ship.VELOCITY_MAX)
if self.missile_wait>0:
self.missile_wait-=1
"""Firing Missiles- allows the player to fire missiles by pressing the spacebar, the code below limits the missile fire rate by creating a countdown that forces a delay between missile firings. Once the countdown ends, the player is able to fire another missile."""
if games.keyboard.is_pressed(games.K_SPACE) and self.missile_wait==0:
new_missile=Missile(self.x, self.y, self.angle) # New Missile will have a x and y and an angle to be given to its constructor to calculate velocity etc.
games.screen.add(new_missile)
self.missile_wait=Ship.MISSILE_DELAY
def die(self):
""" Destroy ship and end the game. """
self.game.end()
super(Ship, self).die()
class Missile(Collider):
# Load the image and sound file
image=games.load_image("missile.bmp")
sound=games.load_sound("missile.wav")
BUFFER=40 # Missile should have a constant buffer(distance from the ship that a new missile is created (so that the missile isn’t created on top of the ship)
VEOCITY_FACTOR=7 # VELOCITY_FACTOR affects how fast the missile travels
LIFETIME=40 # LIFETIME represents how long the missile exists before it disappears (so that a missile won’t float around the screen forever)
def __init__(self,ship_x,ship_y,ship_angle): # Everytime a missile gets created pass on the ship's positionn to find out where would be the missile
Missile.sound.play() # Play the sound as soon as a missile gets created
"""Where the missile is created depends upon where the ship is located, and how the missile travels depends upon the angle of the ship"""
angle=ship_angle*math.pi/180 # angle is in radians
buffer_x=Missile.BUFFER * math.sin(angle)
buffer_y=Missile.BUFFER * math.cos(angle)
x=ship_x+buffer_x
y=ship_y+buffer_y
# Calculate velocity of missile
dx=Missile.VEOCITY_FACTOR * math. sin(angle)
dy=Missile.VEOCITY_FACTOR * -math.cos(angle)
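        # Illustrative worked example (added note, not in the book's listing):
        # with the ship at (320, 240) facing right (ship_angle = 90), angle ~= 1.571,
        # sin(angle) ~= 1.0 and cos(angle) ~= 0.0, so
        #     buffer_x ~= 40, buffer_y ~= 0  -> the missile appears near (360, 240)
        #     dx ~= 7, dy ~= 0               -> and travels right at 7 pixels per frame
        # BUFFER keeps the new missile from being created on top of the ship, so it
        # does not collide with its own launcher on the first frame.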
super(Missile,self).__init__(image=Missile.image, # class variable needs class name
x=x,y=y, # function variable in the same func do not need class name
dx=dx,dy=dy)
self.lifetime=Missile.LIFETIME
# Method for movement of missile
def update(self):
super(Missile, self).update()
# If the missile's lifetime finishes then destroy the missile
self.lifetime-=1 # decrease the count of lifetime
if self.lifetime==0:
self.destroy()
class Explosion(games.Animation):
""" Explosion animation. """
sound = games.load_sound("explosion.wav")
images = ["explosion1.bmp",
"explosion2.bmp",
"explosion3.bmp",
"explosion4.bmp",
"explosion5.bmp",
"explosion6.bmp",
"explosion7.bmp",
"explosion8.bmp",
"explosion9.bmp"]
def __init__(self, x, y):
super(Explosion, self).__init__(images = Explosion.images,
x = x,
y = y,
repeat_interval = 4,
n_repeats = 1,
is_collideable = False)
# I pass is_collideable the value False so that the explosion animation doesn’t count as a collision for other sprites that might happen to overlap it.
Explosion.sound.play()
class Game(object): #The Game itself could certainly be an object with methods like play() to start the game, advance() to move the game to the next level, and end() to end the game.
""" The game itself. """
def __init__(self):
""" Initialize Game object.
level is an attribute for the current game level number. sound is an attribute for the leveladvance sound effect. score is an attribute for the game score—it’s a Text object that appears in the upper-right corner of the screen. The object’s is_collideable property is False, which means that the score won’t register in any collisions—so the player’s ship won’t “crash into” the score and explode! Finally, ship is an attribute for the player’s ship."""
# set level
self.level = 0
# load sound for level advance
self.sound = games.load_sound("level.wav")
# create score
self.score = games.Text(value = 0,
size = 30,
color = color.white,
top = 5,
right = games.screen.width - 10,
is_collideable = False)
games.screen.add(self.score)
# create player's ship
self.ship = Ship(game = self,
x = games.screen.width/2,
y = games.screen.height/2)
games.screen.add(self.ship)
def play(self):
""" Play the game. """
# begin theme music
games.music.load("theme.mid")
games.music.play(-1) # -1 : Forever
# load and set background
nebula_image = games.load_image("nebula.jpg")
games.screen.background = nebula_image
# advance to level 1
self.advance()
# start play
games.screen.mainloop()
def advance(self):
""" Advance to the next game level. """
self.level += 1
"""Creating the new wave of asteroids. Each level starts with the number of asteroids equal to the level number. So, the first level starts with only one asteroid, the second with two, and so on. Now, creating a bunch of asteroids is easy, but I need to make sure that no new asteroid is created right on top of the ship. Otherwise, the ship will explode just as the new level begins."""
# amount of space around ship to preserve when creating asteroids
BUFFER = 150 #BUFFER is a constant for the amount of safe space needed around the ship. BUFFER=x_min+y_min
# create new asteroids
for i in range(self.level):
# calculate an x and y at least BUFFER distance from the ship
# choose minimum distance along x-axis and y-axis
x_min = random.randrange(BUFFER)# x_min is the minimum distance the new asteroid should be from the ship along the x-axis,
y_min = BUFFER - x_min # y_min is the minimum distance that the new asteroid should be from the ship alongthe y-axis
# choose distance along x-axis and y-axis based on minimum distance
x_distance = random.randrange(x_min, games.screen.width - x_min) # x_distance is the distance from the ship for the new asteroid along the x-axis, It is a randomly
#selected number that ensures that the new asteroid will be at least x_min distance from the ship
y_distance = random.randrange(y_min, games.screen.height - y_min) # y_distance is the distance from the ship for the new asteroid along the y-axis. It is a randomly #selected number that ensures that the new asteroid will be at least y_min distance from the ship
# calculate location based on distance
x = self.ship.x + x_distance #x is the x-coordinate for the new asteroid
y = self.ship.y + y_distance #y is the y-coordinate for the new asteroid
# wrap around screen, if necessary
x %= games.screen.width
y %= games.screen.height
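            # Illustrative worked example (added note, not in the book's listing):
            # with BUFFER = 150, a 640x480 screen and the ship at (320, 240), suppose
            # x_min comes out as 100, so y_min = 50. x_distance is then drawn from
            # 100..539 and y_distance from 50..429; say they are 500 and 300. That
            # gives x = (320 + 500) % 640 = 180 and y = (240 + 300) % 480 = 60 --
            # 140 pixels from the ship horizontally and 180 vertically, never closer
            # than x_min/y_min on either axis, so a new rock cannot appear on top of
            # the player even after wrapping.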
# create the asteroid
new_asteroid = Asteroid(game = self,x = x, y = y,size = Asteroid.LARGE)
games.screen.add(new_asteroid)
# display level number
level_message = games.Message(value = "Level " + str(self.level),
size = 40,
color = color.yellow,
x = games.screen.width/2,
y = games.screen.width/10,
lifetime = 3 * games.screen.fps,
is_collideable = False)
games.screen.add(level_message)
# play new level sound (except at first level)
if self.level > 1:
self.sound.play()
def end(self):
""" End the game. "" | "
# show 'Game | conditional_block |
|
Ch12_Astrocrash_Game.py | super(Collider, self).update()
if self.overlapping_sprites:
for sprite in self.overlapping_sprites:
sprite.die() # See its code above
self.die() # See its definition below
# Creating a die() method for the class, since all Collider objects will do the same thing when they die—create an explosion and destroy themselves
def die(self):
""" Destroy self and leave explosion behind. """
new_explosion = Explosion(x = self.x, y = self.y)
games.screen.add(new_explosion)
self.destroy()
class Asteroid(Wrapper):
#class CONSTANTS
SMALL=1
MEDIUM=2
LARGE=3
images={SMALL:games.load_image("asteroid_small.bmp"),
MEDIUM:games.load_image("asteroid_med.bmp"),
LARGE:games.load_image("asteroid_big.bmp")}
SPEED=2
SPAWN=2 #SPAWN is the number of new asteroids that an asteroid spawns when it’s destroyed
POINTS = 30 #The constant will act as a base value for the number of points an asteroid is worth. The actual point value will be modified according to the size of the #asteroid—smaller asteroids will be worth more than larger ones
total=0 # In order to change levels, the program needs to know when all of the asteroids on the current level are destroyed so keep track of the total number of asteroids with #a new class variable 'total'
# Create a dictionary for images
# Constructor of Asteroid should initialize x, y and size of asteroids
def __init__(self,game, x,y,size):
Asteroid.total += 1 # Increase total count of asteroids in a level
"""Based on size, the correct image for the new asteroid is retrieved and then passed along to Sprite’s constructor (since
Sprite is the superclass of Asteroid). The x and y values passed to Asteroid for the location of
the new space rock are also passed on to Sprite’s constructor."""
super(Asteroid,self).__init__(image=Asteroid.images[size],
x=x,
y=y,
dx=random.choice([1,-1])*Asteroid.SPEED*random.random()/size,
#velocity calculated as (1 or 0 or -1*SPEED*random no/size) as velocity can be either 0,+ or -ive
dy=random.choice([1,-1])*Asteroid.SPEED*random.random()/size)
self.game=game # An asteroid should be able to send the Game object a message, so I give each Asteroid object a reference to the Game object. I accept the Game object in #the Asteroid constructor by creating a new parameter
self.size=size # This is not in games.Sprite
def die(self):
Asteroid.total -= 1
self.game.score.value += int(Asteroid.POINTS / self.size)
self.game.score.right = games.screen.width - 10
if self.size!=Asteroid.SMALL:
for i in range(Asteroid.SPAWN):
new_asteroid=Asteroid(game=self.game,x=self.x,
y=self.y,
size=self.size-1) # Large will be redeveloped in 2 medium sized asteroids and Medium will be in two small asteroids
games.screen.add(new_asteroid)
""" Toward the end of Asteroid’s die() method, I test Asteroid.total to see if all the asteroids have been destroyed. If so, the final asteroid invokes the Game object’s advance() method, which advances the game to the next level and creates a new group of asteroids."""
if Asteroid.total == 0:
self.game.advance()
super(Asteroid,self).die() # if size is small, medium or large, destroy each but for large and medium astroids two new medium and small asteroids are created
class Ship(Collider):
# Load the ship's Image and thrust sound file
image = games.load_image("ship.bmp")
sound = games.load_sound("thrust.wav")
ROTATION_STEP = 3
VELOCITY_STEP=.03 # Higher Number makes ship accelerate faster and lower number makes it accelerate slower
MISSILE_DELAY = 25 # represents the delay a player must wait between missile firings
VELOCITY_MAX = 3
# Constructor of the Ship
def __init__(self,game, x, y):
super(Ship,self).__init__(image=Ship.image,x=x, y=y)
self.game=game
self.missile_wait=0 # First time the ship will can fire missile without waiting but next time onwards it has a wait time=delay
# Move the ship
""" The player can press the up arrow key to engage the ship’s engine. This applies thrust to the ship in the direction the ship is facing. Since there’s no friction, the ship keeps moving based on all of the thrust the player applies to it.
When the player engages the ship’s engine, the code changes the velocity of the ship based on the ship’s angle (and produces an appropriate sound effect, too)"""
def update(self):
#print(self.missile_wait)
super(Ship, self).update()
""" Rotate based on keys pressed. """
if games.keyboard.is_pressed(games.K_LEFT):
self.angle -= Ship.ROTATION_STEP # Subtract 3 degrees
if games.keyboard.is_pressed(games.K_RIGHT):
self.angle += Ship.ROTATION_STEP # Add 3 degrees
if games.keyboard.is_pressed(games.K_UP):
Ship.sound.play()
#when the player presses the up arrow key, we need to alter the ship’s velocity components(the Ship object’s dx and dy).
# First convert degrees tro radians
angle=self.angle*math.pi/180
# Use sin=Perp/Hyp and cos=Base/Hyp
self.dx+=Ship.VELOCITY_STEP * math.sin(angle) # x is horizontal so use sin() to find dx(base); Ship's last position is retained to find out next position
self.dy+=Ship.VELOCITY_STEP * -math. cos(angle) # y is perp so use cos() to find dy (perp)
# cap velocity in each direction- I cap the ship’s speed to avoid several potential problems, including the ship running into its own missiles.
self.dx = min(max(self.dx, -Ship.VELOCITY_MAX), Ship.VELOCITY_MAX)
self.dy = min(max(self.dy, -Ship.VELOCITY_MAX), Ship.VELOCITY_MAX)
if self.missile_wait>0:
self.missile_wait-=1
"""Firing Missiles- allows the player to fire missiles by pressing the spacebar, the code below limits the missile fire rate by creating a countdown that forces a delay between missile firings. Once the countdown ends, the player is able to fire another missile."""
if games.keyboard.is_pressed(games.K_SPACE) and self.missile_wait==0:
new_missile=Missile(self.x, self.y, self.angle) # New Missile will have a x and y and an angle to be given to its constructor to calculate velocity etc.
games.screen.add(new_missile)
self.missile_wait=Ship.MISSILE_DELAY
def die(self):
""" Destroy ship and end the game. """
self.game.end()
super(Ship, self).die()
class Missile(Collider):
# Load the image and sound file
image=games.load_image("missile.bmp")
sound=games.load_sound("missile.wav")
BUFFER=40 # Missile should have a constant buffer(distance from the ship that a new missile is created (so that the missile isn’t created on top of the ship)
VEOCITY_FACTOR=7 # VELOCITY_FACTOR affects how fast the missile travels
LIFETIME=40 # LIFETIME represents how long the missile exists before it disappears (so that a missile won’t float around the screen forever)
def __init__(self,ship_x,ship_y,ship_angle): # Everytime a missile gets created pass on the ship's positionn to find out where would be the missile
Missile.sound.play() # Play the sound as soon as a missile gets created
"""Where the missile is created depends upon where the ship is located, and how the missile travels depends upon the angle of the ship"""
angle=ship_angle*math.pi/180 # angle is in radians
buffer_x=Missile.BUFFER * math.sin(angle)
buffer_y=Missile.BUFFER * math.cos(angle)
x=ship_x+buffer_x
y=ship_y+buffer_y
# Calculate velocity of missile
dx=Missile.VEOCITY_FACTOR * math. sin(angle)
dy=Missile.VEOCITY_FACTOR * -math.cos(angle)
super(Missile,self).__init__(image=Missile.image, # class variable needs class name
x=x,y=y, # function variable in the same func do not need class name
dx=dx,dy=dy)
self.lifetime=Missile.LIFETIME
# Method for movement of missile
def update(self):
super(Missile, self).u | )
# If the missile's lifetime finishes then destroy the missile
self.lifetime-=1 # decrease the count of lifetime
if self.lifetime==0:
self.destroy()
class Explosion(games.Animation | pdate( | identifier_name |
command.rs | let mut cat = Command::new("cat")
.arg(&self.path)
.stdout(Stdio::piped())
.spawn().expect("fail to execute cat command");
let mut tr = Command::new("tr")
.arg("[:blank:]")
.arg(" ")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn().expect("fail to execute tr command");
// see https://www.reddit.com/r/rust/comments/3azfie/how_to_pipe_one_process_into_another/
if let Some(ref mut stdout) = cat.stdout {
if let Some(ref mut stdin) = tr.stdin {
let mut buf: Vec<u8> = Vec::new();
stdout.read_to_end(&mut buf).unwrap();
stdin.write_all(&buf).unwrap();
}
}
let res = tr.wait_with_output().unwrap().stdout;
String::from_utf8(res).expect("contain invalid utf-8 character")
}
/// preparation to ch02_12
pub fn extract_row(&self, n: usize) -> String {
let res = Command::new("cut")
.args(&["-f", &format!("{}", n + 1)]) // start at 0
.arg(&self.path)
.output().expect("fail to execute cut command");
String::from_utf8_lossy(&res.stdout).trim().to_string()
}
/// ch02.13 merge 2 files
pub fn merge<P: AsRef<Path>>(file1: &P, file2: &P)->String {
let res = Command::new("paste")
.args(&[file1.as_ref(), file2.as_ref()])
.output().expect("fail to execute paste command");
String::from_utf8_lossy(&res.stdout).trim().to_string()
}
/// helper for ch02. 14&15
fn take(&self, n: usize, pos: &str)->String {
let res = Command::new(pos)
.args(&["-n", format!("{}", n).as_str()])
.arg(&self.path)
.output().expect("fail to execute head command");
String::from_utf8_lossy(&res.stdout).trim().to_string()
}
/// ch02.14 `head -n ${file}`
pub fn head(&self, n: usize)->String {
self.take(n, "head")
}
/// ch02.15 `tail -n ${file}
pub fn tail(&self, n: usize)->String {
self.take(n, "tail")
}
/// ch02.16 split n files.
pub fn split<P: AsRef<Path>>(&self, n: usize, dst: &P) {
let size = self.count_lines().unwrap();
use ch02::util;
let lines = util::get_split_line_count(size, n);
debug!("split per {} lines", lines);
assert!(lines >0);
let res = Command::new("split")
.args(&["-l", &format!("{}", lines)])
.arg(&self.path) // src
.arg(dst.as_ref().to_str().unwrap()) // dst
.output()
.expect("fail to execute split command");
}
/// ch02.17 take unique items of first row.
pub fn uniq_first_row(&self)->String {
let cutf1 = Command::new("cut")
.args(&["-f", "1"])
.arg(&self.path)
.stdout(Stdio::piped())
.spawn().unwrap();
let sort = Command::new("sort")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn().unwrap();
// note that sort and uniq cannot be swapped.
// uniq command makes duplicated items in sequence single!
let mut uniq = Command::new("uniq")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn().unwrap();
let mut buf: Vec<u8> = Vec::new();
cutf1.stdout.unwrap().read_to_end(&mut buf).unwrap();
sort.stdin.unwrap().write_all(&buf).unwrap();
let mut buf: Vec<u8> = Vec::new();
sort.stdout.unwrap().read_to_end(&mut buf).unwrap();
if let Some(ref mut stdin) = uniq.stdin {
stdin.write_all(&buf).unwrap();
}
// wait_with_output(self) -> Result<Output>
let res = uniq.wait_with_output().unwrap().stdout;
String::from_utf8_lossy(&res).trim().to_string()
}
/// ch02.18 sort by third columns descendingly
/// that means `sort -r -k 3 ./data/ch02/hightemp.txt`
pub fn sort_in_descending(&self, key: usize)->String {
let res = Command::new("sort")
.arg("-r")
.args(&["-k", &format!("{}", key)])
.arg(&self.path)
.output().unwrap();
String::from_utf8_lossy(&res.stdout).trim().to_string()
}
}
#[cfg(test)]
mod tests {
use super::*;
extern crate env_logger;
extern crate getopts;
extern crate glob;
use self::getopts::Options;
/// env_logger output is controlled by RUST_LOG environmental variable
/// to debug only to this module, set `RUST_LOG=natural_lang::ch02::command=debug` in Environment variable.
/// before save file, confirm existance in file or create dir in fs::create_dir method.
/// create_dir method is equivalent to `mkdir -p` in unix command
#[test]
fn test_prepare() {
use std::fs;
env_logger::init().unwrap();
let save_path = Path::new("./data/ch02/hightemp.txt");
// Success or not, ignore result
// see also https://github.com/rust-lang/rust/pull/11754#issuecomment-33202664
let _ = fs::create_dir(save_path.parent().unwrap());
let commander = Commander::new(save_path);
commander.prepare();
assert!(save_path.exists())
}
#[test]
fn test_count_lines() {
let save_path = Path::new("./data/ch02/hightemp.txt");
let commander = Commander::new(save_path);
assert_eq!(commander.count_lines().unwrap(), 24);
}
#[test]
fn test_replace_tab_to_space() {
let save_path = Path::new("./data/ch02/hightemp.txt");
let commander = Commander::new(save_path);
let res = commander.replace_tab_to_space();
assert_eq!(
res.lines().take(1).collect::<String>(),
"高知県 江川崎 41 2013-08-12"
)
}
#[test]
fn test_extract_row() {
let load_path = Path::new("./data/ch02/hightemp.txt");
let commander = Commander::new(load_path);
assert_eq!(
commander.extract_row(0).lines().next().unwrap(), // take first line
"高知県"
);
}
#[test]
fn test_merge() {
let load_path = Path::new("./data/ch02/hightemp.txt");
let parent = load_path.parent().unwrap(); | debug!("{:?}", res);
assert_eq!(
(&mut res.lines()).next().unwrap(),
"高知県\t江川崎"
)
}
fn print_usage(program: &str, opts: Options) {
let brief = format!("Usage: {} FILE [options]", program);
print!("{}", opts.usage(&brief));
}
/// with cargo test -- [<OPTIONS>], there seems to be panicked at '"Unrecognized option: \'n\'."'
/// so set args directly instead of using env::args()
#[test]
fn test_head() {
// let args = env::args()::collect::<Vec<String>>();
let args = vec!["program", "-n", "5", "./data/ch02/hightemp.txt"];
let program = args[0].clone();
let mut opts = Options::new();
opts.optopt("n", "num", "set first ${num} rows", "NUMBER");
opts.optflag("h", "help", "print this help menu");
let matches = opts.parse(&args[1..]).unwrap();
if matches.opt_present("h") {
print_usage(&program, opts);
return;
}
let n = matches
.opt_str("n")
.expect("invalid number")
.parse::<usize>()
.unwrap();
let input = matches.free.first().unwrap();
let commander = Commander::new(input);
let res = commander.head(n);
assert_eq!(
res,
"高知県\t江川崎\t41\t2013-08-12\n埼玉県\t熊谷\t40.9\t2007-08-16\n |
let file1 = parent.join("col1.txt");
let file2 = parent.join("col2.txt");
let res = Commander::merge(&file1, &file2); | random_line_split |
command.rs | let mut cat = Command::new("cat")
.arg(&self.path)
.stdout(Stdio::piped())
.spawn().expect("fail to execute cat command");
let mut tr = Command::new("tr")
.arg("[:blank:]")
.arg(" ")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn().expect("fail to execute tr command");
// see https://www.reddit.com/r/rust/comments/3azfie/how_to_pipe_one_process_into_another/
if let Some(ref mut stdout) = cat.stdout |
let res = tr.wait_with_output().unwrap().stdout;
String::from_utf8(res).expect("contain invalid utf-8 character")
}
/// preparation to ch02_12
pub fn extract_row(&self, n: usize) -> String {
let res = Command::new("cut")
.args(&["-f", &format!("{}", n + 1)]) // start at 0
.arg(&self.path)
.output().expect("fail to execute cut command");
String::from_utf8_lossy(&res.stdout).trim().to_string()
}
/// ch02.13 merge 2 files
pub fn merge<P: AsRef<Path>>(file1: &P, file2: &P)->String {
let res = Command::new("paste")
.args(&[file1.as_ref(), file2.as_ref()])
.output().expect("fail to execute paste command");
String::from_utf8_lossy(&res.stdout).trim().to_string()
}
/// helper for ch02. 14&15
fn take(&self, n: usize, pos: &str)->String {
let res = Command::new(pos)
.args(&["-n", format!("{}", n).as_str()])
.arg(&self.path)
.output().expect("fail to execute head command");
String::from_utf8_lossy(&res.stdout).trim().to_string()
}
/// ch02.14 `head -n ${file}`
pub fn head(&self, n: usize)->String {
self.take(n, "head")
}
/// ch02.15 `tail -n ${file}
pub fn tail(&self, n: usize)->String {
self.take(n, "tail")
}
/// ch02.16 split n files.
pub fn split<P: AsRef<Path>>(&self, n: usize, dst: &P) {
let size = self.count_lines().unwrap();
use ch02::util;
let lines = util::get_split_line_count(size, n);
debug!("split per {} lines", lines);
assert!(lines >0);
let res = Command::new("split")
.args(&["-l", &format!("{}", lines)])
.arg(&self.path) // src
.arg(dst.as_ref().to_str().unwrap()) // dst
.output()
.expect("fail to execute split command");
}
/// ch02.17 take unique items of first row.
pub fn uniq_first_row(&self)->String {
let cutf1 = Command::new("cut")
.args(&["-f", "1"])
.arg(&self.path)
.stdout(Stdio::piped())
.spawn().unwrap();
let sort = Command::new("sort")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn().unwrap();
// note that sort and uniq cannot be swapped.
// uniq command makes duplicated items in sequence single!
let mut uniq = Command::new("uniq")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn().unwrap();
let mut buf: Vec<u8> = Vec::new();
cutf1.stdout.unwrap().read_to_end(&mut buf).unwrap();
sort.stdin.unwrap().write_all(&buf).unwrap();
let mut buf: Vec<u8> = Vec::new();
sort.stdout.unwrap().read_to_end(&mut buf).unwrap();
if let Some(ref mut stdin) = uniq.stdin {
stdin.write_all(&buf).unwrap();
}
// wait_with_output(self) -> Result<Output>
let res = uniq.wait_with_output().unwrap().stdout;
String::from_utf8_lossy(&res).trim().to_string()
}
/// ch02.18 sort by third columns descendingly
/// that means `sort -r -k 3 ./data/ch02/hightemp.txt`
pub fn sort_in_descending(&self, key: usize)->String {
let res = Command::new("sort")
.arg("-r")
.args(&["-k", &format!("{}", key)])
.arg(&self.path)
.output().unwrap();
String::from_utf8_lossy(&res.stdout).trim().to_string()
}
}
#[cfg(test)]
mod tests {
use super::*;
extern crate env_logger;
extern crate getopts;
extern crate glob;
use self::getopts::Options;
/// env_logger output is controlled by RUST_LOG environmental variable
/// to debug only to this module, set `RUST_LOG=natural_lang::ch02::command=debug` in Environment variable.
/// before save file, confirm existance in file or create dir in fs::create_dir method.
/// create_dir method is equivalent to `mkdir -p` in unix command
#[test]
fn test_prepare() {
use std::fs;
env_logger::init().unwrap();
let save_path = Path::new("./data/ch02/hightemp.txt");
// Success or not, ignore result
// see also https://github.com/rust-lang/rust/pull/11754#issuecomment-33202664
let _ = fs::create_dir(save_path.parent().unwrap());
let commander = Commander::new(save_path);
commander.prepare();
assert!(save_path.exists())
}
#[test]
fn test_count_lines() {
let save_path = Path::new("./data/ch02/hightemp.txt");
let commander = Commander::new(save_path);
assert_eq!(commander.count_lines().unwrap(), 24);
}
#[test]
fn test_replace_tab_to_space() {
let save_path = Path::new("./data/ch02/hightemp.txt");
let commander = Commander::new(save_path);
let res = commander.replace_tab_to_space();
assert_eq!(
res.lines().take(1).collect::<String>(),
"高知県 江川崎 41 2013-08-12"
)
}
#[test]
fn test_extract_row() {
let load_path = Path::new("./data/ch02/hightemp.txt");
let commander = Commander::new(load_path);
assert_eq!(
commander.extract_row(0).lines().next().unwrap(), // take first line
"高知県"
);
}
#[test]
fn test_merge() {
let load_path = Path::new("./data/ch02/hightemp.txt");
let parent = load_path.parent().unwrap();
let file1 = parent.join("col1.txt");
let file2 = parent.join("col2.txt");
let res = Commander::merge(&file1, &file2);
debug!("{:?}", res);
assert_eq!(
(&mut res.lines()).next().unwrap(),
"高知県\t江川崎"
)
}
fn print_usage(program: &str, opts: Options) {
let brief = format!("Usage: {} FILE [options]", program);
print!("{}", opts.usage(&brief));
}
/// with cargo test -- [<OPTIONS>], there seems to be panicked at '"Unrecognized option: \'n\'."'
/// so set args directly instead of using env::args()
#[test]
fn test_head() {
// let args = env::args()::collect::<Vec<String>>();
let args = vec!["program", "-n", "5", "./data/ch02/hightemp.txt"];
let program = args[0].clone();
let mut opts = Options::new();
opts.optopt("n", "num", "set first ${num} rows", "NUMBER");
opts.optflag("h", "help", "print this help menu");
let matches = opts.parse(&args[1..]).unwrap();
if matches.opt_present("h") {
print_usage(&program, opts);
return;
}
let n = matches
.opt_str("n")
.expect("invalid number")
.parse::<usize>()
.unwrap();
let input = matches.free.first().unwrap();
let commander = Commander::new(input);
let res = commander.head(n);
assert_eq!(
res,
"高知県\t江川崎\t41\t2013-08-12\n埼玉県\t熊谷\t40.9\t2007-08-16 | {
if let Some(ref mut stdin) = tr.stdin {
let mut buf: Vec<u8> = Vec::new();
stdout.read_to_end(&mut buf).unwrap();
stdin.write_all(&buf).unwrap();
}
} | conditional_block |
command.rs | fie/how_to_pipe_one_process_into_another/
if let Some(ref mut stdout) = cat.stdout {
if let Some(ref mut stdin) = tr.stdin {
let mut buf: Vec<u8> = Vec::new();
stdout.read_to_end(&mut buf).unwrap();
stdin.write_all(&buf).unwrap();
}
}
let res = tr.wait_with_output().unwrap().stdout;
String::from_utf8(res).expect("contain invalid utf-8 character")
}
/// preparation to ch02_12
pub fn extract_row(&self, n: usize) -> String {
let res = Command::new("cut")
.args(&["-f", &format!("{}", n + 1)]) // start at 0
.arg(&self.path)
.output().expect("fail to execute cut command");
String::from_utf8_lossy(&res.stdout).trim().to_string()
}
/// ch02.13 merge 2 files
pub fn merge<P: AsRef<Path>>(file1: &P, file2: &P)->String {
let res = Command::new("paste")
.args(&[file1.as_ref(), file2.as_ref()])
.output().expect("fail to execute paste command");
String::from_utf8_lossy(&res.stdout).trim().to_string()
}
/// helper for ch02. 14&15
fn take(&self, n: usize, pos: &str)->String {
let res = Command::new(pos)
.args(&["-n", format!("{}", n).as_str()])
.arg(&self.path)
.output().expect("fail to execute head command");
String::from_utf8_lossy(&res.stdout).trim().to_string()
}
/// ch02.14 `head -n ${file}`
pub fn head(&self, n: usize)->String {
self.take(n, "head")
}
/// ch02.15 `tail -n ${file}
pub fn tail(&self, n: usize)->String {
self.take(n, "tail")
}
/// ch02.16 split n files.
pub fn split<P: AsRef<Path>>(&self, n: usize, dst: &P) {
let size = self.count_lines().unwrap();
use ch02::util;
let lines = util::get_split_line_count(size, n);
debug!("split per {} lines", lines);
assert!(lines >0);
let res = Command::new("split")
.args(&["-l", &format!("{}", lines)])
.arg(&self.path) // src
.arg(dst.as_ref().to_str().unwrap()) // dst
.output()
.expect("fail to execute split command");
}
/// ch02.17 take unique items of first row.
pub fn uniq_first_row(&self)->String {
let cutf1 = Command::new("cut")
.args(&["-f", "1"])
.arg(&self.path)
.stdout(Stdio::piped())
.spawn().unwrap();
let sort = Command::new("sort")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn().unwrap();
// note that sort and uniq cannot be swapped.
// uniq command makes duplicated items in sequence single!
let mut uniq = Command::new("uniq")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn().unwrap();
let mut buf: Vec<u8> = Vec::new();
cutf1.stdout.unwrap().read_to_end(&mut buf).unwrap();
sort.stdin.unwrap().write_all(&buf).unwrap();
let mut buf: Vec<u8> = Vec::new();
sort.stdout.unwrap().read_to_end(&mut buf).unwrap();
if let Some(ref mut stdin) = uniq.stdin {
stdin.write_all(&buf).unwrap();
}
// wait_with_output(self) -> Result<Output>
let res = uniq.wait_with_output().unwrap().stdout;
String::from_utf8_lossy(&res).trim().to_string()
}
/// ch02.18 sort by third columns descendingly
/// that means `sort -r -k 3 ./data/ch02/hightemp.txt`
pub fn sort_in_descending(&self, key: usize)->String {
let res = Command::new("sort")
.arg("-r")
.args(&["-k", &format!("{}", key)])
.arg(&self.path)
.output().unwrap();
String::from_utf8_lossy(&res.stdout).trim().to_string()
}
}
#[cfg(test)]
mod tests {
use super::*;
extern crate env_logger;
extern crate getopts;
extern crate glob;
use self::getopts::Options;
/// env_logger output is controlled by RUST_LOG environmental variable
/// to debug only to this module, set `RUST_LOG=natural_lang::ch02::command=debug` in Environment variable.
/// before save file, confirm existance in file or create dir in fs::create_dir method.
/// create_dir method is equivalent to `mkdir -p` in unix command
#[test]
fn test_prepare() {
use std::fs;
env_logger::init().unwrap();
let save_path = Path::new("./data/ch02/hightemp.txt");
// Success or not, ignore result
// see also https://github.com/rust-lang/rust/pull/11754#issuecomment-33202664
let _ = fs::create_dir(save_path.parent().unwrap());
let commander = Commander::new(save_path);
commander.prepare();
assert!(save_path.exists())
}
#[test]
fn test_count_lines() {
let save_path = Path::new("./data/ch02/hightemp.txt");
let commander = Commander::new(save_path);
assert_eq!(commander.count_lines().unwrap(), 24);
}
#[test]
fn test_replace_tab_to_space() {
let save_path = Path::new("./data/ch02/hightemp.txt");
let commander = Commander::new(save_path);
let res = commander.replace_tab_to_space();
assert_eq!(
res.lines().take(1).collect::<String>(),
"高知県 江川崎 41 2013-08-12"
)
}
#[test]
fn test_extract_row() {
let load_path = Path::new("./data/ch02/hightemp.txt");
let commander = Commander::new(load_path);
assert_eq!(
commander.extract_row(0).lines().next().unwrap(), // take first line
"高知県"
);
}
#[test]
fn test_merge() {
let load_path = Path::new("./data/ch02/hightemp.txt");
let parent = load_path.parent().unwrap();
let file1 = parent.join("col1.txt");
let file2 = parent.join("col2.txt");
let res = Commander::merge(&file1, &file2);
debug!("{:?}", res);
assert_eq!(
(&mut res.lines()).next().unwrap(),
"高知県\t江川崎"
)
}
fn print_usage(program: &str, opts: Options) {
let brief = format!("Usage: {} FILE [options]", program);
print!("{}", opts.usage(&brief));
}
/// with cargo test -- [<OPTIONS>], there seems to be panicked at '"Unrecognized option: \'n\'."'
/// so set args directly instead of using env::args()
#[test]
fn test_head() {
// let args = env::args()::collect::<Vec<String>>();
let args = vec!["program", "-n", "5", "./data/ch02/hightemp.txt"];
let program = args[0].clone();
let mut opts = Options::new();
opts.optopt("n", "num", "set first ${num} rows", "NUMBER");
opts.optflag("h", "help", "print this help menu");
let matches = opts.parse(&args[1..]).unwrap();
if matches.opt_present("h") {
print_usage(&program, opts);
return;
}
let n = matches
.opt_str("n")
.expect("invalid number")
.parse::<usize>()
.unwrap();
let input = matches.free.first().unwrap();
let commander = Commander::new(input);
let res = commander.head(n);
assert_eq!(
res,
"高知県\t江川崎\t41\t2013-08-12\n埼玉県\t熊谷\t40.9\t2007-08-16\n\
岐阜県\t多治見\t40.9\t2007-08-16\n山形県\t山形\t40.8\t1933-07-25\n\
山梨県\t甲府\t40.7\t2013-08-10"
);
}
#[test]
fn test_tail() {
// let args = env::args()::collect::<Vec<String>>();
l | et args = | identifier_name |
|
command.rs | let mut cat = Command::new("cat")
.arg(&self.path)
.stdout(Stdio::piped())
.spawn().expect("fail to execute cat command");
let mut tr = Command::new("tr")
.arg("[:blank:]")
.arg(" ")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn().expect("fail to execute tr command");
// see https://www.reddit.com/r/rust/comments/3azfie/how_to_pipe_one_process_into_another/
if let Some(ref mut stdout) = cat.stdout {
if let Some(ref mut stdin) = tr.stdin {
let mut buf: Vec<u8> = Vec::new();
stdout.read_to_end(&mut buf).unwrap();
stdin.write_all(&buf).unwrap();
}
}
let res = tr.wait_with_output().unwrap().stdout;
String::from_utf8(res).expect("contain invalid utf-8 character")
}
/// preparation to ch02_12
pub fn extract_row(&self, n: usize) -> String {
let res = Command::new("cut")
.args(&["-f", &format!("{}", n + 1)]) // start at 0
.arg(&self.path)
.output().expect("fail to execute cut command");
String::from_utf8_lossy(&res.stdout).trim().to_string()
}
/// ch02.13 merge 2 files
pub fn merge<P: AsRef<Path>>(file1: &P, file2: &P)->String |
/// helper for ch02. 14&15
fn take(&self, n: usize, pos: &str)->String {
let res = Command::new(pos)
.args(&["-n", format!("{}", n).as_str()])
.arg(&self.path)
.output().expect("fail to execute head command");
String::from_utf8_lossy(&res.stdout).trim().to_string()
}
/// ch02.14 `head -n ${file}`
pub fn head(&self, n: usize)->String {
self.take(n, "head")
}
/// ch02.15 `tail -n ${file}
pub fn tail(&self, n: usize)->String {
self.take(n, "tail")
}
/// ch02.16 split n files.
pub fn split<P: AsRef<Path>>(&self, n: usize, dst: &P) {
let size = self.count_lines().unwrap();
use ch02::util;
let lines = util::get_split_line_count(size, n);
debug!("split per {} lines", lines);
assert!(lines >0);
let res = Command::new("split")
.args(&["-l", &format!("{}", lines)])
.arg(&self.path) // src
.arg(dst.as_ref().to_str().unwrap()) // dst
.output()
.expect("fail to execute split command");
}
/// ch02.17 take unique items of first row.
pub fn uniq_first_row(&self)->String {
let cutf1 = Command::new("cut")
.args(&["-f", "1"])
.arg(&self.path)
.stdout(Stdio::piped())
.spawn().unwrap();
let sort = Command::new("sort")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn().unwrap();
// note that sort and uniq cannot be swapped.
// uniq command makes duplicated items in sequence single!
let mut uniq = Command::new("uniq")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn().unwrap();
let mut buf: Vec<u8> = Vec::new();
cutf1.stdout.unwrap().read_to_end(&mut buf).unwrap();
sort.stdin.unwrap().write_all(&buf).unwrap();
let mut buf: Vec<u8> = Vec::new();
sort.stdout.unwrap().read_to_end(&mut buf).unwrap();
if let Some(ref mut stdin) = uniq.stdin {
stdin.write_all(&buf).unwrap();
}
// wait_with_output(self) -> Result<Output>
let res = uniq.wait_with_output().unwrap().stdout;
String::from_utf8_lossy(&res).trim().to_string()
}
/// ch02.18 sort by third columns descendingly
/// that means `sort -r -k 3 ./data/ch02/hightemp.txt`
pub fn sort_in_descending(&self, key: usize)->String {
let res = Command::new("sort")
.arg("-r")
.args(&["-k", &format!("{}", key)])
.arg(&self.path)
.output().unwrap();
String::from_utf8_lossy(&res.stdout).trim().to_string()
}
}
#[cfg(test)]
mod tests {
use super::*;
extern crate env_logger;
extern crate getopts;
extern crate glob;
use self::getopts::Options;
/// env_logger output is controlled by RUST_LOG environmental variable
/// to debug only to this module, set `RUST_LOG=natural_lang::ch02::command=debug` in Environment variable.
/// before save file, confirm existance in file or create dir in fs::create_dir method.
/// create_dir method is equivalent to `mkdir -p` in unix command
#[test]
fn test_prepare() {
use std::fs;
env_logger::init().unwrap();
let save_path = Path::new("./data/ch02/hightemp.txt");
// Success or not, ignore result
// see also https://github.com/rust-lang/rust/pull/11754#issuecomment-33202664
let _ = fs::create_dir(save_path.parent().unwrap());
let commander = Commander::new(save_path);
commander.prepare();
assert!(save_path.exists())
}
#[test]
fn test_count_lines() {
let save_path = Path::new("./data/ch02/hightemp.txt");
let commander = Commander::new(save_path);
assert_eq!(commander.count_lines().unwrap(), 24);
}
#[test]
fn test_replace_tab_to_space() {
let save_path = Path::new("./data/ch02/hightemp.txt");
let commander = Commander::new(save_path);
let res = commander.replace_tab_to_space();
assert_eq!(
res.lines().take(1).collect::<String>(),
"高知県 江川崎 41 2013-08-12"
)
}
#[test]
fn test_extract_row() {
let load_path = Path::new("./data/ch02/hightemp.txt");
let commander = Commander::new(load_path);
assert_eq!(
commander.extract_row(0).lines().next().unwrap(), // take first line
"高知県"
);
}
#[test]
fn test_merge() {
let load_path = Path::new("./data/ch02/hightemp.txt");
let parent = load_path.parent().unwrap();
let file1 = parent.join("col1.txt");
let file2 = parent.join("col2.txt");
let res = Commander::merge(&file1, &file2);
debug!("{:?}", res);
assert_eq!(
(&mut res.lines()).next().unwrap(),
"高知県\t江川崎"
)
}
fn print_usage(program: &str, opts: Options) {
let brief = format!("Usage: {} FILE [options]", program);
print!("{}", opts.usage(&brief));
}
/// with cargo test -- [<OPTIONS>], there seems to be panicked at '"Unrecognized option: \'n\'."'
/// so set args directly instead of using env::args()
#[test]
fn test_head() {
// let args = env::args()::collect::<Vec<String>>();
let args = vec!["program", "-n", "5", "./data/ch02/hightemp.txt"];
let program = args[0].clone();
let mut opts = Options::new();
opts.optopt("n", "num", "set first ${num} rows", "NUMBER");
opts.optflag("h", "help", "print this help menu");
let matches = opts.parse(&args[1..]).unwrap();
if matches.opt_present("h") {
print_usage(&program, opts);
return;
}
let n = matches
.opt_str("n")
.expect("invalid number")
.parse::<usize>()
.unwrap();
let input = matches.free.first().unwrap();
let commander = Commander::new(input);
let res = commander.head(n);
assert_eq!(
res,
"高知県\t江川崎\t41\t2013-08-12\n埼玉県\t熊谷\t40.9\t2007-08-16 | {
let res = Command::new("paste")
.args(&[file1.as_ref(), file2.as_ref()])
.output().expect("fail to execute paste command");
String::from_utf8_lossy(&res.stdout).trim().to_string()
} | identifier_body |
lib.rs |
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use libp2prs_core::PeerId;
use libp2prs_runtime::task;
use libp2prs_swarm::Control;
use serde::{Deserialize, Serialize};
use std::str::FromStr;
use tide::http::mime;
use tide::{Body, Request, Response, Server};
#[macro_use]
extern crate lazy_static;
lazy_static! {
static ref NON_PARAM_ROUTE: Vec<String> = {
vec![
"".to_string(),
"/recv".to_string(),
"/send".to_string(),
"/peer".to_string(),
"/connection".to_string(),
]
};
static ref PARAM_ROUTE: Vec<String> = vec!["/peer/_".to_string(), "/protocol?protocol_id=_".to_string()];
}
/// Response, message contains error info if statusCode isn't 200.
#[derive(Serialize, Deserialize)]
struct ResponseBody {
status: i64,
message: String,
result: Vec<String>,
}
/// Tide server
pub struct InfoServer {
monitor: Server<Control>,
// map: HashMap<String, IRouteHandler>,
}
/// Save package count&size
#[derive(Serialize, Deserialize)]
struct PackageInfo {
package_count: usize,
package_bytes: usize,
}
/// Save package count&size by peer_id or protocol_id
#[derive(Serialize, Deserialize)]
struct SpecInfo {
package_in: usize,
package_out: usize,
}
/// A struct that deserialize protocol_id.
#[derive(Serialize, Deserialize, Debug)]
struct Protocol {
protocol_id: String,
}
/// A struct that deserialize peer_id.
#[derive(Serialize, Deserialize, Debug)]
struct Peer {
count: usize,
}
/// Save data from network_info.
#[derive(Serialize, Deserialize, Debug)]
struct NetworkConnectionStatus {
/// The total number of connections, both established and pending.
num_connections: usize,
/// The total number of pending connections, both incoming and outgoing.
num_connections_pending: usize,
/// The total number of established connections.
num_connections_established: usize,
/// The total number of active sub streams.
num_active_streams: usize,
/// The information of all established connections.
connection_info: Vec<NetworkConnectionInfo>,
}
/// A struct that save connection info.
#[derive(Serialize, Deserialize, Debug)]
struct NetworkConnectionInfo {
la: Vec<u8>,
ra: Vec<u8>,
local_peer_id: String,
remote_peer_id: String,
num_inbound_streams: usize,
num_outbound_streams: usize,
}
impl InfoServer {
pub fn new(control: Control) -> Self {
let mut monitor = tide::with_state(control);
monitor.at("").get(get_all);
monitor.at("/recv").get(get_recv_pkg);
monitor.at("/send").get(get_sent_pkg);
monitor.at("/protocol").get(get_protocol_info);
monitor.at("/peer").get(get_peer_count).at("/:peer_id").get(get_peer_info);
monitor.at("/connection").get(get_connection_info);
InfoServer { monitor }
}
pub fn start(self, addr: String) {
task::spawn(async move {
let r = self.monitor.listen(addr).await;
log::info!("Info server started result={:?}", r);
});
}
}
/// Return route list
async fn | (req: Request<Control>) -> tide::Result {
let addr = req.local_addr().unwrap();
let mut available = "<h3>Available Endpoints:</h3></br>".to_string();
for item in NON_PARAM_ROUTE.iter() {
let route = addr.to_owned() + item;
available = available + &format!("<a href=//{}>{}</a></br>", route, route);
}
let mut argument = "<h3>Endpoints that require arguments:</h3></br>".to_string();
for item in PARAM_ROUTE.iter() {
let route = addr.to_owned() + item;
argument += &format!("<a href=//{}>{}</a></br>", route, route);
}
let res_body =
"<head><link rel=\"icon\" href=\"data:;base64,=\"></head>".to_string() + "<body>" + &available + &argument + "</body>";
let response = Response::builder(200).content_type(mime::HTML).body(res_body).build();
Ok(response)
}
/// Get peer count
async fn get_peer_count(req: Request<Control>) -> tide::Result {
let mut control = req.state().clone();
let network_info = control.retrieve_networkinfo().await.map_err(|e| {
log::error!("{:?}", e);
tide::Error::new(500, e)
})?;
let peer = serde_json::to_string(&Peer {
count: network_info.num_peers,
})
.unwrap();
let result_body = Body::from_json(&ResponseBody {
status: 0,
message: "".to_string(),
result: vec![peer],
})?;
let response = Response::builder(200).body(result_body).build();
Ok(response)
}
/// Get connection info
async fn get_connection_info(req: Request<Control>) -> tide::Result {
let mut control = req.state().clone();
let network_info = control.retrieve_networkinfo().await.map_err(|e| {
log::error!("{:?}", e);
tide::Error::new(500, e)
})?;
let cis = control.dump_connections(None).await.map_err(|e| {
log::error!("{:?}", e);
tide::Error::new(500, e)
})?;
let mut connection_info = Vec::new();
for item in cis {
let info = NetworkConnectionInfo {
la: item.info.la.to_vec(),
ra: item.info.ra.to_vec(),
local_peer_id: item.info.local_peer_id.to_string(),
remote_peer_id: item.info.remote_peer_id.to_string(),
num_inbound_streams: item.info.num_inbound_streams,
num_outbound_streams: item.info.num_outbound_streams,
};
connection_info.push(info);
}
let network_connection_status = NetworkConnectionStatus {
num_connections: network_info.num_connections,
num_connections_pending: network_info.num_connections_pending,
num_connections_established: network_info.num_connections_established,
num_active_streams: network_info.num_active_streams,
connection_info,
};
let result_body = Body::from_json(&ResponseBody {
status: 0,
message: "".to_string(),
result: vec![serde_json::to_string(&network_connection_status).unwrap()],
})?;
let response = Response::builder(200).body(result_body).build();
Ok(response)
}
/// Get received package counts and bytes
async fn get_recv_pkg(req: Request<Control>) -> tide::Result {
let (package_count, package_bytes) = req.state().get_recv_count_and_size();
let package = PackageInfo {
package_count,
package_bytes,
};
let result_body = Body::from_json(&package)?;
let response = Response::builder(200).body(result_body).build();
Ok(response)
}
/// Get sent package counts and bytes
async fn get_sent_pkg(req: Request<Control>) -> tide::Result {
let (package_count, package_bytes) = req.state().get_sent_count_and_size();
let package = PackageInfo {
package_count,
package_bytes,
};
let result_body = Body::from_json(&ResponseBody {
status: 0,
message: "".to_string(),
result: vec![serde_json::to_string(&package).unwrap()],
})?;
let response = Response::builder(200).body(result_body).build();
Ok(response)
}
/// Get sent&received package bytes by protocol_id
async fn get_protocol_info(req: Request<Control>) -> tide::Result {
let protocol: Protocol = req.query()?;
let (receive, send) = req.state().get_protocol_in_and_out(&protocol.protocol_id);
let mut spec_info = SpecInfo {
package_in: 0,
package_out: 0,
};
if let Some(value) = receive {
spec_info.package_in = value
}
if let Some(value) = send {
spec_info.package_out = value
}
let result_body = Body::from_json(&ResponseBody {
status: 0,
message: "".to_string(),
result: vec![serde_json::to_string(&spec_info).unwrap()],
})?;
let response = Response::builder(200).body(result_body).build();
Ok(response)
}
/// Get sent&received package bytes by peer_id
async fn get_peer_info(req: Request<Control>) -> tide::Result {
let peer = req.param("peer_id")?;
let peer_id = match PeerId::from_str(peer) {
Ok(info) => info,
Err(e | get_all | identifier_name |
lib.rs |
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use libp2prs_core::PeerId;
use libp2prs_runtime::task;
use libp2prs_swarm::Control;
use serde::{Deserialize, Serialize};
use std::str::FromStr;
use tide::http::mime;
use tide::{Body, Request, Response, Server};
#[macro_use]
extern crate lazy_static;
lazy_static! {
static ref NON_PARAM_ROUTE: Vec<String> = {
vec![
"".to_string(),
"/recv".to_string(),
"/send".to_string(),
"/peer".to_string(),
"/connection".to_string(),
]
};
static ref PARAM_ROUTE: Vec<String> = vec!["/peer/_".to_string(), "/protocol?protocol_id=_".to_string()];
}
/// Response, message contains error info if statusCode isn't 200.
#[derive(Serialize, Deserialize)]
struct ResponseBody {
status: i64,
message: String,
result: Vec<String>,
}
/// Tide server
pub struct InfoServer {
monitor: Server<Control>,
// map: HashMap<String, IRouteHandler>,
}
/// Save package count&size
#[derive(Serialize, Deserialize)]
struct PackageInfo {
package_count: usize,
package_bytes: usize,
}
/// Save package count&size by peer_id or protocol_id
#[derive(Serialize, Deserialize)]
struct SpecInfo {
package_in: usize,
package_out: usize,
}
/// A struct that deserialize protocol_id.
#[derive(Serialize, Deserialize, Debug)]
struct Protocol {
protocol_id: String,
}
/// A struct that deserialize peer_id.
#[derive(Serialize, Deserialize, Debug)]
struct Peer {
count: usize,
}
/// Save data from network_info.
#[derive(Serialize, Deserialize, Debug)]
struct NetworkConnectionStatus {
/// The total number of connections, both established and pending.
num_connections: usize,
/// The total number of pending connections, both incoming and outgoing.
num_connections_pending: usize,
/// The total number of established connections.
num_connections_established: usize,
/// The total number of active sub streams.
num_active_streams: usize,
/// The information of all established connections.
connection_info: Vec<NetworkConnectionInfo>,
}
/// A struct that save connection info.
#[derive(Serialize, Deserialize, Debug)]
struct NetworkConnectionInfo {
la: Vec<u8>,
ra: Vec<u8>,
local_peer_id: String,
remote_peer_id: String,
num_inbound_streams: usize,
num_outbound_streams: usize,
}
impl InfoServer {
pub fn new(control: Control) -> Self {
let mut monitor = tide::with_state(control);
monitor.at("").get(get_all);
monitor.at("/recv").get(get_recv_pkg);
monitor.at("/send").get(get_sent_pkg);
monitor.at("/protocol").get(get_protocol_info);
monitor.at("/peer").get(get_peer_count).at("/:peer_id").get(get_peer_info);
monitor.at("/connection").get(get_connection_info);
InfoServer { monitor }
}
pub fn start(self, addr: String) {
task::spawn(async move {
let r = self.monitor.listen(addr).await;
log::info!("Info server started result={:?}", r);
});
}
}
/// Return route list
async fn get_all(req: Request<Control>) -> tide::Result {
let addr = req.local_addr().unwrap();
let mut available = "<h3>Available Endpoints:</h3></br>".to_string();
for item in NON_PARAM_ROUTE.iter() {
let route = addr.to_owned() + item;
available = available + &format!("<a href=//{}>{}</a></br>", route, route);
}
let mut argument = "<h3>Endpoints that require arguments:</h3></br>".to_string();
for item in PARAM_ROUTE.iter() {
let route = addr.to_owned() + item;
argument += &format!("<a href=//{}>{}</a></br>", route, route);
}
let res_body =
"<head><link rel=\"icon\" href=\"data:;base64,=\"></head>".to_string() + "<body>" + &available + &argument + "</body>";
let response = Response::builder(200).content_type(mime::HTML).body(res_body).build();
Ok(response)
}
/// Get peer count
async fn get_peer_count(req: Request<Control>) -> tide::Result {
let mut control = req.state().clone();
let network_info = control.retrieve_networkinfo().await.map_err(|e| {
log::error!("{:?}", e);
tide::Error::new(500, e)
})?;
let peer = serde_json::to_string(&Peer {
count: network_info.num_peers,
})
.unwrap();
let result_body = Body::from_json(&ResponseBody {
status: 0,
message: "".to_string(),
result: vec![peer],
})?;
let response = Response::builder(200).body(result_body).build();
Ok(response)
}
/// Get connection info
async fn get_connection_info(req: Request<Control>) -> tide::Result {
let mut control = req.state().clone();
let network_info = control.retrieve_networkinfo().await.map_err(|e| {
log::error!("{:?}", e);
tide::Error::new(500, e)
})?;
let cis = control.dump_connections(None).await.map_err(|e| {
log::error!("{:?}", e);
tide::Error::new(500, e)
})?;
let mut connection_info = Vec::new();
for item in cis {
let info = NetworkConnectionInfo {
la: item.info.la.to_vec(),
ra: item.info.ra.to_vec(),
local_peer_id: item.info.local_peer_id.to_string(),
remote_peer_id: item.info.remote_peer_id.to_string(),
num_inbound_streams: item.info.num_inbound_streams,
num_outbound_streams: item.info.num_outbound_streams,
};
connection_info.push(info);
}
let network_connection_status = NetworkConnectionStatus {
num_connections: network_info.num_connections,
num_connections_pending: network_info.num_connections_pending,
num_connections_established: network_info.num_connections_established,
num_active_streams: network_info.num_active_streams,
connection_info,
};
let result_body = Body::from_json(&ResponseBody {
status: 0,
message: "".to_string(),
result: vec![serde_json::to_string(&network_connection_status).unwrap()],
})?;
let response = Response::builder(200).body(result_body).build();
Ok(response)
}
/// Get received package counts and bytes
async fn get_recv_pkg(req: Request<Control>) -> tide::Result {
let (package_count, package_bytes) = req.state().get_recv_count_and_size();
let package = PackageInfo {
package_count,
package_bytes,
};
let result_body = Body::from_json(&package)?;
let response = Response::builder(200).body(result_body).build();
Ok(response)
}
/// Get sent package counts and bytes
async fn get_sent_pkg(req: Request<Control>) -> tide::Result {
let (package_count, package_bytes) = req.state().get_sent_count_and_size();
let package = PackageInfo {
package_count,
package_bytes,
};
let result_body = Body::from_json(&ResponseBody {
status: 0,
message: "".to_string(),
result: vec![serde_json::to_string(&package).unwrap()],
})?;
let response = Response::builder(200).body(result_body).build();
Ok(response)
}
/// Get sent&received package bytes by protocol_id
async fn get_protocol_info(req: Request<Control>) -> tide::Result {
let protocol: Protocol = req.query()?;
let (receive, send) = req.state().get_protocol_in_and_out(&protocol.protocol_id);
let mut spec_info = SpecInfo {
package_in: 0,
package_out: 0,
};
if let Some(value) = receive {
spec_info.package_in = value
}
if let Some(value) = send |
let result_body = Body::from_json(&ResponseBody {
status: 0,
message: "".to_string(),
result: vec![serde_json::to_string(&spec_info).unwrap()],
})?;
let response = Response::builder(200).body(result_body).build();
Ok(response)
}
/// Get sent&received package bytes by peer_id
async fn get_peer_info(req: Request<Control>) -> tide::Result {
let peer = req.param("peer_id")?;
let peer_id = match PeerId::from_str(peer) {
Ok(info) => info,
Err | {
spec_info.package_out = value
} | conditional_block |
lib.rs |
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use libp2prs_core::PeerId;
use libp2prs_runtime::task;
use libp2prs_swarm::Control;
use serde::{Deserialize, Serialize}; | extern crate lazy_static;
lazy_static! {
static ref NON_PARAM_ROUTE: Vec<String> = {
vec![
"".to_string(),
"/recv".to_string(),
"/send".to_string(),
"/peer".to_string(),
"/connection".to_string(),
]
};
static ref PARAM_ROUTE: Vec<String> = vec!["/peer/_".to_string(), "/protocol?protocol_id=_".to_string()];
}
/// Response, message contains error info if statusCode isn't 200.
#[derive(Serialize, Deserialize)]
struct ResponseBody {
status: i64,
message: String,
result: Vec<String>,
}
/// Tide server
pub struct InfoServer {
monitor: Server<Control>,
// map: HashMap<String, IRouteHandler>,
}
/// Save package count&size
#[derive(Serialize, Deserialize)]
struct PackageInfo {
package_count: usize,
package_bytes: usize,
}
/// Save package count&size by peer_id or protocol_id
#[derive(Serialize, Deserialize)]
struct SpecInfo {
package_in: usize,
package_out: usize,
}
/// A struct that deserialize protocol_id.
#[derive(Serialize, Deserialize, Debug)]
struct Protocol {
protocol_id: String,
}
/// A struct that deserialize peer_id.
#[derive(Serialize, Deserialize, Debug)]
struct Peer {
count: usize,
}
/// Save data from network_info.
#[derive(Serialize, Deserialize, Debug)]
struct NetworkConnectionStatus {
/// The total number of connections, both established and pending.
num_connections: usize,
/// The total number of pending connections, both incoming and outgoing.
num_connections_pending: usize,
/// The total number of established connections.
num_connections_established: usize,
/// The total number of active sub streams.
num_active_streams: usize,
/// The information of all established connections.
connection_info: Vec<NetworkConnectionInfo>,
}
/// A struct that save connection info.
#[derive(Serialize, Deserialize, Debug)]
struct NetworkConnectionInfo {
la: Vec<u8>,
ra: Vec<u8>,
local_peer_id: String,
remote_peer_id: String,
num_inbound_streams: usize,
num_outbound_streams: usize,
}
impl InfoServer {
pub fn new(control: Control) -> Self {
let mut monitor = tide::with_state(control);
monitor.at("").get(get_all);
monitor.at("/recv").get(get_recv_pkg);
monitor.at("/send").get(get_sent_pkg);
monitor.at("/protocol").get(get_protocol_info);
monitor.at("/peer").get(get_peer_count).at("/:peer_id").get(get_peer_info);
monitor.at("/connection").get(get_connection_info);
InfoServer { monitor }
}
pub fn start(self, addr: String) {
task::spawn(async move {
let r = self.monitor.listen(addr).await;
log::info!("Info server started result={:?}", r);
});
}
}
/// Return route list
async fn get_all(req: Request<Control>) -> tide::Result {
let addr = req.local_addr().unwrap();
let mut available = "<h3>Available Endpoints:</h3></br>".to_string();
for item in NON_PARAM_ROUTE.iter() {
let route = addr.to_owned() + item;
available = available + &format!("<a href=//{}>{}</a></br>", route, route);
}
let mut argument = "<h3>Endpoints that require arguments:</h3></br>".to_string();
for item in PARAM_ROUTE.iter() {
let route = addr.to_owned() + item;
argument += &format!("<a href=//{}>{}</a></br>", route, route);
}
let res_body =
"<head><link rel=\"icon\" href=\"data:;base64,=\"></head>".to_string() + "<body>" + &available + &argument + "</body>";
let response = Response::builder(200).content_type(mime::HTML).body(res_body).build();
Ok(response)
}
/// Get peer count
async fn get_peer_count(req: Request<Control>) -> tide::Result {
let mut control = req.state().clone();
let network_info = control.retrieve_networkinfo().await.map_err(|e| {
log::error!("{:?}", e);
tide::Error::new(500, e)
})?;
let peer = serde_json::to_string(&Peer {
count: network_info.num_peers,
})
.unwrap();
let result_body = Body::from_json(&ResponseBody {
status: 0,
message: "".to_string(),
result: vec![peer],
})?;
let response = Response::builder(200).body(result_body).build();
Ok(response)
}
/// Get connection info
async fn get_connection_info(req: Request<Control>) -> tide::Result {
let mut control = req.state().clone();
let network_info = control.retrieve_networkinfo().await.map_err(|e| {
log::error!("{:?}", e);
tide::Error::new(500, e)
})?;
let cis = control.dump_connections(None).await.map_err(|e| {
log::error!("{:?}", e);
tide::Error::new(500, e)
})?;
let mut connection_info = Vec::new();
for item in cis {
let info = NetworkConnectionInfo {
la: item.info.la.to_vec(),
ra: item.info.ra.to_vec(),
local_peer_id: item.info.local_peer_id.to_string(),
remote_peer_id: item.info.remote_peer_id.to_string(),
num_inbound_streams: item.info.num_inbound_streams,
num_outbound_streams: item.info.num_outbound_streams,
};
connection_info.push(info);
}
let network_connection_status = NetworkConnectionStatus {
num_connections: network_info.num_connections,
num_connections_pending: network_info.num_connections_pending,
num_connections_established: network_info.num_connections_established,
num_active_streams: network_info.num_active_streams,
connection_info,
};
let result_body = Body::from_json(&ResponseBody {
status: 0,
message: "".to_string(),
result: vec![serde_json::to_string(&network_connection_status).unwrap()],
})?;
let response = Response::builder(200).body(result_body).build();
Ok(response)
}
/// Get received package counts and bytes
async fn get_recv_pkg(req: Request<Control>) -> tide::Result {
let (package_count, package_bytes) = req.state().get_recv_count_and_size();
let package = PackageInfo {
package_count,
package_bytes,
};
let result_body = Body::from_json(&package)?;
let response = Response::builder(200).body(result_body).build();
Ok(response)
}
/// Get sent package counts and bytes
async fn get_sent_pkg(req: Request<Control>) -> tide::Result {
let (package_count, package_bytes) = req.state().get_sent_count_and_size();
let package = PackageInfo {
package_count,
package_bytes,
};
let result_body = Body::from_json(&ResponseBody {
status: 0,
message: "".to_string(),
result: vec![serde_json::to_string(&package).unwrap()],
})?;
let response = Response::builder(200).body(result_body).build();
Ok(response)
}
/// Get sent&received package bytes by protocol_id
async fn get_protocol_info(req: Request<Control>) -> tide::Result {
let protocol: Protocol = req.query()?;
let (receive, send) = req.state().get_protocol_in_and_out(&protocol.protocol_id);
let mut spec_info = SpecInfo {
package_in: 0,
package_out: 0,
};
if let Some(value) = receive {
spec_info.package_in = value
}
if let Some(value) = send {
spec_info.package_out = value
}
let result_body = Body::from_json(&ResponseBody {
status: 0,
message: "".to_string(),
result: vec![serde_json::to_string(&spec_info).unwrap()],
})?;
let response = Response::builder(200).body(result_body).build();
Ok(response)
}
/// Get sent&received package bytes by peer_id
async fn get_peer_info(req: Request<Control>) -> tide::Result {
let peer = req.param("peer_id")?;
let peer_id = match PeerId::from_str(peer) {
Ok(info) => info,
Err(e) | use std::str::FromStr;
use tide::http::mime;
use tide::{Body, Request, Response, Server};
#[macro_use] | random_line_split |
linux.rs | to `SIGKILL`. And you should keep it that way
/// unless you know what you are doing.
///
/// Particularly you should consider the following choices:
///
    /// 1. Instead of setting ``PDEATHSIG`` to some other signal, send the signal
    /// yourself and wait until the child finishes gracefully.
///
/// 2. Instead of daemonizing use ``systemd``/``upstart``/whatever system
/// init script to run your service
///
    /// Another issue with this option is that it works only with the immediate
    /// child. To better control all descendant processes you may need the
/// following:
///
    /// 1. The `prctl(PR_SET_CHILD_SUBREAPER, ..)` call in the parent, which
    /// makes it possible to "catch" descendant processes.
///
/// 2. The pid namespaces
///
/// The former is out of scope of this library. The latter works by
/// ``cmd.unshare(Namespace::Pid)``, but you may need to setup mount points
/// and other important things (which are out of scope too).
///
/// To reset this behavior use ``allow_daemonize()``.
///
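    /// # Example
    ///
    /// A minimal usage sketch (the binary path and the `unshare`/`Signal`
    /// import paths are illustrative assumptions):
    ///
    /// ```ignore
    /// use unshare::{Command, Signal};
    ///
    /// let mut cmd = Command::new("/bin/sleep");
    /// cmd.arg("60");
    /// // Ask the kernel to send SIGTERM (instead of the default SIGKILL)
    /// // to the child when the parent thread that spawned it dies.
    /// cmd.set_parent_death_signal(Signal::SIGTERM);
    /// let _child = cmd.spawn().expect("failed to spawn child");
    /// ```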
pub fn set_parent_death_signal(&mut self, sig: Signal) -> &mut Command {
self.config.death_sig = Some(sig);
self
}
/// Set chroot dir. Only absolute path is supported
///
/// This method has a non-standard security feature: even if current_dir
/// is unspecified we set it to the directory inside the new root dir.
/// see more details in the description of `Command::current_dir`.
///
/// Note that if both chroot dir and pivot_root are specified, the chroot dir
/// is applied after pivot root. If chroot dir is relative it's relative
/// to either suffix of the current directory with stripped off pivot dir
/// or the pivot dir itself (if old workdir is not prefixed by pivot dir)
///
/// # Panics
///
/// If directory is not absolute
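///
/// # Example
///
/// A minimal sketch (not part of the original docs); the root path is an
/// assumption:
///
/// ```ignore
/// let mut cmd = Command::new("/bin/sh");
/// cmd.chroot_dir("/srv/chroot/alpine");
/// ```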
pub fn chroot_dir<P: AsRef<Path>>(&mut self, dir: P) -> &mut Command
{
let dir = dir.as_ref();
if !dir.is_absolute() {
panic!("Chroot dir must be absolute");
}
self.chroot_dir = Some(dir.to_path_buf());
self
}
/// Moves the root of the file system to the directory `put_old` and
/// makes `new_root` the new root file system. Also it's optionally
/// unmount `new_root` mount point after moving root (but it must exist
/// anyway).
///
/// The documentation says that `put_old` must be underneath the
/// `new_root`. Currently we have a restriction that both must be absolute
/// and `new_root` be prefix of `put_old`, but we may lift it later.
///
/// **Warning** if you don't unshare the mount namespace you will get
/// moved filesystem root for *all processes running in that namespace*
/// including parent (currently running) process itself. If you don't
/// run equivalent to ``mount --make-private`` for the old root filesystem
/// and set ``unmount`` to true, you may get unmounted filesystem for
/// running processes too.
///
/// See `man 2 pivot_root` for further details
///
/// Note that if both chroot dir and pivot_root are specified, the chroot dir
/// is applied after pivot root.
///
/// # Panics
///
/// Panics if either path is not absolute or new_root is not a prefix of
/// put_old.
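///
/// # Example
///
/// A minimal sketch (not part of the original docs); the paths are
/// assumptions, and `put_old` lives under `new_root` as required:
///
/// ```ignore
/// let mut cmd = Command::new("/bin/busybox");
/// cmd.unshare(&[Namespace::Mount])
///    .pivot_root("/srv/rootfs", "/srv/rootfs/mnt", true);
/// ```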
pub fn pivot_root<A: AsRef<Path>, B:AsRef<Path>>(&mut self,
new_root: A, put_old: B, unmount: bool)
-> &mut Command
{
let new_root = new_root.as_ref();
let put_old = put_old.as_ref();
if !new_root.is_absolute() {
panic!("New root must be absolute");
};
if !put_old.is_absolute() {
panic!("The `put_old` dir must be absolute");
}
let mut old_cmp = put_old.components();
for (n, o) in new_root.components().zip(old_cmp.by_ref()) {
if n != o {
panic!("The new_root is not a prefix of put old");
}
}
self.pivot_root = Some((new_root.to_path_buf(), put_old.to_path_buf(),
unmount));
self
}
/// Unshare given namespaces
///
/// Note: each namespace has some consequences on how the new process will
/// work, some of them are described in the `Namespace` type documentation.
pub fn unshare<'x>(&mut self, iter: impl IntoIterator<Item=&'x Namespace>)
-> &mut Command
|
/// Reassociate child process with a namespace specified by a file
/// descriptor
///
/// `file` argument is an open file referring to a namespace
///
/// 'ns' is a namespace type
///
/// See `man 2 setns` for further details
///
/// Note: using `unshare` and `setns` for the same namespace is meaningless.
pub fn set_namespace<F: AsRawFd>(&mut self, file: &F, ns: Namespace)
-> io::Result<&mut Command>
{
let fd = dup_file_cloexec(file)?;
self.config.setns_namespaces.insert(ns, fd);
Ok(self)
}
/// Sets user id and group id mappings for new process
///
/// This automatically enables `User` namespace. You should also set `uid`
/// and `gid` with respective methods for the new process.
///
/// Note there are basically two ways to enable id maps:
///
/// 1. Write them directly
/// 2. Invoke a `newuidmap`, `newgidmap` commands
///
/// First option works either if current process is root or if resulting
/// map only contains current user in the mapping.
///
/// The library will not try to guess the behavior. By default it will
/// write directly. You need to call the `set_id_map_commands` when you
/// want non-default behavior.
///
/// See `man 7 user_namespaces` for more info
pub fn set_id_maps(&mut self, uid_map: Vec<UidMap>, gid_map: Vec<GidMap>)
-> &mut Command
{
self.unshare(&[Namespace::User]);
self.config.id_maps = Some((uid_map, gid_map));
self
}
/// Set path to command-line utilities for writing uid/gid maps
///
/// The utilities provided must obey the same interface as `newuidmap` and
/// `newgidmap` from `shadow` (or sometimes `uidmap`) package. To get it
/// working you usually need to setup `/etc/subuid` and `/etc/subgid`
/// files.
///
/// See `man 1 newuidmap`, `man 1 newgidmap` for details
///
/// This method is a no-op unless `set_id_maps` is called.
pub fn set_id_map_commands<A: AsRef<Path>, B: AsRef<Path>>(&mut self,
newuidmap: A, newgidmap: B)
-> &mut Command
{
self.id_map_commands = Some((
newuidmap.as_ref().to_path_buf(),
newgidmap.as_ref().to_path_buf()));
self
}
/// Keep signal mask intact after executing child, keeps also ignored
/// signals
///
/// By default signal mask is empty and all signals are reset to the
/// `SIG_DFL` value right before `execve()` syscall.
///
/// This is only useful if started process is aware of the issue and sets
/// sigmasks to some reasonable value. When used wisely it may avoid some
/// race conditions when signal is sent after child is cloned but before
/// child has been able to establish its state.
pub fn keep_sigmask(&mut self) -> &mut Command {
self.config.restore_sigmask = false;
self
}
/// Set the argument zero for the process
///
/// By default argument zero is the same as the path to the program to run. You
/// may set it to a short name of the command or to something else to
/// pretend there is a symlink to a program (for example to run `gzip` as
/// `gunzip`).
pub fn arg0<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
self.args[0] = arg.to_cstring();
self
}
/// Makes child process a group leader
///
/// If child process is being launched as a foreground job,
/// the child process group needs to be put into the foreground on
/// the controlling terminal using `tcsetpgrp`. To request status
/// information from stopped child process you should call `waitpid` with
/// `WUNTRACED` flag. And then check status with `WIFSTOPPED` macro.
/// After | {
for ns in iter {
self.config.namespaces |= to_clone_flag(*ns);
}
self
} | identifier_body |
linux.rs | set to `SIGKILL`. And you should keep it that way
/// unless you know what you are doing.
///
/// Particularly you should consider the following choices:
///
/// 1. Instead of setting ``PDEATHSIG`` to some other signal, send signal
/// yourself and wait until child gracefully finishes.
///
/// 2. Instead of daemonizing use ``systemd``/``upstart``/whatever system
/// init script to run your service
///
/// Another issue with this option is that it works only with immediate
/// child. To better control all descendant processes you may need the
/// following:
///
/// 1. The `prctl(PR_SET_CHILD_SUBREAPER..)` in the parent, which allows you to
/// "catch" descendant processes.
///
/// 2. The pid namespaces
///
/// The former is out of scope of this library. The latter works by
/// ``cmd.unshare(Namespace::Pid)``, but you may need to setup mount points
/// and other important things (which are out of scope too).
///
/// To reset this behavior use ``allow_daemonize()``.
///
pub fn set_parent_death_signal(&mut self, sig: Signal) -> &mut Command {
self.config.death_sig = Some(sig);
self
}
/// Set chroot dir. Only absolute path is supported
///
/// This method has a non-standard security feature: even if current_dir
/// is unspecified we set it to the directory inside the new root dir.
/// see more details in the description of `Command::current_dir`.
///
/// Note that if both chroot dir and pivot_root are specified, the chroot dir
/// is applied after pivot root. If chroot dir is relative it's relative
/// to either suffix of the current directory with stripped off pivot dir
/// or the pivot dir itself (if old workdir is not prefixed by pivot dir)
///
/// # Panics
///
/// If directory is not absolute
pub fn chroot_dir<P: AsRef<Path>>(&mut self, dir: P) -> &mut Command
{
let dir = dir.as_ref();
if !dir.is_absolute() {
panic!("Chroot dir must be absolute");
}
self.chroot_dir = Some(dir.to_path_buf());
self
}
/// Moves the root of the file system to the directory `put_old` and
/// makes `new_root` the new root file system. It also optionally
/// unmounts the `new_root` mount point after moving the root (but it must exist
/// anyway).
///
/// The documentation says that `put_old` must be underneath the
/// `new_root`. Currently we have a restriction that both must be absolute
/// and `new_root` be prefix of `put_old`, but we may lift it later.
///
/// **Warning** if you don't unshare the mount namespace you will get
/// moved filesystem root for *all processes running in that namespace*
/// including parent (currently running) process itself. If you don't
/// run equivalent to ``mount --make-private`` for the old root filesystem
/// and set ``unmount`` to true, you may get unmounted filesystem for
/// running processes too.
///
/// See `man 2 pivot_root` for further details
///
/// Note that if both chroot dir and pivot_root are specified, the chroot dir
/// is applied after pivot root.
/// | pub fn pivot_root<A: AsRef<Path>, B:AsRef<Path>>(&mut self,
new_root: A, put_old: B, unmount: bool)
-> &mut Command
{
let new_root = new_root.as_ref();
let put_old = put_old.as_ref();
if !new_root.is_absolute() {
panic!("New root must be absolute");
};
if !put_old.is_absolute() {
panic!("The `put_old` dir must be absolute");
}
let mut old_cmp = put_old.components();
for (n, o) in new_root.components().zip(old_cmp.by_ref()) {
if n != o {
panic!("The new_root is not a prefix of put old");
}
}
self.pivot_root = Some((new_root.to_path_buf(), put_old.to_path_buf(),
unmount));
self
}
/// Unshare given namespaces
///
/// Note: each namespace has some consequences on how the new process will
/// work, some of them are described in the `Namespace` type documentation.
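///
/// # Example
///
/// A minimal sketch (not part of the original docs):
///
/// ```ignore
/// let mut cmd = Command::new("/bin/sh");
/// cmd.unshare(&[Namespace::Pid, Namespace::Mount, Namespace::Ipc]);
/// ```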
pub fn unshare<'x>(&mut self, iter: impl IntoIterator<Item=&'x Namespace>)
-> &mut Command
{
for ns in iter {
self.config.namespaces |= to_clone_flag(*ns);
}
self
}
/// Reassociate child process with a namespace specified by a file
/// descriptor
///
/// `file` argument is an open file referring to a namespace
///
/// 'ns' is a namespace type
///
/// See `man 2 setns` for further details
///
/// Note: using `unshare` and `setns` for the same namespace is meaningless.
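///
/// # Example
///
/// A minimal sketch (not part of the original docs); the target pid `1234`
/// is a placeholder:
///
/// ```ignore
/// let netns = std::fs::File::open("/proc/1234/ns/net")?;
/// let mut cmd = Command::new("/sbin/ip");
/// cmd.set_namespace(&netns, Namespace::Net)?;
/// ```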
pub fn set_namespace<F: AsRawFd>(&mut self, file: &F, ns: Namespace)
-> io::Result<&mut Command>
{
let fd = dup_file_cloexec(file)?;
self.config.setns_namespaces.insert(ns, fd);
Ok(self)
}
/// Sets user id and group id mappings for new process
///
/// This automatically enables `User` namespace. You should also set `uid`
/// and `gid` with respective methods for the new process.
///
/// Note there are basically two ways to enable id maps:
///
/// 1. Write them directly
/// 2. Invoke a `newuidmap`, `newgidmap` commands
///
/// First option works either if current process is root or if resulting
/// map only contains current user in the mapping.
///
/// The library will not try to guess the behavior. By default it will
/// write directly. You need to call the `set_id_map_commands` when you
/// want non-default behavior.
///
/// See `man 7 user_namespaces` for more info
pub fn set_id_maps(&mut self, uid_map: Vec<UidMap>, gid_map: Vec<GidMap>)
-> &mut Command
{
self.unshare(&[Namespace::User]);
self.config.id_maps = Some((uid_map, gid_map));
self
}
/// Set path to command-line utilities for writing uid/gid maps
///
/// The utilities provided must obey the same interface as `newuidmap` and
/// `newgidmap` from `shadow` (or sometimes `uidmap`) package. To get it
/// working you usually need to setup `/etc/subuid` and `/etc/subgid`
/// files.
///
/// See `man 1 newuidmap`, `man 1 newgidmap` for details
///
/// This method is a no-op unless `set_id_maps` is called.
pub fn set_id_map_commands<A: AsRef<Path>, B: AsRef<Path>>(&mut self,
newuidmap: A, newgidmap: B)
-> &mut Command
{
self.id_map_commands = Some((
newuidmap.as_ref().to_path_buf(),
newgidmap.as_ref().to_path_buf()));
self
}
/// Keep signal mask intact after executing child, keeps also ignored
/// signals
///
/// By default signal mask is empty and all signals are reset to the
/// `SIG_DFL` value right before `execve()` syscall.
///
/// This is only useful if started process is aware of the issue and sets
/// sigmasks to some reasonable value. When used wisely it may avoid some
/// race conditions when signal is sent after child is cloned but before
/// child has been able to establish its state.
pub fn keep_sigmask(&mut self) -> &mut Command {
self.config.restore_sigmask = false;
self
}
/// Set the argument zero for the process
///
/// By default argument zero is the same as the path to the program to run. You
/// may set it to a short name of the command or to something else to
/// pretend there is a symlink to a program (for example to run `gzip` as
/// `gunzip`).
pub fn arg0<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
self.args[0] = arg.to_cstring();
self
}
/// Makes child process a group leader
///
/// If child process is being launched as a foreground job,
/// the child process group needs to be put into the foreground on
/// the controlling terminal using `tcsetpgrp`. To request status
/// information from stopped child process you should call `waitpid` with
/// `WUNTRACED` flag. And then check status with `WIFSTOPPED` macro.
/// After | /// # Panics
///
/// Panics if either path is not absolute or new_root is not a prefix of
/// put_old. | random_line_split |
linux.rs | to `SIGKILL`. And you should keep it that way
/// unless you know what you are doing.
///
/// Particularly you should consider the following choices:
///
/// 1. Instead of setting ``PDEATHSIG`` to some other signal, send signal
/// yourself and wait until child gracefully finishes.
///
/// 2. Instead of daemonizing use ``systemd``/``upstart``/whatever system
/// init script to run your service
///
/// Another issue with this option is that it works only with immediate
/// child. To better control all descendant processes you may need the
/// following:
///
/// 1. The `prctl(PR_SET_CHILD_SUBREAPER..)` in the parent, which allows you to
/// "catch" descendant processes.
///
/// 2. The pid namespaces
///
/// The former is out of scope of this library. The latter works by
/// ``cmd.unshare(Namespace::Pid)``, but you may need to setup mount points
/// and other important things (which are out of scope too).
///
/// To reset this behavior use ``allow_daemonize()``.
///
pub fn set_parent_death_signal(&mut self, sig: Signal) -> &mut Command {
self.config.death_sig = Some(sig);
self
}
/// Set chroot dir. Only absolute path is supported
///
/// This method has a non-standard security feature: even if current_dir
/// is unspecified we set it to the directory inside the new root dir.
/// see more details in the description of `Command::current_dir`.
///
/// Note that if both chroot dir and pivot_root are specified, the chroot dir
/// is applied after pivot root. If chroot dir is relative it's relative
/// to either suffix of the current directory with stripped off pivot dir
/// or the pivot dir itself (if old workdir is not prefixed by pivot dir)
///
/// # Panics
///
/// If directory is not absolute
pub fn | <P: AsRef<Path>>(&mut self, dir: P) -> &mut Command
{
let dir = dir.as_ref();
if !dir.is_absolute() {
panic!("Chroot dir must be absolute");
}
self.chroot_dir = Some(dir.to_path_buf());
self
}
/// Moves the root of the file system to the directory `put_old` and
/// makes `new_root` the new root file system. It also optionally
/// unmounts the `new_root` mount point after moving the root (but it must exist
/// anyway).
///
/// The documentation says that `put_old` must be underneath the
/// `new_root`. Currently we have a restriction that both must be absolute
/// and `new_root` be prefix of `put_old`, but we may lift it later.
///
/// **Warning** if you don't unshare the mount namespace you will get
/// moved filesystem root for *all processes running in that namespace*
/// including parent (currently running) process itself. If you don't
/// run equivalent to ``mount --make-private`` for the old root filesystem
/// and set ``unmount`` to true, you may get unmounted filesystem for
/// running processes too.
///
/// See `man 2 pivot_root` for further details
///
/// Note that if both chroot dir and pivot_root are specified, the chroot dir
/// is applied after pivot root.
///
/// # Panics
///
/// Panics if either path is not absolute or new_root is not a prefix of
/// put_old.
pub fn pivot_root<A: AsRef<Path>, B:AsRef<Path>>(&mut self,
new_root: A, put_old: B, unmount: bool)
-> &mut Command
{
let new_root = new_root.as_ref();
let put_old = put_old.as_ref();
if !new_root.is_absolute() {
panic!("New root must be absolute");
};
if !put_old.is_absolute() {
panic!("The `put_old` dir must be absolute");
}
let mut old_cmp = put_old.components();
for (n, o) in new_root.components().zip(old_cmp.by_ref()) {
if n != o {
panic!("The new_root is not a prefix of put old");
}
}
self.pivot_root = Some((new_root.to_path_buf(), put_old.to_path_buf(),
unmount));
self
}
/// Unshare given namespaces
///
/// Note: each namespace has some consequences on how the new process will
/// work, some of them are described in the `Namespace` type documentation.
pub fn unshare<'x>(&mut self, iter: impl IntoIterator<Item=&'x Namespace>)
-> &mut Command
{
for ns in iter {
self.config.namespaces |= to_clone_flag(*ns);
}
self
}
/// Reassociate child process with a namespace specified by a file
/// descriptor
///
/// `file` argument is an open file referring to a namespace
///
/// 'ns' is a namespace type
///
/// See `man 2 setns` for further details
///
/// Note: using `unshare` and `setns` for the same namespace is meaningless.
pub fn set_namespace<F: AsRawFd>(&mut self, file: &F, ns: Namespace)
-> io::Result<&mut Command>
{
let fd = dup_file_cloexec(file)?;
self.config.setns_namespaces.insert(ns, fd);
Ok(self)
}
/// Sets user id and group id mappings for new process
///
/// This automatically enables `User` namespace. You should also set `uid`
/// and `gid` with respective methods for the new process.
///
/// Note there are basically two ways to enable id maps:
///
/// 1. Write them directly
/// 2. Invoke a `newuidmap`, `newgidmap` commands
///
/// First option works either if current process is root or if resulting
/// map only contains current user in the mapping.
///
/// The library will not try to guess the behavior. By default it will
/// write directly. You need to call the `set_id_map_commands` when you
/// want non-default behavior.
///
/// See `man 7 user_namespaces` for more info
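///
/// # Example
///
/// A minimal sketch (not part of the original docs); the `UidMap`/`GidMap`
/// field names are assumptions. It maps the current unprivileged user
/// (uid/gid 1000 here) to root inside the new user namespace:
///
/// ```ignore
/// cmd.set_id_maps(
///     vec![UidMap { inside_uid: 0, outside_uid: 1000, count: 1 }],
///     vec![GidMap { inside_gid: 0, outside_gid: 1000, count: 1 }],
/// );
/// cmd.uid(0);
/// cmd.gid(0);
/// ```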
pub fn set_id_maps(&mut self, uid_map: Vec<UidMap>, gid_map: Vec<GidMap>)
-> &mut Command
{
self.unshare(&[Namespace::User]);
self.config.id_maps = Some((uid_map, gid_map));
self
}
/// Set path to command-line utilities for writing uid/gid maps
///
/// The utilities provided must obey the same interface as `newuidmap` and
/// `newgidmap` from `shadow` (or sometimes `uidmap`) package. To get it
/// working you usually need to setup `/etc/subuid` and `/etc/subgid`
/// files.
///
/// See `man 1 newuidmap`, `man 1 newgidmap` for details
///
/// This method is a no-op unless `set_id_maps` is called.
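///
/// # Example
///
/// A minimal sketch (not part of the original docs); the binary locations
/// are assumptions:
///
/// ```ignore
/// cmd.set_id_maps(uid_maps, gid_maps);
/// cmd.set_id_map_commands("/usr/bin/newuidmap", "/usr/bin/newgidmap");
/// ```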
pub fn set_id_map_commands<A: AsRef<Path>, B: AsRef<Path>>(&mut self,
newuidmap: A, newgidmap: B)
-> &mut Command
{
self.id_map_commands = Some((
newuidmap.as_ref().to_path_buf(),
newgidmap.as_ref().to_path_buf()));
self
}
/// Keep signal mask intact after executing child, keeps also ignored
/// signals
///
/// By default signal mask is empty and all signals are reset to the
/// `SIG_DFL` value right before `execve()` syscall.
///
/// This is only useful if started process is aware of the issue and sets
/// sigmasks to some reasonable value. When used wisely it may avoid some
/// race conditions when signal is sent after child is cloned but before
/// child has been able to establish its state.
pub fn keep_sigmask(&mut self) -> &mut Command {
self.config.restore_sigmask = false;
self
}
/// Set the argument zero for the process
///
/// By default argument zero is the same as the path to the program to run. You
/// may set it to a short name of the command or to something else to
/// pretend there is a symlink to a program (for example to run `gzip` as
/// `gunzip`).
pub fn arg0<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
self.args[0] = arg.to_cstring();
self
}
/// Makes child process a group leader
///
/// If child process is being launched as a foreground job,
/// the child process group needs to be put into the foreground on
/// the controlling terminal using `tcsetpgrp`. To request status
/// information from stopped child process you should call `waitpid` with
/// `WUNTRACED` flag. And then check status with `WIFSTOPPED` macro.
/// After | chroot_dir | identifier_name |
linux.rs | to `SIGKILL`. And you should keep it that way
/// unless you know what you are doing.
///
/// Particularly you should consider the following choices:
///
/// 1. Instead of setting ``PDEATHSIG`` to some other signal, send signal
/// yourself and wait until child gracefully finishes.
///
/// 2. Instead of daemonizing use ``systemd``/``upstart``/whatever system
/// init script to run your service
///
/// Another issue with this option is that it works only with immediate
/// child. To better control all descendant processes you may need the
/// following:
///
/// 1. The `prctl(PR_SET_CHILD_SUBREAPER..)` in the parent, which allows you to
/// "catch" descendant processes.
///
/// 2. The pid namespaces
///
/// The former is out of scope of this library. The latter works by
/// ``cmd.unshare(Namespace::Pid)``, but you may need to setup mount points
/// and other important things (which are out of scope too).
///
/// To reset this behavior use ``allow_daemonize()``.
///
pub fn set_parent_death_signal(&mut self, sig: Signal) -> &mut Command {
self.config.death_sig = Some(sig);
self
}
/// Set chroot dir. Only absolute path is supported
///
/// This method has a non-standard security feature: even if current_dir
/// is unspecified we set it to the directory inside the new root dir.
/// see more details in the description of `Command::current_dir`.
///
/// Note that if both chroot dir and pivot_root are specified, the chroot dir
/// is applied after pivot root. If chroot dir is relative it's relative
/// to either suffix of the current directory with stripped off pivot dir
/// or the pivot dir itself (if old workdir is not prefixed by pivot dir)
///
/// # Panics
///
/// If directory is not absolute
pub fn chroot_dir<P: AsRef<Path>>(&mut self, dir: P) -> &mut Command
{
let dir = dir.as_ref();
if !dir.is_absolute() {
panic!("Chroot dir must be absolute");
}
self.chroot_dir = Some(dir.to_path_buf());
self
}
/// Moves the root of the file system to the directory `put_old` and
/// makes `new_root` the new root file system. It also optionally
/// unmounts the `new_root` mount point after moving the root (but it must exist
/// anyway).
///
/// The documentation says that `put_old` must be underneath the
/// `new_root`. Currently we have a restriction that both must be absolute
/// and `new_root` be prefix of `put_old`, but we may lift it later.
///
/// **Warning** if you don't unshare the mount namespace you will get
/// moved filesystem root for *all processes running in that namespace*
/// including parent (currently running) process itself. If you don't
/// run equivalent to ``mount --make-private`` for the old root filesystem
/// and set ``unmount`` to true, you may get unmounted filesystem for
/// running processes too.
///
/// See `man 2 pivot_root` for further details
///
/// Note that if both chroot dir and pivot_root are specified, the chroot dir
/// is applied after pivot root.
///
/// # Panics
///
/// Panics if either path is not absolute or new_root is not a prefix of
/// put_old.
pub fn pivot_root<A: AsRef<Path>, B:AsRef<Path>>(&mut self,
new_root: A, put_old: B, unmount: bool)
-> &mut Command
{
let new_root = new_root.as_ref();
let put_old = put_old.as_ref();
if !new_root.is_absolute() {
panic!("New root must be absolute");
};
if !put_old.is_absolute() |
let mut old_cmp = put_old.components();
for (n, o) in new_root.components().zip(old_cmp.by_ref()) {
if n != o {
panic!("The new_root is not a prefix of put old");
}
}
self.pivot_root = Some((new_root.to_path_buf(), put_old.to_path_buf(),
unmount));
self
}
/// Unshare given namespaces
///
/// Note: each namespace has some consequences on how the new process will
/// work, some of them are described in the `Namespace` type documentation.
pub fn unshare<'x>(&mut self, iter: impl IntoIterator<Item=&'x Namespace>)
-> &mut Command
{
for ns in iter {
self.config.namespaces |= to_clone_flag(*ns);
}
self
}
/// Reassociate child process with a namespace specified by a file
/// descriptor
///
/// `file` argument is an open file referring to a namespace
///
/// 'ns' is a namespace type
///
/// See `man 2 setns` for further details
///
/// Note: using `unshare` and `setns` for the same namespace is meaningless.
pub fn set_namespace<F: AsRawFd>(&mut self, file: &F, ns: Namespace)
-> io::Result<&mut Command>
{
let fd = dup_file_cloexec(file)?;
self.config.setns_namespaces.insert(ns, fd);
Ok(self)
}
/// Sets user id and group id mappings for new process
///
/// This automatically enables `User` namespace. You should also set `uid`
/// and `gid` with respective methods for the new process.
///
/// Note there are basically two ways to enable id maps:
///
/// 1. Write them directly
/// 2. Invoke a `newuidmap`, `newgidmap` commands
///
/// First option works either if current process is root or if resulting
/// map only contains current user in the mapping.
///
/// The library will not try to guess the behavior. By default it will
/// write directly. You need to call the `set_id_map_commands` when you
/// want non-default behavior.
///
/// See `man 7 user_namespaces` for more info
pub fn set_id_maps(&mut self, uid_map: Vec<UidMap>, gid_map: Vec<GidMap>)
-> &mut Command
{
self.unshare(&[Namespace::User]);
self.config.id_maps = Some((uid_map, gid_map));
self
}
/// Set path to command-line utilities for writing uid/gid maps
///
/// The utilities provided must obey the same interface as `newuidmap` and
/// `newgidmap` from `shadow` (or sometimes `uidmap`) package. To get it
/// working you usually need to setup `/etc/subuid` and `/etc/subgid`
/// files.
///
/// See `man 1 newuidmap`, `man 1 newgidmap` for details
///
/// This method is a no-op unless `set_id_maps` is called.
pub fn set_id_map_commands<A: AsRef<Path>, B: AsRef<Path>>(&mut self,
newuidmap: A, newgidmap: B)
-> &mut Command
{
self.id_map_commands = Some((
newuidmap.as_ref().to_path_buf(),
newgidmap.as_ref().to_path_buf()));
self
}
/// Keep signal mask intact after executing child, keeps also ignored
/// signals
///
/// By default signal mask is empty and all signals are reset to the
/// `SIG_DFL` value right before `execve()` syscall.
///
/// This is only useful if started process is aware of the issue and sets
/// sigmasks to some reasonable value. When used wisely it may avoid some
/// race conditions when signal is sent after child is cloned but before
/// child has been able to establish its state.
pub fn keep_sigmask(&mut self) -> &mut Command {
self.config.restore_sigmask = false;
self
}
/// Set the argument zero for the process
///
/// By default argument zero is the same as the path to the program to run. You
/// may set it to a short name of the command or to something else to
/// pretend there is a symlink to a program (for example to run `gzip` as
/// `gunzip`).
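///
/// # Example
///
/// A minimal sketch (not part of the original docs):
///
/// ```ignore
/// let mut cmd = Command::new("/bin/gzip");
/// cmd.arg0("gunzip");
/// ```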
pub fn arg0<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
self.args[0] = arg.to_cstring();
self
}
/// Makes child process a group leader
///
/// If child process is being launched as a foreground job,
/// the child process group needs to be put into the foreground on
/// the controlling terminal using `tcsetpgrp`. To request status
/// information from stopped child process you should call `waitpid` with
/// `WUNTRACED` flag. And then check status with `WIFSTOPPED` macro.
/// | {
panic!("The `put_old` dir must be absolute");
} | conditional_block |
router.go | use patterns in the same way they are currently used for routes but in reverse order (params on the left)
// NOTE: You have to use the '$' character instead of ':' for matching host parameters.
// The following patterns works:
/*
admin.example.com will match admin.example.com
$username.blog.com will match messi.blog.com
will not match my.awesome.blog.com
*.example.com will match my.admin.example.com
The following patterns are not allowed:
mail.*
*
*/
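// Example (sketch, not part of the original docs; statusHandler is a
// placeholder http.HandlerFunc):
// api := New()
// api.Host("api.example.com")
// api.GetFunc("/status", statusHandler)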
func (r *Router) Host(hostpattern string) *Router {
r.host = hostpattern
r.hostrm.Register(hostpattern)
return r
}
// Any registers the provided Handler for all of the allowed http methods: GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT, PATCH
func (r *Router) Any(pattern string, handler http.Handler) Route {
rt := r.Handle(allowedHTTPMethods[0], pattern, handler).(*route)
rt.withMethods(r.middlewares.BuildHandler(handler), allowedHTTPMethods[1:]...)
return rt
}
// Get registers an http GET method receiver with the provided Handler
func (r *Router) Get(pattern string, handler http.Handler) Route {
return r.Handle("GET", pattern, handler)
}
// Head registers an http HEAD method receiver with the provided Handler
func (r *Router) Head(pattern string, handler http.Handler) Route {
return r.Handle("HEAD", pattern, handler)
}
// Post registers an http POST method receiver with the provided Handler
func (r *Router) Post(pattern string, handler http.Handler) Route {
return r.Handle("POST", pattern, handler)
}
// Put registers an http PUT method receiver with the provided Handler
func (r *Router) Put(pattern string, handler http.Handler) Route {
return r.Handle("PUT", pattern, handler)
}
// Delete registers an http DELETE method receiver with the provided Handler
func (r *Router) Delete(pattern string, handler http.Handler) Route {
return r.Handle("DELETE", pattern, handler)
}
// Trace registers an http TRACE method receiver with the provided Handler
func (r *Router) Trace(pattern string, handler http.Handler) Route {
return r.Handle("TRACE", pattern, handler)
}
// Options registers an http OPTIONS method receiver with the provided Handler
func (r *Router) Options(pattern string, handler http.Handler) Route {
return r.Handle("OPTIONS", pattern, handler)
}
// Connect registers an http CONNECT method receiver with the provided Handler
func (r *Router) Connect(pattern string, handler http.Handler) Route {
return r.Handle("CONNECT", pattern, handler)
}
// Patch registers an http PATCH method receiver with the provided Handler
func (r *Router) Patch(pattern string, handler http.Handler) Route {
return r.Handle("PATCH", pattern, handler)
}
// ANY registers the provided contextual Handler for all of the allowed http methods: GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT, PATCH
func (r *Router) ANY(pattern string, handler func(Context)) Route {
rt := r.Handle(allowedHTTPMethods[0], pattern, wrap(handler)).(*route)
rt.withMethods(r.middlewares.BuildHandler(wrap(handler)), allowedHTTPMethods[1:]...)
return rt
}
// GET registers an http GET method receiver with the provided contextual Handler
func (r *Router) GET(pattern string, handler func(Context)) Route {
return r.Handle("GET", pattern, wrap(handler))
}
// HEAD registers an http HEAD method receiver with the provided contextual Handler
func (r *Router) HEAD(pattern string, handler func(Context)) Route {
return r.Handle("HEAD", pattern, wrap(handler))
}
// POST registers an http POST method receiver with the provided contextual Handler
func (r *Router) POST(pattern string, handler func(Context)) Route {
return r.Handle("POST", pattern, wrap(handler))
}
// PUT registers an http PUT method receiver with the provided contextual Handler
func (r *Router) PUT(pattern string, handler func(Context)) Route {
return r.Handle("PUT", pattern, wrap(handler))
}
// DELETE registers an http DELETE method receiver with the provided contextual Handler
func (r *Router) DELETE(pattern string, handler func(Context)) Route {
return r.Handle("DELETE", pattern, wrap(handler))
}
// TRACE registers an http TRACE method receiver with the provided contextual Handler
func (r *Router) TRACE(pattern string, handler func(Context)) Route {
return r.Handle("TRACE", pattern, wrap(handler))
}
// OPTIONS registers an http OPTIONS method receiver with the provided contextual Handler
func (r *Router) OPTIONS(pattern string, handler func(Context)) Route {
return r.Handle("OPTIONS", pattern, wrap(handler))
}
// CONNECT registers an http CONNECT method receiver with the provided contextual Handler
func (r *Router) CONNECT(pattern string, handler func(Context)) Route {
return r.Handle("CONNECT", pattern, wrap(handler))
}
// PATCH registers an http PATCH method receiver with the provided contextual Handler
func (r *Router) PATCH(pattern string, handler func(Context)) Route {
return r.Handle("PATCH", pattern, wrap(handler))
}
// AnyFunc registers the provided HandlerFunc for all of the allowed http methods: GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT, PATCH
func (r *Router) AnyFunc(pattern string, handler http.HandlerFunc) Route {
return r.Any(pattern, http.HandlerFunc(handler))
}
// GetFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) GetFunc(pattern string, fn http.HandlerFunc) Route {
return r.Get(pattern, http.HandlerFunc(fn))
}
// HeadFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) HeadFunc(pattern string, fn http.HandlerFunc) Route {
return r.Head(pattern, http.HandlerFunc(fn))
}
// PostFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) PostFunc(pattern string, fn http.HandlerFunc) Route {
return r.Post(pattern, http.HandlerFunc(fn))
}
// PutFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) PutFunc(pattern string, fn http.HandlerFunc) Route {
return r.Put(pattern, http.HandlerFunc(fn))
}
// DeleteFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) DeleteFunc(pattern string, fn http.HandlerFunc) Route {
return r.Delete(pattern, http.HandlerFunc(fn))
}
// TraceFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) TraceFunc(pattern string, fn http.HandlerFunc) Route {
return r.Trace(pattern, http.HandlerFunc(fn))
}
// OptionsFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) OptionsFunc(pattern string, fn http.HandlerFunc) Route {
return r.Options(pattern, http.HandlerFunc(fn))
}
// ConnectFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) ConnectFunc(pattern string, fn http.HandlerFunc) Route {
return r.Connect(pattern, http.HandlerFunc(fn))
}
// PatchFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) PatchFunc(pattern string, fn http.HandlerFunc) Route {
return r.Patch(pattern, http.HandlerFunc(fn))
}
// Use registers middlewares to be used
func (r *Router) Use(middlewares ...Middleware) {
r.middlewares = append(r.middlewares, middlewares...)
}
// UseFunc wraps MiddlewareFuncs as Middlewares and registers them to be used
func (r *Router) UseFunc(middlewareFuncs ...MiddlewareFunc) {
for _, fn := range middlewareFuncs {
r.Use(MiddlewareFunc(fn))
}
}
// UseNext allows using middlewares with the following form: func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc)
// Previously named: UseNegroniFunc.
// This can be useful if you want to use negroni style middleware or a middleware already built by the community.
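// Example (sketch, not part of the original docs):
// r.UseNext(func(w http.ResponseWriter, req *http.Request, next http.HandlerFunc) {
//     log.Println(req.Method, req.URL.Path)
//     next(w, req)
// })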
func (r *Router) UseNext(funcs ...func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc)) {
for _, fn := range funcs {
r.Use(MiddlewareFunc(func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fn(w, r, next.ServeHTTP)
})
}))
}
}
// USE allows you to use contextual middlewares.
// Example:
// router.USE(func (next func(Context)) func(Context) {
// return func(c Context) {
// if c.GetHeader("Authorization") == "" {
// c.Error(lion.ErrorUnauthorized)
// return
// }
// next(c)
// }
// })
// This will return an HTTP 401 Unauthorized response if the "Authorization" header is not set.
// Otherwise, it will continue to the next middleware.
func (r *Router) USE(middlewares ...func(func(Context)) func(Context)) {
for _, mw := range middlewares {
r.UseFunc(func(next http.Handler) http.Handler {
return wrap(mw(unwrap(next)))
})
}
}
func (r *Router) root() *Router {
if r.parent == nil {
return r
}
return r.parent.root()
}
func (r *Router) findRoute(rt *route) (*route, bool) {
for _, route := range r.routes {
if route == rt {
return route, true
}
}
return nil, false
}
func (r *Router) buildMiddlewares(handler http.Handler) http.Handler | {
handler = r.middlewares.BuildHandler(handler)
if !r.isRoot() {
handler = r.parent.buildMiddlewares(handler)
}
return handler
} | identifier_body |
|
router.go | r.Handle("POST", pattern, handler)
}
// Put registers an http PUT method receiver with the provided Handler
func (r *Router) Put(pattern string, handler http.Handler) Route {
return r.Handle("PUT", pattern, handler)
}
// Delete registers an http DELETE method receiver with the provided Handler
func (r *Router) Delete(pattern string, handler http.Handler) Route {
return r.Handle("DELETE", pattern, handler)
}
// Trace registers an http TRACE method receiver with the provided Handler
func (r *Router) Trace(pattern string, handler http.Handler) Route {
return r.Handle("TRACE", pattern, handler)
}
// Options registers an http OPTIONS method receiver with the provided Handler
func (r *Router) Options(pattern string, handler http.Handler) Route {
return r.Handle("OPTIONS", pattern, handler)
}
// Connect registers an http CONNECT method receiver with the provided Handler
func (r *Router) Connect(pattern string, handler http.Handler) Route {
return r.Handle("CONNECT", pattern, handler)
}
// Patch registers an http PATCH method receiver with the provided Handler
func (r *Router) Patch(pattern string, handler http.Handler) Route {
return r.Handle("PATCH", pattern, handler)
}
// ANY registers the provided contextual Handler for all of the allowed http methods: GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT, PATCH
func (r *Router) ANY(pattern string, handler func(Context)) Route {
rt := r.Handle(allowedHTTPMethods[0], pattern, wrap(handler)).(*route)
rt.withMethods(r.middlewares.BuildHandler(wrap(handler)), allowedHTTPMethods[1:]...)
return rt
}
// GET registers an http GET method receiver with the provided contextual Handler
func (r *Router) GET(pattern string, handler func(Context)) Route {
return r.Handle("GET", pattern, wrap(handler))
}
// HEAD registers an http HEAD method receiver with the provided contextual Handler
func (r *Router) HEAD(pattern string, handler func(Context)) Route {
return r.Handle("HEAD", pattern, wrap(handler))
}
// POST registers an http POST method receiver with the provided contextual Handler
func (r *Router) POST(pattern string, handler func(Context)) Route {
return r.Handle("POST", pattern, wrap(handler))
}
// PUT registers an http PUT method receiver with the provided contextual Handler
func (r *Router) PUT(pattern string, handler func(Context)) Route {
return r.Handle("PUT", pattern, wrap(handler))
}
// DELETE registers an http DELETE method receiver with the provided contextual Handler
func (r *Router) DELETE(pattern string, handler func(Context)) Route {
return r.Handle("DELETE", pattern, wrap(handler))
}
// TRACE registers an http TRACE method receiver with the provided contextual Handler
func (r *Router) TRACE(pattern string, handler func(Context)) Route {
return r.Handle("TRACE", pattern, wrap(handler))
}
// OPTIONS registers an http OPTIONS method receiver with the provided contextual Handler
func (r *Router) OPTIONS(pattern string, handler func(Context)) Route {
return r.Handle("OPTIONS", pattern, wrap(handler))
}
// CONNECT registers an http CONNECT method receiver with the provided contextual Handler
func (r *Router) CONNECT(pattern string, handler func(Context)) Route {
return r.Handle("CONNECT", pattern, wrap(handler))
}
// PATCH registers an http PATCH method receiver with the provided contextual Handler
func (r *Router) PATCH(pattern string, handler func(Context)) Route {
return r.Handle("PATCH", pattern, wrap(handler))
}
// AnyFunc registers the provided HandlerFunc for all of the allowed http methods: GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT, PATCH
func (r *Router) AnyFunc(pattern string, handler http.HandlerFunc) Route {
return r.Any(pattern, http.HandlerFunc(handler))
}
// GetFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) GetFunc(pattern string, fn http.HandlerFunc) Route {
return r.Get(pattern, http.HandlerFunc(fn))
}
// HeadFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) HeadFunc(pattern string, fn http.HandlerFunc) Route {
return r.Head(pattern, http.HandlerFunc(fn))
}
// PostFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) PostFunc(pattern string, fn http.HandlerFunc) Route {
return r.Post(pattern, http.HandlerFunc(fn))
}
// PutFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) PutFunc(pattern string, fn http.HandlerFunc) Route {
return r.Put(pattern, http.HandlerFunc(fn))
}
// DeleteFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) DeleteFunc(pattern string, fn http.HandlerFunc) Route {
return r.Delete(pattern, http.HandlerFunc(fn))
}
// TraceFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) TraceFunc(pattern string, fn http.HandlerFunc) Route {
return r.Trace(pattern, http.HandlerFunc(fn))
}
// OptionsFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) OptionsFunc(pattern string, fn http.HandlerFunc) Route {
return r.Options(pattern, http.HandlerFunc(fn))
}
// ConnectFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) ConnectFunc(pattern string, fn http.HandlerFunc) Route {
return r.Connect(pattern, http.HandlerFunc(fn))
}
// PatchFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) PatchFunc(pattern string, fn http.HandlerFunc) Route {
return r.Patch(pattern, http.HandlerFunc(fn))
}
// Use registers middlewares to be used
func (r *Router) Use(middlewares ...Middleware) {
r.middlewares = append(r.middlewares, middlewares...)
}
// UseFunc wraps MiddlewareFuncs as Middlewares and registers them to be used
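// Example (sketch, not part of the original docs):
// r.UseFunc(func(next http.Handler) http.Handler {
//     return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
//         w.Header().Set("X-Served-By", "lion")
//         next.ServeHTTP(w, req)
//     })
// })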
func (r *Router) UseFunc(middlewareFuncs ...MiddlewareFunc) {
for _, fn := range middlewareFuncs {
r.Use(MiddlewareFunc(fn))
}
}
// UseNext allows using middlewares with the following form: func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc)
// Previously named: UseNegroniFunc.
// This can be useful if you want to use negroni style middleware or a middleware already built by the community.
func (r *Router) UseNext(funcs ...func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc)) {
for _, fn := range funcs {
r.Use(MiddlewareFunc(func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fn(w, r, next.ServeHTTP)
})
}))
}
}
// USE allows you to use contextual middlewares.
// Example:
// router.USE(func (next func(Context)) func(Context) {
// return func(c Context) {
// if c.GetHeader("Authorization") == "" {
// c.Error(lion.ErrorUnauthorized)
// return
// }
// next(c)
// }
// })
// This will return an HTTP 401 Unauthorized response if the "Authorization" header is not set.
// Otherwise, it will continue to the next middleware.
func (r *Router) USE(middlewares ...func(func(Context)) func(Context)) {
for _, mw := range middlewares {
r.UseFunc(func(next http.Handler) http.Handler {
return wrap(mw(unwrap(next)))
})
}
}
func (r *Router) root() *Router {
if r.parent == nil {
return r
}
return r.parent.root()
}
func (r *Router) findRoute(rt *route) (*route, bool) {
for _, route := range r.routes {
if route == rt {
return route, true
}
}
return nil, false
}
func (r *Router) buildMiddlewares(handler http.Handler) http.Handler {
handler = r.middlewares.BuildHandler(handler)
if !r.isRoot() {
handler = r.parent.buildMiddlewares(handler)
}
return handler
}
func (r *Router) isRoot() bool {
return r.parent == nil
}
// HandleFunc wraps a HandlerFunc and pass it to Handle method
func (r *Router) HandleFunc(method, pattern string, fn http.HandlerFunc) Route {
return r.Handle(method, pattern, http.HandlerFunc(fn))
}
// NotFound calls NotFoundHandler() if it is set. Otherwise, it calls net/http.NotFound
func (r *Router) notFound(w http.ResponseWriter, req *http.Request) {
if r.root().notFoundHandler != nil {
r.root().notFoundHandler.ServeHTTP(w, req)
} else {
http.NotFound(w, req)
}
}
// ServeFiles serves files located in root http.FileSystem
//
// This can be used as shown below:
// r := New()
// r.ServeFiles("/static", http.Dir("static")) // This will serve files in the directory static with /static prefix
func (r *Router) ServeFiles(base string, root http.FileSystem) {
if strings.ContainsAny(base, ":*") {
panic("Lion: ServeFiles cannot have url parameters")
}
pattern := path.Join(base, "/*")
fileServer := http.StripPrefix(base, http.FileServer(root))
r.Get(pattern, fileServer)
r.Head(pattern, fileServer)
}
// ServeFile serve a specific file located at the passed path
//
// l := New()
// l.ServeFile("/robots.txt", "path/to/robots.txt")
func (r *Router) ServeFile(base, path string) {
if strings.ContainsAny(base, ":*") | {
panic("Lion: ServeFile cannot have url parameters")
} | conditional_block |
|
router.go | .host
rt.pathMatcher = rm
r.routes = append(r.routes, rt)
}
return rt
}
// ServeHTTP finds the handler associated with the request's path.
// If it is not found it calls the NotFound handler
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
ctx := r.pool.Get().(*ctx)
ctx.Reset()
ctx.parent = req.Context()
ctx.ResponseWriter = w
ctx.req = req
if h := r.root().hostrm.Match(ctx, req); h != nil {
// We set the context only if there is a match
req = setParamContext(req, ctx)
h.ServeHTTP(w, req)
} else {
r.notFound(w, req) // r.middlewares.BuildHandler(HandlerFunc(r.NotFound)).ServeHTTPC
}
r.pool.Put(ctx)
}
// Mount mounts a subrouter at the provided pattern
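// Example (sketch, not part of the original docs; listUsers is a placeholder
// handler):
// api := New()
// api.GetFunc("/users", listUsers)
// r.Mount("/api", api) // the route becomes /api/users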
func (r *Router) Mount(pattern string, router *Router, mws ...Middleware) {
router.parent = r
r.subrouters = append(r.subrouters, router)
var p string
if pattern == "/" {
p = r.pattern
} else {
p = r.pattern + pattern
}
router.pattern = p
host := r.host
for i, route := range router.routes {
router.Host(route.Host())
for _, method := range route.Methods() {
router.Handle(method, route.Pattern(), route.Handler(method))
}
router.routes = append(router.routes[:i], router.routes[i+1:]...)
}
// Restore previous host
r.host = host
}
func newCtxPool() sync.Pool {
return sync.Pool{
New: func() interface{} {
return newContext()
},
}
}
// Host sets the host for the current router instances.
// You can use patterns in the same way they are currently used for routes but in reverse order (params on the left)
// NOTE: You have to use the '$' character instead of ':' for matching host parameters.
// The following patterns works:
/*
admin.example.com will match admin.example.com
$username.blog.com will match messi.blog.com
will not match my.awesome.blog.com
*.example.com will match my.admin.example.com
The following patterns are not allowed:
mail.*
*
*/
func (r *Router) Host(hostpattern string) *Router {
r.host = hostpattern
r.hostrm.Register(hostpattern)
return r
}
// Any registers the provided Handler for all of the allowed http methods: GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT, PATCH
func (r *Router) Any(pattern string, handler http.Handler) Route {
rt := r.Handle(allowedHTTPMethods[0], pattern, handler).(*route)
rt.withMethods(r.middlewares.BuildHandler(handler), allowedHTTPMethods[1:]...)
return rt
}
// Get registers an http GET method receiver with the provided Handler
func (r *Router) Get(pattern string, handler http.Handler) Route {
return r.Handle("GET", pattern, handler)
}
// Head registers an http HEAD method receiver with the provided Handler
func (r *Router) Head(pattern string, handler http.Handler) Route {
return r.Handle("HEAD", pattern, handler)
}
// Post registers an http POST method receiver with the provided Handler
func (r *Router) Post(pattern string, handler http.Handler) Route {
return r.Handle("POST", pattern, handler)
}
// Put registers an http PUT method receiver with the provided Handler
func (r *Router) Put(pattern string, handler http.Handler) Route {
return r.Handle("PUT", pattern, handler)
}
// Delete registers an http DELETE method receiver with the provided Handler
func (r *Router) Delete(pattern string, handler http.Handler) Route {
return r.Handle("DELETE", pattern, handler)
}
// Trace registers an http TRACE method receiver with the provided Handler
func (r *Router) Trace(pattern string, handler http.Handler) Route {
return r.Handle("TRACE", pattern, handler)
}
// Options registers an http OPTIONS method receiver with the provided Handler
func (r *Router) Options(pattern string, handler http.Handler) Route {
return r.Handle("OPTIONS", pattern, handler)
}
// Connect registers an http CONNECT method receiver with the provided Handler
func (r *Router) Connect(pattern string, handler http.Handler) Route {
return r.Handle("CONNECT", pattern, handler)
}
// Patch registers an http PATCH method receiver with the provided Handler
func (r *Router) Patch(pattern string, handler http.Handler) Route {
return r.Handle("PATCH", pattern, handler)
}
// ANY registers the provided contextual Handler for all of the allowed http methods: GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT, PATCH
func (r *Router) ANY(pattern string, handler func(Context)) Route {
rt := r.Handle(allowedHTTPMethods[0], pattern, wrap(handler)).(*route)
rt.withMethods(r.middlewares.BuildHandler(wrap(handler)), allowedHTTPMethods[1:]...)
return rt
}
// GET registers an http GET method receiver with the provided contextual Handler
func (r *Router) GET(pattern string, handler func(Context)) Route {
return r.Handle("GET", pattern, wrap(handler))
}
// HEAD registers an http HEAD method receiver with the provided contextual Handler
func (r *Router) HEAD(pattern string, handler func(Context)) Route {
return r.Handle("HEAD", pattern, wrap(handler))
}
// POST registers an http POST method receiver with the provided contextual Handler
func (r *Router) | (pattern string, handler func(Context)) Route {
return r.Handle("POST", pattern, wrap(handler))
}
// PUT registers an http PUT method receiver with the provided contextual Handler
func (r *Router) PUT(pattern string, handler func(Context)) Route {
return r.Handle("PUT", pattern, wrap(handler))
}
// DELETE registers an http DELETE method receiver with the provided contextual Handler
func (r *Router) DELETE(pattern string, handler func(Context)) Route {
return r.Handle("DELETE", pattern, wrap(handler))
}
// TRACE registers an http TRACE method receiver with the provided contextual Handler
func (r *Router) TRACE(pattern string, handler func(Context)) Route {
return r.Handle("TRACE", pattern, wrap(handler))
}
// OPTIONS registers an http OPTIONS method receiver with the provided contextual Handler
func (r *Router) OPTIONS(pattern string, handler func(Context)) Route {
return r.Handle("OPTIONS", pattern, wrap(handler))
}
// CONNECT registers an http CONNECT method receiver with the provided contextual Handler
func (r *Router) CONNECT(pattern string, handler func(Context)) Route {
return r.Handle("CONNECT", pattern, wrap(handler))
}
// PATCH registers an http PATCH method receiver with the provided contextual Handler
func (r *Router) PATCH(pattern string, handler func(Context)) Route {
return r.Handle("PATCH", pattern, wrap(handler))
}
// AnyFunc registers the provided HandlerFunc for all of the allowed http methods: GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT, PATCH
func (r *Router) AnyFunc(pattern string, handler http.HandlerFunc) Route {
return r.Any(pattern, http.HandlerFunc(handler))
}
// GetFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) GetFunc(pattern string, fn http.HandlerFunc) Route {
return r.Get(pattern, http.HandlerFunc(fn))
}
// HeadFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) HeadFunc(pattern string, fn http.HandlerFunc) Route {
return r.Head(pattern, http.HandlerFunc(fn))
}
// PostFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) PostFunc(pattern string, fn http.HandlerFunc) Route {
return r.Post(pattern, http.HandlerFunc(fn))
}
// PutFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) PutFunc(pattern string, fn http.HandlerFunc) Route {
return r.Put(pattern, http.HandlerFunc(fn))
}
// DeleteFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) DeleteFunc(pattern string, fn http.HandlerFunc) Route {
return r.Delete(pattern, http.HandlerFunc(fn))
}
// TraceFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) TraceFunc(pattern string, fn http.HandlerFunc) Route {
return r.Trace(pattern, http.HandlerFunc(fn))
}
// OptionsFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) OptionsFunc(pattern string, fn http.HandlerFunc) Route {
return r.Options(pattern, http.HandlerFunc(fn))
}
// ConnectFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) ConnectFunc(pattern string, fn http.HandlerFunc) Route {
return r.Connect(pattern, http.HandlerFunc(fn))
}
// PatchFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) PatchFunc(pattern string, fn http.HandlerFunc) Route {
return r.Patch(pattern, http.HandlerFunc(fn))
}
// Use registers middlewares to be used
func (r *Router) Use(middlewares ...Middleware) {
r.middlewares = append(r.middlewares, middlewares...)
}
// UseFunc wraps MiddlewareFuncs as Middlewares and registers them to be used
func (r *Router) UseFunc(middlewareFuncs ...MiddlewareFunc) {
for _, fn := range middlewareFuncs {
r.Use(MiddlewareFunc(fn))
}
}
// UseNext allows using middlewares with the following form: func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc)
// Previously named: UseNegroniFunc.
// This can be useful if you | POST | identifier_name |
router.go | .host
rt.pathMatcher = rm
r.routes = append(r.routes, rt)
}
return rt
}
// ServeHTTP finds the handler associated with the request's path.
// If it is not found it calls the NotFound handler
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
ctx := r.pool.Get().(*ctx)
ctx.Reset()
ctx.parent = req.Context()
ctx.ResponseWriter = w
ctx.req = req
if h := r.root().hostrm.Match(ctx, req); h != nil {
// We set the context only if there is a match
req = setParamContext(req, ctx)
h.ServeHTTP(w, req)
} else {
r.notFound(w, req) // r.middlewares.BuildHandler(HandlerFunc(r.NotFound)).ServeHTTPC
}
r.pool.Put(ctx)
}
// Mount mounts a subrouter at the provided pattern
func (r *Router) Mount(pattern string, router *Router, mws ...Middleware) {
router.parent = r
r.subrouters = append(r.subrouters, router)
var p string
if pattern == "/" {
p = r.pattern
} else {
p = r.pattern + pattern
}
router.pattern = p
host := r.host
for i, route := range router.routes {
router.Host(route.Host())
for _, method := range route.Methods() {
router.Handle(method, route.Pattern(), route.Handler(method))
}
router.routes = append(router.routes[:i], router.routes[i+1:]...)
}
// Restore previous host
r.host = host
}
func newCtxPool() sync.Pool {
return sync.Pool{
New: func() interface{} {
return newContext()
},
}
}
// Host sets the host for the current router instances.
// You can use patterns in the same way they are currently used for routes but in reverse order (params on the left)
// NOTE: You have to use the '$' character instead of ':' for matching host parameters.
// The following patterns works:
/*
admin.example.com will match admin.example.com
$username.blog.com will match messi.blog.com
will not match my.awesome.blog.com
*.example.com will match my.admin.example.com
The following patterns are not allowed:
mail.*
*
*/
func (r *Router) Host(hostpattern string) *Router {
r.host = hostpattern
r.hostrm.Register(hostpattern)
return r
}
// Any registers the provided Handler for all of the allowed http methods: GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT, PATCH
func (r *Router) Any(pattern string, handler http.Handler) Route {
rt := r.Handle(allowedHTTPMethods[0], pattern, handler).(*route)
rt.withMethods(r.middlewares.BuildHandler(handler), allowedHTTPMethods[1:]...)
return rt
}
// Get registers an http GET method receiver with the provided Handler
func (r *Router) Get(pattern string, handler http.Handler) Route {
return r.Handle("GET", pattern, handler)
}
// Head registers an http HEAD method receiver with the provided Handler
func (r *Router) Head(pattern string, handler http.Handler) Route {
return r.Handle("HEAD", pattern, handler) | }
// Post registers an http POST method receiver with the provided Handler
func (r *Router) Post(pattern string, handler http.Handler) Route {
return r.Handle("POST", pattern, handler)
}
// Put registers an http PUT method receiver with the provided Handler
func (r *Router) Put(pattern string, handler http.Handler) Route {
return r.Handle("PUT", pattern, handler)
}
// Delete registers an http DELETE method receiver with the provided Handler
func (r *Router) Delete(pattern string, handler http.Handler) Route {
return r.Handle("DELETE", pattern, handler)
}
// Trace registers an http TRACE method receiver with the provided Handler
func (r *Router) Trace(pattern string, handler http.Handler) Route {
return r.Handle("TRACE", pattern, handler)
}
// Options registers an http OPTIONS method receiver with the provided Handler
func (r *Router) Options(pattern string, handler http.Handler) Route {
return r.Handle("OPTIONS", pattern, handler)
}
// Connect registers an http CONNECT method receiver with the provided Handler
func (r *Router) Connect(pattern string, handler http.Handler) Route {
return r.Handle("CONNECT", pattern, handler)
}
// Patch registers an http PATCH method receiver with the provided Handler
func (r *Router) Patch(pattern string, handler http.Handler) Route {
return r.Handle("PATCH", pattern, handler)
}
// ANY registers the provided contextual Handler for all of the allowed http methods: GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT, PATCH
func (r *Router) ANY(pattern string, handler func(Context)) Route {
rt := r.Handle(allowedHTTPMethods[0], pattern, wrap(handler)).(*route)
rt.withMethods(r.middlewares.BuildHandler(wrap(handler)), allowedHTTPMethods[1:]...)
return rt
}
// GET registers an http GET method receiver with the provided contextual Handler
func (r *Router) GET(pattern string, handler func(Context)) Route {
return r.Handle("GET", pattern, wrap(handler))
}
// HEAD registers an http HEAD method receiver with the provided contextual Handler
func (r *Router) HEAD(pattern string, handler func(Context)) Route {
return r.Handle("HEAD", pattern, wrap(handler))
}
// POST registers an http POST method receiver with the provided contextual Handler
func (r *Router) POST(pattern string, handler func(Context)) Route {
return r.Handle("POST", pattern, wrap(handler))
}
// PUT registers an http PUT method receiver with the provided contextual Handler
func (r *Router) PUT(pattern string, handler func(Context)) Route {
return r.Handle("PUT", pattern, wrap(handler))
}
// DELETE registers an http DELETE method receiver with the provided contextual Handler
func (r *Router) DELETE(pattern string, handler func(Context)) Route {
return r.Handle("DELETE", pattern, wrap(handler))
}
// TRACE registers an http TRACE method receiver with the provided contextual Handler
func (r *Router) TRACE(pattern string, handler func(Context)) Route {
return r.Handle("TRACE", pattern, wrap(handler))
}
// OPTIONS registers an http OPTIONS method receiver with the provided contextual Handler
func (r *Router) OPTIONS(pattern string, handler func(Context)) Route {
return r.Handle("OPTIONS", pattern, wrap(handler))
}
// CONNECT registers an http CONNECT method receiver with the provided contextual Handler
func (r *Router) CONNECT(pattern string, handler func(Context)) Route {
return r.Handle("CONNECT", pattern, wrap(handler))
}
// PATCH registers an http PATCH method receiver with the provided contextual Handler
func (r *Router) PATCH(pattern string, handler func(Context)) Route {
return r.Handle("PATCH", pattern, wrap(handler))
}
// AnyFunc registers the provided HandlerFunc for all of the allowed http methods: GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT, PATCH
func (r *Router) AnyFunc(pattern string, handler http.HandlerFunc) Route {
return r.Any(pattern, http.HandlerFunc(handler))
}
// GetFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) GetFunc(pattern string, fn http.HandlerFunc) Route {
return r.Get(pattern, http.HandlerFunc(fn))
}
// HeadFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) HeadFunc(pattern string, fn http.HandlerFunc) Route {
return r.Head(pattern, http.HandlerFunc(fn))
}
// PostFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) PostFunc(pattern string, fn http.HandlerFunc) Route {
return r.Post(pattern, http.HandlerFunc(fn))
}
// PutFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) PutFunc(pattern string, fn http.HandlerFunc) Route {
return r.Put(pattern, http.HandlerFunc(fn))
}
// DeleteFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) DeleteFunc(pattern string, fn http.HandlerFunc) Route {
return r.Delete(pattern, http.HandlerFunc(fn))
}
// TraceFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) TraceFunc(pattern string, fn http.HandlerFunc) Route {
return r.Trace(pattern, http.HandlerFunc(fn))
}
// OptionsFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) OptionsFunc(pattern string, fn http.HandlerFunc) Route {
return r.Options(pattern, http.HandlerFunc(fn))
}
// ConnectFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) ConnectFunc(pattern string, fn http.HandlerFunc) Route {
return r.Connect(pattern, http.HandlerFunc(fn))
}
// PatchFunc wraps a HandlerFunc as a Handler and registers it to the router
func (r *Router) PatchFunc(pattern string, fn http.HandlerFunc) Route {
return r.Patch(pattern, http.HandlerFunc(fn))
}
// Use registers middlewares to be used
func (r *Router) Use(middlewares ...Middleware) {
r.middlewares = append(r.middlewares, middlewares...)
}
// UseFunc wraps each MiddlewareFunc as a Middleware and registers it in the list of middlewares to be used
func (r *Router) UseFunc(middlewareFuncs ...MiddlewareFunc) {
for _, fn := range middlewareFuncs {
r.Use(MiddlewareFunc(fn))
}
}
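// Illustrative sketch (not part of the original source): registering middlewares.
// requestLogger and requestLoggerFunc are placeholder values satisfying the Middleware
// and MiddlewareFunc types defined elsewhere in this package.
//
//	r.Use(requestLogger)
//	r.UseFunc(requestLoggerFunc)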
// UseNext allows using middlewares with the following form: func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc).
// Previously named: UseNegroniFunc.
// This can be useful if you want to reuse negroni-style middlewares.
|
viewsets.py
import codecs
import os
import re
import tarfile
from typing import Callable, Optional, Type
import toml
from asciinema import asciicast
from asciinema.commands.cat import CatCommand
from django.conf import settings
from django.db.models import F, Q
from django.http import StreamingHttpResponse, Http404
from django.utils.functional import cached_property
from django.views.static import serve
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets, filters, mixins, renderers, serializers
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from rest_framework import parsers
from rest_framework.response import Response
from rest_framework.serializers import Serializer
from pentest_project.exceptions import ItemDoesNotExist
from pentest_project.plugins import PluginCollections, Plugins, PluginShortcuts, Plugin
from pentest_studio.api import BaseViewSetMixIn, StandardResultsSetPagination
from pentest_worker.models import Worker
from pentest_project.api.filters import ActionFilter, FileTomlSearchFilter
from pentest_project.api.serializers import ProjectSerializer, ActionSerializer, DetailProjectSerializer, \
DetailActionSerializer, PluginSetupSerializer, PluginSerializer, PluginShortcutArgSerializer, \
PluginShortcutEnvSerializer, PluginShortcutSerializer, RenderShortcutSerializer, PluginCollectionSerializer
from pentest_project.models import Project, Action
def serve_file(request, filepath):
return serve(request, os.path.basename(filepath), os.path.dirname(filepath))
def asciinema_cat(file):
with asciicast.open_from_url(file) as a:
for t, _type, text in a.stdout_events():
yield text
def filter_toml_file(file_id):
def filter_file(file):
return re.match(r'{}\.plugin\.to?ml$'.format(re.escape(file_id)), file, re.IGNORECASE)
return filter_file
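# Illustrative example (not part of the original source): the returned predicate matches
# "<file_id>.plugin.toml" and "<file_id>.plugin.tml", case-insensitively.
#
#   is_nmap = filter_toml_file('nmap')
#   bool(is_nmap('nmap.plugin.toml'))   # True
#   bool(is_nmap('NMAP.PLUGIN.TML'))    # True
#   bool(is_nmap('other.plugin.toml'))  # False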
class FilesQuerySet:
def __init__(self, directory: str, read_file: Callable, filter_callable: Optional[Callable] = None):
self.directory = directory
self.read_file = read_file
self.filter_callable = filter_callable
@cached_property
def file_names(self):
file_names = os.listdir(self.directory)
if self.filter_callable is not None:
file_names = list(filter(self.filter_callable, file_names))
return file_names
def __iter__(self):
return map(self.read_file, self.file_names)
def __len__(self):
return len(self.file_names)
def __getitem__(self, item):
if not isinstance(item, slice):
raise ValueError(f'Unsupported slice type: {item}')
return map(self.read_file, self.file_names[item])
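# Illustrative sketch (not part of the original source): a lazy, sliceable listing of
# plugin files. 'plugins_dir' and 'load_plugin' are hypothetical stand-ins for the
# directory and loader that real callers pass in.
#
#   plugins = FilesQuerySet(plugins_dir, load_plugin, filter_toml_file('nmap'))
#   len(plugins)        # number of matching file names
#   list(plugins[:10])  # only the first ten files are actually read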
class PlainTextRenderer(renderers.BaseRenderer):
media_type = 'text/plain'
format = 'text'
def render(self, data, media_type=None, renderer_context=None):
return data
class PlainTextParser(parsers.FileUploadParser):
media_type = 'text/plain'
# TODO: take the file name if this request is creating the file
def parse(self, stream, media_type=None, parser_context=None):
"""
Given a stream to read from, return the parsed representation.
Should return parsed data, or a `DataAndFiles` object consisting of the
parsed data and files.
"""
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
decoded_stream = codecs.getreader(encoding)(stream)
return decoded_stream
class TomlParser(parsers.FileUploadParser):
media_type = 'application/toml'
# TODO: take the file name if this request is creating the file
def parse(self, stream, media_type=None, parser_context=None):
"""
Given a stream to read from, return the parsed representation.
Should return parsed data, or a `DataAndFiles` object consisting of the
parsed data and files.
"""
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
decoded_stream = codecs.getreader(encoding)(stream)
raw_body = decoded_stream.read()
request = parser_context.get('request')
setattr(request, 'raw_body', raw_body)
filename = self.get_filename(stream, media_type, parser_context)
if filename and (not filename.endswith('.toml') and not filename.endswith('.tml')):
filename = f'{filename}.toml'
setattr(request, 'filename', filename)
return toml.loads(raw_body)
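# Illustrative sketch (not part of the original source): what TomlParser produces for a
# hypothetical request body such as
#
#   name = "nmap"
#   [setup]
#   install = "apt-get install -y nmap"
#
# request.data becomes {'name': 'nmap', 'setup': {'install': 'apt-get install -y nmap'}},
# request.raw_body keeps the original TOML text, and request.filename is given a '.toml'
# suffix when the uploaded name ends with neither '.toml' nor '.tml'.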
class ProjectViewSet(BaseViewSetMixIn, viewsets.ModelViewSet):
"""
"""
queryset = Project.objects.all().order_by('-pk')
serializer_class = ProjectSerializer
filter_backends = (filters.OrderingFilter, filters.SearchFilter, DjangoFilterBackend)
search_fields = ('name',)
filter_fields = ('parent', 'created_at', 'updated_at')
ordering_fields = filter_fields
detail_serializer_class = DetailProjectSerializer
class ActionViewSet(BaseViewSetMixIn, viewsets.ModelViewSet):
"""
"""
queryset = Action.objects.order_by('-pk')
serializer_class = ActionSerializer
filter_backends = (filters.OrderingFilter, filters.SearchFilter, DjangoFilterBackend)
search_fields = ('name',)
filterset_class = ActionFilter
ordering_fields = ('parent', 'created_at', 'updated_at')
detail_serializer_class = DetailActionSerializer
pagination_class = StandardResultsSetPagination
def get_queryset(self):
queryset = super(ActionViewSet, self).get_queryset()
if self.action == 'grouped':
# Filter by (plugin + is_last) or no plugin
queryset = queryset.filter(Q(plugin='') | Q(is_last=True))
return queryset
@action(methods=['put'], detail=True)
def worker_upload(self, request, pk):
file_obj = request.data['file']
instance: Action = self.get_object()
directory = os.path.dirname(instance.get_data_directory())
os.makedirs(directory, exist_ok=True)
t = tarfile.open(fileobj=file_obj.file)
t.extractall(directory)
return_code = instance.get_return_code()
if return_code is None:
instance.status = 'FINISHED'
else:
instance.status = 'SUCCESS' if return_code == 0 else 'ERROR'
instance.save()
return Response(status=204)
@action(methods=['get'], detail=True, url_path='asciinema.cast')
def download_cast(self, request, pk):
instance: Action = self.get_object()
return serve_file(request, instance.get_terminal_path())
@action(methods=['get'], detail=True)
def terminal_output(self, request, pk):
instance: Action = self.get_object()
file: str = instance.get_terminal_path()
if not os.path.lexists(file) or not os.path.getsize(file):
raise Http404
return StreamingHttpResponse(asciinema_cat(file))
@action(methods=['post'], detail=True)
def block_task(self, request, pk):
instance: Action = self.get_object()
try:
worker = Worker.objects.get(user=request.user)
except Worker.DoesNotExist:
raise ValidationError('User {} is not a worker'.format(request.user))
instance.block_task(worker)
instance.save()
return self.retrieve(request, pk)
@action(methods=['get'], detail=False)
def grouped(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class FileTomlViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin,
viewsets.GenericViewSet, viewsets.ViewSet):
pagination_class = StandardResultsSetPagination
serializer_class: Type[Serializer] = None
filter_backends = (FileTomlSearchFilter,)
parser_classes = (TomlParser,)
def get_renderers(self):
if self.action == 'text':
return [PlainTextRenderer()]
else:
return super(FileTomlViewSet, self).get_renderers()
def get_queryset(self):
raise NotImplementedError
def get_object(self):
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
try:
return self.get_queryset().from_name(self.kwargs[lookup_url_kwarg])
except ItemDoesNotExist:
raise Http404
@action(detail=True, methods=['get'])
def text(self, request, *args, **kwargs):
obj = self.get_object()
return Response(obj.text)
class PluginViewSet(mixins.UpdateModelMixin, mixins.CreateModelMixin, mixins.DestroyModelMixin, FileTomlViewSet):
serializer_class = PluginSerializer
parser_classes = (TomlParser, JSONParser)
def get_serializer_class(self):
if self.action == 'install':
return serializers.Serializer
else:
return super(PluginViewSet, self).get_serializer_class()
def get_queryset(self):
return Plugins()
@action(detail=True, methods=['post'])
def install(self, request, *args, **kwargs):
obj: Plugin = self.get_object()
actions = Worker.objects.active().run_command(f'Installing the {obj.name} plugin', obj.setup['install'])
serializer = ActionSerializer(many=True, instance=actions, context=self.get_serializer_context())
return Response(serializer.data)
class PluginShortcutViewSet(FileTomlViewSet):
serializer_class = PluginShortcutSerializer
lookup_value_regex = r'[a-zA-Z0-9.\-]+'
parser_classes = (JSONParser,)
renderer_classes = (JSONRenderer, BrowsableAPIRenderer, PlainTextRenderer)
def get_queryset(self):
return PluginShortcuts()
@action(methods=['post'], detail=True)
def render_command(self, request, pk=None):
obj = self.get_object()
serializer = RenderShortcutSerializer(data=request.data)