file_path
stringlengths 7
180
| content
stringlengths 0
811k
| repo
stringclasses 11
values |
---|---|---|
test/DepthwiseConv2D.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("DepthwiseConv2D layer test", function () {
this.timeout(100000000);
it("(7,7,3) -> (5,5,3)", async () => {
const INPUT = require("../models/depthwiseConv2D_input.json");
const circuit = await wasm_tester(path.join(__dirname, "circuits", "DepthwiseConv2D_test.circom"));
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
});
});
| https://github.com/socathie/circomlib-ml |
test/Flatten2D.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("Flatten2D layer test", function () {
this.timeout(100000000);
it("(5,5,3) -> 75", async () => {
const INPUT = require("../models/flatten2D_input.json");
const circuit = await wasm_tester(path.join(__dirname, "circuits", "Flatten2D_test.circom"));
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
});
});
| https://github.com/socathie/circomlib-ml |
test/GlobalAveragePooling2D.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("GlobalAveragePooling2D layer test", function () {
this.timeout(100000000);
// GlobalAveragePooling with strides==poolSize
it("(5,5,3) -> (3,)", async () => {
const INPUT = require("../models/globalAveragePooling2D_input.json");
const circuit = await wasm_tester(path.join(__dirname, "circuits", "GlobalAveragePooling2D_test.circom"));
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
});
}); | https://github.com/socathie/circomlib-ml |
test/GlobalMaxPooling2D.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("GlobalMaxPooling2D layer test", function () {
this.timeout(100000000);
// GlobalMaxPooling with strides==poolSize
it("(5,5,3) -> (3,)", async () => {
const INPUT = require("../models/globalMaxPooling2D_input.json");
const circuit = await wasm_tester(path.join(__dirname, "circuits", "GlobalMaxPooling2D_test.circom"));
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
});
}); | https://github.com/socathie/circomlib-ml |
test/IsNegative.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("IsNegative test", function () {
this.timeout(100000000);
it("Negative -> 1", async () => {
const circuit = await wasm_tester(path.join(__dirname, "circuits", "IsNegative_test.circom"));
//await circuit.loadConstraints();
//assert.equal(circuit.nVars, 516);
//assert.equal(circuit.constraints.length, 516);
const INPUT = {
"in": Fr.e(-1)
}
const witness = await circuit.calculateWitness(INPUT, true);
//console.log(witness);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
assert(Fr.eq(Fr.e(witness[1]),Fr.e(1)));
});
it("Positive -> 0", async () => {
const circuit = await wasm_tester(path.join(__dirname, "circuits", "IsNegative_test.circom"));
//await circuit.loadConstraints();
//assert.equal(circuit.nVars, 516);
//assert.equal(circuit.constraints.length, 516);
const INPUT = {
"in": "1"
}
const witness = await circuit.calculateWitness(INPUT, true);
//console.log(witness);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
assert(Fr.eq(Fr.e(witness[1]),Fr.e(0)));
});
}); | https://github.com/socathie/circomlib-ml |
test/IsPositive.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("IsPositive test", function () {
this.timeout(100000000);
it("Positive -> 1", async () => {
const circuit = await wasm_tester(path.join(__dirname, "circuits", "IsPositive_test.circom"));
//await circuit.loadConstraints();
//assert.equal(circuit.nVars, 516);
//assert.equal(circuit.constraints.length, 516);
const INPUT = {
"in": "1"
}
const witness = await circuit.calculateWitness(INPUT, true);
//console.log(witness);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
assert(Fr.eq(Fr.e(witness[1]),Fr.e(1)));
});
it("Negative -> 0", async () => {
const circuit = await wasm_tester(path.join(__dirname, "circuits", "IsPositive_test.circom"));
//await circuit.loadConstraints();
//assert.equal(circuit.nVars, 516);
//assert.equal(circuit.constraints.length, 516);
const INPUT = {
"in": Fr.e(-1)
}
const witness = await circuit.calculateWitness(INPUT, true);
//console.log(witness);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
assert(Fr.eq(Fr.e(witness[1]),Fr.e(0)));
});
}); | https://github.com/socathie/circomlib-ml |
test/LeakyReLU.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("LeakyReLU layer test", function () {
this.timeout(100000000);
it("3 nodes", async () => {
const circuit = await wasm_tester(path.join(__dirname, "circuits", "LeakyReLU_test.circom"));
const INPUT = {
"in": [Fr.e(-11),"0","3"],
"out": [Fr.e(-4),"0","3"],
"remainder": ["7","0","0"]
}
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
});
}); | https://github.com/socathie/circomlib-ml |
test/Max.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("Max test", function () {
this.timeout(100000000);
it("Maximum of 4 numbers", async () => {
const circuit = await wasm_tester(path.join(__dirname, "circuits", "Max_test.circom"));
//await circuit.loadConstraints();
//assert.equal(circuit.nVars, 516);
//assert.equal(circuit.constraints.length, 516);
const INPUT = {
"in": ["1","4","2","3"]
}
const witness = await circuit.calculateWitness(INPUT, true);
//console.log(witness);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
assert(Fr.eq(Fr.e(witness[1]),Fr.e(4)));
});
}); | https://github.com/socathie/circomlib-ml |
test/MaxPooling2D.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("MaxPooling2D layer test", function () {
this.timeout(100000000);
// MaxPooling with strides==poolSize
it("(5,5,3) -> (2,2,3)", async () => {
const INPUT = require("../models/maxPooling2D_input.json");
const circuit = await wasm_tester(path.join(__dirname, "circuits", "MaxPooling2D_test.circom"));
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
});
// MaxPooling with strides!=poolSize
it("(10,10,3) -> (3,3,3)", async () => {
const INPUT = require("../models/maxPooling2D_stride_input.json");
const circuit = await wasm_tester(path.join(__dirname, "circuits", "MaxPooling2D_stride_test.circom"));
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
});
}); | https://github.com/socathie/circomlib-ml |
test/MaxPooling2Dsame.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("MaxPooling2Dsame layer test", function () {
this.timeout(100000000);
// MaxPooling with strides==poolSize
it("(5,5,3) -> (3,3,3)", async () => {
const INPUT = require("../models/maxPooling2Dsame_input.json");
const circuit = await wasm_tester(path.join(__dirname, "circuits", "MaxPooling2Dsame_test.circom"));
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
});
// MaxPooling with strides!=poolSize
it("(10,10,3) -> (4,4,3)", async () => {
const INPUT = require("../models/maxPooling2Dsame_stride_input.json");
const circuit = await wasm_tester(path.join(__dirname, "circuits", "MaxPooling2Dsame_stride_test.circom"));
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
});
}); | https://github.com/socathie/circomlib-ml |
test/PointwiseConv2D.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("PointwiseConv2D layer test", function () {
this.timeout(100000000);
it("(7,7,3) -> (5,5,3)", async () => {
const INPUT = require("../models/pointwiseConv2D_input.json");
const circuit = await wasm_tester(path.join(__dirname, "circuits", "PointwiseConv2D_test.circom"));
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
});
});
| https://github.com/socathie/circomlib-ml |
test/ReLU.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("ReLU layer test", function () {
this.timeout(100000000);
it("3 nodes", async () => {
const circuit = await wasm_tester(path.join(__dirname, "circuits", "ReLU_test.circom"));
const INPUT = {
"in": [Fr.e(-3),"0","3"],
"out": ["0","0","3"]
}
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
});
}); | https://github.com/socathie/circomlib-ml |
test/Reshape2D.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("Reshape2D layer test", function () {
this.timeout(100000000);
it("75 -> (5,5,3)", async () => {
const INPUT = require("../models/reshape2D_input.json");
const circuit = await wasm_tester(path.join(__dirname, "circuits", "Reshape2D_test.circom"));
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
});
});
| https://github.com/socathie/circomlib-ml |
test/SeparableConv2D.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("SeparableConv2D layer test", function () {
this.timeout(100000000);
it("(7,7,3) -> (5,5,3)", async () => {
const INPUT = require("../models/separableConv2D_input.json");
const circuit = await wasm_tester(path.join(__dirname, "circuits", "SeparableConv2D_test.circom"));
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
});
});
| https://github.com/socathie/circomlib-ml |
test/SumPooling2D.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("SumPooling2D layer test", function () {
this.timeout(100000000);
// SumPooling with strides==poolSize
it("(5,5,3) -> (2,2,3)", async () => {
const json = require("../models/sumPooling2D_input.json");
const OUTPUT = require("../models/sumPooling2D_output.json");
const circuit = await wasm_tester(path.join(__dirname, "circuits", "SumPooling2D_test.circom"));
//await circuit.loadConstraints();
//assert.equal(circuit.nVars, 76);
//assert.equal(circuit.constraints.length, 0);
const INPUT = {
"in": json.in
}
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
let ape = 0;
for (var i=0; i<OUTPUT.out.length; i++) {
// console.log("actual", OUTPUT.out[i], "predicted", Fr.toString(witness[i+1]));
ape += Math.abs((OUTPUT.out[i]-parseInt(Fr.toString(witness[i+1])))/OUTPUT.out[i]);
}
const mape = ape/OUTPUT.out.length;
console.log("mean absolute % error", mape);
assert(mape < 0.01);
});
// SumPooling with strides!=poolSize
it("(10,10,3) -> (3,3,3)", async () => {
const json = require("../models/sumPooling2D_stride_input.json");
const OUTPUT = require("../models/sumPooling2D_stride_output.json");
const circuit = await wasm_tester(path.join(__dirname, "circuits", "SumPooling2D_stride_test.circom"));
const INPUT = {
"in": json.in
}
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
let ape = 0;
for (var i=0; i<OUTPUT.out.length; i++) {
// console.log("actual", OUTPUT.out[i], "predicted", Fr.toString(witness[i+1]));
ape += Math.abs((OUTPUT.out[i]-parseInt(Fr.toString(witness[i+1])))/OUTPUT.out[i]);
}
const mape = ape/OUTPUT.out.length;
console.log("mean absolute % error", mape);
assert(mape < 0.01);
});
}); | https://github.com/socathie/circomlib-ml |
test/UpSampling2D.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
describe("UpSampling2D layer test", function () {
this.timeout(100000000);
// UpSampling with strides==poolSize
it("(1,2,3) -> (2,4,3)", async () => {
const INPUT = require("../models/upSampling2D_input.json");
const circuit = await wasm_tester(path.join(__dirname, "circuits", "UpSampling2D_test.circom"));
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
});
}); | https://github.com/socathie/circomlib-ml |
test/circuits/AveragePooling2D_stride_test.circom | pragma circom 2.0.0;
include "../../circuits/AveragePooling2D.circom";
// poolSize!=strides
// NOTE(review): args look like (height, width, channels, poolSize, strides) -- confirm in AveragePooling2D.circom
component main = AveragePooling2D(10, 10, 3, 3, 2); | https://github.com/socathie/circomlib-ml |
test/circuits/AveragePooling2D_test.circom | pragma circom 2.0.0;
include "../../circuits/AveragePooling2D.circom";
// poolSize=strides - default Keras settings
component main = AveragePooling2D(5, 5, 3, 2, 2); | https://github.com/socathie/circomlib-ml |
test/circuits/BatchNormalization_test.circom | pragma circom 2.0.0;
include "../../circuits/BatchNormalization2D.circom";
// 5x5x3 feature map; 10**36 is presumably a fixed-point scaling divisor -- verify against BatchNormalization2D.circom
component main = BatchNormalization2D(5,5,3,10**36); | https://github.com/socathie/circomlib-ml |
test/circuits/Conv1D_test.circom | pragma circom 2.0.0;
include "../../circuits/Conv1D.circom";
component main = Conv1D(20, 3, 2, 4, 3, 10**36); | https://github.com/socathie/circomlib-ml |
test/circuits/Conv2D_stride_test.circom | pragma circom 2.0.0;
include "../../circuits/Conv2D.circom";
// strided variant of the Conv2D test below
component main = Conv2D(10, 10, 3, 2, 4, 3, 10**36); | https://github.com/socathie/circomlib-ml |
test/circuits/Conv2D_test.circom | pragma circom 2.0.0;
include "../../circuits/Conv2D.circom";
component main = Conv2D(5, 5, 3, 2, 3, 1, 10**36); | https://github.com/socathie/circomlib-ml |
test/circuits/Conv2Dsame_stride_test.circom | pragma circom 2.0.0;
include "../../circuits/Conv2Dsame.circom";
// "same"-padding convolution, strided
component main = Conv2Dsame(10, 10, 3, 2, 4, 3, 10**36); | https://github.com/socathie/circomlib-ml |
test/circuits/Conv2Dsame_test.circom | pragma circom 2.0.0;
include "../../circuits/Conv2Dsame.circom";
component main = Conv2Dsame(5, 5, 3, 2, 3, 1, 10**36); | https://github.com/socathie/circomlib-ml |
test/circuits/Dense_test.circom | pragma circom 2.0.0;
include "../../circuits/Dense.circom";
// 20 inputs -> 10 outputs
component main = Dense(20,10,10**36); | https://github.com/socathie/circomlib-ml |
test/circuits/DepthwiseConv2D_test.circom | pragma circom 2.0.0;
include "../../circuits/DepthwiseConv2D.circom";
// NOTE(review): scaling factor 10**15 differs from the 10**36 used by the other conv tests -- confirm intentional
component main = DepthwiseConv2D(7, 7, 3, 3, 3, 1, 10**15);
| https://github.com/socathie/circomlib-ml |
test/circuits/Flatten2D_test.circom | pragma circom 2.0.0;
include "../../circuits/Flatten2D.circom";
component main = Flatten2D(5, 5, 3); | https://github.com/socathie/circomlib-ml |
test/circuits/GlobalAveragePooling2D_test.circom | pragma circom 2.0.0;
include "../../circuits/GlobalAveragePooling2D.circom";
component main = GlobalAveragePooling2D(5, 5, 3); | https://github.com/socathie/circomlib-ml |
test/circuits/GlobalMaxPooling2D_test.circom | pragma circom 2.0.0;
include "../../circuits/GlobalMaxPooling2D.circom";
component main = GlobalMaxPooling2D(5, 5, 3); | https://github.com/socathie/circomlib-ml |
test/circuits/IsNegative_test.circom | pragma circom 2.0.0;
include "../../circuits/util.circom";
// exercised by test/IsNegative.js
component main = IsNegative(); | https://github.com/socathie/circomlib-ml |
test/circuits/IsPositive_test.circom | pragma circom 2.0.0;
include "../../circuits/util.circom";
// exercised by test/IsPositive.js
component main = IsPositive(); | https://github.com/socathie/circomlib-ml |
test/circuits/LeakyReLU_test.circom | pragma circom 2.0.0;
include "../../circuits/LeakyReLU.circom";
// Harness wiring three independent LeakyReLU(3) instances; the witness is
// satisfiable only when each supplied out[i]/remainder[i] pair is consistent
// with in[i] under the LeakyReLU template's constraints.
template leaky_relu_test() {
signal input in[3];
signal input out[3];
// remainder carries the quantization residue of the negative-slope division.
signal input remainder[3];
component leaky_relu[3];
for (var i=0; i<3; i++) {
// NOTE(review): the template argument 3 is presumably the alpha/scale parameter -- confirm in LeakyReLU.circom
leaky_relu[i] = LeakyReLU(3);
leaky_relu[i].in <== in[i];
leaky_relu[i].out <== out[i];
leaky_relu[i].remainder <== remainder[i];
}
}
component main = leaky_relu_test(); | https://github.com/socathie/circomlib-ml |
test/circuits/MaxPooling2D_stride_test.circom | pragma circom 2.0.0;
include "../../circuits/MaxPooling2D.circom";
// poolSize!=strides variant
component main = MaxPooling2D(10, 10, 3, 2, 3); | https://github.com/socathie/circomlib-ml |
test/circuits/MaxPooling2D_test.circom | pragma circom 2.0.0;
include "../../circuits/MaxPooling2D.circom";
// poolSize=strides - default Keras settings
component main = MaxPooling2D(5, 5, 3, 2, 2); | https://github.com/socathie/circomlib-ml |
test/circuits/MaxPooling2Dsame_stride_test.circom | pragma circom 2.0.0;
include "../../circuits/MaxPooling2Dsame.circom";
// "same"-padding max pooling, poolSize!=strides
component main = MaxPooling2Dsame(10, 10, 3, 2, 3); | https://github.com/socathie/circomlib-ml |
test/circuits/MaxPooling2Dsame_test.circom | pragma circom 2.0.0;
include "../../circuits/MaxPooling2Dsame.circom";
// poolSize=strides - default Keras settings
component main = MaxPooling2Dsame(5, 5, 3, 2, 2); | https://github.com/socathie/circomlib-ml |
test/circuits/Max_test.circom | pragma circom 2.0.0;
include "../../circuits/util.circom";
// maximum of 4 inputs; exercised by test/Max.js
component main = Max(4); | https://github.com/socathie/circomlib-ml |
test/circuits/PointwiseConv2D_test.circom | pragma circom 2.0.0;
include "../../circuits/PointwiseConv2D.circom";
// 1x1 convolution, 3 -> 6 channels; 10**15 scaling as in DepthwiseConv2D_test
component main = PointwiseConv2D(5, 5, 3, 6, 10**15);
| https://github.com/socathie/circomlib-ml |
test/circuits/ReLU_test.circom | pragma circom 2.0.0;
include "../../circuits/ReLU.circom";
// Harness wiring three independent ReLU instances; the witness is
// satisfiable only when each supplied out[i] matches ReLU's constraints
// for the corresponding in[i].
template relu_test() {
signal input in[3];
signal input out[3];
component relu[3];
for (var i=0; i<3; i++) {
relu[i] = ReLU();
relu[i].in <== in[i];
relu[i].out <== out[i];
}
}
component main = relu_test(); | https://github.com/socathie/circomlib-ml |
test/circuits/Reshape2D_test.circom | pragma circom 2.0.0;
include "../../circuits/Reshape2D.circom";
// 75-element vector reshaped to 5x5x3; exercised by test/Reshape2D.js
component main = Reshape2D(5, 5, 3); | https://github.com/socathie/circomlib-ml |
test/circuits/SeparableConv2D_test.circom | pragma circom 2.0.0;
include "../../circuits/SeparableConv2D.circom";
// NOTE(review): scaling factor 10**15 matches Depthwise/Pointwise tests, not the 10**36 of Conv2D -- confirm intentional
component main = SeparableConv2D(7, 7, 3, 3, 6, 3, 1, 10**15);
| https://github.com/socathie/circomlib-ml |
test/circuits/SumPooling2D_stride_test.circom | pragma circom 2.0.0;
include "../../circuits/SumPooling2D.circom";
// poolSize!=strides variant
component main = SumPooling2D(10, 10, 3, 2, 3); | https://github.com/socathie/circomlib-ml |
test/circuits/SumPooling2D_test.circom | pragma circom 2.0.0;
include "../../circuits/SumPooling2D.circom";
// poolSize=strides - default Keras settings
component main = SumPooling2D(5, 5, 3, 2, 2); | https://github.com/socathie/circomlib-ml |
test/circuits/UpSampling2D_test.circom | pragma circom 2.0.0;
include "../../circuits/UpSampling2D.circom";
// (1,2,3) input upsampled by factor 2; exercised by test/UpSampling2D.js
component main = UpSampling2D(1, 2, 3, 2); | https://github.com/socathie/circomlib-ml |
test/circuits/decryptMultiple_test.circom | pragma circom 2.0.0;
include "../../circuits/crypto/encrypt.circom";
// decryption of a 1000-element message
component main = DecryptBits(1000); | https://github.com/socathie/circomlib-ml |
test/circuits/decrypt_test.circom | pragma circom 2.0.0;
include "../../circuits/crypto/encrypt.circom";
component main = Decrypt(); | https://github.com/socathie/circomlib-ml |
test/circuits/ecdh_test.circom | pragma circom 2.0.0;
include "../../circuits/crypto/ecdh.circom";
// ECDH shared-key derivation
component main = Ecdh(); | https://github.com/socathie/circomlib-ml |
test/circuits/encryptDecrypt_test.circom | pragma circom 2.0.0;
include "../../circuits/crypto/encrypt.circom";
include "../../circuits/crypto/ecdh.circom";
// from zk-ml/linear-regression-demo
// Round-trip test: derive a shared key via ECDH, check it against the
// supplied shared_key, then verify Decrypt(Encrypt(message)) == message.
template Test() {
signal input message;
signal input shared_key;
signal output out;
signal input private_key;
signal input public_key[2];
component ecdh = Ecdh();
ecdh.private_key <== private_key;
ecdh.public_key[0] <== public_key[0];
ecdh.public_key[1] <== public_key[1];
// debug output at witness-generation time
log(ecdh.shared_key);
log(shared_key);
log(private_key);
log(public_key[0]);
log(public_key[1]);
// the externally supplied key must equal the ECDH-derived one
shared_key === ecdh.shared_key;
component enc = Encrypt();
component dec = Decrypt();
message ==> enc.plaintext;
shared_key ==> enc.shared_key;
shared_key ==> dec.shared_key;
// feed the 2-element ciphertext straight into the decryptor
enc.out[0] ==> dec.message[0];
enc.out[1] ==> dec.message[1];
log(dec.out);
// decryption must recover the original plaintext
dec.out === message;
out <== 1;
}
component main = Test(); | https://github.com/socathie/circomlib-ml |
test/circuits/encryptMultiple_test.circom | pragma circom 2.0.0;
include "../../circuits/crypto/encrypt.circom";
// encryption of a 1000-element message
component main = EncryptBits(1000); | https://github.com/socathie/circomlib-ml |
test/circuits/encrypt_test.circom | pragma circom 2.0.0;
include "../../circuits/crypto/encrypt.circom";
// single-element encryption
component main = Encrypt(); | https://github.com/socathie/circomlib-ml |
test/circuits/encrypted_mnist_latest_test.circom | pragma circom 2.0.0;
include "../../circuits/Conv2D.circom";
include "../../circuits/Dense.circom";
include "../../circuits/ArgMax.circom";
include "../../circuits/Poly.circom";
include "../../circuits/AveragePooling2D.circom";
include "../../circuits/BatchNormalization2D.circom";
include "../../circuits/Flatten2D.circom";
include "../../circuits/crypto/encrypt.circom";
include "../../circuits/crypto/ecdh.circom";
// MNIST classifier (conv -> BN -> poly activation -> avg-pool, twice, then
// dense -> argmax) whose model weights are simultaneously fed into an
// EncryptBits component, so the proof exposes only the ciphertext of the
// weights plus the predicted class.
// NOTE(review): Conv2D/BatchNormalization2D/AveragePooling2D are called here
// with different arities than in the other test circuits in this dump --
// presumably a different library version; confirm against the included files.
template encrypted_mnist_latest() {
signal input in[28][28][1];
signal input conv2d_1_weights[3][3][1][4];
signal input conv2d_1_bias[4];
signal input bn_1_a[4];
signal input bn_1_b[4];
signal input conv2d_2_weights[3][3][4][8];
signal input conv2d_2_bias[8];
signal input bn_2_a[8];
signal input bn_2_b[8];
signal input dense_weights[200][10];
signal input dense_bias[10];
signal output out;
signal input private_key;
signal input public_key[2];
// Derive the encryption key from the prover's private key and the
// recipient's public key.
component ecdh = Ecdh();
ecdh.private_key <== private_key;
ecdh.public_key[0] <== public_key[0];
ecdh.public_key[1] <== public_key[1];
// Ciphertext of all model parameters; EncryptBits appears to emit one extra
// element beyond the plaintext length (hence the +1) -- confirm in encrypt.circom.
signal output message[3*3*1*4+4+4+4+3*3*4*8+8+8+8+200*10+10+1];
component enc = EncryptBits(3*3*1*4+4+4+4+3*3*4*8+8+8+8+200*10+10);
enc.shared_key <== ecdh.shared_key;
// idx walks the flat plaintext array; the wiring order below defines the
// serialization order of the encrypted parameters.
var idx = 0;
component conv2d_1 = Conv2D(28,28,1,4,3,1);
component bn_1 = BatchNormalization2D(26,26,4);
component poly_1[26][26][4];
component avg2d_1 = AveragePooling2D(26,26,4,2,2,25);
component conv2d_2 = Conv2D(13,13,4,8,3,1);
component bn_2 = BatchNormalization2D(11,11,8);
component poly_2[11][11][8];
component avg2d_2 = AveragePooling2D(11,11,8,2,2,25);
component flatten = Flatten2D(5,5,8);
component dense = Dense(200,10);
component argmax = ArgMax(10);
// 28x28 grayscale image input.
for (var i=0; i<28; i++) {
for (var j=0; j<28; j++) {
conv2d_1.in[i][j][0] <== in[i][j][0];
}
}
// Conv layer 1 weights: wired into the conv and into the ciphertext.
for (var i=0; i<3; i++) {
for (var j=0; j<3; j++) {
for (var m=0; m<4; m++) {
conv2d_1.weights[i][j][0][m] <== conv2d_1_weights[i][j][0][m];
enc.plaintext[idx] <== conv2d_1_weights[i][j][0][m];
idx++;
}
}
}
for (var m=0; m<4; m++) {
conv2d_1.bias[m] <== conv2d_1_bias[m];
enc.plaintext[idx] <== conv2d_1_bias[m];
idx++;
}
// BatchNorm 1 scale (a) and shift (b) parameters.
for (var k=0; k<4; k++) {
bn_1.a[k] <== bn_1_a[k];
enc.plaintext[idx] <== bn_1_a[k];
idx++;
}
for (var k=0; k<4; k++) {
bn_1.b[k] <== bn_1_b[k];
enc.plaintext[idx] <== bn_1_b[k];
idx++;
for (var i=0; i<26; i++) {
for (var j=0; j<26; j++) {
bn_1.in[i][j][k] <== conv2d_1.out[i][j][k];
}
}
}
// Polynomial activation after BN1, then 2x2 average pooling.
for (var i=0; i<26; i++) {
for (var j=0; j<26; j++) {
for (var k=0; k<4; k++) {
poly_1[i][j][k] = Poly(10**6);
poly_1[i][j][k].in <== bn_1.out[i][j][k];
avg2d_1.in[i][j][k] <== poly_1[i][j][k].out;
}
}
}
for (var i=0; i<13; i++) {
for (var j=0; j<13; j++) {
for (var k=0; k<4; k++) {
conv2d_2.in[i][j][k] <== avg2d_1.out[i][j][k];
}
}
}
// Conv layer 2 weights and bias, also serialized into the ciphertext.
for (var i=0; i<3; i++) {
for (var j=0; j<3; j++) {
for (var k=0; k<4; k++) {
for (var m=0; m<8; m++) {
conv2d_2.weights[i][j][k][m] <== conv2d_2_weights[i][j][k][m];
enc.plaintext[idx] <== conv2d_2_weights[i][j][k][m];
idx++;
}
}
}
}
for (var m=0; m<8; m++) {
conv2d_2.bias[m] <== conv2d_2_bias[m];
enc.plaintext[idx] <== conv2d_2_bias[m];
idx++;
}
for (var k=0; k<8; k++) {
bn_2.a[k] <== bn_2_a[k];
enc.plaintext[idx] <== bn_2_a[k];
idx++;
}
for (var k=0; k<8; k++) {
bn_2.b[k] <== bn_2_b[k];
enc.plaintext[idx] <== bn_2_b[k];
idx++;
for (var i=0; i<11; i++) {
for (var j=0; j<11; j++) {
bn_2.in[i][j][k] <== conv2d_2.out[i][j][k];
}
}
}
// Second activation + pooling stage.
// NOTE(review): Poly argument is 10**18 here vs 10**6 in stage 1 -- confirm intentional.
for (var i=0; i<11; i++) {
for (var j=0; j<11; j++) {
for (var k=0; k<8; k++) {
poly_2[i][j][k] = Poly(10**18);
poly_2[i][j][k].in <== bn_2.out[i][j][k];
avg2d_2.in[i][j][k] <== poly_2[i][j][k].out;
}
}
}
// Flatten 5x5x8 -> 200, then dense layer to 10 logits.
for (var i=0; i<5; i++) {
for (var j=0; j<5; j++) {
for (var k=0; k<8; k++) {
flatten.in[i][j][k] <== avg2d_2.out[i][j][k];
}
}
}
for (var i=0; i<200; i++) {
dense.in[i] <== flatten.out[i];
for (var j=0; j<10; j++) {
dense.weights[i][j] <== dense_weights[i][j];
enc.plaintext[idx] <== dense_weights[i][j];
idx++;
}
}
for (var i=0; i<10; i++) {
dense.bias[i] <== dense_bias[i];
enc.plaintext[idx] <== dense_bias[i];
idx++;
}
// Predicted class = argmax over the 10 logits.
for (var i=0; i<10; i++) {
argmax.in[i] <== dense.out[i];
}
out <== argmax.out;
// Expose the full ciphertext as a public output.
for (var i=0; i<3*3*1*4+4+4+4+3*3*4*8+8+8+8+200*10+10+1; i++) {
message[i] <== enc.out[i];
}
}
component main = encrypted_mnist_latest(); | https://github.com/socathie/circomlib-ml |
test/circuits/mnist_test.circom | pragma circom 2.0.0;
include "../../circuits/Conv2D.circom";
include "../../circuits/Dense.circom";
include "../../circuits/ArgMax.circom";
include "../../circuits/ReLU.circom";
include "../../circuits/AveragePooling2D.circom";
include "../../circuits/BatchNormalization2D.circom";
include "../../circuits/Flatten2D.circom";
// MNIST classifier circuit: Conv -> BN -> ReLU -> AvgPool (x2) -> Flatten ->
// Dense -> ArgMax. Every layer's quantized output and division remainder is
// supplied as a witness input and constrained against the layer component,
// so the prover commits to each intermediate activation.
// The 10**18 arguments are presumably the fixed-point scaling factor shared
// by Conv2D/BatchNormalization2D/Dense -- TODO confirm against those templates.
template mnist() {
// 28x28 grayscale input image (single channel).
signal input in[28][28][1];
// First conv block: 3x3 kernel, 1 input channel, 4 filters.
signal input conv2d_1_weights[3][3][1][4];
signal input conv2d_1_bias[4];
signal input conv2d_1_out[26][26][4];
signal input conv2d_1_remainder[26][26][4];
// First batch-norm: per-channel scale (a) and shift (b).
signal input bn_1_a[4];
signal input bn_1_b[4];
signal input bn_1_out[26][26][4];
signal input bn_1_remainder[26][26][4];
signal input relu_1_out[26][26][4];
// 2x2 average pooling halves spatial dims: 26 -> 13.
signal input avg2d_1_out[13][13][4];
signal input avg2d_1_remainder[13][13][4];
// Second conv block: 3x3 kernel, 4 input channels, 8 filters.
signal input conv2d_2_weights[3][3][4][8];
signal input conv2d_2_bias[8];
signal input conv2d_2_out[11][11][8];
signal input conv2d_2_remainder[11][11][8];
signal input bn_2_a[8];
signal input bn_2_b[8];
signal input bn_2_out[11][11][8];
signal input bn_2_remainder[11][11][8];
signal input relu_2_out[11][11][8];
// 2x2 average pooling: 11 -> 5 (integer division).
signal input avg2d_2_out[5][5][8];
signal input avg2d_2_remainder[5][5][8];
// Flattened 5*5*8 = 200 features into a 10-class dense head.
signal input flatten_out[200];
signal input dense_weights[200][10];
signal input dense_bias[10];
signal input dense_out[10];
signal input dense_remainder[10];
signal input argmax_out;
// Predicted class index (0..9).
signal output out;
component conv2d_1 = Conv2D(28,28,1,4,3,1,10**18);
component bn_1 = BatchNormalization2D(26,26,4,10**18);
component relu_1[26][26][4];
component avg2d_1 = AveragePooling2D(26,26,4,2,2);
component conv2d_2 = Conv2D(13,13,4,8,3,1,10**18);
component bn_2 = BatchNormalization2D(11,11,8,10**18);
component relu_2[11][11][8];
component avg2d_2 = AveragePooling2D(11,11,8,2,2);
component flatten = Flatten2D(5,5,8);
component dense = Dense(200,10,10**18);
component argmax = ArgMax(10);
// Wire input image into the first convolution.
for (var i=0; i<28; i++) {
for (var j=0; j<28; j++) {
conv2d_1.in[i][j][0] <== in[i][j][0];
}
}
// Load conv1 weights and biases.
for (var m=0; m<4; m++) {
for (var i=0; i<3; i++) {
for (var j=0; j<3; j++) {
conv2d_1.weights[i][j][0][m] <== conv2d_1_weights[i][j][0][m];
}
}
conv2d_1.bias[m] <== conv2d_1_bias[m];
}
// Constrain conv1 outputs/remainders against the supplied witnesses
// and feed the outputs into batch-norm 1.
for (var k=0; k<4; k++) {
bn_1.a[k] <== bn_1_a[k];
bn_1.b[k] <== bn_1_b[k];
for (var i=0; i<26; i++) {
for (var j=0; j<26; j++) {
conv2d_1.out[i][j][k] <== conv2d_1_out[i][j][k];
conv2d_1.remainder[i][j][k] <== conv2d_1_remainder[i][j][k];
bn_1.in[i][j][k] <== conv2d_1.out[i][j][k];
}
}
}
// BN1 -> ReLU1 -> AvgPool1.
for (var i=0; i<26; i++) {
for (var j=0; j<26; j++) {
for (var k=0; k<4; k++) {
bn_1.out[i][j][k] <== bn_1_out[i][j][k];
bn_1.remainder[i][j][k] <== bn_1_remainder[i][j][k];
relu_1[i][j][k] = ReLU();
relu_1[i][j][k].in <== bn_1.out[i][j][k];
relu_1[i][j][k].out <== relu_1_out[i][j][k];
avg2d_1.in[i][j][k] <== relu_1[i][j][k].out;
}
}
}
// AvgPool1 -> conv2 input.
for (var i=0; i<13; i++) {
for (var j=0; j<13; j++) {
for (var k=0; k<4; k++) {
avg2d_1.out[i][j][k] <== avg2d_1_out[i][j][k];
avg2d_1.remainder[i][j][k] <== avg2d_1_remainder[i][j][k];
conv2d_2.in[i][j][k] <== avg2d_1.out[i][j][k];
}
}
}
// Load conv2 weights and biases.
for (var m=0; m<8; m++) {
for (var i=0; i<3; i++) {
for (var j=0; j<3; j++) {
for (var k=0; k<4; k++) {
conv2d_2.weights[i][j][k][m] <== conv2d_2_weights[i][j][k][m];
}
}
}
conv2d_2.bias[m] <== conv2d_2_bias[m];
}
// Conv2 -> BN2, same witness-constraining pattern as block 1.
for (var k=0; k<8; k++) {
bn_2.a[k] <== bn_2_a[k];
bn_2.b[k] <== bn_2_b[k];
for (var i=0; i<11; i++) {
for (var j=0; j<11; j++) {
conv2d_2.out[i][j][k] <== conv2d_2_out[i][j][k];
conv2d_2.remainder[i][j][k] <== conv2d_2_remainder[i][j][k];
bn_2.in[i][j][k] <== conv2d_2.out[i][j][k];
}
}
}
// BN2 -> ReLU2 -> AvgPool2.
for (var i=0; i<11; i++) {
for (var j=0; j<11; j++) {
for (var k=0; k<8; k++) {
bn_2.out[i][j][k] <== bn_2_out[i][j][k];
bn_2.remainder[i][j][k] <== bn_2_remainder[i][j][k];
relu_2[i][j][k] = ReLU();
relu_2[i][j][k].in <== bn_2.out[i][j][k];
relu_2[i][j][k].out <== relu_2_out[i][j][k];
avg2d_2.in[i][j][k] <== relu_2[i][j][k].out;
}
}
}
// AvgPool2 -> Flatten.
for (var i=0; i<5; i++) {
for (var j=0; j<5; j++) {
for (var k=0; k<8; k++) {
avg2d_2.out[i][j][k] <== avg2d_2_out[i][j][k];
avg2d_2.remainder[i][j][k] <== avg2d_2_remainder[i][j][k];
flatten.in[i][j][k] <== avg2d_2.out[i][j][k];
}
}
}
// Flatten -> Dense head (200 -> 10) with weights.
for (var i=0; i<200; i++) {
flatten.out[i] <== flatten_out[i];
dense.in[i] <== flatten.out[i];
for (var j=0; j<10; j++) {
dense.weights[i][j] <== dense_weights[i][j];
}
}
for (var i=0; i<10; i++) {
dense.bias[i] <== dense_bias[i];
}
// Dense -> ArgMax over the 10 logits.
for (var i=0; i<10; i++) {
dense.out[i] <== dense_out[i];
dense.remainder[i] <== dense_remainder[i];
argmax.in[i] <== dense.out[i];
}
argmax.out <== argmax_out;
out <== argmax.out;
}
component main = mnist();
test/circuits/model1_test.circom | pragma circom 2.0.0;
include "../../circuits/Dense.circom";
include "../../circuits/ReLU.circom";
// Tiny 3-2-1 MLP: Dense(3->2) -> ReLU -> Dense(2->1).
// Each Dense layer's quantized output and division remainder are supplied
// as witness inputs and constrained, committing the prover to the
// intermediate activations. 10**36 is presumably the fixed-point scaling
// factor used by the Dense template -- TODO confirm.
template model1() {
signal input in[3];
signal input Dense32weights[3][2];
// NOTE(review): Dense32bias/Dense21bias are declared as inputs but the
// layer biases are wired to 0 below, so these inputs are unconstrained --
// verify this is intentional (bias-free model).
signal input Dense32bias[2];
signal input Dense32out[2];
signal input Dense32remainder[2];
signal input ReLUout[2];
signal input Dense21weights[2][1];
signal input Dense21bias[1];
signal input Dense21out[1];
signal input Dense21remainder[1];
signal output out;
component Dense32 = Dense(3,2, 10**36);
component relu[2];
component Dense21 = Dense(2,1, 10**36);
// Wire inputs and weights into the first dense layer.
for (var i=0; i<3; i++) {
Dense32.in[i] <== in[i];
for (var j=0; j<2; j++) {
Dense32.weights[i][j] <== Dense32weights[i][j];
}
}
// Bias fixed to zero; outputs/remainders constrained to witnesses.
for (var i=0; i<2; i++) {
Dense32.bias[i] <== 0;
Dense32.out[i] <== Dense32out[i];
Dense32.remainder[i] <== Dense32remainder[i];
}
// Element-wise ReLU on the two hidden activations.
for (var i=0; i<2; i++) {
relu[i] = ReLU();
relu[i].in <== Dense32.out[i];
relu[i].out <== ReLUout[i];
}
// Second dense layer (2 -> 1).
for (var i=0; i<2; i++) {
Dense21.in[i] <== relu[i].out;
Dense21.weights[i][0] <== Dense21weights[i][0];
}
Dense21.bias[0] <== 0;
Dense21.out[0] <== Dense21out[0];
Dense21.remainder[0] <== Dense21remainder[0];
out <== Dense21.out[0];
}
component main = model1();
test/circuits/publicKey_test.circom | pragma circom 2.0.0;
include "../../circuits/crypto/publickey_derivation.circom";
component main = PublicKey(); | https://github.com/socathie/circomlib-ml |
test/encryption.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
const { Keypair } = require("./modules/maci-domainobjs");
const { encrypt, decrypt } = require("./modules/maci-crypto");
describe("crypto circuits test", function () {
this.timeout(100000000);
// Checks that the PublicKey circuit derives the same BabyJub public key
// as the JS Keypair class does for a freshly generated private key.
it("public key test", async () => {
    const keypair = new Keypair();
    const circuit = await wasm_tester(path.join(__dirname, "circuits", "publicKey_test.circom"));
    const witness = await circuit.calculateWitness(
        { 'private_key': keypair.privKey.asCircuitInputs() },
        true,
    );
    // witness[0] is the satisfiability flag; [1] and [2] are pubkey coords.
    assert(Fr.eq(Fr.e(witness[0]), Fr.e(1)));
    assert(Fr.eq(Fr.e(witness[1]), Fr.e(keypair.pubKey.rawPubKey[0])));
    assert(Fr.eq(Fr.e(witness[2]), Fr.e(keypair.pubKey.rawPubKey[1])));
});
it("ecdh full circuit test", async () => {
const circuit = await wasm_tester(path.join(__dirname, "circuits", "ecdh_test.circom"));
const keypair = new Keypair();
const keypair2 = new Keypair();
const ecdhSharedKey = Keypair.genEcdhSharedKey(
keypair.privKey,
keypair2.pubKey,
);
const INPUT = {
'private_key': keypair.privKey.asCircuitInputs(),
'public_key': keypair2.pubKey.asCircuitInputs(),
}
const witness = await circuit.calculateWitness(INPUT, true);
//console.log(witness);
assert(Fr.eq(Fr.e(witness[0]), Fr.e(1)));
assert(Fr.eq(Fr.e(witness[1]), Fr.e(ecdhSharedKey)));
});
it("encrypt/decrypt test", async () => {
const circuit = await wasm_tester(path.join(__dirname, "circuits", "encryptDecrypt_test.circom"));
const keypair = new Keypair();
const keypair2 = new Keypair();
const ecdhSharedKey = Keypair.genEcdhSharedKey(
keypair.privKey,
keypair2.pubKey,
);
const INPUT = {
'message': '123456789',
'shared_key': ecdhSharedKey.toString(),
'private_key': keypair.privKey.asCircuitInputs(),
'public_key': keypair2.pubKey.asCircuitInputs(),
}
const witness = await circuit.calculateWitness(INPUT, true);
//console.log(witness);
assert(Fr.eq(Fr.e(witness[0]), Fr.e(1)));
assert(Fr.eq(Fr.e(witness[1]), Fr.e(1)));
});
// Encrypts one field element inside the circuit, then checks that the
// JS MACI decrypt() recovers the original plaintext.
it("encrypt in circom, decrypt in js", async () => {
    const circuit = await wasm_tester(path.join(__dirname, "circuits", "encrypt_test.circom"));
    const keypair = new Keypair();
    const keypair2 = new Keypair();
    const ecdhSharedKey = Keypair.genEcdhSharedKey(
        keypair.privKey,
        keypair2.pubKey,
    );
    const plaintext = 1234567890n;
    const INPUT = {
        'plaintext': plaintext.toString(),
        'shared_key': ecdhSharedKey.toString(),
    }
    const witness = await circuit.calculateWitness(INPUT, true);
    // witness[1] is the IV, witness[2] the single ciphertext word.
    // `const` added: these were implicit globals in the original.
    const output1 = witness[1];
    const output2 = witness[2];
    const ciphertext = {
        iv: output1,
        data: [output2],
    }
    assert(Fr.eq(Fr.e(decrypt(ciphertext, ecdhSharedKey)[0]), Fr.e(plaintext)));
});
it("encrypt in js, decrypt in circom", async () => {
const circuit = await wasm_tester(path.join(__dirname, "circuits", "decrypt_test.circom"));
const keypair = new Keypair();
const keypair2 = new Keypair();
const ecdhSharedKey = Keypair.genEcdhSharedKey(
keypair.privKey,
keypair2.pubKey,
);
const plaintext = 1234567890n;
const ciphertext = encrypt([plaintext], ecdhSharedKey);
const INPUT = {
'message': [ciphertext.iv.toString(), ciphertext.data[0].toString()],
'shared_key': ecdhSharedKey.toString(),
}
const witness = await circuit.calculateWitness(INPUT, true);
assert(Fr.eq(Fr.e(witness[1]), Fr.e(plaintext)));
});
// Encrypts 1000 field elements inside the circuit and verifies that the
// JS MACI decrypt() recovers every word of the plaintext.
it("encrypt multiple in circom, decrypt in js", async () => {
    const circuit = await wasm_tester(path.join(__dirname, "circuits", "encryptMultiple_test.circom"));
    const keypair = new Keypair();
    const keypair2 = new Keypair();
    const ecdhSharedKey = Keypair.genEcdhSharedKey(
        keypair.privKey,
        keypair2.pubKey,
    );
    const plaintext = [...Array(1000).keys()];
    const INPUT = {
        'plaintext': plaintext.map(String),
        'shared_key': ecdhSharedKey.toString(),
    }
    const witness = await circuit.calculateWitness(INPUT, true);
    // witness[1] = IV, witness[2..1001] = ciphertext words.
    const ciphertext = {
        iv: witness[1],
        data: witness.slice(2,1002),
    }
    // `const` added: decryptedText was an implicit global in the original.
    const decryptedText = decrypt(ciphertext, ecdhSharedKey);
    for (let i=0; i<1000; i++) {
        assert(Fr.eq(Fr.e(decryptedText[i]), Fr.e(plaintext[i])));
    }
});
it("encrypt multiple in js, decrypt in circom", async () => {
const circuit = await wasm_tester(path.join(__dirname, "circuits", "decryptMultiple_test.circom"));
const keypair = new Keypair();
const keypair2 = new Keypair();
const ecdhSharedKey = Keypair.genEcdhSharedKey(
keypair.privKey,
keypair2.pubKey,
);
const plaintext = ([...Array(1000).keys()]).map(BigInt);
const ciphertext = encrypt(plaintext, ecdhSharedKey);
const INPUT = {
'message': [ciphertext.iv.toString(), ...ciphertext.data.map(String)],
'shared_key': ecdhSharedKey.toString(),
}
const witness = await circuit.calculateWitness(INPUT, true);
for (let i=0; i<1000; i++) {
assert(Fr.eq(Fr.e(witness[i+1]), Fr.e(plaintext[i])));
}
});
// TODO: encrypt a model
// Skipped end-to-end test: runs the full MNIST circuit while encrypting
// every model parameter, then decrypts in JS and compares against the
// plaintext parameter list. The plaintext order must match the circuit's
// enc.plaintext wiring.
it.skip("encrypt entire model in circom, decrypt in js", async () => {
    const circuit = await wasm_tester(path.join(__dirname, "circuits", "encrypted_mnist_latest_test.circom"));
    const json = require("../models/mnist_latest_input.json");
    let INPUT = {};
    let plaintext = [
        ...json['conv2d_1_weights'],
        ...json['conv2d_1_bias'],
        ...json['bn_1_a'],
        ...json['bn_1_b'],
        ...json['conv2d_2_weights'],
        ...json['conv2d_2_bias'],
        ...json['bn_2_a'],
        ...json['bn_2_b'],
        ...json['dense_weights'],
        ...json['dense_bias'],
    ];
    // Flatten nested arrays and lift every value into the field.
    for (const [key, value] of Object.entries(json)) {
        if (Array.isArray(value)) {
            // Hoisted out of the loop: the original recomputed value.flat()
            // on every iteration.
            const flatValue = value.flat();
            INPUT[key] = flatValue.map((v) => Fr.e(v));
        } else {
            INPUT[key] = Fr.e(value);
        }
    }
    const keypair = new Keypair();
    const keypair2 = new Keypair();
    const ecdhSharedKey = Keypair.genEcdhSharedKey(
        keypair.privKey,
        keypair2.pubKey,
    );
    INPUT['private_key'] = keypair.privKey.asCircuitInputs();
    INPUT['public_key'] = keypair2.pubKey.asCircuitInputs();
    const witness = await circuit.calculateWitness(INPUT, true);
    assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
    assert(Fr.eq(Fr.e(witness[1]),Fr.e(7)));
    // witness[2] = IV, witness[3..2372] = 2370 ciphertext words.
    const ciphertext = {
        iv: witness[2],
        data: witness.slice(3,2373),
    }
    // `const` added: decryptedText was an implicit global in the original.
    const decryptedText = decrypt(ciphertext, ecdhSharedKey);
    for (let i=0; i<2370; i++) {
        assert(Fr.eq(Fr.e(decryptedText[i]), Fr.e(plaintext[i])));
    }
});
});
| https://github.com/socathie/circomlib-ml |
test/mnist.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
const INPUT = require("../models/mnist_input.json");
// End-to-end witness check for the MNIST circuit: the proof must be
// satisfiable (witness[0] == 1) and classify the sample as digit 7.
describe("mnist test", function () {
    this.timeout(100000000);
    it("should return correct output", async () => {
        const circuitPath = path.join(__dirname, "circuits", "mnist_test.circom");
        const circuit = await wasm_tester(circuitPath);
        const witness = await circuit.calculateWitness(INPUT, true);
        assert(Fr.eq(Fr.e(witness[0]), Fr.e(1)));
        assert(Fr.eq(Fr.e(witness[1]), Fr.e(7)));
    });
});
test/model1.js | const chai = require("chai");
const path = require("path");
const wasm_tester = require("circom_tester").wasm;
const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const Fr = new F1Field(exports.p);
const assert = chai.assert;
const INPUT = require("../models/model1_input.json");
// Witness check for the tiny 3-2-1 MLP: the proof must be satisfiable and
// the circuit output must equal the expected Dense21 output from the
// input JSON. (Commented-out duplicate of the INPUT object removed.)
describe("model1 test", function () {
    this.timeout(100000000);
    it("should return correct output", async () => {
        const circuit = await wasm_tester(path.join(__dirname, "circuits", "model1_test.circom"));
        const witness = await circuit.calculateWitness(INPUT, true);
        assert(Fr.eq(Fr.e(witness[0]),Fr.e(1)));
        // Fr.e(witness[1]) added for consistency with every other assertion
        // in these tests (witness entries are raw BigInts).
        assert(Fr.eq(Fr.e(witness[1]),Fr.e(INPUT.Dense21out[0])));
    });
});
test/modules/circomlib-0.5.1.js | exports.smt = require("./src/smt");
// Re-exports of the vendored circomlib 0.5.1 JS implementations used by
// the MACI crypto helpers (EdDSA, MiMC/Poseidon hashes, BabyJubJub, SMT).
exports.eddsa = require("./src/eddsa");
exports.mimc7 = require("./src/mimc7");
exports.mimcsponge = require("./src/mimcsponge");
exports.babyJub = require("./src/babyjub");
exports.pedersenHash = require("./src/pedersenHash");
// Second require of "./src/smt" is served from Node's require cache.
exports.SMT = require("./src/smt").SMT;
exports.SMTMemDB = require("./src/smt_memdb");
exports.poseidon = require("./src/poseidon");
| https://github.com/socathie/circomlib-ml |
test/modules/maci-crypto.js | "use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.unpackPubKey = exports.packPubKey = exports.bigInt2Buffer = exports.SNARK_FIELD_SIZE = exports.NOTHING_UP_MY_SLEEVE_PUBKEY = exports.NOTHING_UP_MY_SLEEVE = exports.formatPrivKeyForBabyJub = exports.unstringifyBigInts = exports.stringifyBigInts = exports.verifySignature = exports.hashLeftRight = exports.hash11 = exports.hash5 = exports.hashOne = exports.sign = exports.decrypt = exports.encrypt = exports.genEcdhSharedKey = exports.genKeypair = exports.genPubKey = exports.genPrivKey = exports.genRandomSalt = void 0;
const assert = require('assert');
const crypto = require("crypto");
const ethers = require("ethers");
const ff = require('ffjavascript');
const createBlakeHash = require('blake-hash');
const circomlib_0_5_1_1 = require("./circomlib-0.5.1");
const stringifyBigInts = ff.utils.stringifyBigInts;
exports.stringifyBigInts = stringifyBigInts;
const unstringifyBigInts = ff.utils.unstringifyBigInts;
exports.unstringifyBigInts = unstringifyBigInts;
const SNARK_FIELD_SIZE = BigInt('21888242871839275222246405745257275088548364400416034343698204186575808495617');
exports.SNARK_FIELD_SIZE = SNARK_FIELD_SIZE;
// A nothing-up-my-sleeve zero value
// Should be equal to 8370432830353022751713833565135785980866757267633941821328460903436894336785
const NOTHING_UP_MY_SLEEVE = BigInt(ethers.utils.solidityKeccak256(['bytes'], [ethers.utils.toUtf8Bytes('Maci')])) % SNARK_FIELD_SIZE;
exports.NOTHING_UP_MY_SLEEVE = NOTHING_UP_MY_SLEEVE;
// The pubkey is the first Pedersen base point from iden3's circomlib
// See https://github.com/iden3/circomlib/blob/d5ed1c3ce4ca137a6b3ca48bec4ac12c1b38957a/src/pedersen_printbases.js
const NOTHING_UP_MY_SLEEVE_PUBKEY = [
BigInt('10457101036533406547632367118273992217979173478358440826365724437999023779287'),
BigInt('19824078218392094440610104313265183977899662750282163392862422243483260492317')
];
exports.NOTHING_UP_MY_SLEEVE_PUBKEY = NOTHING_UP_MY_SLEEVE_PUBKEY;
/*
 * Convert a non-negative BigInt to a 32-byte big-endian Buffer.
 * Values needing more than 64 hex digits would yield a longer buffer,
 * matching the original behavior.
 */
const bigInt2Buffer = (i) => {
    // padStart replaces the original manual while-loop zero padding.
    return Buffer.from(i.toString(16).padStart(64, '0'), 'hex');
};
exports.bigInt2Buffer = bigInt2Buffer;
// Hash up to 2 elements
const poseidonT3 = (inputs) => {
assert(inputs.length === 2);
return (0, circomlib_0_5_1_1.poseidon)(inputs);
};
// Hash up to 5 elements
const poseidonT6 = (inputs) => {
assert(inputs.length === 5);
return (0, circomlib_0_5_1_1.poseidon)(inputs);
};
/*
 * Poseidon-hash up to 5 elements, zero-padding the input to exactly 5.
 * Throws if more than 5 elements are supplied.
 */
const hash5 = (elements) => {
    if (elements.length > 5) {
        throw new Error(`elements length should not greater than 5, got ${elements.length}`);
    }
    const padded = [...elements];
    while (padded.length < 5) {
        padded.push(BigInt(0));
    }
    return poseidonT6(padded);
};
exports.hash5 = hash5;
/*
 * Poseidon-hash up to 11 elements: zero-pad to 11, hash the first two
 * groups of five with poseidonT6, combine those digests with poseidonT3,
 * then fold in the 11th element. Throws on more than 11 elements.
 */
const hash11 = (elements) => {
    if (elements.length > 11) {
        throw new TypeError(`elements length should not greater than 11, got ${elements.length}`);
    }
    const padded = [...elements];
    while (padded.length < 11) {
        padded.push(BigInt(0));
    }
    const firstFive = poseidonT6(padded.slice(0, 5));
    const nextFive = poseidonT6(padded.slice(5, 10));
    return poseidonT3([
        poseidonT3([firstFive, nextFive]),
        padded[10]
    ]);
};
exports.hash11 = hash11;
/*
* Hash a single BigInt with the Poseidon hash function
*/
const hashOne = (preImage) => {
return poseidonT3([preImage, BigInt(0)]);
};
exports.hashOne = hashOne;
/*
* Hash two BigInts with the Poseidon hash function
*/
const hashLeftRight = (left, right) => {
return poseidonT3([left, right]);
};
exports.hashLeftRight = hashLeftRight;
/*
 * Returns a BabyJub-compatible random value: draw 256 random bits and
 * reduce modulo the snark field size (roughly 253-254 bits). Modulo bias
 * is prevented by rejection sampling, per the efficient algorithm in:
 * http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/lib/libc/crypt/arc4random_uniform.c
 * @return A BabyJub-compatible random value.
 */
const genRandomBabyJubValue = () => {
    // Rejection threshold, precomputed as:
    //   lim = 2**256; min = (lim - SNARK_FIELD_SIZE) % SNARK_FIELD_SIZE
    const min = BigInt('6350874878119819312338956282401532410528162663560392320966563075034087161851');
    let rand;
    do {
        rand = BigInt('0x' + crypto.randomBytes(32).toString('hex'));
    } while (rand < min);
    const privKey = rand % SNARK_FIELD_SIZE;
    assert(privKey < SNARK_FIELD_SIZE);
    return privKey;
};
/*
* @return A BabyJub-compatible private key.
*/
const genPrivKey = () => {
return genRandomBabyJubValue();
};
exports.genPrivKey = genPrivKey;
/*
* @return A BabyJub-compatible salt.
*/
const genRandomSalt = () => {
return genRandomBabyJubValue();
};
exports.genRandomSalt = genRandomSalt;
/*
* An internal function which formats a random private key to be compatible
* with the BabyJub curve. This is the format which should be passed into the
* PublicKey and other circuits.
*/
const formatPrivKeyForBabyJub = (privKey) => {
const sBuff = circomlib_0_5_1_1.eddsa.pruneBuffer(createBlakeHash("blake512").update(bigInt2Buffer(privKey)).digest().slice(0, 32));
const s = ff.utils.leBuff2int(sBuff);
return ff.Scalar.shr(s, 3);
};
exports.formatPrivKeyForBabyJub = formatPrivKeyForBabyJub;
/*
* Losslessly reduces the size of the representation of a public key
* @param pubKey The public key to pack
* @return A packed public key
*/
const packPubKey = (pubKey) => {
return circomlib_0_5_1_1.babyJub.packPoint(pubKey);
};
exports.packPubKey = packPubKey;
/*
* Restores the original PubKey from its packed representation
* @param packed The value to unpack
* @return The unpacked public key
*/
const unpackPubKey = (packed) => {
return circomlib_0_5_1_1.babyJub.unpackPoint(packed);
};
exports.unpackPubKey = unpackPubKey;
/*
* @param privKey A private key generated using genPrivKey()
* @return A public key associated with the private key
*/
const genPubKey = (privKey) => {
privKey = BigInt(privKey.toString());
assert(privKey < SNARK_FIELD_SIZE);
return circomlib_0_5_1_1.eddsa.prv2pub(bigInt2Buffer(privKey));
};
exports.genPubKey = genPubKey;
const genKeypair = () => {
const privKey = genPrivKey();
const pubKey = genPubKey(privKey);
const Keypair = { privKey, pubKey };
return Keypair;
};
exports.genKeypair = genKeypair;
/*
* Generates an Elliptic-curve Diffie–Hellman shared key given a private key
* and a public key.
* @return The ECDH shared key.
*/
const genEcdhSharedKey = (privKey, pubKey) => {
return circomlib_0_5_1_1.babyJub.mulPointEscalar(pubKey, formatPrivKeyForBabyJub(privKey))[0];
};
exports.genEcdhSharedKey = genEcdhSharedKey;
/*
 * Encrypt an array of field elements under an ECDH shared key.
 * The IV is the MiMC7 multi-hash of the plaintext; word i is masked by
 * adding mimc7.hash(sharedKey, iv + i).
 * @return The ciphertext { iv, data }.
 */
const encrypt = (plaintext, sharedKey) => {
    const iv = circomlib_0_5_1_1.mimc7.multiHash(plaintext, BigInt(0));
    const data = plaintext.map((word, idx) => {
        return word + circomlib_0_5_1_1.mimc7.hash(sharedKey, iv + BigInt(idx));
    });
    // TODO: add asserts here
    return { iv, data };
};
exports.encrypt = encrypt;
/*
 * Decrypt a ciphertext produced by encrypt() using the same shared key:
 * subtract the per-index keystream mimc7.hash(sharedKey, iv + i).
 * @return The plaintext array of BigInts.
 */
const decrypt = (ciphertext, sharedKey) => {
    const iv = BigInt(ciphertext.iv);
    return ciphertext.data.map((word, idx) => {
        return BigInt(word) - BigInt(circomlib_0_5_1_1.mimc7.hash(sharedKey, iv + BigInt(idx)));
    });
};
exports.decrypt = decrypt;
/*
* Generates a signature given a private key and plaintext.
* @return The signature.
*/
const sign = (privKey, msg) => {
return circomlib_0_5_1_1.eddsa.signPoseidon(bigInt2Buffer(privKey), msg);
};
exports.sign = sign;
/*
* Checks whether the signature of the given plaintext was created using the
* private key associated with the given public key.
* @return True if the signature is valid, and false otherwise.
*/
const verifySignature = (msg, signature, pubKey) => {
return circomlib_0_5_1_1.eddsa.verifyPoseidon(msg, signature, pubKey);
};
exports.verifySignature = verifySignature;
| https://github.com/socathie/circomlib-ml |
test/modules/maci-domainobjs.js | "use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.PrivKey = exports.PubKey = exports.Keypair = exports.Message = exports.Command = exports.StateLeaf = void 0;
const assert = require('assert');
const maci_crypto_1 = require("./maci-crypto");
const SERIALIZED_PRIV_KEY_PREFIX = 'macisk.';
class PrivKey {
constructor(rawPrivKey) {
this.copy = () => {
return new PrivKey(BigInt(this.rawPrivKey.toString()));
};
this.asCircuitInputs = () => {
return (0, maci_crypto_1.formatPrivKeyForBabyJub)(this.rawPrivKey).toString();
};
this.serialize = () => {
return SERIALIZED_PRIV_KEY_PREFIX + this.rawPrivKey.toString(16);
};
this.rawPrivKey = rawPrivKey;
}
}
exports.PrivKey = PrivKey;
PrivKey.unserialize = (s) => {
const x = s.slice(SERIALIZED_PRIV_KEY_PREFIX.length);
return new PrivKey(BigInt('0x' + x));
};
PrivKey.isValidSerializedPrivKey = (s) => {
const correctPrefix = s.startsWith(SERIALIZED_PRIV_KEY_PREFIX);
const x = s.slice(SERIALIZED_PRIV_KEY_PREFIX.length);
let validValue = false;
try {
const value = BigInt('0x' + x);
validValue = value < maci_crypto_1.SNARK_FIELD_SIZE;
}
catch {
// comment to make linter happy
}
return correctPrefix && validValue;
};
const SERIALIZED_PUB_KEY_PREFIX = 'macipk.';
class PubKey {
constructor(rawPubKey) {
this.copy = () => {
return new PubKey([
BigInt(this.rawPubKey[0].toString()),
BigInt(this.rawPubKey[1].toString()),
]);
};
this.asContractParam = () => {
return {
x: this.rawPubKey[0].toString(),
y: this.rawPubKey[1].toString(),
};
};
this.asCircuitInputs = () => {
return this.rawPubKey.map((x) => x.toString());
};
this.asArray = () => {
return [
this.rawPubKey[0],
this.rawPubKey[1],
];
};
this.serialize = () => {
// Blank leaves have pubkey [0, 0], which packPubKey does not support
if (BigInt(this.rawPubKey[0]) === BigInt(0) &&
BigInt(this.rawPubKey[1]) === BigInt(0)) {
return SERIALIZED_PUB_KEY_PREFIX + 'z';
}
const packed = (0, maci_crypto_1.packPubKey)(this.rawPubKey).toString('hex');
return SERIALIZED_PUB_KEY_PREFIX + packed.toString();
};
assert(rawPubKey.length === 2);
assert(rawPubKey[0] < maci_crypto_1.SNARK_FIELD_SIZE);
assert(rawPubKey[1] < maci_crypto_1.SNARK_FIELD_SIZE);
this.rawPubKey = rawPubKey;
}
}
exports.PubKey = PubKey;
PubKey.unserialize = (s) => {
// Blank leaves have pubkey [0, 0], which packPubKey does not support
if (s === SERIALIZED_PUB_KEY_PREFIX + 'z') {
return new PubKey([BigInt(0), BigInt(0)]);
}
const len = SERIALIZED_PUB_KEY_PREFIX.length;
const packed = Buffer.from(s.slice(len), 'hex');
return new PubKey((0, maci_crypto_1.unpackPubKey)(packed));
};
PubKey.isValidSerializedPubKey = (s) => {
const correctPrefix = s.startsWith(SERIALIZED_PUB_KEY_PREFIX);
let validValue = false;
try {
PubKey.unserialize(s);
validValue = true;
}
catch {
// comment to make linter happy
}
return correctPrefix && validValue;
};
/*
 * A BabyJub keypair. Constructed either from an existing PrivKey (the
 * public key is derived from it) or, with no argument, from a freshly
 * generated random private key.
 */
class Keypair {
    constructor(privKey) {
        // Deep-copies this keypair (pubKey is re-derived from the copy).
        this.copy = () => {
            return new Keypair(this.privKey.copy());
        };
        if (privKey) {
            this.privKey = privKey;
            this.pubKey = new PubKey((0, maci_crypto_1.genPubKey)(privKey.rawPrivKey));
        }
        else {
            const rawKeyPair = (0, maci_crypto_1.genKeypair)();
            this.privKey = new PrivKey(rawKeyPair.privKey);
            this.pubKey = new PubKey(rawKeyPair.pubKey);
        }
    }
    // Derives the ECDH shared key between a private and a public key.
    static genEcdhSharedKey(privKey, pubKey) {
        return (0, maci_crypto_1.genEcdhSharedKey)(privKey.rawPrivKey, pubKey.rawPubKey);
    }
    // Structural equality on the raw key material. Asserts internal
    // consistency: priv and pub keys must either both match or both differ.
    equals(keypair) {
        const equalPrivKey = this.privKey.rawPrivKey === keypair.privKey.rawPrivKey;
        const equalPubKey = this.pubKey.rawPubKey[0] === keypair.pubKey.rawPubKey[0] &&
            this.pubKey.rawPubKey[1] === keypair.pubKey.rawPubKey[1];
        // If this assertion fails, something is very wrong and this function
        // should not return anything
        // XOR is equivalent to: (x && !y) || (!x && y )
        const x = (equalPrivKey && equalPubKey);
        const y = (!equalPrivKey && !equalPubKey);
        assert((x && !y) || (!x && y));
        return equalPrivKey;
    }
}
exports.Keypair = Keypair;
/*
* An encrypted command and signature.
*/
class Message {
constructor(iv, data) {
this.asArray = () => {
return [
this.iv,
...this.data,
];
};
this.asContractParam = () => {
return {
iv: this.iv.toString(),
data: this.data.map((x) => x.toString()),
};
};
this.asCircuitInputs = () => {
return this.asArray();
};
this.hash = () => {
return (0, maci_crypto_1.hash11)(this.asArray());
};
this.copy = () => {
return new Message(BigInt(this.iv.toString()), this.data.map((x) => BigInt(x.toString())));
};
assert(data.length === 10);
this.iv = iv;
this.data = data;
}
}
exports.Message = Message;
/*
* A leaf in the state tree, which maps public keys to votes
*/
class StateLeaf {
constructor(pubKey, voteOptionTreeRoot, voiceCreditBalance, nonce) {
this.asArray = () => {
return [
...this.pubKey.asArray(),
this.voteOptionTreeRoot,
this.voiceCreditBalance,
this.nonce,
];
};
this.asCircuitInputs = () => {
return this.asArray();
};
this.hash = () => {
return (0, maci_crypto_1.hash5)(this.asArray());
};
this.serialize = () => {
const j = {
pubKey: this.pubKey.serialize(),
voteOptionTreeRoot: this.voteOptionTreeRoot.toString(16),
voiceCreditBalance: this.voiceCreditBalance.toString(16),
nonce: this.nonce.toString(16),
};
return Buffer.from(JSON.stringify(j, null, 0), 'utf8').toString('base64');
};
this.pubKey = pubKey;
this.voteOptionTreeRoot = voteOptionTreeRoot;
this.voiceCreditBalance = voiceCreditBalance;
// The this is the current nonce. i.e. a user who has published 0 valid
// command should have this value at 0, and the first command should
// have a nonce of 1
this.nonce = nonce;
}
copy() {
return new StateLeaf(this.pubKey.copy(), BigInt(this.voteOptionTreeRoot.toString()), BigInt(this.voiceCreditBalance.toString()), BigInt(this.nonce.toString()));
}
static genBlankLeaf(emptyVoteOptionTreeRoot) {
return new StateLeaf(new PubKey(maci_crypto_1.NOTHING_UP_MY_SLEEVE_PUBKEY), emptyVoteOptionTreeRoot, BigInt(0), BigInt(0));
}
static genRandomLeaf() {
return new StateLeaf(new PubKey(maci_crypto_1.NOTHING_UP_MY_SLEEVE_PUBKEY), (0, maci_crypto_1.genRandomSalt)(), (0, maci_crypto_1.genRandomSalt)(), (0, maci_crypto_1.genRandomSalt)());
}
}
exports.StateLeaf = StateLeaf;
StateLeaf.unserialize = (serialized) => {
const j = JSON.parse(Buffer.from(serialized, 'base64').toString('utf8'));
return new StateLeaf(PubKey.unserialize(j.pubKey), BigInt('0x' + j.voteOptionTreeRoot), BigInt('0x' + j.voiceCreditBalance), BigInt('0x' + j.nonce));
};
/*
* Unencrypted data whose fields include the user's public key, vote etc.
*/
class Command {
constructor(stateIndex, newPubKey, voteOptionIndex, newVoteWeight, nonce, salt = (0, maci_crypto_1.genRandomSalt)()) {
this.copy = () => {
return new Command(BigInt(this.stateIndex.toString()), this.newPubKey.copy(), BigInt(this.voteOptionIndex.toString()), BigInt(this.newVoteWeight.toString()), BigInt(this.nonce.toString()), BigInt(this.salt.toString()));
};
this.asArray = () => {
return [
this.stateIndex,
...this.newPubKey.asArray(),
this.voteOptionIndex,
this.newVoteWeight,
this.nonce,
this.salt,
];
};
/*
* Check whether this command has deep equivalence to another command
*/
this.equals = (command) => {
return this.stateIndex == command.stateIndex &&
this.newPubKey[0] == command.newPubKey[0] &&
this.newPubKey[1] == command.newPubKey[1] &&
this.voteOptionIndex == command.voteOptionIndex &&
this.newVoteWeight == command.newVoteWeight &&
this.nonce == command.nonce &&
this.salt == command.salt;
};
this.hash = () => {
return (0, maci_crypto_1.hash11)(this.asArray());
};
/*
* Signs this command and returns a Signature.
*/
this.sign = (privKey) => {
return (0, maci_crypto_1.sign)(privKey.rawPrivKey, this.hash());
};
/*
* Returns true if the given signature is a correct signature of this
* command and signed by the private key associated with the given public
* key.
*/
this.verifySignature = (signature, pubKey) => {
return (0, maci_crypto_1.verifySignature)(this.hash(), signature, pubKey.rawPubKey);
};
/*
* Encrypts this command along with a signature to produce a Message.
*/
this.encrypt = (signature, sharedKey) => {
const plaintext = [
...this.asArray(),
signature.R8[0],
signature.R8[1],
signature.S,
];
const ciphertext = (0, maci_crypto_1.encrypt)(plaintext, sharedKey);
const message = new Message(ciphertext.iv, ciphertext.data);
return message;
};
this.stateIndex = stateIndex;
this.newPubKey = newPubKey;
this.voteOptionIndex = voteOptionIndex;
this.newVoteWeight = newVoteWeight;
this.nonce = nonce;
this.salt = salt;
}
}
exports.Command = Command;
/*
* Decrypts a Message to produce a Command.
*/
Command.decrypt = (message, sharedKey) => {
const decrypted = (0, maci_crypto_1.decrypt)(message, sharedKey);
const command = new Command(decrypted[0], new PubKey([decrypted[1], decrypted[2]]), decrypted[3], decrypted[4], decrypted[5], decrypted[6]);
const signature = {
R8: [decrypted[7], decrypted[8]],
S: decrypted[9],
};
return { command, signature };
};
| https://github.com/socathie/circomlib-ml |
test/modules/src/babyjub.js | const F1Field = require("ffjavascript").F1Field;
const Scalar = require("ffjavascript").Scalar;
const utils = require("ffjavascript").utils;
exports.addPoint = addPoint;
exports.mulPointEscalar = mulPointEscalar;
exports.inCurve = inCurve;
exports.inSubgroup = inSubgroup;
exports.packPoint = packPoint;
exports.unpackPoint = unpackPoint;
// Field modulus over which the curve arithmetic below is performed.
exports.p = Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const F = new F1Field(exports.p);
exports.F = F;
// Curve point constants; names follow circomlib convention.  Base8 is the
// base point used for scalar multiplication in prv2pub/sign (eddsa.js).
exports.Generator = [
    F.e("995203441582195749578291179787384436505546430278305826713579947235728471134"),
    F.e("5472060717959818805561601436314318772137091100104008585924551046643952123905")
];
exports.Base8 = [
    F.e("5299619240641551281634865583518297030282874472190772894086521144482721001553"),
    F.e("16950150798460657717958625567821834550301663161624707787222815936182638968203")
];
// Group order; subOrder = order >> 3 (cofactor 8), used by inSubgroup and
// as the scalar field of EdDSA signatures.
exports.order = Scalar.fromString("21888242871839275222246405745257275088614511777268538073601725287587578984328");
exports.subOrder = Scalar.shiftRight(exports.order, 3);
// Twisted Edwards coefficients: A*x^2 + y^2 = 1 + D*x^2*y^2 (see inCurve).
exports.A = F.e("168700");
exports.D = F.e("168696");
/*
 * Adds two points of the twisted Edwards curve A*x^2 + y^2 = 1 + D*x^2*y^2
 * using the standard addition law.  Points are [x, y] arrays of field
 * elements; the result is a new point.
 */
function addPoint(a,b) {
    // Cross products shared by both output coordinates.
    const x1y2 = F.mul(a[0], b[1]);
    const y1x2 = F.mul(a[1], b[0]);
    // (y1 - A*x1)*(x2 + y2) supplies the remaining terms of the y coordinate.
    const mix = F.mul(F.sub(a[1], F.mul(exports.A, a[0])), F.add(b[0], b[1]));
    // d * x1*y2 * y1*x2 == D*x1*x2*y1*y2, the shared denominator term.
    const dxy = F.mul(exports.D, F.mul(x1y2, y1x2));
    const x3 = F.div(F.add(x1y2, y1x2), F.add(F.one, dxy));
    const y3 = F.div(F.add(mix, F.sub(F.mul(exports.A, x1y2), y1x2)), F.sub(F.one, dxy));
    return [x3, y3];
}
/*
 * Scalar multiplication by binary double-and-add, scanning the scalar `e`
 * from least- to most-significant bit.  Returns e*base.
 */
function mulPointEscalar(base, e) {
    let acc = [F.e("0"), F.e("1")];   // identity element of the group
    let addend = base;
    for (let k = e; !Scalar.isZero(k); k = Scalar.shiftRight(k, 1)) {
        if (Scalar.isOdd(k)) {
            acc = addPoint(acc, addend);
        }
        addend = addPoint(addend, addend);
    }
    return acc;
}
/*
 * A point is in the prime-order subgroup iff it lies on the curve and
 * multiplying it by the subgroup order yields the identity (0, 1).
 */
function inSubgroup(P) {
    if (!inCurve(P)) return false;
    const id = mulPointEscalar(P, exports.subOrder);
    return F.isZero(id[0]) && F.eq(id[1], F.one);
}
/*
 * Checks the twisted Edwards equation A*x^2 + y^2 == 1 + D*x^2*y^2.
 */
function inCurve(P) {
    const xx = F.square(P[0]);
    const yy = F.square(P[1]);
    const lhs = F.add(F.mul(exports.A, xx), yy);
    const rhs = F.add(F.one, F.mul(F.mul(xx, yy), exports.D));
    return F.eq(lhs, rhs);
}
/*
 * Packs a point into 32 bytes: little-endian y with the sign of x stored
 * in the most significant bit of the last byte.
 */
function packPoint(P) {
    const out = utils.leInt2Buff(P[1], 32);
    if (F.lt(P[0], F.zero)) {
        out[31] |= 0x80;
    }
    return out;
}
/*
 * Inverse of packPoint: recovers y from the buffer, solves the curve
 * equation for x, and applies the stored sign bit.
 * Returns null when the buffer does not encode a valid point.
 */
function unpackPoint(_buff) {
    const buff = Buffer.from(_buff);
    const sign = (buff[31] & 0x80) !== 0;
    if (sign) {
        buff[31] &= 0x7F;
    }
    const y = utils.leBuff2int(buff);
    if (Scalar.gt(y, exports.p)) return null;
    const y2 = F.square(y);
    // x^2 = (1 - y^2) / (A - D*y^2); sqrt returns null for non-residues.
    let x = F.sqrt(F.div(
        F.sub(F.one, y2),
        F.sub(exports.A, F.mul(exports.D, y2))));
    if (x == null) return null;
    if (sign) x = F.neg(x);
    return [x, y];
}
| https://github.com/socathie/circomlib-ml |
test/modules/src/eddsa.js | const createBlakeHash = require("blake-hash");
const Scalar = require("ffjavascript").Scalar;
const F1Field = require("ffjavascript").F1Field;
const babyJub = require("./babyjub");
const utils = require("ffjavascript").utils;
const pedersenHash = require("./pedersenHash").hash;
const mimc7 = require("./mimc7");
const poseidon = require("./poseidon.js");
const mimcsponge = require("./mimcsponge");
exports.prv2pub= prv2pub;
exports.sign = sign;
exports.signMiMC = signMiMC;
exports.signPoseidon = signPoseidon;
exports.signMiMCSponge = signMiMCSponge;
exports.verify = verify;
exports.verifyMiMC = verifyMiMC;
exports.verifyPoseidon = verifyPoseidon;
exports.verifyMiMCSponge = verifyMiMCSponge;
exports.packSignature = packSignature;
exports.unpackSignature = unpackSignature;
exports.pruneBuffer = pruneBuffer;
/*
 * Clamps a 32-byte scalar seed for EdDSA key derivation: clears the three
 * low bits of byte 0, clears the top bit of byte 31 and sets its
 * second-highest bit.  The input buffer is left untouched.
 */
function pruneBuffer(_buff) {
    const out = Buffer.from(_buff);
    out[0] &= 0xF8;
    out[31] = (out[31] & 0x7F) | 0x40;
    return out;
}
/*
 * Derives the EdDSA public key A = (s >> 3) * Base8, where s is the pruned
 * low half of blake512(privateKey).
 */
function prv2pub(prv) {
    const seed = createBlakeHash("blake512").update(prv).digest();
    const s = utils.leBuff2int(pruneBuffer(seed.slice(0, 32)));
    return babyJub.mulPointEscalar(babyJub.Base8, Scalar.shr(s, 3));
}
/*
 * EdDSA signature with a Pedersen-hash challenge:
 *   r  = blake512(h[32..64] || msg) reduced mod subOrder
 *   R8 = r*Base8,  S = r + H(R8, A, msg)*s   (mod subOrder)
 * where s is the pruned low half of h = blake512(prv).
 */
function sign(prv, msg) {
    const h1 = createBlakeHash("blake512").update(prv).digest();
    const s = utils.leBuff2int(pruneBuffer(h1.slice(0, 32)));
    const A = babyJub.mulPointEscalar(babyJub.Base8, Scalar.shr(s, 3));
    const Fr = new F1Field(babyJub.subOrder);
    const rBuff = createBlakeHash("blake512")
        .update(Buffer.concat([h1.slice(32, 64), msg]))
        .digest();
    const r = Fr.e(utils.leBuff2int(rBuff));
    const R8 = babyJub.mulPointEscalar(babyJub.Base8, r);
    // Challenge: Pedersen hash of packed R8, packed A and the raw message.
    const hm = utils.leBuff2int(
        pedersenHash(Buffer.concat([babyJub.packPoint(R8), babyJub.packPoint(A), msg])));
    return {
        R8: R8,
        S: Fr.add(r, Fr.mul(hm, s))
    };
}
/*
 * Same scheme as sign(), but the message is a field element and the
 * challenge hash is MiMC-7 multiHash over (R8, A, msg).
 */
function signMiMC(prv, msg) {
    const h1 = createBlakeHash("blake512").update(prv).digest();
    const s = utils.leBuff2int(pruneBuffer(h1.slice(0, 32)));
    const A = babyJub.mulPointEscalar(babyJub.Base8, Scalar.shr(s, 3));
    const Fr = new F1Field(babyJub.subOrder);
    const rBuff = createBlakeHash("blake512")
        .update(Buffer.concat([h1.slice(32, 64), utils.leInt2Buff(msg, 32)]))
        .digest();
    const r = Fr.e(utils.leBuff2int(rBuff));
    const R8 = babyJub.mulPointEscalar(babyJub.Base8, r);
    const hm = mimc7.multiHash([R8[0], R8[1], A[0], A[1], msg]);
    return {
        R8: R8,
        S: Fr.add(r, Fr.mul(hm, s))
    };
}
/*
 * Same scheme as signMiMC(), but the challenge hash is MiMC-Sponge
 * multiHash over (R8, A, msg).
 */
function signMiMCSponge(prv, msg) {
    const h1 = createBlakeHash("blake512").update(prv).digest();
    const s = utils.leBuff2int(pruneBuffer(h1.slice(0, 32)));
    const A = babyJub.mulPointEscalar(babyJub.Base8, Scalar.shr(s, 3));
    const Fr = new F1Field(babyJub.subOrder);
    const rBuff = createBlakeHash("blake512")
        .update(Buffer.concat([h1.slice(32, 64), utils.leInt2Buff(msg, 32)]))
        .digest();
    const r = Fr.e(utils.leBuff2int(rBuff));
    const R8 = babyJub.mulPointEscalar(babyJub.Base8, r);
    const hm = mimcsponge.multiHash([R8[0], R8[1], A[0], A[1], msg]);
    return {
        R8: R8,
        S: Fr.add(r, Fr.mul(hm, s))
    };
}
/*
 * Same scheme as signMiMC(), but the challenge hash is Poseidon over
 * (R8, A, msg).
 */
function signPoseidon(prv, msg) {
    const h1 = createBlakeHash("blake512").update(prv).digest();
    const s = utils.leBuff2int(pruneBuffer(h1.slice(0, 32)));
    const A = babyJub.mulPointEscalar(babyJub.Base8, Scalar.shr(s, 3));
    const Fr = new F1Field(babyJub.subOrder);
    const rBuff = createBlakeHash("blake512")
        .update(Buffer.concat([h1.slice(32, 64), utils.leInt2Buff(msg, 32)]))
        .digest();
    const r = Fr.e(utils.leBuff2int(rBuff));
    const R8 = babyJub.mulPointEscalar(babyJub.Base8, r);
    const hm = poseidon([R8[0], R8[1], A[0], A[1], msg]);
    return {
        R8: R8,
        S: Fr.add(r, Fr.mul(hm, s))
    };
}
/*
 * Verifies a Pedersen-challenge EdDSA signature by checking
 * S*Base8 == R8 + 8*H(R8, A, msg)*A.  Returns false (never throws) on
 * structurally malformed input.
 */
function verify(msg, sig, A) {
    if (typeof sig != "object") return false;
    if (!Array.isArray(sig.R8) || sig.R8.length != 2 || !babyJub.inCurve(sig.R8)) return false;
    if (!Array.isArray(A) || A.length != 2 || !babyJub.inCurve(A)) return false;
    if (sig.S >= babyJub.subOrder) return false;
    const hm = utils.leBuff2int(
        pedersenHash(Buffer.concat([babyJub.packPoint(sig.R8), babyJub.packPoint(A), msg])));
    const lhs = babyJub.mulPointEscalar(babyJub.Base8, sig.S);
    const rhs = babyJub.addPoint(sig.R8, babyJub.mulPointEscalar(A, Scalar.mul(hm, 8)));
    return babyJub.F.eq(lhs[0], rhs[0]) && babyJub.F.eq(lhs[1], rhs[1]);
}
/*
 * Verifies an EdDSA signature whose challenge was computed with MiMC-7:
 * checks S*Base8 == R8 + 8*hm*A.  Returns false on malformed input.
 */
function verifyMiMC(msg, sig, A) {
    if (typeof sig != "object") return false;
    if (!Array.isArray(sig.R8) || sig.R8.length != 2 || !babyJub.inCurve(sig.R8)) return false;
    if (!Array.isArray(A) || A.length != 2 || !babyJub.inCurve(A)) return false;
    if (sig.S >= babyJub.subOrder) return false;
    const hm = mimc7.multiHash([sig.R8[0], sig.R8[1], A[0], A[1], msg]);
    const lhs = babyJub.mulPointEscalar(babyJub.Base8, sig.S);
    const rhs = babyJub.addPoint(sig.R8, babyJub.mulPointEscalar(A, Scalar.mul(hm, 8)));
    return babyJub.F.eq(lhs[0], rhs[0]) && babyJub.F.eq(lhs[1], rhs[1]);
}
/*
 * Verifies an EdDSA signature whose challenge was computed with Poseidon:
 * checks S*Base8 == R8 + 8*hm*A.  Returns false on malformed input.
 */
function verifyPoseidon(msg, sig, A) {
    if (typeof sig != "object") return false;
    if (!Array.isArray(sig.R8) || sig.R8.length != 2 || !babyJub.inCurve(sig.R8)) return false;
    if (!Array.isArray(A) || A.length != 2 || !babyJub.inCurve(A)) return false;
    if (sig.S >= babyJub.subOrder) return false;
    const hm = poseidon([sig.R8[0], sig.R8[1], A[0], A[1], msg]);
    const lhs = babyJub.mulPointEscalar(babyJub.Base8, sig.S);
    const rhs = babyJub.addPoint(sig.R8, babyJub.mulPointEscalar(A, Scalar.mul(hm, 8)));
    return babyJub.F.eq(lhs[0], rhs[0]) && babyJub.F.eq(lhs[1], rhs[1]);
}
function verifyMiMCSponge(msg, sig, A) {
    // Verifies an EdDSA signature whose challenge hash is computed with
    // MiMC-Sponge multiHash. Returns false (never throws) on any
    // structurally malformed input.
    if (typeof sig != "object") return false;
    if (!Array.isArray(sig.R8)) return false;
    if (sig.R8.length!= 2) return false;
    if (!babyJub.inCurve(sig.R8)) return false;
    if (!Array.isArray(A)) return false;
    if (A.length!= 2) return false;
    if (!babyJub.inCurve(A)) return false;
    if (sig.S>= babyJub.subOrder) return false;
    const hm = mimcsponge.multiHash([sig.R8[0], sig.R8[1], A[0], A[1], msg]);
    // Check S*B8 == R8 + 8*hm*A (the cofactor 8 clears low-order components).
    const Pleft = babyJub.mulPointEscalar(babyJub.Base8, sig.S);
    // BUG FIX: the original called hm.times(bigInt("8")) — `bigInt` is not
    // defined in this module and the hash value has no .times method; use
    // Scalar.mul exactly as verifyMiMC/verifyPoseidon do.
    let Pright = babyJub.mulPointEscalar(A, Scalar.mul(hm, 8));
    Pright = babyJub.addPoint(sig.R8, Pright);
    if (!babyJub.F.eq(Pleft[0],Pright[0])) return false;
    if (!babyJub.F.eq(Pleft[1],Pright[1])) return false;
    return true;
}
/*
 * Serialises a signature as packed R8 (32 bytes) followed by S in
 * little-endian (32 bytes).
 */
function packSignature(sig) {
    return Buffer.concat([babyJub.packPoint(sig.R8), utils.leInt2Buff(sig.S, 32)]);
}
/*
 * Inverse of packSignature: the first 32 bytes hold the packed R8 point,
 * the last 32 bytes hold S little-endian.
 */
function unpackSignature(sigBuff) {
    const R8 = babyJub.unpackPoint(sigBuff.slice(0, 32));
    const S = utils.leBuff2int(sigBuff.slice(32, 64));
    return { R8, S };
}
| https://github.com/socathie/circomlib-ml |
test/modules/src/evmasm.js | // Copyright (c) 2018 Jordi Baylina
// License: LGPL-3.0+
//
const Web3Utils = require("web3-utils");
/*
 * Minimal EVM assembler.  Each opcode method appends the corresponding
 * byte(s) to `this.code`; label()/_pushLabel()/_fillLabel() resolve forward
 * jump targets; createTxData() prefixes a loader so the assembled body can
 * be used as contract-creation transaction data.
 */
class Contract {
    constructor() {
        this.code = [];           // assembled bytecode, one numeric byte per entry
        this.labels = {};         // label name -> bytecode offset
        this.pendingLabels = {};  // label name -> offsets of PUSH3 sites to patch
    }
    /*
     * Returns the creation tx data (hex string): a loader that CODECOPYs the
     * assembled body into memory and RETURNs it, followed by the body.
     * Throws if any label was referenced but never defined.
     */
    createTxData() {
        let C;
        // Check all labels are defined
        const pendingLabels = Object.keys(this.pendingLabels);
        if (pendingLabels.length>0) {
            // BUG FIX: corrected the misspelled error message ("Lables").
            throw new Error("Labels not defined: "+ pendingLabels.join(", "));
        }
        // The loader pushes its own length, and that push changes the size of
        // the loader itself, so regenerate until the length is stable.
        let setLoaderLength = 0;
        let genLoadedLength = -1;
        while (genLoadedLength!=setLoaderLength) {
            setLoaderLength = genLoadedLength;
            C = new module.exports();
            C.codesize();
            C.push(setLoaderLength);
            C.push(0);
            C.codecopy();
            C.push(this.code.length);
            C.push(0);
            C.return();
            genLoadedLength = C.code.length;
        }
        return Web3Utils.bytesToHex(C.code.concat(this.code));
    }
    // 0x00-0x1a: arithmetic, comparison and bitwise opcodes.
    stop() { this.code.push(0x00); }
    add() { this.code.push(0x01); }
    mul() { this.code.push(0x02); }
    sub() { this.code.push(0x03); }
    div() { this.code.push(0x04); }
    sdiv() { this.code.push(0x05); }
    mod() { this.code.push(0x06); }
    smod() { this.code.push(0x07); }
    addmod() { this.code.push(0x08); }
    mulmod() { this.code.push(0x09); }
    exp() { this.code.push(0x0a); }
    signextend() { this.code.push(0x0b); }
    lt() { this.code.push(0x10); }
    gt() { this.code.push(0x11); }
    slt() { this.code.push(0x12); }
    sgt() { this.code.push(0x13); }
    eq() { this.code.push(0x14); }
    iszero() { this.code.push(0x15); }
    and() { this.code.push(0x16); }
    or() { this.code.push(0x17); }
    shor() { this.code.push(0x18); } // NOTE(review): 0x18 is XOR — method name looks like a typo; kept for compatibility
    not() { this.code.push(0x19); }
    byte() { this.code.push(0x1a); }
    keccak() { this.code.push(0x20); }
    sha3() { this.code.push(0x20); } // alias
    // 0x30-0x45: environment and block information.
    address() { this.code.push(0x30); }
    balance() { this.code.push(0x31); }
    origin() { this.code.push(0x32); }
    caller() { this.code.push(0x33); }
    callvalue() { this.code.push(0x34); }
    calldataload() { this.code.push(0x35); }
    calldatasize() { this.code.push(0x36); }
    calldatacopy() { this.code.push(0x37); }
    codesize() { this.code.push(0x38); }
    codecopy() { this.code.push(0x39); }
    gasprice() { this.code.push(0x3a); }
    extcodesize() { this.code.push(0x3b); }
    extcodecopy() { this.code.push(0x3c); }
    returndatasize() { this.code.push(0x3d); }
    returndatacopy() { this.code.push(0x3e); }
    blockhash() { this.code.push(0x40); }
    coinbase() { this.code.push(0x41); }
    timestamp() { this.code.push(0x42); }
    number() { this.code.push(0x43); }
    difficulty() { this.code.push(0x44); }
    gaslimit() { this.code.push(0x45); }
    // 0x50-0x55: stack, memory and storage.
    pop() { this.code.push(0x50); }
    mload() { this.code.push(0x51); }
    mstore() { this.code.push(0x52); }
    mstore8() { this.code.push(0x53); }
    sload() { this.code.push(0x54); }
    sstore() { this.code.push(0x55); }
    // Pushes a label's offset; if the label is not yet defined, emits a
    // 3-byte placeholder PUSH and records the site for later patching.
    _pushLabel(label) {
        if (typeof this.labels[label] != "undefined") {
            this.push(this.labels[label]);
        } else {
            this.pendingLabels[label] = this.pendingLabels[label] || [];
            this.pendingLabels[label].push(this.code.length);
            this.push("0x000000");
        }
    }
    // Patches every pending PUSH3 placeholder for `label` with its offset.
    _fillLabel(label) {
        if (!this.pendingLabels[label]) return;
        let dst = this.labels[label];
        const dst3 = [dst >> 16, (dst >> 8) & 0xFF, dst & 0xFF];
        this.pendingLabels[label].forEach((p) => {
            // p is the PUSH3 opcode offset; its 3 data bytes follow it.
            for (let i=0; i<3; i++) {
                this.code[p+i+1] = dst3[i];
            }
        });
        delete this.pendingLabels[label];
    }
    // Unconditional JUMP; with a label argument, pushes the destination first.
    jmp(label) {
        if (typeof label !== "undefined") {
            this._pushLabel(label);
        }
        this.code.push(0x56);
    }
    // Conditional JUMPI; with a label argument, pushes the destination first.
    jmpi(label) {
        if (typeof label !== "undefined") {
            this._pushLabel(label);
        }
        this.code.push(0x57);
    }
    pc() { this.code.push(0x58); }
    msize() { this.code.push(0x59); }
    gas() { this.code.push(0x5a); }
    // Defines `name` at the current offset, emits JUMPDEST (0x5b) and patches
    // any earlier forward references to it.
    label(name) {
        if (typeof this.labels[name] != "undefined") {
            throw new Error("Label already defined");
        }
        this.labels[name] = this.code.length;
        this.code.push(0x5b);
        this._fillLabel(name);
    }
    // PUSH1..PUSH32 with the minimal byte width for `data` (number or hex string).
    push(data) {
        if (typeof data === "number") {
            let isNeg;
            if (data<0) {
                isNeg = true;
                data = -data;
            }
            data = data.toString(16);
            if (data.length % 2 == 1) data = "0" + data;
            data = "0x" + data;
            if (isNeg) data = "-"+data;
        }
        const d = Web3Utils.hexToBytes(Web3Utils.toHex(data));
        if (d.length == 0 || d.length > 32) {
            throw new Error("Assertion failed");
        }
        this.code = this.code.concat([0x5F + d.length], d);
    }
    // DUP1..DUP16; n is zero-based (dup(0) duplicates the top of stack).
    dup(n) {
        if (n < 0 || n >= 16) {
            throw new Error("Assertion failed");
        }
        this.code.push(0x80 + n);
    }
    // SWAP1..SWAP16; n is one-based (swap(1) swaps the top two items).
    swap(n) {
        if (n < 1 || n > 16) {
            throw new Error("Assertion failed");
        }
        this.code.push(0x8f + n);
    }
    log0() { this.code.push(0xa0); }
    log1() { this.code.push(0xa1); }
    log2() { this.code.push(0xa2); }
    log3() { this.code.push(0xa3); }
    log4() { this.code.push(0xa4); }
    // 0xf0-0xff: calls, creation and termination.
    create() { this.code.push(0xf0); }
    call() { this.code.push(0xf1); }
    callcode() { this.code.push(0xf2); }
    return() { this.code.push(0xf3); }
    delegatecall() { this.code.push(0xf4); }
    staticcall() { this.code.push(0xfa); }
    revert() { this.code.push(0xfd); }
    invalid() { this.code.push(0xfe); }
    selfdestruct() { this.code.push(0xff); }
}
module.exports = Contract;
| https://github.com/socathie/circomlib-ml |
test/modules/src/g2_gencontract.js | // Copyright (c) 2018 Jordi Baylina
// License: LGPL-3.0+
//
const Contract = require("./evmasm");
const G2 = require("snarkjs").bn128.G2;
/*
 * Formats a scalar as a 0x-prefixed, zero-padded 64-hex-digit string.
 */
function toHex256(a) {
    return "0x" + a.toString(16).padStart(64, "0");
}
/*
 * Generates EVM creation bytecode for a contract exposing mulexp(uint256):
 * fixed-base windowed multiplication of the G2 point P by the supplied
 * scalar, with window width `w` bits and a table of 2^(w-1) precomputed
 * multiples stored in memory.  Points are tracked in Jacobian-style
 * (X, Y, Z) coordinates over Fp2; the stack comments throughout list the
 * stack top-to-bottom after each instruction.
 *
 * NOTE(review): this generator calls several methods that the sibling
 * evmasm.js Contract does not define (allocMem, internalCall, returnCall,
 * shr, expmod, jumpi/jump as opposed to jmpi/jmp) and contains apparent
 * typos (C.dub; C.sub/C.add/C.square invoked with arguments as if they
 * were the local helpers).  It looks unfinished or written against an
 * extended assembler — confirm before relying on it.
 */
function createCode(P, w) {
    const C = new Contract();
    const NPOINTS = 1 << (w-1);
    const VAR_POS = C.allocMem(32);
    const VAR_POINTS = C.allocMem( (NPOINTS)*4*32);
    const savedP = C.allocMem(32);
    const savedZ3 = C.allocMem(32);
    // Check selector
    C.push("0x0100000000000000000000000000000000000000000000000000000000");
    C.push(0);
    C.calldataload();
    C.div();
    // NOTE(review): selector pushed without the 0x prefix used elsewhere —
    // confirm Web3Utils.toHex treats it as hex.
    C.push("b65c7c74"); // mulexp(uint256)
    C.eq();
    C.jmpi("start");
    C.invalid();
    C.label("start");
    storeVals();
    C.push( Math.floor(255/w)*w ); // pos := 255
    C.push(VAR_POS);
    C.mstore();
    C.push("21888242871839275222246405745257275088696311157297823662689037894645226208583");
    C.push(0);
    C.push(0);
    C.push(0);
    C.push(0);
    C.push(0);
    C.push(0);
    // Main left-to-right windowed loop: double w times, then add the table
    // entry selected by the current window of the exponent.
    C.label("begin_loop"); // ACC_X ACC_Y ACC_Z q
    C.internalCall("double");
    // g = (e>>pos)&MASK
    C.push(4);
    C.calldataload(); // e ACC_X ACC_Y ACC_Z q
    C.push(VAR_POS);
    C.mload(); // pos e ACC_X ACC_Y ACC_Z q
    C.shr();
    C.push(NPOINTS-1);
    C.and(); // g ACC_X ACC_Y ACC_Z q
    C.internalCall("add"); // acc_x acc_y acc_z
    C.push(VAR_POS);
    C.mload(); // pos acc_x acc_y acc_z
    C.dup(0); // pos pos acc_x acc_y acc_z
    C.push(0); // 0 pos pos acc_x acc_y acc_z
    C.eq(); // eq pos acc_x acc_y acc_z
    C.jmpi("after_loop"); // pos acc_x acc_y acc_z
    C.push(w); // 5 pos acc_x acc_y acc_z
    C.sub(); // pos acc_x acc_y acc_z
    C.push(VAR_POS);
    C.mstore(); // acc_x acc_y acc_z
    C.jmp("begin_loop");
    C.label("after_loop"); // pos acc_x acc_y acc_z
    C.pop(); // acc_x acc_y acc_z
    // Convert to affine and return the 4 words of the result.
    C.internalCall("affine"); // acc_x acc_y
    C.push(0);
    C.mstore();
    C.push(20);
    C.mstore();
    C.push(40);
    C.mstore();
    C.push(60);
    C.mstore();
    C.push("0x80");
    C.push("0x00");
    C.return();
    double();
    addPoint();
    affine();
    return C.createTxData();
    // Emits code adding two Fp2 stack elements at depths a and b
    // (imaginary part first, then real), leaving the sum on top.
    function add(a,b,q) {
        C.dup(q);
        C.dup(a+1 + 1);
        C.dup(b+1 + 2);
        C.addmod();
        C.dup(q + 1);
        C.dup(a + 2);
        C.dup(b + 3);
        C.addmod();
    }
    // Emits code computing the Fp2 difference (a - b) of stack elements.
    function sub(a,b,q) {
        C.dup(q); // q
        C.dup(a+1 + 1); // ai q
        // NOTE(review): "dub" is not a Contract method — almost certainly a
        // typo for dup; this path would throw at generation time.
        C.dub(q + 2); // q ai q
        C.dup(b+1 + 3); // bi q ai q
        C.sub(); // -bi ai q
        C.addmod(); // ci
        C.dup(q + 1); // q ci
        C.dup(a + 2); // ar q ci
        C.dup(q + 3); // q ar q ci
        C.dup(b + 4); // br q ar q ci
        C.sub(); // -br ar q ci
        C.addmod(); // cr ci
    }
    // Emits code multiplying two Fp2 stack elements (Karatsuba-free form).
    function mul(a, b, q) {
        C.dup(q); // q
        C.dup(q + 1); // q q
        C.dup(a + 2); // ar q q
        C.dup(b+1 + 3); // bi ar q q
        C.mulmod(); // ci1 q
        C.dup(q + 2); // q ci1 q
        C.dup(a+1 + 3); // ai q ci1 q
        C.dup(b + 4); // ar ai q ci1 q
        C.mulmod(); // ci2 ci1 q
        C.addmod(); // ci
        C.dup(q + 1); // q ci
        C.dup(q + 2); // q q ci
        C.dup(q + 3); // q q q ci
        C.dup(a+1 + 4); // ai q q ci
        C.dup(b+1 + 5); // bi ai q q ci
        C.mulmod(); // cr2 q q ci
        C.sub(); // -cr2 q ci
        C.dup(q + 3); // q -cr2 q ci
        C.dup(a + 4); // ar q -cr2 q ci
        C.dup(b + 5); // br ar q -cr2 q ci
        C.mulmod(); // cr1 -cr2 q ci
        C.addmod(); // cr ci
    }
    // Emits code squaring an Fp2 stack element.
    function square(a, q) {
        C.dup(q); // q
        C.dup(q + 1); // q q
        C.dup(a + 2); // ar q q
        C.dup(a+1 + 3); // ai ar q q
        C.mulmod(); // arai q
        C.dup(0); // arai arai q
        C.addmod(); // ci
        C.dup(q + 1); // q ci
        C.dup(q + 2); // q q ci
        C.dup(q + 3); // q q q ci
        C.dup(a+1 + 4); // ai q q ci
        C.dup(a+1 + 5); // ai ai q q ci
        C.mulmod(); // cr2 q q ci
        C.sub(); // -cr2 q ci
        C.dup(q + 3); // q -cr2 q ci
        C.dup(a + 4); // ar q -cr2 q ci
        C.dup(a + 5); // br ar q -cr2 q ci
        C.mulmod(); // cr1 -cr2 q ci
        C.addmod(); // cr ci
    }
    // Emits code adding 1 to the real part of an Fp2 stack element.
    function add1(a, q) {
        C.dup(a+1); // im
        C.dup(1 + q); // q
        C.dup(2 + a); // re q im
        C.push(1); // 1 re q im
        C.addmod();
    }
    // Emits code comparing two Fp2 stack elements for equality.
    // NOTE(review): the second half compares a+1 with itself (a+1 twice) —
    // looks like it should compare against b+1; confirm.
    function cmp(a, b) {
        C.dup(a);
        C.dup(b);
        C.eq();
        C.dup(a+1);
        C.dup(a+1);
        C.and();
    }
    // Emits code removing the Fp2 element at depth a from the stack.
    function rm(a) {
        if (a>0) C.swap(a);
        C.pop();
        if (a>0) C.swap(a);
        C.pop();
    }
    // Emits the point-doubling subroutine (Jacobian-style coordinates).
    function double() {
        C.label("double"); // xR, xI, yR, yI, zR zI, q
        C.dup(4);
        C.iszero();
        C.dup(6);
        C.iszero();
        C.and();
        // NOTE(review): "jumpi"/"jump"/"returnCall" are not defined on the
        // evmasm.js Contract (which exposes jmpi/jmp) — confirm the intended
        // assembler.
        C.jumpi("enddouble"); // X Y Z q
        // Z3 = 2*Y*Z // Remove Z
        mul(2, 4, 6); // yz X Y Z q
        rm(6); // X Y yz q
        add(4, 4, 6); // 2yz X Y yz q
        rm(6); // X Y Z3 q
        // A = X^2
        square(0,6); // A X Y Z3 q
        // B = Y^2 // Remove Y
        square(4,8); // B A X Y Z3 q
        rm(6); // A X B Z3 q
        // C = B^2
        square(4,8); // C A X B Z3 q
        // D = (X+B)^2-A-C // Remove X, Remove B
        add(4,6, 10); // X+B C A X B Z3 q
        rm(6); // C A X+B B Z3 q
        rm(6); // A X+B C Z3 q
        square(2,8); // (X+B)^2 A X+B C Z3 q
        rm(4); // A (X+B)^2 C Z3 q
        sub(2, 0, 8); // (X+B)^2-A A (X+B)^2 C Z3 q
        rm(4); // A (X+B)^2-A C Z3 q
        sub(2, 4, 8); // (X+B)^2-A-C A (X+B)^2-A C Z3 q
        rm(4); // A D C Z3 q
        // D = D+D
        add(2,2, 8); // D+D A D C Z3 q
        rm(4); // A D C Z3 q
        // E=A+A+A
        add(0, 0, 8); // 2A A D C Z3 q
        add(0, 2, 10); // 3A 2A A D C Z3 q
        rm(4); // 2A 3A D C Z3 q
        rm(0); // E D C Z3 q
        // F=E^2
        square(0, 8); // F E D C Z3 q
        // X3= F - 2*D // Remove F
        add(4, 4, 10); // 2D F E D C Z3 q
        sub(2, 0, 12); // F-2D 2D F E D C Z3 q
        rm(4); // 2D X3 E D C Z3 q
        rm(0); // X3 E D C Z3 q
        // Y3 = E * (D - X3) - 8 * C // Remove D C E
        sub(4, 0, 10); // D-X3 X3 E D C Z3 q
        rm(6); // X3 E D-X3 C Z3 q
        mul(2, 4, 10); // E*(D-X3) X3 E D-X3 C Z3 q
        rm(6); // X3 E E*(D-X3) C Z3 q
        rm(2); // X3 E*(D-X3) C Z3 q
        add(4, 4, 8); // 2C X3 E*(D-X3) C Z3 q
        rm(6); // X3 E*(D-X3) 2C Z3 q
        add(4, 4, 8); // 4C X3 E*(D-X3) 2C Z3 q
        rm(6); // X3 E*(D-X3) 4C Z3 q
        add(4, 4, 8); // 8C X3 E*(D-X3) 4C Z3 q
        rm(6); // X3 E*(D-X3) 8C Z3 q
        sub(2, 4, 8); // E*(D-X3)-8C X3 E*(D-X3) 8C Z3 q
        rm(6); // X3 E*(D-X3) Y3 Z3 q
        rm(2); // X3 Y3 Z3 q
        C.label("enddouble");
        C.returnCall();
    }
    // Emits the mixed-addition subroutine: adds the table point with index
    // `p` (taken from the stack) to the accumulator.
    function addPoint() { // p, xR, xI, yR, yI, zR zI, q
        C.dup(0); // p p X2 Y2 Z2 q
        C.push(savedP);
        C.mstore();
        C.iszero(); // X2 Y2 Z2 q
        C.jumpi("endpadd");
        C.dup(4);
        C.iszero();
        C.dup(6);
        C.iszero();
        C.and();
        C.jumpi("returnP"); // X2 Y2 Z2 q
        // lastZ3 = (Z2+1)^2 - Z2^2
        add1(4, 6); // Z2+1 X2 Y2 Z2 q
        square(0, 8); // (Z2+1)^2 Z2+1 X2 Y2 Z2 q
        rm(2); // (Z2+1)^2 X2 Y2 Z2 q
        square(6, 8); // Z2^2 (Z2+1)^2 X2 Y2 Z2 q
        sub(2, 0, 10); // (Z2+1)^2-Z2^2 Z2^2 (Z2+1)^2 X2 Y2 Z2 q
        saveZ3(); // Z2^2 (Z2+1)^2 X2 Y2 Z2 q
        rm(2); // Z2^2 X2 Y2 Z2 q
        // U2 = X2
        // S2 = Y2 // Z2^2 U2 S2 Z2 q
        // U1 = X1 * Z2^2
        loadX(); // X1 Z2^2 U2 S2 Z2 q
        mul(0, 2, 10); // X1*Z2^2 X1 Z2^2 U2 S2 Z2 q
        rm(2); // X1*Z2^2 Z2^2 U2 S2 Z2 q
        mul(2, 8, 10); // Z2^3 U1 Z2^2 U2 S2 Z2 q
        rm(4); // U1 Z2^3 U2 S2 Z2 q
        rm(8); // Z2^3 U2 S2 U1 q
        // S1 = Y1 * Z1^3
        loadY(); // Y1 Z2^3 U2 S2 U1 q
        mul(0, 2, 10); // S1 Y1 Z2^3 U2 S2 U1 q
        rm(4); // Y1 S1 U2 S2 U1 q
        rm(0); // S1 U2 S2 U1 q
        cmp(0, 4); // c1 S1 U2 S2 U1 q
        cmp(3, 7); // c2 c1 S1 U2 S2 U1 q
        C.and(); // c2&c1 S1 U2 S2 U1 q
        C.jumpi("double1"); // S1 U2 S2 U1 q
        // Returns the double
        // H = U2-U1 // Remove U2
        // NOTE(review): C.sub/C.add/C.square below are invoked with
        // arguments as if they were the local helpers sub/add/square —
        // the Contract methods take no arguments; confirm intent.
        C.sub(4, 8, 10); // H S1 U2 S2 U1 q
        rm(4); // S1 H S2 U1 q
        // // r = 2 * (S2-S1) // Remove S2
        C.sub(4, 4, 8); // S1-S2 S1 H S2 U1 q
        rm(6); // S1 H S1-S2 U1 q
        C.add(4, 4, 8); // 2*(S1-S2) S1 H S1-S2 U1 q
        rm(6); // S1 H r U1 q
        // I = (2 * H)^2
        C.add(2, 2, 8); // 2*H S1 H r U1 q
        C.square(0, 10); // (2*H)^2 2*H S1 H r U1 q
        rm(2); // I S1 H r U1 q
        // V = U1 * I
        mul(8, 0, 10); // V I S1 H r U1 q
        rm(10); // I S1 H r V q
        // J = H * I // Remove I
        mul(4, 0, 10); // J I S1 H r V q
        rm(2); // J S1 H r V q
        // X3 = r^2 - J - 2 * V
        // S1J2 = (S1*J)*2 // Remove S1
        mul(2, 0, 10); // S1*J J S1 H r V q
        rm(4); // J S1*J H r V q
        add(2,2, 10); // (S1*J)*2 J S1*J H r V q
        rm(4); // J S1J2 H r V q
        // X3 = r^2 - J - 2 * V
        square(6, 10); // r^2 J S1J2 H r V q
        sub(0, 2, 12); // r^2-J r^2 J S1J2 H r V q
        rm(2); // r^2-J J S1J2 H r V q
        rm(2); // r^2-J S1J2 H r V q
        add(8, 8, 10); // 2*V r^2-J S1J2 H r V q
        sub(2, 0, 12); // r^2-J-2*V 2*V r^2-J S1J2 H r V q
        rm(4); // 2*V X3 S1J2 H r V q
        rm(0); // X3 S1J2 H r V q
        // Y3 = r * (V-X3)-S1J2
        sub(8, 0, 10); // V-X3 X3 S1J2 H r V q
        rm(10); // X3 S1J2 H r V-X3 q
        mul(6, 8, 10); // r*(V-X3) X3 S1J2 H r V-X3 q
        rm(8); // X3 S1J2 H r*(V-X3) V-X3 q
        rm(8); // S1J2 H r*(V-X3) X3 q
        sub(4, 0, 8); // Y3 S1J2 H r*(V-X3) X3 q
        rm(6); // S1J2 H Y3 X3 q
        rm(0); // H Y3 X3 q
        // Z3 = lastZ * H
        loadZ3(); // lastZ3 H Y3 X3 q
        mul(0, 2, 8); // Z3 lastZ3 H Y3 X3 q
        rm(4); // lastZ3 Z3 Y3 X3 q
        rm(0); // Z3 Y3 X3 q
        C.swap(1);
        C.swap(5);
        C.swap(1);
        C.swap(4); // X3 Y3 Z3 q
        // returns the point in memory
        C.label("returnP"); // X Y Z q
        rm(0);
        rm(0);
        rm(0);
        C.push(0);
        C.push(1);
        loadX();
        loadY();
        C.jump("endpadd");
        C.label("double1"); // S1 U2 S2 U1 q
        rm(0);
        rm(0);
        rm(0);
        rm(0);
        C.push(0);
        C.push(1);
        loadX();
        loadY();
        C.jump("double");
        C.label("endpadd");
        C.returnCall();
        // Loads the X coordinate of table point savedP from memory.
        function loadX() {
            C.push(savedP);
            C.mload(); // p
            C.push(32);
            C.mul(); // P*32
            C.push(VAR_POINTS+32);
            C.add(); // P*32+32
            C.dup(); // P*32+32 P*32+32
            C.mload(); // im P*32+32
            C.swap(1); // P*32+32 im
            C.push(0x20); // 32 P*32+32 im
            C.sub(); // P*32 im
            C.mload(); // re im
        }
        // Loads the Y coordinate of table point savedP from memory.
        function loadY() {
            C.push(savedP);
            C.mload(); // p
            C.push(32);
            C.mul(); // P*32
            C.push(VAR_POINTS+32*3);
            C.add(); // P*32+32
            C.dup(); // P*32+32 P*32+32
            C.mload(); // im P*32+32
            C.swap(1); // P*32+32 im
            C.push(0x20); // 32 P*32+32 im
            C.sub(); // P*32 im
            C.mload(); // re im
        }
        // Restores the saved Z3 (real, imaginary) from memory.
        function loadZ3() {
            C.push(savedZ3+32);
            C.mload(); // p
            C.push(savedZ3);
            C.mload();
        }
        // Stores the Z3 on the stack (real, imaginary) to memory.
        function saveZ3() {
            C.push(savedZ3);
            C.mstore();
            C.push(savedZ3+32);
            C.mstore();
        }
    }
    // Emits the Jacobian -> affine conversion subroutine.
    function affine() { // X Y Z q
        // If Z2=0 return 0
        C.label("affine");
        C.dup(4);
        C.dup(5 + 1);
        C.or();
        C.jumpi("notZero"); // X Y Z q
        rm(0);
        rm(0);
        C.push(0);
        C.push(0);
        C.jmp("endAffine");
        C.label("notZero");
        inverse2(4,6); // Z_inv X Y Z q
        square(2, 8); // Z2_inv Z_inv X Y Z q
        mul(0, 2, 10); // Z3_inv Z2_inv Z_inv X Y Z q
        rm(4); // Z2_inv Z3_inv X Y Z q
        C.push(1);
        C.push(0); // 1 Z2_inv Z3_inv X Y Z q
        rm(10); // Z2_inv Z3_inv X Y 1 q
        mul(2, 6, 10); // YI Z2_inv Z3_inv X Y 1 q
        rm(8); // Z2_inv Z3_inv X YI 1 q
        mul(0, 4, 10); // XI Z2_inv Z3_inv X YI 1 q
        rm(6); // Z2_inv Z3_inv XI YI 1 q
        rm(0); // Z3_inv XI YI 1 q
        rm(0); // XI YI 1 q
        C.label("endAffine");
        C.returnCall();
    }
    // Emits code computing the Fp2 inverse of the element at depth a,
    // via Fermat's little theorem on the norm (uses EXPMOD-style expmod).
    function inverse2(a, q) {
        C.dup(q); // q
        C.dup(q + 1); // q q
        C.push(2); // 2 q q
        C.sub(); // q-2 q
        C.dup(q + 2); // q q-2 q
        C.dup(q + 3); // q q q-2 q
        C.dup(a + 4); // ar q q q-2 q
        C.dup(a + 5); // ar ar q q q-2 q
        C.mulmod(); // t0 q q-2 q
        C.dup(q + 4); // q t0 q q-2 q
        C.dup(a+1 + 5); // ai q t0 q q-2 q
        C.dup(a+1 + 6); // ai ai q t0 q q-2 q
        C.mulmod(); // t1 t0 q q-2 q
        C.addmod(); // t2 q-2 q
        C.expmod(); // t3
        C.dup(q + 1); // q t3
        C.dup(q + 2); // q q t3
        C.dup(q + 3); // q q q t3
        C.dup(1); // t3 q q q t3
        C.sub(); // -t3 q q t3
        C.dup(a+1 + 3); // ai -t3 q q t3
        C.mulmod(); // ii q t3
        C.swap(2); // t3 q ii
        C.dup(a + 3); // ar t3 q ii
        C.mulmod(); // ir ii
    }
    // Writes the 2^(w-1) precomputed multiples of P into memory at
    // VAR_POINTS (4 words per point: X.re X.im Y.re Y.im).
    function storeVals() {
        C.push(VAR_POINTS); // p
        for (let i=0; i<NPOINTS; i++) {
            const MP = G2.affine(G2.mulScalar(P, i));
            for (let j=0; j<2; j++) {
                for (let k=0; k<2; k++) {
                    C.push(toHex256(MP[j][k])); // MP[0][0] p
                    C.dup(1); // p MP[0][0] p
                    C.mstore(); // p
                    C.push(32); // 32 p
                    C.add(); // p+32
                }
            }
        }
    }
}
// Solidity ABI of the generated contract: mulexp(uint256) -> (uint256, uint256).
// NOTE(review): an affine G2 point occupies four 256-bit words, but only two
// outputs are declared here — confirm against the generated return data.
module.exports.abi = [
    {
        "constant": true,
        "inputs": [
            {
                "name": "escalar",
                "type": "uint256"
            }
        ],
        "name": "mulexp",
        "outputs": [
            {
                "name": "",
                "type": "uint256"
            },
            {
                "name": "",
                "type": "uint256"
            }
        ],
        "payable": false,
        "stateMutability": "pure",
        "type": "function"
    }
];
module.exports.createCode = createCode;
| https://github.com/socathie/circomlib-ml |
test/modules/src/mimc7.js | const Scalar = require("ffjavascript").Scalar;
const ZqField = require("ffjavascript").ZqField;
const Web3Utils = require("web3-utils");
const F = new ZqField(Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617"));
exports.F = F;
const SEED = "mimc";
const NROUNDS = 91;
exports.getIV = (seed) => {
if (typeof seed === "undefined") seed = SEED;
const c = Web3Utils.keccak256(seed+"_iv");
const cn = Scalar.FromString(Web3Utils.toBN(c).toString());
const iv = cn.mod(F.p);
return iv;
};
exports.getConstants = (seed, nRounds) => {
if (typeof seed === "undefined") seed = SEED;
if (typeof nRounds === "undefined") nRounds = NROUNDS;
const cts = new Array(nRounds);
let c = Web3Utils.keccak256(SEED);
for (let i=1; i<nRounds; i++) {
c = Web3Utils.keccak256(c);
const n1 = Web3Utils.toBN(c).mod(Web3Utils.toBN(F.p.toString()));
const c2 = Web3Utils.padLeft(Web3Utils.toHex(n1), 64);
cts[i] = Scalar.fromString(Web3Utils.toBN(c2).toString());
}
cts[0] = F.e(0);
return cts;
};
const cts = exports.getConstants(SEED, 91);
exports.hash = (_x_in, _k) =>{
const x_in = F.e(_x_in);
const k = F.e(_k);
let r;
for (let i=0; i<NROUNDS; i++) {
const c = cts[i];
const t = (i==0) ? F.add(x_in, k) : F.add(F.add(r, k), c);
r = F.pow(t, 7);
}
return F.add(r, k);
};
exports.multiHash = (arr, key) => {
let r;
if (typeof(key) === "undefined") {
r = F.zero;
} else {
r = key;
}
for (let i=0; i<arr.length; i++) {
r = F.add(
F.add(
r,
arr[i]
),
exports.hash(F.e(arr[i]), r)
);
}
return r;
};
| https://github.com/socathie/circomlib-ml |
test/modules/src/mimc_gencontract.js | // Copyright (c) 2018 Jordi Baylina
// License: LGPL-3.0+
//
const Web3Utils = require("web3-utils");
const Contract = require("./evmasm");
/*
 * Generates EVM creation bytecode for a hasher contract exposing
 * MiMCpe7(in_x, in_k): `n` rounds of r -> (r + k + c_i)^7 mod q, with the
 * round constants derived by an iterated keccak256 chain over `seed`,
 * finished by a final key addition (mirrors mimc7.js).
 * Returns the creation tx data as a hex string.
 */
function createCode(seed, n) {
    let ci = Web3Utils.keccak256(seed);
    const C = new Contract();
    // Copy calldata (selector + two uint256 arguments) into memory 0..0x43.
    C.push(0x44);
    C.push("0x00");
    C.push("0x00");
    C.calldatacopy();
    // Extract the 4-byte selector and require MiMCpe7(uint256,uint256).
    C.push("0x0100000000000000000000000000000000000000000000000000000000");
    C.push("0x00");
    C.mload();
    C.div();
    C.push("0xd15ca109"); // MiMCpe7(uint256,uint256)
    // C.push("0x8c42199e"); // MiMCpe7(uint256,uint256,uint256)
    C.eq();
    C.jmpi("start");
    C.invalid();
    C.label("start");
    C.push("0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001"); // q
    C.push("0x24");
    C.mload(); // k q
    C.dup(1); // q k q
    C.dup(0); // q q k q
    // Round 0: t = x + k, r = t^7 (computed as t^2 -> t^4 -> t^6 -> t^7).
    C.push("0x04");
    C.mload(); // x q q k q
    C.dup(3); // k x q q k q
    C.addmod(); // t=x+k q k q
    C.dup(1); // q t q k q
    C.dup(0); // q q t q k q
    C.dup(2); // t q q t q k q
    C.dup(0); // t t q q t q k q
    C.mulmod(); // a=t^2 q t q k q
    C.dup(1); // q a q t q k q
    C.dup(1); // a q a q t q k q
    C.dup(0); // a a q a q t q k q
    C.mulmod(); // b=t^4 a q t q k q
    C.mulmod(); // c=t^6 t q k q
    C.mulmod(); // r=t^7 k q
    // Rounds 1..n-1, with the constant c_i baked into the bytecode.
    for (let i=0; i<n-1; i++) {
        ci = Web3Utils.keccak256(ci);
        C.dup(2); // q r k q
        C.dup(0); // q q r k q
        C.dup(0); // q q q r k q
        C.swap(3); // r q q q k q
        C.push(ci); // c r q q k q
        C.addmod(); // s=c+r q q k q
        C.dup(3); // k s q q k q
        C.addmod(); // t=s+k q k q
        C.dup(1); // q t q k q
        C.dup(0); // q q t q k q
        C.dup(2); // t q q t q k q
        C.dup(0); // t t q q t q k q
        C.mulmod(); // a=t^2 q t q k q
        C.dup(1); // q a q t q k q
        C.dup(1); // a q a q t q k q
        C.dup(0); // a a q a q t q k q
        C.mulmod(); // b=t^4 a q t q k q
        C.mulmod(); // c=t^6 t q k q
        C.mulmod(); // r=t^7 k q
    }
    // Final key addition; store the result at memory 0 and return 32 bytes.
    C.addmod(); // res=t^7+k
    C.push("0x00");
    C.mstore(); // Save it to pos 0;
    C.push("0x20");
    C.push("0x00");
    C.return();
    return C.createTxData();
}
// Solidity ABI of the generated hasher contract:
// MiMCpe7(uint256 in_x, uint256 in_k) -> uint256 out_x.
module.exports.abi = [
    {
        "constant": true,
        "inputs": [
            {
                "name": "in_x",
                "type": "uint256"
            },
            {
                "name": "in_k",
                "type": "uint256"
            }
        ],
        "name": "MiMCpe7",
        "outputs": [
            {
                "name": "out_x",
                "type": "uint256"
            }
        ],
        "payable": false,
        "stateMutability": "pure",
        "type": "function"
    }
];
module.exports.createCode = createCode;
| https://github.com/socathie/circomlib-ml |
test/modules/src/mimc_print_iv.js | const mimc7 = require("./mimc7.js");
// Print the MiMC-7 initialisation vector derived from the default seed.
console.log("IV: "+mimc7.getIV().toString());
| https://github.com/socathie/circomlib-ml |
test/modules/src/mimc_printconstants.js | const mimc7 = require("./mimc7.js");
// Print the 91 MiMC-7 round constants as a bracketed, newline-separated list.
const nRounds = 91;
const cts = mimc7.getConstants();
const body = [];
for (let i = 0; i < nRounds; i++) {
    body.push(cts[i].toString() + (i < nRounds - 1 ? "," : ""));
}
const S = "[\n" + body.join("\n") + "\n]\n";
console.log(S);
| https://github.com/socathie/circomlib-ml |
test/modules/src/mimc_printcontract.js | const mimcGenContract = require("./mimc_gencontract");
const SEED = "mimc";
// Number of MiMC rounds; optionally overridden by the first CLI argument.
let nRounds;
if (typeof process.argv[2] != "undefined") {
    // BUG FIX: always pass an explicit radix — without it, arguments with a
    // "0x" prefix (or leading zeros on older engines) are mis-parsed.
    nRounds = parseInt(process.argv[2], 10);
} else {
    nRounds = 91;
}
// Emit the EVM creation bytecode of the MiMCpe7 hasher contract to stdout.
console.log(mimcGenContract.createCode(SEED, nRounds));
| https://github.com/socathie/circomlib-ml |
test/modules/src/mimcsponge.js | const Scalar = require("ffjavascript").Scalar
const Web3Utils = require("web3-utils");
const ZqField = require("ffjavascript").ZqField;
const F = new ZqField(Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617"));
const SEED = "mimcsponge";
const NROUNDS = 220;
exports.getIV = (seed) => {
if (typeof seed === "undefined") seed = SEED;
const c = Web3Utils.keccak256(seed+"_iv");
const cn = Scalar.fromString(Web3Utils.toBN(c).toString());
const iv = cn.mod(F.p);
return iv;
};
exports.getConstants = (seed, nRounds) => {
if (typeof seed === "undefined") seed = SEED;
if (typeof nRounds === "undefined") nRounds = NROUNDS;
const cts = new Array(nRounds);
let c = Web3Utils.keccak256(SEED);
for (let i=1; i<nRounds; i++) {
c = Web3Utils.keccak256(c);
const n1 = Web3Utils.toBN(c).mod(Web3Utils.toBN(F.p.toString()));
const c2 = Web3Utils.padLeft(Web3Utils.toHex(n1), 64);
cts[i] = F.e(Web3Utils.toBN(c2).toString());
}
cts[0] = F.e(0);
cts[cts.length - 1] = F.e(0);
return cts;
};
// Precompute the default constant schedule once at module load.
const cts = exports.getConstants(SEED, NROUNDS);
/**
 * One MiMC-Feistel permutation of (xL, xR) under key k.
 * Per round: t = xL + k (+ round constant c_i for i > 0), then
 * (xL, xR) <- (xR + t^5, xL); the final round omits the swap.
 * @returns {{xL, xR}} both halves as normalized field elements.
 */
exports.hash = (_xL_in, _xR_in, _k) =>{
    let xL = F.e(_xL_in);
    let xR = F.e(_xR_in);
    const k = F.e(_k);
    for (let i=0; i<NROUNDS; i++) {
        const c = cts[i];
        // cts[0] is 0, so round 0 skips the constant addition entirely.
        const t = (i==0) ? F.add(xL, k) : F.add(F.add(xL, k), c);
        const xR_tmp = F.e(xR);
        if (i < (NROUNDS - 1)) {
            xR = xL;
            xL = F.add(xR_tmp, F.pow(t, 5));
        } else {
            xR = F.add(xR_tmp, F.pow(t, 5));
        }
    }
    return {
        xL: F.normalize(xL),
        xR: F.normalize(xR),
    };
};
/**
 * Sponge construction over `hash`.
 * Absorb: each input is added into R, then (R, C) is permuted.
 * Squeeze: R is emitted, permuting again before each additional output.
 * @param {Array} arr - field-coercible inputs.
 * @param [key] - permutation key; defaults to 0.
 * @param {number} [numOutputs] - defaults to 1. A single output is returned
 *   as a scalar, multiple outputs as an array.
 */
exports.multiHash = (arr, key, numOutputs) => {
    if (typeof(numOutputs) === "undefined") {
        numOutputs = 1;
    }
    if (typeof(key) === "undefined") {
        key = F.zero;
    }
    let R = F.zero;
    let C = F.zero;
    for (let i=0; i<arr.length; i++) {
        R = F.add(R, F.e(arr[i]));
        const S = exports.hash(R, C, key);
        R = S.xL;
        C = S.xR;
    }
    let outputs = [R];
    for (let i=1; i < numOutputs; i++) {
        const S = exports.hash(R, C, key);
        R = S.xL;
        C = S.xR;
        outputs.push(R);
    }
    if (numOutputs == 1) {
        return F.normalize(outputs[0]);
    } else {
        return outputs.map(x => F.normalize(x));
    }
};
| https://github.com/socathie/circomlib-ml |
test/modules/src/mimcsponge_gencontract.js | // Copyright (c) 2018 Jordi Baylina
// License: LGPL-3.0+
//
const Web3Utils = require("web3-utils");
const Contract = require("./evmasm");
/**
 * Generate raw EVM bytecode for a contract exposing
 * MiMCSponge(uint256,uint256,uint256) -> (uint256 xL, uint256 xR),
 * computing `n` Feistel rounds. Round constants are iterated keccak256 of
 * `seed`, with the final constant forced to 0x00 (matching mimcsponge.js).
 * The trailing comment on each instruction shows the stack after it runs.
 * @param {string} seed - constant-derivation seed (e.g. "mimcsponge").
 * @param {number} n - number of rounds (e.g. 220).
 * @returns {string} deployable EVM bytecode.
 */
function createCode(seed, n) {
    let ci = Web3Utils.keccak256(seed);
    const C = new Contract();
    // Copy calldata to memory, then dispatch on the 4-byte selector.
    C.push(0x64);
    C.push("0x00");
    C.push("0x00");
    C.calldatacopy();
    C.push("0x0100000000000000000000000000000000000000000000000000000000");
    C.push("0x00");
    C.mload();
    C.div();
    C.push("0x3f1a1187"); // MiMCSponge(uint256,uint256,uint256)
    C.eq();
    C.jmpi("start");
    C.invalid();
    C.label("start");
    C.push("0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001"); // q
    C.push("0x44");
    C.mload(); // k q
    C.push("0x04");
    C.mload(); // xL k q
    C.dup(2); // q xL k q
    C.push("0x24");
    C.mload(); // xR q xL k q
    C.dup(1); // q xR q xL k q
    C.dup(0); // q q xR q xL k q
    C.dup(4); // xL q q xR q xL k q
    C.dup(6); // k xL q q xR q xL k q
    // Round 0 uses no constant (cts[0] == 0 in the JS reference).
    C.addmod(); // t=k+xL q xR q xL k q
    C.dup(1); // q t q xR q xL k q
    C.dup(0); // q q t q xR q xL k q
    C.dup(2); // t q q t q xR q xL k q
    C.dup(0); // t t q q t q xR q xL k q
    C.mulmod(); // b=t^2 q t q xR q xL k q
    C.dup(0); // b b q t q xR q xL k q
    C.mulmod(); // c=t^4 t q xR q xL k q
    C.mulmod(); // d=t^5 xR q xL k q
    C.addmod(); // e=t^5+xR xL k q (for next round: xL xR k q)
    // Rounds 1..n-1; the last round's constant is pinned to 0x00.
    for (let i=0; i<n-1; i++) {
        if (i < n-2) {
            ci = Web3Utils.keccak256(ci);
        } else {
            ci = "0x00";
        }
        C.swap(1); // xR xL k q
        C.dup(3); // q xR xL k q
        C.dup(3); // k q xR xL k q
        C.dup(1); // q k q xR xL k q
        C.dup(4); // xL q k q xR xL k q
        C.push(ci); // ci xL q k q xR xL k q
        C.addmod(); // a=ci+xL k q xR xL k q
        C.addmod(); // t=a+k xR xL k q
        C.dup(4); // q t xR xL k q
        C.swap(1); // t q xR xL k q
        C.dup(1); // q t q xR xL k q
        C.dup(0); // q q t q xR xL k q
        C.dup(2); // t q q t q xR xL k q
        C.dup(0); // t t q q t q xR xL k q
        C.mulmod(); // b=t^2 q t q xR xL k q
        C.dup(0); // b b q t q xR xL k q
        C.mulmod(); // c=t^4 t q xR xL k q
        C.mulmod(); // d=t^5 xR xL k q
        C.dup(4); // q d xR xL k q
        C.swap(2); // xR d q xL k q
        C.addmod(); // e=t^5+xR xL k q (for next round: xL xR k q)
    }
    // Write (xL, xR) to memory[0..64) and return both words.
    C.push("0x20");
    C.mstore(); // Save it to pos 0;
    C.push("0x00");
    C.mstore(); // Save it to pos 1;
    C.push("0x40");
    C.push("0x00");
    C.return();
    return C.createTxData();
}
// Minimal ABI for the generated contract: one pure function
// MiMCSponge(xL_in, xR_in, k) returning both Feistel halves (xL, xR).
module.exports.abi = [
    {
        "constant": true,
        "inputs": [
            {
                "name": "xL_in",
                "type": "uint256"
            },
            {
                "name": "xR_in",
                "type": "uint256"
            },
            {
                "name": "k",
                "type": "uint256"
            }
        ],
        "name": "MiMCSponge",
        "outputs": [
            {
                "name": "xL",
                "type": "uint256"
            },
            {
                "name": "xR",
                "type": "uint256"
            }
        ],
        "payable": false,
        "stateMutability": "pure",
        "type": "function"
    }
];
module.exports.createCode = createCode;
| https://github.com/socathie/circomlib-ml |
test/modules/src/mimcsponge_printconstants.js | const mimcsponge = require("./mimcsponge.js");
const nRounds = 220;
const cts = mimcsponge.getConstants();
// Render the 220 MiMCSponge round constants as a bracketed list.
const rows = [];
for (let i = 0; i < nRounds; i++) {
    rows.push(cts[i].toString() + (i < nRounds - 1 ? "," : ""));
}
console.log("[\n" + rows.join("\n") + "\n]\n");
| https://github.com/socathie/circomlib-ml |
test/modules/src/mimcsponge_printcontract.js | const mimcGenContract = require("./mimcsponge_gencontract");
const SEED = "mimcsponge";
// Round count comes from argv[2] when given; the sponge default is 220.
const nRounds = typeof process.argv[2] !== "undefined" ? parseInt(process.argv[2]) : 220;
console.log(mimcGenContract.createCode(SEED, nRounds));
| https://github.com/socathie/circomlib-ml |
test/modules/src/pedersenHash.js | const babyJub = require("./babyjub");
const createBlakeHash = require("blake-hash");
const blake2b = require("blake2b");
const Scalar = require("ffjavascript").Scalar;
const GENPOINT_PREFIX = "PedersenGenerator";
const windowSize = 4;
const nWindowsPerSegment = 50;
exports.hash = pedersenHash;
exports.getBasePoint = getBasePoint;
/**
 * Hash the generator seed string with the selected base-hash algorithm.
 * @param {string} type - "blake" (blake256) or "blake2b" (32-byte digest).
 * @param {string} S - seed string.
 * @returns {Buffer} 32-byte digest.
 * @throws {Error} for an unsupported type. The original silently returned
 *   undefined, which only surfaced later as an opaque unpackPoint failure.
 */
function baseHash(type, S) {
    if (type == "blake") {
        return createBlakeHash("blake256").update(S).digest();
    } else if (type == "blake2b") {
        return Buffer.from(blake2b(32).update(Buffer.from(S)).digest());
    }
    throw new Error("Unsupported base hash type: " + type);
}
/**
 * Pedersen hash of a byte buffer onto a packed Baby Jubjub point.
 * Bits (LSB-first per byte) are consumed in segments of
 * windowSize*nWindowsPerSegment bits; each window encodes a signed digit
 * (the window's top bit is the sign), the digits form a per-segment scalar,
 * and each segment scalar multiplies its own deterministic generator from
 * getBasePoint. The per-segment points are summed and packed to 32 bytes.
 * @param {Buffer|Uint8Array} msg - message bytes.
 * @param {Object} [options] - { baseHash: "blake" (default) | "blake2b" }.
 * @returns {Buffer} packed curve point.
 */
function pedersenHash(msg, options) {
    options = options || {};
    options.baseHash = options.baseHash || "blake";
    const bitsPerSegment = windowSize*nWindowsPerSegment;
    const bits = buffer2bits(msg);
    const nSegments = Math.floor((bits.length - 1)/(windowSize*nWindowsPerSegment)) +1;
    // Accumulator starts at the curve identity (0, 1).
    let accP = [babyJub.F.zero,babyJub.F.one];
    for (let s=0; s<nSegments; s++) {
        let nWindows;
        if (s == nSegments-1) {
            // The last segment may be only partially filled.
            nWindows = Math.floor(((bits.length - (nSegments - 1)*bitsPerSegment) - 1) / windowSize) +1;
        } else {
            nWindows = nWindowsPerSegment;
        }
        let escalar = Scalar.e(0);
        let exp = Scalar.e(1);
        for (let w=0; w<nWindows; w++) {
            let o = s*bitsPerSegment + w*windowSize;
            // Window digit: 1 + value of the first windowSize-1 bits ...
            let acc = Scalar.e(1);
            for (let b=0; ((b<windowSize-1)&&(o<bits.length)) ; b++) {
                if (bits[o]) {
                    acc = Scalar.add(acc, Scalar.shl(Scalar.e(1), b) );
                }
                o++;
            }
            // ... negated when the window's final (sign) bit is set.
            if (o<bits.length) {
                if (bits[o]) {
                    acc = Scalar.neg(acc);
                }
                o++;
            }
            escalar = Scalar.add(escalar, Scalar.mul(acc, exp));
            exp = Scalar.shl(exp, windowSize+1);
        }
        // Map a negative segment scalar into [0, subOrder).
        if (Scalar.lt(escalar, 0)) {
            escalar = Scalar.add( escalar, babyJub.subOrder);
        }
        accP = babyJub.addPoint(accP, babyJub.mulPointEscalar(getBasePoint(options.baseHash, s), escalar));
    }
    return babyJub.packPoint(accP);
}
// Memoized per-segment generator points (segment index -> point).
let bases = [];
/**
 * Deterministically derive (and cache) the generator for segment pointIdx:
 * hash "PedersenGenerator_<idx>_<try>" with increasing try counters until the
 * digest unpacks to a valid curve point, then multiply by the cofactor 8 so
 * the generator lies in the prime-order subgroup.
 */
function getBasePoint(baseHashType, pointIdx) {
    if (pointIdx<bases.length) return bases[pointIdx];
    let p= null;
    let tryIdx = 0;
    while (p==null) {
        const S = GENPOINT_PREFIX + "_" + padLeftZeros(pointIdx, 32) + "_" + padLeftZeros(tryIdx, 32);
        const h = baseHash(baseHashType, S);
        h[31] = h[31] & 0xBF; // Set 255th bit to 0 (256th is the signal and 254th is the last possible bit to 1)
        p = babyJub.unpackPoint(h);
        tryIdx++;
    }
    const p8 = babyJub.mulPointEscalar(p, 8);
    if (!babyJub.inSubgroup(p8)) {
        // NOTE(review): message says "curve" but this branch actually rejects
        // points outside the prime-order subgroup.
        throw new Error("Point not in curve");
    }
    bases[pointIdx] = p8;
    return p8;
}
/**
 * Left-pad the decimal representation of `idx` with zeros to length `n`.
 * Returns the bare decimal string unchanged when it is already >= n chars.
 * @param {number} idx - non-negative integer.
 * @param {number} n - target string length.
 * @returns {string}
 */
function padLeftZeros(idx, n) {
    // String.prototype.padStart replaces the original manual while-loop.
    return String(idx).padStart(n, "0");
}
/**
 * Expand a byte buffer into an array of bit flags, LSB-first within each
 * byte: entry i*8+j is non-zero iff bit j of byte i is set. Values are the
 * raw masked bytes (e.g. 0 or 0x40), intended for truthy/falsy use only.
 */
function buffer2bits(buff) {
    const res = new Array(buff.length * 8);
    for (let i = 0; i < buff.length; i++) {
        for (let j = 0; j < 8; j++) {
            res[i * 8 + j] = buff[i] & (1 << j);
        }
    }
    return res;
}
| https://github.com/socathie/circomlib-ml |
test/modules/src/pedersen_printbases.js | const pedersenHash = require("./pedersenHash.js");
let nBases;
if (typeof process.argv[2] != "undefined") {
nBases = parseInt(process.argv[2]);
} else {
nBases = 5;
}
let baseHash;
if (typeof process.argv[3] != "undefined") {
baseHash = process.argv[3];
} else {
baseHash = "blake";
}
for (let i=0; i < nBases; i++) {
const p = pedersenHash.getBasePoint(baseHash, i);
console.log(`[${p[0]},${p[1]}]`);
}
| https://github.com/socathie/circomlib-ml |
test/modules/src/poseidon.js | const assert = require("assert");
const Scalar = require("ffjavascript").Scalar;
const ZqField = require("ffjavascript").ZqField;
const { unstringifyBigInts } = require("ffjavascript").utils;
// Prime 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001
const F = new ZqField(Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617"));
// Parameters are generated by a reference script https://extgit.iaik.tugraz.at/krypto/hadeshash/-/blob/master/code/generate_parameters_grain.sage
// Used like so: sage generate_parameters_grain.sage 1 0 254 2 8 56 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001
const { C, M } = unstringifyBigInts(require("./poseidon_constants.json"));
// Using recommended parameters from whitepaper https://eprint.iacr.org/2019/458.pdf (table 2, table 8)
// Generated by https://extgit.iaik.tugraz.at/krypto/hadeshash/-/blob/master/code/calc_round_numbers.py
// And rounded up to nearest integer that divides by t
const N_ROUNDS_F = 8;
const N_ROUNDS_P = [56, 57, 56, 60, 60, 63, 64, 63];
const pow5 = a => F.mul(a, F.square(F.square(a, a)));
/**
 * Poseidon hash over BN254 of 1..6 field elements (state width t = inputs+1,
 * one capacity word initialized to 0). Each round adds the ARK constants C
 * and multiplies by the MDS matrix M; full rounds apply the x^5 S-box to
 * every state word, partial rounds to word 0 only, with the partial rounds
 * sandwiched between the two halves of the full rounds.
 * @param {Array} inputs - non-empty array of field-coercible values.
 * @returns normalized field element (state word 0).
 */
function poseidon(inputs) {
    assert(inputs.length > 0);
    assert(inputs.length < N_ROUNDS_P.length - 1);
    const t = inputs.length + 1;
    const nRoundsF = N_ROUNDS_F;
    const nRoundsP = N_ROUNDS_P[t - 2];
    let state = [F.zero, ...inputs.map(a => F.e(a))];
    for (let r = 0; r < nRoundsF + nRoundsP; r++) {
        // Add round constants (ARK).
        state = state.map((a, i) => F.add(a, C[t - 2][r * t + i]));
        if (r < nRoundsF / 2 || r >= nRoundsF / 2 + nRoundsP) {
            // Full round: S-box on every word.
            state = state.map(a => pow5(a));
        } else {
            // Partial round: S-box on the first word only.
            state[0] = pow5(state[0]);
        }
        // Multiply by the MDS matrix.
        state = state.map((_, i) =>
            state.reduce((acc, a, j) => F.add(acc, F.mul(M[t - 2][i][j], a)), F.zero)
        );
    }
    return F.normalize(state[0]);
}
module.exports = poseidon;
| https://github.com/socathie/circomlib-ml |
test/modules/src/poseidon_gencontract.js | // Copyright (c) 2018 Jordi Baylina
// License: LGPL-3.0+
//
const Contract = require("./evmasm");
const { unstringifyBigInts } = require("ffjavascript").utils;
const Web3Utils = require("web3-utils");
const { C:K, M } = unstringifyBigInts(require("./poseidon_constants.json"));
const N_ROUNDS_F = 8;
const N_ROUNDS_P = [56, 57, 56, 60, 60, 63, 64, 63];
/**
 * Render a bigint-like value as a 0x-prefixed, 64-hex-digit (32-byte) word.
 * @param a - value with a base-16 toString (bigint or big-number object).
 * @returns {string}
 */
function toHex256(a) {
    // String.prototype.padStart replaces the original manual while-loop.
    return "0x" + a.toString(16).padStart(64, "0");
}
/**
 * Generate raw EVM bytecode implementing Poseidon for a fixed number of
 * inputs (state width t = nInputs + 1), callable as poseidon(uint256[n])
 * or poseidon(bytes32[n]). The MDS matrix is cached in memory, the state
 * lives on the stack, and the shared `mix` routine is invoked by jump with
 * its return label stashed at memory[0]. Trailing comments on instructions
 * show the stack layout.
 * @param {number} nInputs - 1..8.
 * @returns {string} deployable EVM bytecode.
 */
function createCode(nInputs) {
    if (( nInputs<1) || (nInputs>8)) throw new Error("Invalid number of inputs. Must be 1<=nInputs<=8");
    const t = nInputs + 1;
    const nRoundsF = N_ROUNDS_F;
    const nRoundsP = N_ROUNDS_P[t - 2];
    const C = new Contract();
    // Cache the t*t MDS matrix into memory words 1..t*t.
    function saveM() {
        for (let i=0; i<t; i++) {
            for (let j=0; j<t; j++) {
                C.push(toHex256(M[t-2][i][j]));
                C.push((1+i*t+j)*32);
                C.mstore();
            }
        }
    }
    // Add the round-r ARK constants to every state word.
    function ark(r) { // st, q
        for (let i=0; i<t; i++) {
            C.dup(t); // q, st, q
            C.push(toHex256(K[t-2][r*t+i])); // K, q, st, q
            C.dup(2+i); // st[i], K, q, st, q
            C.addmod(); // newSt[i], st, q
            C.swap(1 + i); // xx, st, q
            C.pop();
        }
    }
    // Apply the x^5 S-box to state word p.
    function sigma(p) {
        // sq, q
        C.dup(t); // q, st, q
        C.dup(1+p); // st[p] , q , st, q
        C.dup(1); // q, st[p] , q , st, q
        C.dup(0); // q, q, st[p] , q , st, q
        C.dup(2); // st[p] , q, q, st[p] , q , st, q
        C.dup(0); // st[p] , st[p] , q, q, st[p] , q , st, q
        C.mulmod(); // st2[p], q, st[p] , q , st, q
        C.dup(0); // st2[p], st2[p], q, st[p] , q , st, q
        C.mulmod(); // st4[p], st[p] , q , st, q
        C.mulmod(); // st5[p], st, q
        C.swap(1+p);
        C.pop(); // newst, q
    }
    // Multiply the state by the MDS matrix held in memory, then jump back to
    // the return label whose address the caller stored at memory[0].
    function mix() {
        C.label("mix");
        for (let i=0; i<t; i++) {
            for (let j=0; j<t; j++) {
                if (j==0) {
                    C.dup(i+t); // q, newSt, oldSt, q
                    C.push((1+i*t+j)*32);
                    C.mload(); // M, q, newSt, oldSt, q
                    C.dup(2+i+j); // oldSt[j], M, q, newSt, oldSt, q
                    C.mulmod(); // acc, newSt, oldSt, q
                } else {
                    C.dup(1+i+t); // q, acc, newSt, oldSt, q
                    C.push((1+i*t+j)*32);
                    C.mload(); // M, q, acc, newSt, oldSt, q
                    C.dup(3+i+j); // oldSt[j], M, q, acc, newSt, oldSt, q
                    C.mulmod(); // aux, acc, newSt, oldSt, q
                    C.dup(2+i+t); // q, aux, acc, newSt, oldSt, q
                    C.swap(2); // acc, aux, q, newSt, oldSt, q
                    C.addmod(); // acc, newSt, oldSt, q
                }
            }
        }
        // Drop the old state, keeping the freshly mixed words.
        for (let i=0; i<t; i++) {
            C.swap((t -i) + (t -i-1));
            C.pop();
        }
        C.push(0);
        C.mload();
        C.jmp();
    }
    // Check selector
    C.push("0x0100000000000000000000000000000000000000000000000000000000");
    C.push(0);
    C.calldataload();
    C.div();
    C.dup(0);
    C.push(Web3Utils.keccak256(`poseidon(uint256[${nInputs}])`).slice(0, 10)); // poseidon(uint256[n])
    C.eq();
    C.swap(1);
    C.push(Web3Utils.keccak256(`poseidon(bytes32[${nInputs}])`).slice(0, 10)); // poseidon(bytes32[n])
    C.eq();
    C.or();
    C.jmpi("start");
    C.invalid();
    C.label("start");
    saveM();
    C.push("0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001"); // q
    // Load t values from the call data.
    // The function has a single array param param
    // [Selector (4)] [item1 (32)] [item2 (32)] ....
    // Stack positions 0-nInputs.
    for (let i=0; i<nInputs; i++) {
        C.push(0x04+(0x20*(nInputs-i-1)));
        C.calldataload();
    }
    C.push(0);
    // Run all rounds: ark, S-box (full or partial), then the shared mix.
    for (let i=0; i<nRoundsF+nRoundsP; i++) {
        ark(i);
        if ((i<nRoundsF/2) || (i>=nRoundsP+nRoundsF/2)) {
            for (let j=0; j<t; j++) {
                sigma(j);
            }
        } else {
            sigma(0);
        }
        const strLabel = "aferMix"+i;
        C._pushLabel(strLabel);
        C.push(0);
        C.mstore();
        C.jmp("mix");
        C.label(strLabel);
    }
    // Return state word 0 as the hash output.
    C.push("0x00");
    C.mstore(); // Save it to pos 0;
    C.push("0x20");
    C.push("0x00");
    C.return();
    mix();
    return C.createTxData();
}
/**
 * Build the two-overload ABI of the generated Poseidon contract:
 * poseidon(bytes32[n]) -> bytes32 and poseidon(uint256[n]) -> uint256.
 * @param {number} nInputs - fixed input-array length n.
 * @returns {Array<Object>} ABI fragment array.
 */
function generateABI(nInputs) {
    const overload = (ty) => ({
        "constant": true,
        "inputs": [
            {
                "internalType": `${ty}[${nInputs}]`,
                "name": "input",
                "type": `${ty}[${nInputs}]`
            }
        ],
        "name": "poseidon",
        "outputs": [
            {
                "internalType": ty,
                "name": "",
                "type": ty
            }
        ],
        "payable": false,
        "stateMutability": "pure",
        "type": "function"
    });
    return [overload("bytes32"), overload("uint256")];
}
module.exports.generateABI = generateABI;
module.exports.createCode = createCode;
| https://github.com/socathie/circomlib-ml |
test/modules/src/poseidon_printcontract.js | const poseidonGenContract = require("./poseidon_gencontract");
// CLI entry: expects exactly one argument, the number of Poseidon inputs.
if (process.argv.length != 3) {
    console.log("Usage: node poseidon_gencontract.js [numberOfInputs]");
    process.exit(1);
}
const nInputs = Number(process.argv[2]);
console.log(nInputs);
console.log(poseidonGenContract.createCode(nInputs));
| https://github.com/socathie/circomlib-ml |
test/modules/src/poseidon_printmatrix.js |
// NOTE(review): ./poseidon.js exports the hash function itself
// (module.exports = poseidon) and defines no getMatrix(); this script looks
// stale and would throw "Poseidon.getMatrix is not a function" when run.
// Confirm the intended constants API before relying on it.
const Poseidon = require("./poseidon.js");
const M = Poseidon.getMatrix();
// Pretty-print the MDS matrix as a nested bracketed list.
let S = "[\n ";
for (let i=0; i<M.length; i++) {
    const LC = M[i];
    S = S + "[\n";
    for (let j=0; j<LC.length; j++) {
        S = S + " " + M[i][j].toString();
        if (j<LC.length-1) S = S + ",";
        S = S + "\n";
    }
    S = S + " ]";
    if (i<M.length-1) S = S + ",";
}
S=S+ "\n]\n";
console.log(S);
| https://github.com/socathie/circomlib-ml |
test/modules/src/smt.js | const Scalar = require("ffjavascript").Scalar;
const SMTMemDB = require("./smt_memdb");
const {hash0, hash1, F} = require("./smt_hashes_poseidon");
/**
 * Sparse Merkle tree over a field F, backed by a pluggable async key/value
 * db (see smt_memdb). Leaves are stored as [1, key, value] and hashed with
 * hash1(key, value); internal nodes as [left, right] hashed with
 * hash0(left, right); a zero root denotes the empty tree. update/insert/
 * delete return witness objects consumable by the SMT circom verifiers.
 */
class SMT {
    /**
     * @param db - backend exposing get/multiIns/multiDel/setRoot/getRoot.
     * @param root - current root field element.
     */
    constructor(db, root) {
        this.db = db;
        this.root = root;
    }
    // Key bits, least-significant first, padded to 256 tree levels.
    _splitBits(_key) {
        const res = Scalar.bits(_key);
        while (res.length<256) res.push(false);
        return res;
    }
    /**
     * Replace the value of an existing key. Recomputes the path from leaf to
     * root, persists the new nodes, deletes the stale ones, and returns an
     * update witness (oldRoot/newRoot, old/new key+value, siblings).
     */
    async update(_key, _newValue) {
        const key = Scalar.e(_key);
        const newValue = F.e(_newValue);
        const resFind = await this.find(key);
        const res = {};
        res.oldRoot = this.root;
        res.oldKey = key;
        res.oldValue = resFind.foundValue;
        res.newKey = key;
        res.newValue = newValue;
        res.siblings = resFind.siblings;
        const ins = [];
        const dels = [];
        let rtOld = hash1(key, resFind.foundValue);
        let rtNew = hash1(key, newValue);
        ins.push([rtNew, [1, key, newValue ]]);
        dels.push(rtOld);
        const keyBits = this._splitBits(key);
        // Rebuild both old and new hashes level by level up to the root.
        for (let level = resFind.siblings.length-1; level >=0; level--) {
            let oldNode, newNode;
            const sibling = resFind.siblings[level];
            if (keyBits[level]) {
                oldNode = [sibling, rtOld];
                newNode = [sibling, rtNew];
            } else {
                oldNode = [rtOld, sibling];
                newNode = [rtNew, sibling];
            }
            rtOld = hash0(oldNode[0], oldNode[1]);
            rtNew = hash0(newNode[0], newNode[1]);
            dels.push(rtOld);
            ins.push([rtNew, newNode]);
        }
        res.newRoot = rtNew;
        await this.db.multiDel(dels);
        await this.db.multiIns(ins);
        await this.db.setRoot(rtNew);
        this.root = rtNew;
        return res;
    }
    /**
     * Remove an existing key. Rebuilds the path without the leaf; when the
     * deleted leaf's lowest sibling is itself a lone leaf, that leaf is
     * pulled up (levels collapse until a mixed subtree is met). Returns a
     * deletion witness; throws if the key is absent.
     */
    async delete(_key) {
        const key = Scalar.e(_key);
        const resFind = await this.find(key);
        if (!resFind.found) throw new Error("Key does not exists");
        const res = {
            siblings: [],
            delKey: key,
            delValue: resFind.foundValue
        };
        const dels = [];
        const ins = [];
        let rtOld = hash1(key, resFind.foundValue);
        let rtNew;
        dels.push(rtOld);
        let mixed;
        if (resFind.siblings.length > 0) {
            const record = await this.db.get(resFind.siblings[resFind.siblings.length - 1]);
            if ((record.length == 3)&&(F.eq(record[0], F.one))) {
                // Sibling is a leaf: it replaces the deleted subtree.
                mixed = false;
                res.oldKey = record[1];
                res.oldValue = record[2];
                res.isOld0 = false;
                rtNew = resFind.siblings[resFind.siblings.length - 1];
            } else if (record.length == 2) {
                // Sibling is an internal node: the branch becomes empty here.
                mixed = true;
                res.oldKey = key;
                res.oldValue = F.zero;
                res.isOld0 = true;
                rtNew = F.zero;
            } else {
                throw new Error("Invalid node. Database corrupted");
            }
        } else {
            rtNew = F.zero;
            res.oldKey = key;
            res.oldValue = F.zero;
            res.isOld0 = true;
        }
        const keyBits = this._splitBits(key);
        for (let level = resFind.siblings.length-1; level >=0; level--) {
            let newSibling = resFind.siblings[level];
            if ((level == resFind.siblings.length-1)&&(!res.isOld0)) {
                newSibling = F.zero;
            }
            const oldSibling = resFind.siblings[level];
            if (keyBits[level]) {
                rtOld = hash0(oldSibling, rtOld);
            } else {
                rtOld = hash0(rtOld, oldSibling);
            }
            dels.push(rtOld);
            if (!F.isZero(newSibling)) {
                mixed = true;
            }
            // Only materialize new nodes once the subtree holds >1 leaf.
            if (mixed) {
                res.siblings.unshift(resFind.siblings[level]);
                let newNode;
                if (keyBits[level]) {
                    newNode = [newSibling, rtNew];
                } else {
                    newNode = [rtNew, newSibling];
                }
                rtNew = hash0(newNode[0], newNode[1]);
                ins.push([rtNew, newNode]);
            }
        }
        await this.db.multiIns(ins);
        await this.db.setRoot(rtNew);
        this.root = rtNew;
        await this.db.multiDel(dels);
        res.newRoot = rtNew;
        res.oldRoot = rtOld;
        return res;
    }
    /**
     * Insert a new key/value. If the path collides with an existing leaf,
     * zero siblings are appended until the two keys' bit paths diverge and
     * the old leaf is pushed down. Returns an insertion witness; throws if
     * the key already exists. Note res.siblings aliases resFind.siblings.
     */
    async insert(_key, _value) {
        const key = Scalar.e(_key);
        const value = F.e(_value);
        let addedOne = false;
        const res = {};
        res.oldRoot = this.root;
        const newKeyBits = this._splitBits(key);
        let rtOld;
        const resFind = await this.find(key);
        if (resFind.found) throw new Error("Key already exists");
        res.siblings = resFind.siblings;
        let mixed;
        if (!resFind.isOld0) {
            // Collided with an existing leaf: extend the path until the key
            // bits differ, then hang the old leaf as the sibling.
            const oldKeyits = this._splitBits(resFind.notFoundKey);
            for (let i= res.siblings.length; oldKeyits[i] == newKeyBits[i]; i++) {
                res.siblings.push(F.zero);
            }
            rtOld = hash1(resFind.notFoundKey, resFind.notFoundValue);
            res.siblings.push(rtOld);
            addedOne = true;
            mixed = false;
        } else if (res.siblings.length >0) {
            mixed = true;
            rtOld = F.zero;
        }
        const inserts = [];
        const dels = [];
        let rt = hash1(key, value);
        inserts.push([rt,[1, key, value]] );
        for (let i=res.siblings.length-1; i>=0; i--) {
            if ((i<res.siblings.length-1)&&(!F.isZero(res.siblings[i]))) {
                mixed = true;
            }
            if (mixed) {
                const oldSibling = resFind.siblings[i];
                if (newKeyBits[i]) {
                    rtOld = hash0(oldSibling, rtOld);
                } else {
                    rtOld = hash0(rtOld, oldSibling);
                }
                dels.push(rtOld);
            }
            let newRt;
            if (newKeyBits[i]) {
                newRt = hash0(res.siblings[i], rt);
                inserts.push([newRt,[res.siblings[i], rt]] );
            } else {
                newRt = hash0(rt, res.siblings[i]);
                inserts.push([newRt,[rt, res.siblings[i]]] );
            }
            rt = newRt;
        }
        // Trim the trailing zero siblings added for the collision path.
        if (addedOne) res.siblings.pop();
        while ((res.siblings.length>0) && (F.isZero(res.siblings[res.siblings.length-1]))) {
            res.siblings.pop();
        }
        res.oldKey = resFind.notFoundKey;
        res.oldValue = resFind.notFoundValue;
        res.newRoot = rt;
        res.isOld0 = resFind.isOld0;
        await this.db.multiIns(inserts);
        await this.db.setRoot(rt);
        this.root = rt;
        await this.db.multiDel(dels);
        return res;
    }
    /**
     * Look up a key. On a hit returns {found: true, foundValue, siblings};
     * on a miss returns the leaf occupying the path (notFoundKey/Value) or
     * isOld0 when the path ends in an empty subtree.
     */
    async find(key) {
        const keyBits = this._splitBits(key);
        return await this._find(key, keyBits, this.root, 0);
    }
    // Recursive worker for find(): walks one level per call, collecting the
    // sibling hash of each branch taken.
    async _find(key, keyBits, root, level) {
        if (typeof root === "undefined") root = this.root;
        let res;
        if (F.isZero(root)) {
            res = {
                found: false,
                siblings: [],
                notFoundKey: key,
                notFoundValue: F.zero,
                isOld0: true
            };
            return res;
        }
        const record = await this.db.get(root);
        if ((record.length==3)&&(F.eq(record[0],F.one))) {
            // Leaf node [1, key, value].
            if (F.eq(record[1],key)) {
                res = {
                    found: true,
                    siblings: [],
                    foundValue: record[2],
                    isOld0: false
                };
            } else {
                res = {
                    found: false,
                    siblings: [],
                    notFoundKey: record[1],
                    notFoundValue: record[2],
                    isOld0: false
                };
            }
        } else {
            // Internal node [left, right]: descend per the key bit.
            if (keyBits[level] == 0) {
                res = await this._find(key, keyBits, record[0], level+1);
                res.siblings.unshift(record[1]);
            } else {
                res = await this._find(key, keyBits, record[1], level+1);
                res.siblings.unshift(record[0]);
            }
        }
        return res;
    }
}
/**
 * Placeholder for loading a persisted sparse Merkle tree from disk.
 * Not implemented; the original body was empty and silently resolved to
 * undefined, deferring the failure to the first use of the "tree".
 * @param {string} fileName - path of the serialized tree (unused).
 * @throws {Error} always, until an implementation lands.
 */
async function loadFromFile(fileName) {
    throw new Error("SMT loadFromFile is not implemented");
}
/** Create an empty sparse Merkle tree backed by an in-memory db. */
async function newMemEmptyTrie() {
    const db = new SMTMemDB();
    return new SMT(db, await db.getRoot());
}
module.exports.loadFromFile = loadFromFile;
module.exports.newMemEmptyTrie = newMemEmptyTrie;
module.exports.SMT = SMT;
module.exports.SMTMemDB = SMTMemDB;
| https://github.com/socathie/circomlib-ml |
test/modules/src/smt_hashes_mimc.js | const mimc7 = require("./mimc7");
const bigInt = require("big-integer");
// Internal-node hash for the MiMC7-backed sparse Merkle tree.
// NOTE(review): the two children are forwarded as separate positional args;
// other call sites of mimc7.multiHash in this repo pass (array, key) —
// verify against mimc7.js before reusing this module.
exports.hash0 = function (left, right) {
    return mimc7.multiHash(left, right);
};
// Leaf hash: key 1 domain-separates leaves from internal nodes.
exports.hash1 = function(key, value) {
    return mimc7.multiHash([key, value], bigInt.one);
};
exports.F = mimc7.F;
| https://github.com/socathie/circomlib-ml |
test/modules/src/smt_hashes_poseidon.js |
const ZqField = require("ffjavascript").ZqField;
const Scalar = require("ffjavascript").Scalar;
const poseidon = require("./poseidon");
const F = new ZqField(Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617"));
// Internal-node hash for the Poseidon-backed sparse Merkle tree.
exports.hash0 = function (left, right) {
    return poseidon([left, right]);
};
// Leaf hash: the trailing constant 1 domain-separates leaves from
// internal nodes.
exports.hash1 = function(key, value) {
    return poseidon([key, value, F.one]);
};
exports.F = F;
| https://github.com/socathie/circomlib-ml |
test/modules/src/smt_memdb.js |
const Scalar = require("ffjavascript").Scalar;
const ZqField = require("ffjavascript").ZqField;
// Prime 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001
const F = new ZqField(Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617"));
/**
 * Volatile key/value backend for the sparse Merkle tree. Nodes live in a
 * plain object keyed by the decimal string form of the field-element hash;
 * the root starts at zero (empty tree). All methods are async to satisfy
 * the SMT db interface, though nothing here actually blocks.
 */
class SMTMemDb {
    constructor() {
        this.nodes = {};
        this.root = F.zero;
    }
    async getRoot() {
        return this.root;
    }
    // Canonical string form of a node key.
    _key2str(k) {
        return `${k}`;
    }
    // Coerce every entry of a node array into a field element, in place.
    _normalize(n) {
        for (let i = 0; i < n.length; i++) n[i] = F.e(n[i]);
    }
    async get(key) {
        return this.nodes[this._key2str(key)];
    }
    async multiGet(keys) {
        return Promise.all(keys.map((k) => this.get(k)));
    }
    async setRoot(rt) {
        this.root = rt;
    }
    async multiIns(inserts) {
        for (const [key, value] of inserts) {
            this._normalize(value);
            this.nodes[this._key2str(key)] = value;
        }
    }
    async multiDel(dels) {
        for (const key of dels) {
            delete this.nodes[this._key2str(key)];
        }
    }
}
module.exports = SMTMemDb;
| https://github.com/socathie/circomlib-ml |
build.rs | use eyre::{bail, eyre, Result, WrapErr as _};
use std::{
env::{var, VarError},
fs,
path::Path,
process::Command,
};
use time::{format_description::well_known::Rfc3339, OffsetDateTime, UtcOffset};
/// Build script entry point.
///
/// Emits `cargo:rustc-env` directives so the crate can embed COMMIT_SHA,
/// COMMIT_DATE, BUILD_DATE and TARGET at compile time. Every git-derived
/// value degrades gracefully (warning + fallback) so builds also work from
/// a source tarball without a `.git` directory or `git` binary.
fn main() -> Result<()> {
    // Register rerun-if-changed for the git refs; also yields the current
    // commit read from the ref file as a fallback SHA.
    let commit = rerun_if_git_changes().unwrap_or_else(|e| {
        eprintln!("Warning: {}", e);
        None
    });
    println!(
        "cargo:rustc-env=COMMIT_SHA={}",
        env_or_cmd("COMMIT_SHA", &["git", "rev-parse", "HEAD"]).unwrap_or_else(|e| {
            eprintln!("Warning: {}", e);
            // All-zero SHA marks "commit unknown".
            commit.unwrap_or_else(|| "0000000000000000000000000000000000000000".to_string())
        })
    );
    let build_date = OffsetDateTime::now_utc();
    // `%aI` prints the author date in RFC 3339 form; --pretty wraps it in
    // single quotes, which are trimmed before parsing.
    let commit_date = env_or_cmd("COMMIT_DATE", &[
        "git",
        "log",
        "-n1",
        "--pretty=format:'%aI'",
    ])
    .and_then(|str| Ok(OffsetDateTime::parse(str.trim_matches('\''), &Rfc3339)?))
    .unwrap_or_else(|e| {
        eprintln!("Warning: {}", e);
        OffsetDateTime::UNIX_EPOCH
    });
    println!(
        "cargo:rustc-env=COMMIT_DATE={}",
        commit_date.to_offset(UtcOffset::UTC).date()
    );
    println!(
        "cargo:rustc-env=BUILD_DATE={}",
        build_date.to_offset(UtcOffset::UTC).date()
    );
    println!(
        "cargo:rustc-env=TARGET={}",
        var("TARGET").wrap_err("Fetching environment variable TARGET")?
    );
    Ok(())
}
/// Resolve a value from environment variable `env`, falling back to running
/// `cmd` and capturing its trimmed stdout. Errors when the variable is unset
/// (or invalid UTF-8) and the command cannot be run or exits non-zero.
fn env_or_cmd(env: &str, cmd: &[&str]) -> Result<String> {
    // Try env first
    match var(env) {
        Ok(s) => return Ok(s),
        Err(VarError::NotPresent) => (),
        Err(e) => bail!(e),
    };
    // Try command
    let err = || {
        format!(
            "Variable {} is unset and command \"{}\" failed",
            env,
            cmd.join(" ")
        )
    };
    let output = Command::new(cmd[0])
        .args(&cmd[1..])
        .output()
        .with_context(err)?;
    if output.status.success() {
        Ok(String::from_utf8(output.stdout)?.trim().to_string())
    } else {
        bail!(err())
    }
}
/// Print `cargo:rerun-if-changed` for `.git/HEAD` (and, when HEAD is a
/// symbolic ref, the ref file it points at) so embedded version metadata is
/// refreshed on commit or checkout. Returns the commit hash read from the
/// ref file, or `None` when no `.git/HEAD` exists.
fn rerun_if_git_changes() -> Result<Option<String>> {
    let git_head = Path::new(".git/HEAD");
    // Skip if not in a git repo
    if !git_head.exists() {
        eprintln!("No .git/HEAD found, not rerunning on git change");
        return Ok(None);
    }
    // TODO: Worktree support where `.git` is a file
    println!("cargo:rerun-if-changed=.git/HEAD");
    // If HEAD contains a ref, then echo that path also.
    let contents = fs::read_to_string(git_head).wrap_err("Error reading .git/HEAD")?;
    let head_ref = contents.split(": ").collect::<Vec<_>>();
    let commit = if head_ref.len() == 2 && head_ref[0] == "ref" {
        let ref_path = Path::new(".git").join(head_ref[1].trim());
        let ref_path_str = ref_path
            .to_str()
            .ok_or_else(|| eyre!("Could not convert ref path {:?} to string", ref_path))?;
        println!("cargo:rerun-if-changed={}", ref_path_str);
        fs::read_to_string(&ref_path).with_context(|| format!("Error reading {}", ref_path_str))?
    } else {
        // Detached HEAD: the file itself holds the commit hash.
        contents
    };
    Ok(Some(commit))
}
| https://github.com/worldcoin/proto-neural-zkp |
criterion.rs | use neural_zkp as lib;
// Benchmark entry point — presumably registered as a custom Criterion
// harness (harness = false in Cargo.toml; TODO confirm). Configures
// Criterion from CLI args, runs the library's bench group, prints the
// summary.
fn main() {
    let mut criterion = criterion::Criterion::default().configure_from_args();
    lib::bench::group(&mut criterion);
    criterion.final_summary();
}
| https://github.com/worldcoin/proto-neural-zkp |
ref_cnn/benchmark_cnn.py | ### Will take a long time! Took 13.2min on my machine (M1 Max MBP - DC)
import timeit
import numpy as np
from vanilla_cnn import *
if __name__ == "__main__":
    # Instantiate all inputs and weights once, outside the timed region.
    # Fixed seed so every run benchmarks the identical network and input.
    np.random.seed(12345)
    x0 = np.random.randint(low=-5, high=5, size=(120, 80, 3))
    f = np.random.randint(low=-10, high=+10, size=(32, 5, 5, 3))
    k = np.random.randint(low=-10, high=+10, size=(32, 5, 5, 32))
    weights1 = np.random.randint(low=-10, high=+10, size=(1000, 14688))
    biases1 = np.random.randint(low=-10, high=+10, size=(1000))
    weights2 = np.random.randint(low=-10, high=+10, size=(5, 1000))
    biases2 = np.random.randint(low=-10, high=+10, size=(5))

    times = []
    runs = 100

    for _ in range(runs):
        # BUG FIX: restart from the pristine input each iteration. The old
        # code fed the previous iteration's (5,)-shaped normalized output
        # back into conv_layer, whose `h, w, c = input.shape` unpack would
        # crash on the second run. (The old post-loop reseed was dead code.)
        x = x0
        starttime = timeit.default_timer()
        # Forward pass: conv -> pool -> relu -> conv -> pool -> relu
        #               -> flatten -> fc -> relu -> fc -> normalize
        x, n_params, n_multiplications, name = conv_layer(x, f)
        x, n_params, n_multiplications, name = max_pooling_layer(x, 2)
        x, n_params, n_multiplications, name = relu_layer(x)
        x, n_params, n_multiplications, name = conv_layer(x, k)
        x, n_params, n_multiplications, name = max_pooling_layer(x, 2)
        x, n_params, n_multiplications, name = relu_layer(x)
        x, n_params, n_multiplications, name = flatten_layer(x)
        x, n_params, n_multiplications, name = fully_connected_layer(x, weights1, biases1)
        x, n_params, n_multiplications, name = relu_layer(x)
        x, n_params, n_multiplications, name = fully_connected_layer(x, weights2, biases2)
        x, n_params, n_multiplications, name = normalize(x)
        times.append(timeit.default_timer() - starttime)

    average = sum(times) / len(times)
    print(f'The average time is {average} seconds for {runs} runs')
# Result = 0.8297840171150046 for 1000 runs | https://github.com/worldcoin/proto-neural-zkp |
ref_cnn/generate_cnn_json.py | # Imports
import json
from json import JSONEncoder
import numpy as np
from enum import Enum
import re
# Encoder
class Encoder(JSONEncoder):
    """JSON encoder that serializes numpy arrays as nested lists and Layer
    enum members as their string tag (first element of the value tuple)."""

    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, Layer):
            return obj.value[0]
        return JSONEncoder.default(self, obj)
# Layer definitions
def conv_layer(input, f):
    """Valid (unpadded) convolution of `input` (h, w, c) with the filter
    bank `f` (c_out, hf, wf, c_in); output shape is (h-hf+1, w-wf+1, c_out).

    Returns (output, n_params, n_multiplications, name).
    """
    h, w, c = input.shape
    c_out, hf, wf, c_in = f.shape
    assert c == c_in, "Input channels must match!"
    assert hf%2 == 1, "Height of the filter (f.shape[1]) must be an uneven number!"
    assert wf%2 == 1, "Width of the filter (f.shape[2]) must be an uneven number!"

    # Kernel half-extents; the valid region shrinks by these on every side.
    dh = hf // 2
    dw = wf // 2

    output = np.zeros(shape=(h - 2 * dh, w - 2 * dw, c_out))
    for row in range(dh, h - dh):
        for col in range(dw, w - dw):
            # Input window centred on (row, col), spanning all channels.
            window = input[row - dh:row + dh + 1, col - dw:col + dw + 1]
            for ch in range(c_out):
                output[row - dh, col - dw, ch] = (window * f[ch, :, :, :]).sum()

    n_params = f.size
    n_multiplications = window.size * c_out * (w - 2 * dw) * (h - 2 * dh)
    name = f"conv {'x'.join(str(e) for e in f.shape)}"
    return output, n_params, n_multiplications, name
def relu_layer(input):
    """Element-wise ReLU: returns a new array with negatives clamped to 0;
    the input array is left untouched."""
    output = input.copy()
    np.clip(output, 0, None, out=output)
    return output, 0, input.size, "relu"
def max_pooling_layer(input, s):
    """Downsample by taking the max over non-overlapping s x s patches,
    per channel. Output shape is (h//s, w//s, c)."""
    h, w, c = input.shape
    assert h%s == 0, "Height must be divisible by s!"
    assert w%s == 0, "Width must be dibisible by s!"

    output = np.zeros(shape=(h // s, w // s, c))
    for oi in range(h // s):
        for oj in range(w // s):
            for ch in range(c):
                patch = input[oi * s:(oi + 1) * s, oj * s:(oj + 1) * s, ch]
                output[oi, oj, ch] = patch.max()

    return output, 0, input.size, "max-pool"
def flatten_layer(input):
    """Flatten to 1-D in row-major order; returns a copy, not a view."""
    return input.flatten(), 0, 0, "flatten"
def fully_connected_layer(input, weights, biases):
    """Affine layer: weights @ input + biases.

    input: (input_dim,); weights: (output_dim, input_dim);
    biases: (output_dim,). Returns (output, n_params, n_mults, name).
    """
    assert input.ndim == 1, "Input must be a flattend array!"
    assert weights.shape[1] == input.shape[0], "Input shapes must match!"
    assert weights.shape[0] == biases.shape[0], "Output shapes must match!"

    output = weights @ input + biases
    name = f"full {'x'.join(str(e) for e in weights.shape)}"
    return output, weights.size + biases.size, weights.size, name
def normalize(input):
    """Scale the vector to unit L2 norm (one norm + one divide/element)."""
    return input / np.linalg.norm(input), 0, 1 + input.size, "normalize"
############
# Model #
############
class Layer(Enum):
    """Identifiers for the layer types serialized into model.json.

    Bug fix: the original definition had a trailing comma after every
    value, which made each member's ``.value`` a 1-tuple such as
    ``('convolution',)`` instead of the intended plain string.
    """
    Convolution = 'convolution'
    MaxPool = 'max_pool'
    Relu = 'relu'
    Flatten = 'flatten'
    FullyConnected = 'fully_connected'
    Normalize = 'normalize'
# ---------------------------------------------------------------------------
# Script body: build the reference CNN with a fixed RNG seed, run a forward
# pass while printing per-layer stats, record each layer's description into
# `model`, and finally serialize the model description to ../src/json/model.json.
# NOTE(review): `model` is reassigned from a list to a dict at the end before
# json.dumps; `Encoder`, `json`, `conv_layer` etc. are defined earlier in the file.
# ---------------------------------------------------------------------------
np.random.seed(12345)
# table header for the per-layer statistics printed below
p = "{:>20} | {:>15} | {:>15} | {:>15} "
print(p.format("layer", "output shape", "#parameters", "#ops"))
print(p.format("-"*20, "-"*15, "-"*15, "-"*15))
shape = (120,80,3)
# input
x = np.random.randint(low=-5, high=5, size=shape)
initial = x.flatten().astype(np.float32, copy=False)
data = {
    "v": 1,
    "dim": shape,
    "data": initial
}
# conv layer
shape = (32,5,5,3)
f = np.random.randint(low=-10, high=+10, size=shape)
conv1 = f.flatten().astype(np.float32, copy=False)
data = {
    "v": 1,
    "dim": shape,
    "data": conv1
}
conv = {
    "layer_type": Layer.Convolution,
    "input_shape": x.shape,
    "kernel": data,
}
model = [conv]
x, n_params, n_multiplications, name = conv_layer(x, f)
print(p.format(name, str(x.shape), n_params, n_multiplications))
# max pooling
maxpool = {
    "layer_type": Layer.MaxPool,
    "input_shape": x.shape,
    "window": 2,
}
model.append(maxpool)
x, n_params, n_multiplications, name = max_pooling_layer(x, 2)
print(p.format(name, str(x.shape), n_params, n_multiplications))
# relu layer
relu = {
    "layer_type": Layer.Relu,
    "input_shape": x.shape
}
model.append(relu)
x, n_params, n_multiplications, name = relu_layer(x)
print(p.format(name, str(x.shape), n_params, n_multiplications))
# conv layer
shape = (32,5,5,32)
f = np.random.randint(low=-10, high=+10, size=shape)
conv2 = f.flatten().astype(np.float32, copy=False)
data = {
    "v": 1,
    "dim": shape,
    "data": conv2
}
conv = {
    "layer_type": Layer.Convolution,
    "input_shape": x.shape,
    "kernel":data
}
model.append(conv)
x, n_params, n_multiplications, name = conv_layer(x, f)
print(p.format(name, str(x.shape), n_params, n_multiplications))
# max pooling
maxpool = {
    "layer_type": Layer.MaxPool,
    "input_shape": x.shape,
    "window": 2
}
model.append(maxpool)
x, n_params, n_multiplications, name = max_pooling_layer(x, 2)
print(p.format(name, str(x.shape), n_params, n_multiplications))
# relu layer
relu = {
    "layer_type": Layer.Relu,
    "input_shape": x.shape,
}
model.append(relu)
x, n_params, n_multiplications, name = relu_layer(x)
print(p.format(name, str(x.shape), n_params, n_multiplications))
# flatten
flatten = {
    "layer_type": Layer.Flatten,
    "input_shape": x.shape,
}
model.append(flatten)
x, n_params, n_multiplications, name = flatten_layer(x)
print(p.format(name, str(x.shape), n_params, n_multiplications))
# fully connected
shape = (1000, x.shape[0])
weights = np.random.randint(low=-10, high=+10, size=shape)
# weights json
data = {
    "v": 1,
    "dim": shape,
    "data": weights.flatten().astype(np.float32, copy=False)
}
# biases json
shape = [1000]
biases = np.random.randint(low=-10, high=+10, size=(1000))
data2 = {
    "v": 1,
    # ndarray can't take a single value, needs to be in json array
    "dim": shape,
    "data": biases.flatten().astype(np.float32, copy=False)
}
fully_connected = {
    "layer_type": Layer.FullyConnected,
    "input_shape": x.shape,
    "weights":data,
    "biases": data2
}
model.append(fully_connected)
x, n_params, n_multiplications, name = fully_connected_layer(x, weights, biases)
print(p.format(name, str(x.shape), n_params, n_multiplications))
# relu layer
relu = {
    "layer_type": Layer.Relu,
    "input_shape": x.shape,
}
model.append(relu)
x, n_params, n_multiplications, name = relu_layer(x)
print(p.format(name, str(x.shape), n_params, n_multiplications))
# fully connected
shape = (5, x.shape[0])
weights = np.random.randint(low=-10, high=+10, size=shape)
data = {
    "v": 1,
    "dim": shape,
    "data": weights.flatten().astype(np.float32, copy=False)
}
shape = [5]
biases = np.random.randint(low=-10, high=+10, size=shape)
data2 = {
    "v": 1,
    # ndarray can't take a single value, needs to be in json array
    "dim": [5],
    "data": biases.flatten().astype(np.float32, copy=False)
}
fully_connected = {
    "layer_type": Layer.FullyConnected,
    "input_shape": x.shape,
    "weights":data,
    "biases": data2
}
model.append(fully_connected)
x, n_params, n_multiplications, name = fully_connected_layer(x, weights, biases)
print(p.format(name, str(x.shape), n_params, n_multiplications))
# regression check: logits for the fixed seed must match the reference run
assert(np.isclose(x, [ -9404869, -11033050, -34374361, -20396580, 70483360.]).all())
# normalization
norm = {
    "layer_type": Layer.Normalize,
    "input_shape": x.shape,
}
model.append(norm)
# wrap the layer list in the top-level document structure
model = {
    "layers": model
}
x, n_params, n_multiplications, name = normalize(x)
print(p.format(name, str(x.shape), n_params, n_multiplications))
print("\nfinal output:", x)
# serialize with the custom Encoder (handles ndarray -> list)
model_data = json.dumps(model, cls=Encoder)
with open('../src/json/model.json', "w") as f:
    print('\ncreated model.json in the proto-neural-zkp/src/json folder')
    f.write(model_data)
ref_cnn/prover_perf.ipynb | {
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "cf9cc6d3-87d9-4c07-8773-39cfcc94d0f5",
"metadata": {},
"outputs": [],
"source": [
"# !pip install numpy pandas matplotlib watermark"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cb8f77a5-dc82-4c34-8ee8-4d510afb175d",
"metadata": {},
"outputs": [],
"source": [
"import os"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dc648e92-7bfa-4991-829b-012822604d0f",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d306a253-dc0d-425f-b77a-44b0dfadcaa3",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b73322c2-d188-4dae-8480-558a3317fd17",
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline\n",
"\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib as mpl\n",
"\n",
"# Defaults more suitable for 4K monitors\n",
"mpl.rcParams[\"figure.dpi\"] = 120\n",
"mpl.rcParams[\"figure.figsize\"] = [12.0, 8.0]\n",
"# Change font to match Latex\n",
"mpl.rcParams['mathtext.fontset'] = 'cm'\n",
"mpl.rcParams['font.family'] = 'CMU Serif'\n",
"# Export settings\n",
"mpl.rcParams[\"savefig.bbox\"] = 'tight'\n",
"mpl.rcParams[\"savefig.dpi\"] = 300\n",
"mpl.rcParams[\"savefig.facecolor\"] = 'white'\n",
"# High compression lossless WebP for animations using ffmpeg -> libwebp\n",
"mpl.rcParams[\"animation.writer\"] = 'ffmpeg'\n",
"mpl.rcParams[\"animation.codec\"] = 'webp'\n",
"mpl.rcParams[\"animation.ffmpeg_args\"] = [\"-lossless\", \"1\", \"-qscale\", \"100\"]\n",
"# Use anim.save(\"movie.webp\", fps=60)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "64b79d42-6bcf-49d4-899f-113f193f6d11",
"metadata": {},
"outputs": [],
"source": [
"import watermark.watermark as watermark\n",
"print(watermark(machine=True, iso8601=True, python=True, iversions=True, globals_=globals()))\n",
"print(os.getcwd())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "33eec25e-635a-4779-b34f-09f638a246d2",
"metadata": {},
"outputs": [],
"source": [
"df1 = pd.read_csv('../bench-1.csv')\n",
"df1['ops'] = df1.input_size * df1.output_size\n",
"df1"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e0a47916-268f-42fb-8880-7298aef847e6",
"metadata": {},
"outputs": [],
"source": [
"df2 = pd.read_csv('../bench-2.csv')\n",
"df2['ops'] = df2.input_size * df2.output_size\n",
"df2"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fd37c694-d984-46af-af89-febee74eefcf",
"metadata": {},
"outputs": [],
"source": [
"df5 = pd.read_csv('../bench-5.csv')\n",
"df5['ops'] = df5.input_size * df5.output_size\n",
"df5"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d1d9a5d1-a8fa-417d-9ea3-378018f9ff36",
"metadata": {},
"outputs": [],
"source": [
"df10 = pd.read_csv('../bench-10.csv')\n",
"df10['ops'] = df10.input_size * df10.output_size\n",
"df10"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "952a923e-8241-4b78-a156-961bf1b85570",
"metadata": {},
"outputs": [],
"source": [
"plt.title('Prover performance')\n",
"plt.xlabel('# of muladds')\n",
"plt.ylabel('proof time [s]')\n",
"plt.plot(df1.ops, df1.proof_time_s, label='1 thread')\n",
"plt.plot(df2.ops, df2.proof_time_s, label='2 thread')\n",
"plt.plot(df5.ops, df5.proof_time_s, label='5 thread')\n",
"plt.plot(df10.ops, df10.proof_time_s, label='10 threads')\n",
"plt.ylim([0, 120])\n",
"plt.legend()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "149a5d0c-a272-4fc4-b8fe-18771b9d6d44",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "f758c3a7-4694-4673-971b-f5087b70c2ad",
"metadata": {},
"outputs": [],
"source": [
"plt.title('Prover performance')\n",
"plt.xlabel('# of muladds')\n",
"plt.ylabel('utilization')\n",
"plt.plot(df1.ops, df1.proof_time_s / (df2.proof_time_s * 2), label='2 thread utilization')\n",
"plt.plot(df1.ops, df1.proof_time_s / (df5.proof_time_s * 5), label='5 thread utilization')\n",
"plt.plot(df1.ops, df1.proof_time_s / (df10.proof_time_s * 10), label='10 thread utilization')\n",
"plt.ylim([0, 1])\n",
"plt.legend()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "25aaf206-4a58-47f1-8272-3df48b5e6e54",
"metadata": {},
"outputs": [],
"source": [
"plt.title('Prover performance (1 threads)')\n",
"plt.xlabel('# of muladds')\n",
"plt.ylabel('memory [b]')\n",
"plt.plot(df1.ops, df1.proof_mem_b, label='1 threads')\n",
"plt.plot(df5.ops, df5.proof_mem_b, label='5 threads')\n",
"plt.plot(df10.ops, df10.proof_mem_b, label='10 threads')\n",
"plt.ylim([0, 1.1*np.max(df1.proof_mem_b)])\n",
"plt.legend()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3cef9590-7689-477f-bac3-21d68ff591c2",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "99d7647e-e9f2-48ba-8ea3-27ab41b06808",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| https://github.com/worldcoin/proto-neural-zkp |
ref_cnn/vanilla_cnn.ipynb | {
"cells": [
{
"cell_type": "markdown",
"id": "d5c490d4",
"metadata": {},
"source": [
"# Vanilla CNN layers implementation with numpy"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "794b9914-cce9-4d71-b20d-96df96f6403b",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4c3349aa-434a-4c9b-a451-feeb341aaa72",
"metadata": {},
"outputs": [],
"source": [
"def conv_layer(input, f):\n",
" \"\"\"\n",
" Evaluate the output of a convolutional layer using the filter f.\n",
" input.shape = (h, w, c)\n",
" f.shape = (c_out, hf, wf, c_in)\n",
" \"\"\"\n",
" h, w, c = input.shape\n",
" c_out, hf, wf, c_in = f.shape\n",
"\n",
" assert c == c_in, \"Input channels must match!\"\n",
" assert hf%2 == 1, \"Height of the filter (f.shape[1]) must be an uneven number!\"\n",
" assert wf%2 == 1, \"Width of the filter (f.shape[2]) must be an uneven number!\"\n",
"\n",
" # unilateral width and heght\n",
" dh = hf//2\n",
" dw = wf//2\n",
"\n",
" # after convolution dw and dh get substracted from all sides of the image, c_out is number of convolutions which dictates # of channels\n",
" # initialize matrix with 0s\n",
" output = np.zeros(shape=(h-2*dh, w-2*dw, c_out))\n",
"\n",
"# run convolution\n",
"# go over image height - kernel padding (2*dh)\n",
" for i in range(dh, h-dh):\n",
" # go over image width - kernel padding (2*dw)\n",
" for j in range(dw, w-dw):\n",
" # kernel slice\n",
" a = input[i-dh:i+dh+1, j-dw:j+dw+1]\n",
" for k in range(c_out):\n",
" # filter channel 1..c_out\n",
" b = f[k,:,:,:]\n",
" # apply filter\n",
" output[i-dh, j-dw, k] = (a*b).sum() # a.size multiplication\n",
" \n",
" n_params = f.size\n",
" n_multiplications = a.size * c_out * (w-2*dw) * (h-2*dh)\n",
" name = f\"conv {'x'.join([str(e) for e in f.shape])}\"\n",
" \n",
" return output, n_params, n_multiplications, name\n",
"\n",
"\n",
"def relu_layer(input): \n",
" output = input.copy()\n",
" output[output<0] = 0\n",
" \n",
" n_params = 0\n",
" n_multiplications = input.size\n",
" \n",
" return output, n_params, n_multiplications, \"relu\"\n",
"\n",
"\n",
"def max_pooling_layer(input, s):\n",
" \"\"\"\n",
" Apply max pooling layer using a sxs patch.\n",
" \"\"\"\n",
" h, w, c = input.shape\n",
"\n",
" assert h%s == 0, \"Height must be divisible by s!\"\n",
" assert w%s == 0, \"Width must be dibisible by s!\"\n",
"\n",
" output = np.zeros(shape=(h//s, w//s, c))\n",
"\n",
" for i in range(0, h, s):\n",
" for j in range(0, w, s):\n",
" for k in range(c):\n",
" a = input[i:i+s, j:j+s, k]\n",
" output[i//s, j//s, k] = a.max()\n",
" \n",
" n_params = 0\n",
" n_multiplications = input.size\n",
" return output, n_params, n_multiplications, \"max-pool\"\n",
"\n",
"\n",
"def flatten_layer(input):\n",
" output = input.flatten()\n",
" n_params = 0\n",
" n_multiplications = 0\n",
" return output, n_params, n_multiplications, \"flatten\"\n",
"\n",
"\n",
"def fully_connected_layer(input, weights, biases):\n",
" \"\"\"\n",
" Evaluate the output of a fully connected layer.\n",
" input.shape = (output_dim)\n",
" weights.shape = (output_dim, input_dim)\n",
" f.shape = (output_dim)\n",
" \"\"\"\n",
" assert input.ndim == 1, \"Input must be a flattend array!\"\n",
" assert weights.shape[1] == input.shape[0], \"Input shapes must match!\"\n",
" assert weights.shape[0] == biases.shape[0], \"Output shapes must match!\"\n",
"\n",
" output = np.dot(weights, input) + biases\n",
" \n",
" n_params = weights.size + biases.size\n",
" n_multiplications = weights.size\n",
" name = f\"full {'x'.join([str(e) for e in weights.shape])}\"\n",
" \n",
" return output, n_params, n_multiplications, name\n",
"\n",
"\n",
"def normalize(input):\n",
" output = input / np.linalg.norm(input)\n",
" n_params = 0\n",
" n_multiplications = 1 + input.size\n",
" return output, n_params, n_multiplications, \"normalize\"\n"
]
},
{
"cell_type": "markdown",
"id": "2896d5ed",
"metadata": {},
"source": [
"# Vanilla CNN model"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "00fd6550",
"metadata": {},
"outputs": [],
"source": [
"np.random.seed(12345)\n",
"\n",
"p = \"{:>20} | {:>15} | {:>15} | {:>15} \"\n",
"print(p.format(\"layer\", \"output shape\", \"#parameters\", \"#ops\"))\n",
"print(p.format(\"-\"*20, \"-\"*15, \"-\"*15, \"-\"*15))\n",
"\n",
"# input\n",
"x = np.random.randint(low=-5, high=5, size=(120,80,3))\n",
"\n",
"# conv layer\n",
"f = np.random.randint(low=-10, high=+10, size=(32,5,5,3)) \n",
"x, n_params, n_multiplications, name = conv_layer(x, f)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"# max pooling\n",
"x, n_params, n_multiplications, name = max_pooling_layer(x, 2)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"# relu layer\n",
"x, n_params, n_multiplications, name = relu_layer(x)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"# conv layer\n",
"f = np.random.randint(low=-10, high=+10, size=(32,5,5,32)) \n",
"x, n_params, n_multiplications, name = conv_layer(x, f)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"# max pooling\n",
"x, n_params, n_multiplications, name = max_pooling_layer(x, 2)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"# relu layer\n",
"x, n_params, n_multiplications, name = relu_layer(x)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"# flatten\n",
"x, n_params, n_multiplications, name = flatten_layer(x)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"# fully connected\n",
"weights = np.random.randint(low=-10, high=+10, size=(1000, x.shape[0])) \n",
"biases = np.random.randint(low=-10, high=+10, size=(1000)) \n",
"x, n_params, n_multiplications, name = fully_connected_layer(x, weights, biases)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"# relu layer\n",
"x, n_params, n_multiplications, name = relu_layer(x)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"# fully connected\n",
"weights = np.random.randint(low=-10, high=+10, size=(5, x.shape[0])) \n",
"biases = np.random.randint(low=-10, high=+10, size=(5)) \n",
"x, n_params, n_multiplications, name = fully_connected_layer(x, weights, biases)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"assert(np.isclose(x, [ -9404869, -11033050, -34374361, -20396580, 70483360.]).all())\n",
"\n",
"# normalization\n",
"x, n_params, n_multiplications, name = normalize(x)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"print(\"\\nfinal output:\", x)"
]
},
{
"cell_type": "markdown",
"id": "43c065d3-e68b-4a63-9e7e-237dbf704819",
"metadata": {},
"source": [
"# JSON example"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d97c695a-f706-401f-b85c-b46396a8c42f",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"\n",
"# Encoder\n",
"from json import JSONEncoder\n",
"\n",
"class Encoder(JSONEncoder):\n",
" def default(self, obj):\n",
" if isinstance(obj, np.ndarray):\n",
" return obj.tolist()\n",
" return JSONEncoder.default(self, obj)\n",
"\n",
"np.random.seed(12345)\n",
"\n",
"shape = (3,3,3)\n",
"x = np.random.randint(low=-5, high=5, size=shape)\n",
"\n",
"x = x.flatten()\n",
"\n",
"# Serialization\n",
"data = {\n",
" \"v\": 1,\n",
" \"dim\": shape,\n",
" \"data\": x\n",
" }\n",
"\n",
"json_data = json.dumps(data, cls=Encoder)\n",
"\n",
"with open(\"../src/json/test.json\", \"w\") as f:\n",
" f.write(json_data)"
]
},
{
"cell_type": "markdown",
"id": "e9f4e31a",
"metadata": {},
"source": [
"# Miscellaneous"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a619bbbb-fad3-46ae-83b4-b74398cb953b",
"metadata": {},
"outputs": [],
"source": [
"8388608"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5a1787ed-b448-40e2-96e7-7d597a4660e2",
"metadata": {},
"outputs": [],
"source": [
"4897963 / 8388608"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "33087999-ac7f-4ebd-b290-8dbbe3d0c51b",
"metadata": {},
"outputs": [],
"source": [
"np.log(262144)/np.log(2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ae322e92-7fcb-4270-9025-392773fc6da4",
"metadata": {},
"outputs": [],
"source": [
"4896000 * 3 / (14688 * 1000)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3078557e-adc4-4385-be93-567aa7d93e2d",
"metadata": {},
"outputs": [],
"source": [
"np.log(14688 * 800 / 3) / np.log(2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8316d646-b622-47b1-a58c-2ceb5de41c74",
"metadata": {},
"outputs": [],
"source": [
"14688 * 400"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8abe8302-5958-427f-9453-7e2c4ff2cdc9",
"metadata": {},
"outputs": [],
"source": [
"14688 * 1600"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "70387136-104a-4a18-9f20-72847ecee8a0",
"metadata": {},
"outputs": [],
"source": [
"234076 * 100"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "93298188-6ed4-4e9e-b3cc-20f9778e38bc",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"output = [-6276474000, 8343393300, 8266027500, -7525360600, 7814137000]\n",
"norm = np.linalg.norm(output)\n",
"norm"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d1b09558",
"metadata": {},
"outputs": [],
"source": [
"import math\n",
"math.sqrt(6276474000**2 + 8343393300**2 + 8266027500**2 + 7525360600**2 + 7814137000**2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c9c85ea1",
"metadata": {},
"outputs": [],
"source": [
"output/norm"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.13 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
},
"vscode": {
"interpreter": {
"hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e"
}
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| https://github.com/worldcoin/proto-neural-zkp |
ref_cnn/vanilla_cnn.py | import numpy as np
def conv_layer(input, f):
    """
    Evaluate the output of a valid (no padding) convolutional layer using
    the filter bank f.

    input.shape = (h, w, c)
    f.shape = (c_out, hf, wf, c_in)

    Returns (output, n_params, n_multiplications, name) where output has
    shape (h - hf + 1, w - wf + 1, c_out).
    """
    h, w, c = input.shape
    c_out, hf, wf, c_in = f.shape
    assert c == c_in, "Input channels must match!"
    assert hf%2 == 1, "Height of the filter (f.shape[1]) must be an uneven number!"
    assert wf%2 == 1, "Width of the filter (f.shape[2]) must be an uneven number!"
    # unilateral (half) kernel extents
    dh = hf//2
    dw = wf//2
    output = np.zeros(shape=(h-2*dh, w-2*dw, c_out))
    for i in range(dh, h-dh):
        for j in range(dw, w-dw):
            # kernel-sized window (all channels) centred on (i, j)
            a = input[i-dh:i+dh+1, j-dw:j+dw+1]
            for k in range(c_out):
                b = f[k,:,:,:]
                output[i-dh, j-dw, k] = (a*b).sum() # hf*wf*c_in multiplications
    n_params = f.size
    # Bug fix: this previously used `a.size`, which raises NameError when the
    # loops never run (empty output); compute the count from shapes instead.
    # hf*wf*c_in == a.size for every window, so the value is unchanged.
    n_multiplications = hf * wf * c_in * c_out * (w-2*dw) * (h-2*dh)
    name = f"conv {'x'.join([str(e) for e in f.shape])}"
    return output, n_params, n_multiplications, name
def relu_layer(input):
    """Element-wise ReLU: clamp negative entries of `input` to 0.

    Returns (output, n_params, n_multiplications, name). Consistency fix:
    the op count was hard-coded to 0 here, while the sibling
    implementations in this repo (vanilla_cnn.ipynb and the JSON export
    script) count one comparison-op per element; use input.size.
    """
    output = input.copy()
    output[output<0] = 0
    n_params = 0
    n_multiplications = input.size
    return output, n_params, n_multiplications, "relu"
def max_pooling_layer(input, s):
    """
    Apply max pooling layer using a sxs patch.

    input.shape = (h, w, c); h and w must be divisible by s.
    Returns (output, n_params, n_multiplications, name) with output of
    shape (h//s, w//s, c).
    """
    h, w, c = input.shape
    # typo fix: the messages previously read "devisable"
    assert h%s == 0, "Height must be divisible by s!"
    assert w%s == 0, "Width must be divisible by s!"
    output = np.zeros(shape=(h//s, w//s, c))
    for i in range(0, h, s):
        for j in range(0, w, s):
            for k in range(c):
                # sxs window of channel k, reduced to its maximum
                a = input[i:i+s, j:j+s, k]
                output[i//s, j//s, k] = a.max()
    n_params = 0
    # consistency fix: was hard-coded to 0; the sibling implementations
    # count one comparison-op per input element.
    n_multiplications = input.size
    return output, n_params, n_multiplications, "max-pool"
def flatten_layer(input):
    """Collapse `input` into a 1-D copy; contributes no params or ops."""
    flat = np.reshape(input, -1).copy()
    return flat, 0, 0, "flatten"
def fully_connected_layer(input, weights, biases):
    """
    Evaluate the output of a fully connected layer.

    input.shape = (input_dim,)
    weights.shape = (output_dim, input_dim)
    biases.shape = (output_dim,)
    Returns (output, n_params, n_multiplications, name).
    """
    assert input.ndim == 1, "Input must be a flattend array!"
    assert weights.shape[1] == input.shape[0], "Input shapes must match!"
    assert weights.shape[0] == biases.shape[0], "Output shapes must match!"
    output = np.dot(weights, input) + biases
    n_params = weights.size + biases.size
    n_multiplications = weights.size
    # Bug fix: this layer was mislabelled "conv"; use "full", consistent
    # with the notebook and JSON-export implementations in this repo.
    name = f"full {'x'.join([str(e) for e in weights.shape])}"
    return output, n_params, n_multiplications, name
def normalize(input):
    """Scale `input` to unit L2 norm (one op for the norm, one per element)."""
    length = np.linalg.norm(input)
    scaled = input / length
    return scaled, 0, input.size + 1, "normalize"
if __name__ == "__main__":
    # Demo: run a fixed-seed forward pass through the reference CNN and
    # print per-layer output shapes, parameter counts and op counts.
    np.random.seed(12345)
    # table header
    p = "{:>20} | {:>15} | {:>15} | {:>15} "
    print(p.format("layer", "output shape", "#parameters", "#ops"))
    print(p.format("-"*20, "-"*15, "-"*15, "-"*15))
    # input
    x = np.random.randint(low=-5, high=5, size=(120,80,3))
    # conv layer
    f = np.random.randint(low=-10, high=+10, size=(32,5,5,3))
    x, n_params, n_multiplications, name = conv_layer(x, f)
    print(p.format(name, str(x.shape), n_params, n_multiplications))
    # max pooling
    x, n_params, n_multiplications, name = max_pooling_layer(x, 2)
    print(p.format(name, str(x.shape), n_params, n_multiplications))
    # relu layer
    x, n_params, n_multiplications, name = relu_layer(x)
    print(p.format(name, str(x.shape), n_params, n_multiplications))
    # conv layer
    f = np.random.randint(low=-10, high=+10, size=(32,5,5,32))
    x, n_params, n_multiplications, name = conv_layer(x, f)
    print(p.format(name, str(x.shape), n_params, n_multiplications))
    # max pooling
    x, n_params, n_multiplications, name = max_pooling_layer(x, 2)
    print(p.format(name, str(x.shape), n_params, n_multiplications))
    # relu layer
    x, n_params, n_multiplications, name = relu_layer(x)
    print(p.format(name, str(x.shape), n_params, n_multiplications))
    # flatten
    x, n_params, n_multiplications, name = flatten_layer(x)
    print(p.format(name, str(x.shape), n_params, n_multiplications))
    # fully connected
    weights = np.random.randint(low=-10, high=+10, size=(1000, x.shape[0]))
    biases = np.random.randint(low=-10, high=+10, size=(1000))
    x, n_params, n_multiplications, name = fully_connected_layer(x, weights, biases)
    print(p.format(name, str(x.shape), n_params, n_multiplications))
    # relu layer
    x, n_params, n_multiplications, name = relu_layer(x)
    print(p.format(name, str(x.shape), n_params, n_multiplications))
    # fully connected
    weights = np.random.randint(low=-10, high=+10, size=(5, x.shape[0]))
    biases = np.random.randint(low=-10, high=+10, size=(5))
    x, n_params, n_multiplications, name = fully_connected_layer(x, weights, biases)
    print(p.format(name, str(x.shape), n_params, n_multiplications))
    # regression check: logits for the fixed seed must match the reference run
    assert(np.isclose(x, [ -9404869, -11033050, -34374361, -20396580, 70483360.]).all())
    # normalization
    x, n_params, n_multiplications, name = normalize(x)
    print(p.format(name, str(x.shape), n_params, n_multiplications))
    print("\nfinal output:", x)
src/allocator.rs | use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
pub use std::alloc::System as StdAlloc;
use std::alloc::{GlobalAlloc, Layout};
#[cfg(feature = "mimalloc")]
pub use mimalloc::MiMalloc;
/// A [`GlobalAlloc`] wrapper that forwards to an inner allocator while
/// keeping rough allocation statistics in relaxed atomic counters.
#[cfg_attr(feature = "std", derive(Debug))]
pub struct Allocator<T: GlobalAlloc> {
    /// The allocator actually servicing requests.
    inner: T,
    /// Bytes currently live (incremented on alloc, decremented on dealloc).
    pub allocated: AtomicUsize,
    /// High-water mark of `allocated`.
    pub peak_allocated: AtomicUsize,
    /// Cumulative bytes ever allocated (never decremented).
    pub total_allocated: AtomicUsize,
    /// Size of the largest single allocation seen.
    pub largest_allocated: AtomicUsize,
    /// Number of allocation calls (alloc/alloc_zeroed, plus growing reallocs).
    pub num_allocations: AtomicUsize,
}
/// Construct a counting allocator backed by the system allocator.
/// Only compiled when the `mimalloc` feature is disabled.
#[cfg(not(feature = "mimalloc"))]
// TODO: Turn this into a generic constructor taking an `inner: T` once
// #![feature(const_fn_trait_bound)] is stable.
pub const fn new_std() -> Allocator<StdAlloc> {
    Allocator::new(StdAlloc)
}
/// Construct a counting allocator backed by mimalloc (feature-gated).
#[cfg(feature = "mimalloc")]
pub const fn new_mimalloc() -> Allocator<MiMalloc> {
    Allocator::new(MiMalloc)
}
impl<T: GlobalAlloc> Allocator<T> {
    /// Wrap `alloc`, starting all statistics counters at zero.
    pub const fn new(alloc: T) -> Self {
        Self {
            inner: alloc,
            allocated: AtomicUsize::new(0),
            peak_allocated: AtomicUsize::new(0),
            total_allocated: AtomicUsize::new(0),
            largest_allocated: AtomicUsize::new(0),
            num_allocations: AtomicUsize::new(0),
        }
    }

    /// Record an allocation of `size` bytes in the statistics counters.
    fn count_alloc(&self, size: usize) {
        // TODO: We are doing a lot of atomic operations here, what is
        // the performance impact?
        // `fetch_add` returns the PREVIOUS value, so the live total after
        // this allocation is `allocated + size`.
        let allocated = self.allocated.fetch_add(size, Relaxed);
        self.total_allocated.fetch_add(size, Relaxed);
        self.num_allocations.fetch_add(1, Relaxed);
        // Bug fix: previously `fetch_max(allocated, ..)`, which recorded the
        // live size *before* this allocation, so the peak always lagged by
        // one allocation. Still only approximate under concurrent allocs.
        self.peak_allocated.fetch_max(allocated + size, Relaxed);
        self.largest_allocated.fetch_max(size, Relaxed);
    }

    /// Record a deallocation of `size` bytes (only the live count shrinks).
    fn count_dealloc(&self, size: usize) {
        self.allocated.fetch_sub(size, Relaxed);
    }
}
// GlobalAlloc is an unsafe trait for allocators
#[allow(unsafe_code)]
unsafe impl<T: GlobalAlloc> GlobalAlloc for Allocator<T> {
    /// Count the requested size, then delegate to the inner allocator.
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        self.count_alloc(layout.size());
        self.inner.alloc(layout)
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        self.count_dealloc(layout.size());
        self.inner.dealloc(ptr, layout);
    }

    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        self.count_alloc(layout.size());
        self.inner.alloc_zeroed(layout)
    }

    /// Record only the size delta: a growing realloc counts as an
    /// allocation of the extra bytes, a shrinking one as a deallocation.
    /// NOTE(review): counters are adjusted before delegating, so if the
    /// inner realloc fails (returns null) the statistics are still
    /// updated — confirm this over-counting is acceptable.
    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        let old_size = layout.size();
        if new_size >= old_size {
            self.count_alloc(new_size - old_size);
        } else {
            self.count_dealloc(old_size - new_size);
        }
        self.inner.realloc(ptr, layout, new_size)
    }
}
| https://github.com/worldcoin/proto-neural-zkp |
src/anyhow.rs | use eyre::eyre;
/// Adapter trait for converting `anyhow` results into `eyre` results.
pub trait MapAny<T> {
    fn map_any(self) -> eyre::Result<T>;
}
impl<T> MapAny<T> for anyhow::Result<T> {
    /// Convert by formatting the `anyhow` error into a fresh `eyre` report.
    /// NOTE(review): only the top-level `Display` message is carried over;
    /// the original source chain is flattened into a string.
    fn map_any(self) -> eyre::Result<T> {
        self.map_err(|e| eyre!(e.to_string()))
    }
}
| https://github.com/worldcoin/proto-neural-zkp |
src/cli/logging.rs | #![warn(clippy::all, clippy::pedantic, clippy::cargo, clippy::nursery)]
use core::str::FromStr;
use eyre::{bail, Error as EyreError, Result as EyreResult, WrapErr as _};
use std::process::id as pid;
use structopt::StructOpt;
use tracing::{info, Level, Subscriber};
use tracing_log::{AsLog as _, LogTracer};
use tracing_subscriber::{
filter::{LevelFilter, Targets},
fmt::{self, time::Uptime},
layer::SubscriberExt,
Layer, Registry,
};
/// Output format for log events, selected via `--log-format`.
#[derive(Debug, PartialEq)]
enum LogFormat {
    Compact,
    Pretty,
    Json,
}
impl LogFormat {
    /// Build a `tracing_subscriber` formatting layer that writes events to
    /// stderr in this format. The arms produce different concrete formatter
    /// types, so each result is boxed to a common `dyn Layer` type.
    fn to_layer<S>(&self) -> impl Layer<S>
    where
        S: Subscriber + for<'a> tracing_subscriber::registry::LookupSpan<'a> + Send + Sync,
    {
        let layer = fmt::Layer::new().with_writer(std::io::stderr);
        match self {
            LogFormat::Compact => {
                // Compact output additionally shows time since process start.
                Box::new(layer.event_format(fmt::format().with_timer(Uptime::default()).compact()))
                    as Box<dyn Layer<S> + Send + Sync>
            }
            LogFormat::Pretty => Box::new(layer.event_format(fmt::format().pretty())),
            LogFormat::Json => Box::new(layer.event_format(fmt::format().json())),
        }
    }
}
impl FromStr for LogFormat {
    type Err = EyreError;

    /// Parse one of `compact`, `pretty` or `json` (case-sensitive);
    /// any other input is rejected with an error.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "compact" => Ok(Self::Compact),
            "pretty" => Ok(Self::Pretty),
            "json" => Ok(Self::Json),
            _ => bail!("Invalid log format: {}", s),
        }
    }
}
/// Command-line options controlling logging: verbosity, filter and format.
#[derive(Debug, PartialEq, StructOpt)]
pub struct Options {
    /// Verbose mode (-v, -vv, -vvv, etc.)
    #[structopt(short, long, parse(from_occurrences))]
    verbose: usize,

    /// Apply an env_filter compatible log filter
    #[structopt(long, env, default_value)]
    log_filter: String,

    /// Log format, one of 'compact', 'pretty' or 'json'
    #[structopt(long, env, default_value = "compact")]
    log_format: LogFormat,
}
impl Options {
    /// Install the global tracing subscriber and `log` bridge according to
    /// these options, then log startup/version information.
    /// Must be called at most once per process (setting the global default
    /// subscriber again returns an error).
    #[allow(dead_code)]
    pub fn init(&self) -> EyreResult<()> {
        // Log filtering is a combination of `--log-filter` and `--verbose` arguments.
        let verbosity = {
            // `all` applies to every target, `app` to this crate's targets.
            let (all, app) = match self.verbose {
                0 => (Level::INFO, Level::INFO),
                1 => (Level::INFO, Level::DEBUG),
                2 => (Level::INFO, Level::TRACE),
                3 => (Level::DEBUG, Level::TRACE),
                _ => (Level::TRACE, Level::TRACE),
            };
            Targets::new()
                .with_default(all)
                .with_target("lib", app)
                .with_target(env!("CARGO_CRATE_NAME"), app)
        };
        let log_filter = if self.log_filter.is_empty() {
            Targets::new()
        } else {
            self.log_filter
                .parse()
                .wrap_err("Error parsing log-filter")?
        };
        // FIXME: The log-filter can not overwrite the global log level.
        let targets = verbosity.with_targets(log_filter);
        // Route events to stderr
        let subscriber = Registry::default().with(self.log_format.to_layer().with_filter(targets));
        tracing::subscriber::set_global_default(subscriber)?;
        // Enable `log` crate compatibility with max level hint
        LogTracer::builder()
            .with_max_level(LevelFilter::current().as_log())
            .init()?;
        // Log version information
        info!(
            host = env!("TARGET"),
            pid = pid(),
            main = &crate::main as *const _ as usize,
            commit = &env!("COMMIT_SHA")[..8],
            "{name} {version}",
            name = env!("CARGO_CRATE_NAME"),
            version = env!("CARGO_PKG_VERSION"),
        );
        Ok(())
    }
}
#[cfg(test)]
pub mod test {
    use super::*;

    /// CLI parsing: repeated `-v` occurrences accumulate into `verbose`,
    /// and unspecified options fall back to their defaults.
    #[test]
    fn test_parse_args() {
        let cmd = "arg0 -v --log-filter foo -vvv";
        let options = Options::from_iter_safe(cmd.split(' ')).unwrap();
        assert_eq!(options, Options {
            verbose: 4,
            log_filter: "foo".to_owned(),
            log_format: LogFormat::Compact,
        });
    }
}
| https://github.com/worldcoin/proto-neural-zkp |
src/cli/main.rs | #![warn(clippy::all, clippy::pedantic, clippy::cargo, clippy::nursery)]
use neural_zkp as lib;
mod logging;
mod random;
use eyre::{Result as EyreResult, WrapErr as _};
use structopt::StructOpt;
use tokio::runtime::{self, Runtime};
use tracing::info;
/// Long version string shown by `--version`: package version plus build
/// metadata (commit, target, dates, authors, homepage, description)
/// injected at compile time via environment variables set by the build.
const VERSION: &str = concat!(
    env!("CARGO_PKG_VERSION"),
    "\n",
    env!("COMMIT_SHA"),
    " ",
    env!("COMMIT_DATE"),
    "\n",
    env!("TARGET"),
    " ",
    env!("BUILD_DATE"),
    "\n",
    env!("CARGO_PKG_AUTHORS"),
    "\n",
    env!("CARGO_PKG_HOMEPAGE"),
    "\n",
    env!("CARGO_PKG_DESCRIPTION"),
);
/// Top-level CLI options: flattens the logging, library and RNG option
/// groups, plus the compute-thread override.
#[derive(StructOpt)]
struct Options {
    #[structopt(flatten)]
    log: logging::Options,

    #[structopt(flatten)]
    app: lib::Options,

    #[structopt(flatten)]
    random: random::Options,

    /// Number of compute threads to use (defaults to number of cores)
    #[structopt(long)]
    threads: Option<usize>,
}
/// Entry point: parse CLI, initialize logging/RNG/rayon/tokio, then run the
/// library's async main to completion on the tokio runtime.
fn main() -> EyreResult<()> {
    // Install error handler
    color_eyre::install()?;
    // Parse CLI and handle help and version (which will stop the application).
    let matches = Options::clap().long_version(VERSION).get_matches();
    let options = Options::from_clap(&matches);
    // Start subsystems
    options.log.init()?;
    let rng = options.random.init();
    init_rayon(options.threads)?;
    let runtime = init_tokio()?;
    // Run main
    let main_future = lib::main(rng, options.app);
    runtime.block_on(main_future)?;
    // Terminate successfully
    info!("Program terminating normally");
    Ok(())
}
/// Configures the global rayon thread pool.
///
/// When `threads` is `None`, rayon's default (one thread per core) is kept.
///
/// # Errors
/// Fails if the global pool was already initialized with a different size.
fn init_rayon(threads: Option<usize>) -> EyreResult<()> {
    if let Some(threads) = threads {
        rayon::ThreadPoolBuilder::new()
            .num_threads(threads)
            .build_global()
            // `wrap_err` for consistency with the rest of this crate's eyre
            // usage (was `.context`, the anyhow-compat alias).
            .wrap_err("Failed to build thread pool.")?;
    }
    info!(
        "Using {} compute threads on {} cores",
        rayon::current_num_threads(),
        num_cpus::get()
    );
    Ok(())
}
/// Builds a multi-threaded tokio runtime with all drivers enabled.
fn init_tokio() -> EyreResult<Runtime> {
    let runtime = runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .wrap_err("Error creating Tokio runtime")?;
    // TODO: Log num_workers once RuntimeMetrics are stable
    Ok(runtime)
}
// Smoke tests for tracing integration in sync and async contexts.
#[cfg(test)]
pub mod test {
use super::*;
use tracing::{error, warn};
use tracing_test::traced_test;
// Sync test: events emitted on the current thread are captured.
#[test]
#[traced_test]
fn test_with_log_output() {
error!("logged on the error level");
assert!(logs_contain("logged on the error level"));
}
// Async test: events from spawned tasks (other threads) are captured too.
#[tokio::test]
#[traced_test]
#[allow(clippy::semicolon_if_nothing_returned)] // False positive
async fn async_test_with_log() {
// Local log
info!("This is being logged on the info level");
// Log from a spawned task (which runs in a separate thread)
tokio::spawn(async {
warn!("This is being logged on the warn level from a spawned task");
})
.await
.unwrap();
// Ensure that `logs_contain` works as intended
assert!(logs_contain("logged on the info level"));
assert!(logs_contain("logged on the warn level"));
assert!(!logs_contain("logged on the error level"));
}
}
| https://github.com/worldcoin/proto-neural-zkp |
src/cli/random.rs | use rand::{rngs::OsRng, RngCore, SeedableRng};
use rand_pcg::Mcg128Xsl64;
use structopt::StructOpt;
use tracing::info;
pub type Generator = Mcg128Xsl64;
// CLI options for random-number generation.
// NOTE: the `///` field comment below doubles as structopt help text and is
// therefore left untouched. NOTE(review): it says "periodically generated",
// but `init` draws a single seed per run — confirm intended wording.
#[derive(StructOpt)]
pub struct Options {
/// Random seed for deterministic random number generation.
/// If not specified a seed is periodically generated from OS entropy.
#[structopt(long, parse(try_from_str = parse_hex_u64))]
seed: Option<u64>,
}
impl Options {
/// Builds the RNG: seeded from `--seed` when given, otherwise from a
/// fresh OS-entropy seed. The seed is logged so runs can be reproduced.
#[must_use]
pub fn init(self) -> Generator {
let rng_seed = self.seed.unwrap_or_else(random_seed);
// NOTE(review): `{rng_seed:16x}` pads with spaces, not zeros — if a
// fixed-width hex dump was intended this should be `{rng_seed:016x}`.
info!("Using random seed {rng_seed:16x}");
Mcg128Xsl64::seed_from_u64(rng_seed)
}
}
/// Draws a fresh 64-bit seed from the operating system's entropy source.
#[must_use]
fn random_seed() -> u64 {
    let mut entropy = OsRng::default();
    entropy.next_u64()
}
/// Parses a `u64` from a hexadecimal string.
///
/// Generalized to accept an optional `0x`/`0X` prefix, since seeds are often
/// copied with one; bare hex digits keep parsing exactly as before.
///
/// # Errors
/// Returns a [`std::num::ParseIntError`] if the digits are not valid hex.
#[must_use]
fn parse_hex_u64(src: &str) -> Result<u64, std::num::ParseIntError> {
    let digits = src
        .strip_prefix("0x")
        .or_else(|| src.strip_prefix("0X"))
        .unwrap_or(src);
    u64::from_str_radix(digits, 16)
}
| https://github.com/worldcoin/proto-neural-zkp |
src/layers/conv.rs | use ndarray::{s, Array, Array3, Array4, ArrayD, ArrayViewD, Ix3};
use serde::Serialize;
use super::{Layer, LayerJson};
/// 2-D convolution layer (stride 1, no padding — "valid" convolution).
#[derive(Clone, Serialize)]
pub struct Convolution {
// Filter bank with shape (c_out, kernel_h, kernel_w, c_in).
kernel: Array4<f32>,
// Human-readable label, e.g. "conv 32x5x5x3".
name: String,
// Expected input shape, as [height, width, channels].
input_shape: Vec<usize>,
}
impl Convolution {
    /// Builds a convolution layer from a `(c_out, kernel_h, kernel_w, c_in)`
    /// filter bank and the expected input shape, deriving a display name
    /// from the kernel dimensions.
    #[must_use]
    pub fn new(kernel: Array4<f32>, input_shape: Vec<usize>) -> Convolution {
        let (c_out, hf, wf, c_in) = kernel.dim();
        Convolution {
            name: format!("conv {c_out}x{hf}x{wf}x{c_in}"),
            kernel,
            input_shape,
        }
    }
}
impl Layer for Convolution {
    fn box_clone(&self) -> Box<dyn Layer> {
        Box::new(self.clone())
    }

    /// Valid convolution: slides the kernel over every `(hf, wf, c_in)`
    /// window of the input and produces one output channel per filter.
    fn apply(&self, input: &ArrayViewD<f32>) -> ArrayD<f32> {
        // height, width, channels
        let input = input.clone().into_dimensionality::<Ix3>().unwrap();
        let (h, w, c) = input.dim();
        // output channels, kernel height, kernel width, input channels
        let (c_out, hf, wf, c_in) = self.kernel.dim();
        assert_eq!(c, c_in, "input channels must match");
        assert!(hf % 2 == 1, "height of the kernel must be an odd number");
        assert!(wf % 2 == 1, "width of the kernel must be an odd number");
        let window_dim = (hf, wf, c_in);
        let output_shape = (h - hf + 1, w - wf + 1);
        let mut output = Array3::zeros((output_shape.0, output_shape.1, c_out));
        for i in 0..c_out {
            let mut output_mut = output.slice_mut(s![.., .., i]);
            let kernel = self.kernel.slice(s![i, .., .., ..]);
            // Elementwise multiply-and-sum of each window with this filter.
            let values = input
                .windows(window_dim)
                .into_iter()
                .map(|w| (&w * &kernel).sum());
            let values = Array::from_iter(values)
                .into_shape(output_shape)
                .expect("Kernel result dimensions mismatch");
            output_mut.assign(&values);
        }
        output.into_dyn()
    }

    fn input_shape(&self) -> Vec<usize> {
        self.input_shape.clone()
    }

    fn name(&self) -> &str {
        &self.name
    }

    fn num_params(&self) -> usize {
        self.kernel.len()
    }

    /// Number of scalar multiplications performed by `apply`.
    ///
    /// BUG FIX: each output element multiplies a full `(hf, wf, c_in)` window
    /// against the kernel, so the count includes the `c_in` factor (the
    /// previous formula omitted it, undercounting for multi-channel inputs).
    fn num_muls(&self) -> usize {
        // output channels, kernel height, kernel width, input channels
        let (c_out, hf, wf, c_in) = self.kernel.dim();
        let output_shape = self.output_shape();
        output_shape[0] * output_shape[1] * c_out * hf * wf * c_in
    }

    fn output_shape(&self) -> Vec<usize> {
        let input_shape = self.input_shape();
        let h = input_shape[0];
        let w = input_shape[1];
        let c = input_shape[2];
        // output channels, kernel height, kernel width, input channels
        let (c_out, hf, wf, c_in) = self.kernel.dim();
        assert_eq!(c, c_in, "input channels must match");
        assert!(hf % 2 == 1, "height of the kernel must be an odd number");
        assert!(wf % 2 == 1, "width of the kernel must be an odd number");
        vec![h - hf + 1, w - wf + 1, c_out]
    }

    fn to_json(&self) -> LayerJson {
        LayerJson::Convolution {
            kernel: self.kernel.clone().into(),
            input_shape: self.input_shape(),
        }
    }
}
// Tests for the convolution layer.
#[cfg(test)]
pub mod test {
use super::*;
// Hand-checked 5x5x1 input against a single 3x3x1 filter; compares with
// precomputed expected values within a small float tolerance.
// NOTE(review): `input_shape` is passed as (1, 5, 5, 1) although `apply`
// receives a rank-3 (5, 5, 1) array — this test never calls
// `output_shape`, so the mismatch goes unnoticed; confirm intent.
#[test]
fn test_small() {
use ndarray::array;
let input = array![
[
[0.51682377_f32],
[-2.3552072],
[-0.120499134],
[2.3132505],
[-3.470844]
],
[[-1.1741579], [3.4295654], [-1.2318683], [-1.9749749], [
-0.8161392
]],
[[4.7562046], [-2.8918338], [2.308525], [2.6111293], [
-1.0765815
]],
[[-4.1224194], [3.022316], [-4.5339823], [4.2970715], [
2.6773367
]],
[[-4.289216], [-3.3795083], [-2.651745], [-1.1392272], [
3.9378529
]]
];
let kernel: Array4<f32> = array![
[[1.0336475_f32], [-4.7104144], [-0.24099827]],
[[4.626501], [-6.941688], [-2.3483157]],
[[6.859131], [-2.4637365], [-3.9499497]]
]
.into_shape((1, 3, 3, 1))
.unwrap();
let expected = array![
[[15.940444], [-9.205237], [13.396301]],
[[1.7727833], [-10.784569], [-48.952152]],
[[-22.043327], [8.725433], [-97.68271]]
];
let conv = Convolution::new(kernel, vec![1, 5, 5, 1]);
let result = conv.apply(&input.into_dyn().view());
let delta = result - &expected;
let max_error = delta.into_iter().map(f32::abs).fold(0.0, f32::max);
dbg!(max_error);
assert!(max_error < 10.0 * f32::EPSILON);
}
// Larger randomized run: checks the declared output shape (120-5+1 = 116,
// 80-5+1 = 76, 32 filters) and prints layer statistics.
#[test]
fn conv_test() {
use ndarray_rand::{rand::SeedableRng, rand_distr::Uniform, RandomExt};
use rand::rngs::StdRng;
// man of culture fixed randomness seed
let seed = 694201337;
let mut rng = StdRng::seed_from_u64(seed);
let input = Array3::random_using((120, 80, 3), Uniform::<f32>::new(-5., 5.), &mut rng);
let kernel = Array4::random_using((32, 5, 5, 3), Uniform::<f32>::new(-10., 10.), &mut rng);
let conv = Convolution::new(kernel, vec![120, 80, 3]);
let result = conv
.apply(&input.into_dyn().view())
.into_dimensionality::<Ix3>()
.unwrap();
assert_eq!(conv.output_shape(), vec![116, 76, 32]);
let (dim_x, dim_y, dim_z) = result.dim();
println!(
"# of parameters: {}\n
output dim: {}x{}x{}\n
# of multiplications: {}
{} output:\n
{}",
conv.num_params(),
dim_x,
dim_y,
dim_z,
conv.num_muls(),
conv.name(),
result
);
}
}
| https://github.com/worldcoin/proto-neural-zkp |
src/layers/flatten.rs | use ndarray::{Array1, ArrayD, ArrayViewD};
use serde::Serialize;
use super::{Layer, LayerJson};
/// Layer that flattens its input into a 1-D vector (no parameters).
#[derive(Clone, Serialize)]
pub struct Flatten {
// Fixed label "flatten".
name: String,
// Expected input shape, as [height, width, channels].
input_shape: Vec<usize>,
}
impl Flatten {
    /// Builds a flatten layer for the given input shape.
    #[must_use]
    pub fn new(input_shape: Vec<usize>) -> Flatten {
        let name = String::from("flatten");
        Flatten { name, input_shape }
    }
}
impl Layer for Flatten {
    fn box_clone(&self) -> Box<dyn Layer> {
        Box::new(self.clone())
    }

    /// Copies every input element, in logical (row-major) order, into a
    /// 1-D array.
    fn apply(&self, input: &ArrayViewD<f32>) -> ArrayD<f32> {
        Array1::from_iter(input.iter().copied()).into_dyn()
    }

    fn input_shape(&self) -> Vec<usize> {
        self.input_shape.clone()
    }

    fn name(&self) -> &str {
        &self.name
    }

    /// Flattening has no learned parameters.
    fn num_params(&self) -> usize {
        0
    }

    /// Flattening performs no multiplications.
    fn num_muls(&self) -> usize {
        0
    }

    /// Single dimension holding every input element: the product of all
    /// input dimensions (idiomatic `product()` replaces the manual loop).
    fn output_shape(&self) -> Vec<usize> {
        vec![self.input_shape().iter().product()]
    }

    fn to_json(&self) -> LayerJson {
        LayerJson::Flatten {
            input_shape: self.input_shape(),
        }
    }
}
// Tests for the flatten layer.
#[cfg(test)]
mod test {
use super::*;
use ndarray::Array3;
use ndarray_rand::{rand::SeedableRng, rand_distr::Uniform, RandomExt};
use rand::rngs::StdRng;
// Flattens a random (27, 17, 32) tensor and checks the element count:
// 27 * 17 * 32 = 14688.
#[test]
fn flatten_test() {
let seed = 694201337;
let mut rng = StdRng::seed_from_u64(seed);
let input = Array3::random_using((27, 17, 32), Uniform::<f32>::new(-5.0, 5.0), &mut rng);
let flat = Flatten::new(vec![27, 17, 32]);
let output = flat.apply(&input.into_dyn().view());
let n_multiplications = flat.num_muls();
let n_params = flat.num_params();
assert_eq!(output.len(), 14688);
println!(
"
{} \n
# of parameters: {}\n
output dim: {} \n
# of ops: {}\n
output:\n
{}",
flat.name,
n_params,
output.len(),
n_multiplications,
output
);
}
}
| https://github.com/worldcoin/proto-neural-zkp |
src/layers/fully_connected.rs | use ndarray::{Array1, Array2, ArrayD, ArrayViewD, Ix1};
use serde::Serialize;
use super::{Layer, LayerJson};
/// Dense (fully connected) layer: `output = weights · input + biases`.
#[derive(Clone, Serialize)]
pub struct FullyConnected {
// Weight matrix of shape (outputs, inputs).
weights: Array2<f32>,
// Bias vector of length (outputs).
biases: Array1<f32>,
// Fixed label "full".
name: String,
}
impl FullyConnected {
    /// Builds a dense layer from a `(outputs, inputs)` weight matrix and an
    /// `outputs`-length bias vector.
    #[must_use]
    pub fn new(weights: Array2<f32>, biases: Array1<f32>) -> FullyConnected {
        let name = String::from("full");
        FullyConnected {
            weights,
            biases,
            name,
        }
    }
}
impl Layer for FullyConnected {
    fn box_clone(&self) -> Box<dyn Layer> {
        Box::new(self.clone())
    }

    /// Computes `weights · input + biases` for a rank-1 input.
    ///
    /// Borrows the bias vector instead of cloning it — the previous version
    /// copied `biases` on every forward pass. Also fixes the panic-message
    /// typo "flattenened".
    fn apply(&self, input: &ArrayViewD<f32>) -> ArrayD<f32> {
        assert!(input.ndim() == 1, "Input must be a flattened array!");
        assert!(
            self.weights.shape()[1] == input.shape()[0],
            "Input shapes must match (for the dot product to work)!"
        );
        assert!(
            self.weights.shape()[0] == self.biases.shape()[0],
            "Output shapes must match!"
        );
        let input = input.view().into_dimensionality::<Ix1>().unwrap();
        let output = self.weights.dot(&input) + &self.biases;
        output.into_dyn()
    }

    fn name(&self) -> &str {
        &self.name
    }

    fn num_params(&self) -> usize {
        self.weights.len() + self.biases.len()
    }

    /// One multiplication per weight entry.
    fn num_muls(&self) -> usize {
        self.weights.len()
    }

    fn output_shape(&self) -> Vec<usize> {
        assert!(
            self.weights.shape()[0] == self.biases.shape()[0],
            "Output shapes must match!"
        );
        vec![self.weights.shape()[0]]
    }

    fn input_shape(&self) -> Vec<usize> {
        vec![self.weights.shape()[1]]
    }

    fn to_json(&self) -> LayerJson {
        LayerJson::FullyConnected {
            weights: self.weights.clone().into(),
            biases: self.biases.clone().into(),
        }
    }
}
// Tests for the fully connected layer.
#[cfg(test)]
pub mod test {
use super::*;
use ndarray_rand::{rand::SeedableRng, rand_distr::Uniform, RandomExt};
use rand::rngs::StdRng;
// Random 14688 -> 1000 dense pass; exercises `apply` and prints the
// parameter / multiplication counts (no value assertions).
#[test]
fn fully_connected_test() {
let seed = 694201337;
let mut rng = StdRng::seed_from_u64(seed);
let input = Array1::random_using(14688, Uniform::<f32>::new(-10.0, 10.0), &mut rng);
let weights =
Array2::random_using((1000, 14688), Uniform::<f32>::new(-10.0, 10.0), &mut rng);
let biases = Array1::random_using(1000, Uniform::<f32>::new(-10.0, 10.0), &mut rng);
let fully_connected = FullyConnected::new(weights, biases);
let output = fully_connected.apply(&input.into_dyn().view());
let n_params = fully_connected.num_params();
let n_multiplications = fully_connected.num_muls();
println!(
"
{}
# of parameters: {}
output dim: {}x1
# of ops: {}
output:\n{}",
fully_connected.name,
n_params,
output.len(),
n_multiplications,
output
);
}
}
| https://github.com/worldcoin/proto-neural-zkp |
src/layers/maxpool.rs | use ndarray::{s, Array3, ArrayD, ArrayViewD, Ix3};
use ndarray_stats::QuantileExt;
use serde::Serialize;
use super::{Layer, LayerJson};
/// Max-pooling layer with square, non-overlapping windows (stride equals
/// the window side).
#[derive(Clone, Serialize)]
pub struct MaxPool {
// Side length of the square pooling window.
kernel_side: usize,
// Fixed label "max-pool".
name: String,
// Expected input shape, as [height, width, channels].
input_shape: Vec<usize>,
}
impl MaxPool {
    /// Builds a max-pooling layer with a `kernel_side` x `kernel_side`
    /// window for the given input shape.
    #[must_use]
    pub fn new(kernel_side: usize, input_shape: Vec<usize>) -> MaxPool {
        let name = String::from("max-pool");
        MaxPool {
            name,
            kernel_side,
            input_shape,
        }
    }
}
impl Layer for MaxPool {
    fn box_clone(&self) -> Box<dyn Layer> {
        Box::new(self.clone())
    }

    /// Downsamples by taking the maximum over non-overlapping
    /// `kernel_side` x `kernel_side` windows, independently per channel.
    /// Produces an `(h/k, w/k, c)` output.
    fn apply(&self, input: &ArrayViewD<f32>) -> ArrayD<f32> {
        let input = input.clone().into_dimensionality::<Ix3>().unwrap();
        let (h, w, c) = input.dim();
        assert!(h % self.kernel_side == 0, "Height must be divisible by s!");
        assert!(w % self.kernel_side == 0, "Width must be divisible by s!");
        let mut output = Array3::<f32>::zeros((h / self.kernel_side, w / self.kernel_side, c));
        // TODO: turn loops into iterators and parallelize with rayon or
        // ndarray::parallel
        // let h_iter = (0..h).into_par_iter().filter(|x| x % s == 0);
        for i in (0..h).step_by(self.kernel_side) {
            for j in (0..w).step_by(self.kernel_side) {
                for k in 0..c {
                    let a = input.slice(s![i..i + self.kernel_side, j..j + self.kernel_side, k]);
                    // https://docs.rs/ndarray-stats/latest/ndarray_stats/trait.QuantileExt.html#tymethod.max
                    output[[i / self.kernel_side, j / self.kernel_side, k]] = *a.max().unwrap();
                }
            }
        }
        output.into_dyn()
    }

    fn name(&self) -> &str {
        &self.name
    }

    fn num_params(&self) -> usize {
        0
    }

    /// Reported op count: one per input element. NOTE(review): max-pooling
    /// performs comparisons, not multiplications — confirm this is the
    /// intended accounting.
    fn num_muls(&self) -> usize {
        self.input_shape().iter().product()
    }

    /// BUG FIX: previously returned `[w/k, h/k, c]`, transposing height and
    /// width relative to `apply`, which produces `(h/k, w/k, c)`.
    fn output_shape(&self) -> Vec<usize> {
        let input_shape = self.input_shape();
        let h = input_shape[0];
        let w = input_shape[1];
        let c = input_shape[2];
        assert!(h % self.kernel_side == 0, "Height must be divisible by s!");
        assert!(w % self.kernel_side == 0, "Width must be divisible by s!");
        vec![h / self.kernel_side, w / self.kernel_side, c]
    }

    fn input_shape(&self) -> Vec<usize> {
        self.input_shape.clone()
    }

    fn to_json(&self) -> LayerJson {
        LayerJson::MaxPool {
            window: self.kernel_side,
            input_shape: self.input_shape(),
        }
    }
}
// Tests for the max-pooling layer.
#[cfg(test)]
pub mod test {
    use super::*;
    use ndarray_rand::{rand::SeedableRng, rand_distr::Uniform, RandomExt};
    use rand::rngs::StdRng;

    // 2x2 pooling over a random (116, 76, 32) tensor -> (58, 38, 32).
    #[test]
    fn maxpool_test() {
        let seed = 694201337;
        let mut rng = StdRng::seed_from_u64(seed);
        let input = Array3::random_using((116, 76, 32), Uniform::<f32>::new(-5.0, 5.0), &mut rng);
        // BUG FIX: the declared input shape previously read `vec![126, 76, 32]`,
        // which did not match the actual (116, 76, 32) input above.
        let maxpool = MaxPool::new(2, vec![116, 76, 32]);
        let output = maxpool
            .apply(&input.into_dyn().view())
            .into_dimensionality::<Ix3>()
            .unwrap();
        let n_params = maxpool.num_params();
        let n_multiplications = maxpool.num_muls();
        assert_eq!(output.dim(), (58, 38, 32));
        let (dim_x, dim_y, dim_z) = output.dim();
        println!(
            "
{} \n
# of parameters: {}\n
output dim: {}x{}x{}\n
# of ops: {}\n
output:\n
{}",
            maxpool.name, n_params, dim_x, dim_y, dim_z, n_multiplications, output
        );
    }
}
| https://github.com/worldcoin/proto-neural-zkp |
src/layers/mod.rs | use std::fmt::{Display, Formatter, Result};
use erased_serde::serialize_trait_object;
use ndarray::{ArcArray, ArrayD, ArrayViewD, Ix1, Ix2, Ix4};
use serde::{Deserialize, Serialize};
pub mod conv;
pub mod flatten;
pub mod fully_connected;
pub mod maxpool;
pub mod normalize;
pub mod relu;
/// Common interface for all neural-network layers.
pub trait Layer: erased_serde::Serialize {
/// Runs the layer's forward pass on `input`.
#[must_use]
fn apply(&self, input: &ArrayViewD<f32>) -> ArrayD<f32>;
/// Declared input shape of the layer.
fn input_shape(&self) -> Vec<usize>;
/// Human-readable layer label.
#[must_use]
fn name(&self) -> &str;
/// Number of learned parameters.
#[must_use]
fn num_params(&self) -> usize;
/// Number of multiplications (op count) of a forward pass.
#[must_use]
fn num_muls(&self) -> usize;
/// Shape of the forward-pass output.
fn output_shape(&self) -> Vec<usize>;
/// Serializable description of this layer.
#[must_use]
fn to_json(&self) -> LayerJson;
/// Object-safe clone, used by `impl Clone for Box<dyn Layer>`.
fn box_clone(&self) -> Box<dyn Layer>;
}
// Makes `dyn Layer` serializable via erased-serde.
serialize_trait_object!(Layer);
// Renders a layer as one aligned table row:
// name | output shape | #params | #muls.
impl Display for Box<dyn Layer> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
write!(
f,
"{:<20} | {:?}{:<5} | {:<5} | {:<5}",
self.name(),
self.output_shape(),
"",
self.num_params(),
self.num_muls(),
)
}
}
impl Clone for Box<dyn Layer> {
fn clone(&self) -> Self {
// self -> &Box<dyn Layer>
// *self -> Box<dyn Layer>
// **self -> dyn Layer
// &**self -> &dyn Layer
Layer::box_clone(&**self)
}
}
/// Serializable description of a single layer; the `layer_type` tag selects
/// the variant during (de)serialization.
#[derive(Clone, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
#[serde(tag = "layer_type")]
pub enum LayerJson {
/// Convolution with a (c_out, kh, kw, c_in) kernel.
Convolution {
kernel: ArcArray<f32, Ix4>,
input_shape: Vec<usize>,
},
/// Max-pooling with a square window of side `window`.
MaxPool {
window: usize,
input_shape: Vec<usize>,
},
/// Dense layer with weight matrix and bias vector.
FullyConnected {
weights: ArcArray<f32, Ix2>,
biases: ArcArray<f32, Ix1>,
},
Relu {
input_shape: Vec<usize>,
},
Flatten {
input_shape: Vec<usize>,
},
Normalize {
input_shape: Vec<usize>,
},
}
/// Serializable form of a whole network: an ordered list of layer
/// descriptions.
#[derive(Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct NNJson {
pub layers: Vec<LayerJson>,
}
impl TryFrom<LayerJson> for Box<dyn Layer> {
    type Error = ();

    /// Instantiates a concrete layer from its serialized description.
    ///
    /// Currently infallible in practice; the `Result` return leaves room
    /// for future validation.
    fn try_from(value: LayerJson) -> std::result::Result<Self, ()> {
        Ok(match value {
            LayerJson::Convolution {
                kernel,
                input_shape,
            } => Box::new(conv::Convolution::new(kernel.to_owned(), input_shape)),
            LayerJson::MaxPool {
                window,
                input_shape,
                // `window` is `Copy`; the previous `window.to_owned()` was a no-op.
            } => Box::new(maxpool::MaxPool::new(window, input_shape)),
            LayerJson::FullyConnected { weights, biases } => Box::new(
                fully_connected::FullyConnected::new(weights.to_owned(), biases.to_owned()),
            ),
            LayerJson::Flatten { input_shape } => Box::new(flatten::Flatten::new(input_shape)),
            LayerJson::Relu { input_shape } => Box::new(relu::Relu::new(input_shape)),
            LayerJson::Normalize { input_shape } => {
                Box::new(normalize::Normalize::new(input_shape))
            }
        })
    }
}
impl FromIterator<LayerJson> for NNJson {
    /// Collects layer descriptions into a network description
    /// (idiomatic `collect()` replaces the manual push loop).
    fn from_iter<T: IntoIterator<Item = LayerJson>>(iter: T) -> Self {
        Self {
            layers: iter.into_iter().collect(),
        }
    }
}
impl From<NeuralNetwork> for NNJson {
fn from(nn: NeuralNetwork) -> Self {
nn.layers.into_iter().map(|l| l.to_json()).collect()
}
}
impl TryFrom<NNJson> for NeuralNetwork {
    type Error = ();

    /// Rebuilds a network from its serialized form.
    ///
    /// BUG FIX: layer conversion failures are now propagated with `?`
    /// instead of panicking via `.unwrap()`, which defeated the purpose of
    /// the fallible conversion.
    fn try_from(value: NNJson) -> std::result::Result<Self, ()> {
        let layers = value
            .layers
            .into_iter()
            .map(TryInto::try_into)
            .collect::<std::result::Result<Vec<_>, _>>()?;
        Ok(Self { layers })
    }
}
/// A feed-forward network: an ordered pipeline of boxed layers.
#[derive(Clone, Serialize)]
pub struct NeuralNetwork {
// Layers are applied in insertion order by `apply`.
layers: Vec<Box<dyn Layer>>,
}
impl NeuralNetwork {
/// Creates an empty network with no layers.
pub fn new() -> Self {
Self { layers: vec![] }
}
/// Appends `layer` to the end of the pipeline.
pub fn add_layer(&mut self, layer: Box<dyn Layer>) {
self.layers.push(layer);
}
/// Runs the forward pass through every layer in order, printing each
/// layer's summary row, and returns the final activations.
/// NOTE(review): returns `None` unless `dim == 3` — presumably only
/// rank-3 (h, w, c) inputs are supported; confirm why the gate is a
/// caller-supplied flag rather than `input.ndim()`.
pub fn apply(&self, input: &ArrayViewD<f32>, dim: usize) -> Option<ArrayD<f32>> {
if dim == 3 {
let mut output = input.view().into_owned();
for layer in &self.layers {
// TODO: add dimensionality sanity checks
output = layer.apply(&output.view());
println!("{}", layer);
}
Some(output)
} else {
None
}
}
}
impl Default for NeuralNetwork {
fn default() -> Self {
Self::new()
}
}
| https://github.com/worldcoin/proto-neural-zkp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.