use core::integer;
use orion::numbers::fixed_point::implementations::fp8x23wide::math::lut;
use orion::numbers::fixed_point::implementations::fp8x23wide::core::{
HALF, ONE, TWO, FP8x23W, FP8x23WImpl, FP8x23WAdd, FP8x23WSub, FP8x23WMul, FP8x23WDiv,
FP8x23WIntoFelt252, FixedTrait
};
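// Angles are FP8x23W fixed-point values: a u64 magnitude scaled by 2^23
// (ONE = 8388608) plus a sign flag. The constants below encode 2*pi, pi,
// and pi/2 with magnitude ~= value * 2^23.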
const TWO_PI: u64 = 52707178;
const PI: u64 = 26353589;
const HALF_PI: u64 = 13176795;
fn acos(a: FP8x23W) -> FP8x23W {
let asin_arg = (FixedTrait::ONE() - a * a).sqrt();
let asin_res = asin(asin_arg);
if (a.sign) {
FixedTrait::new(PI, false) - asin_res
} else {
asin_res
}
}
fn acos_fast(a: FP8x23W) -> FP8x23W {
let asin_arg = (FixedTrait::ONE() - a * a).sqrt();
let asin_res = asin_fast(asin_arg);
if (a.sign) {
FixedTrait::new(PI, false) - asin_res
} else {
asin_res
}
}
fn asin(a: FP8x23W) -> FP8x23W {
if (a.mag == ONE) {
return FixedTrait::new(HALF_PI, a.sign);
}
let div = (FixedTrait::ONE() - a * a).sqrt();
atan(a / div)
}
fn asin_fast(a: FP8x23W) -> FP8x23W {
if (a.mag == ONE) {
return FixedTrait::new(HALF_PI, a.sign);
}
let div = (FixedTrait::ONE() - a * a).sqrt();
atan_fast(a / div)
}
fn atan(a: FP8x23W) -> FP8x23W {
let mut at = a.abs();
let mut shift = false;
let mut invert = false;
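// Argument reduction: for |a| > 1 use atan(x) = pi/2 - atan(1/x) (undone below
// via `invert`); for x > 0.7 (magnitude 5872026) shift by pi/6 using
// atan(x) = pi/6 + atan((x - t) / (1 + x * t)) with t = sqrt(3)/3
// (magnitude 4843165); the pi/6 shift (magnitude 4392265) is added back below.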
if (at.mag > ONE) {
at = FixedTrait::ONE() / at;
invert = true;
}
if (at.mag > 5872026) {
let sqrt3_3 = FixedTrait::new(4843165, false);
at = (at - sqrt3_3) / (FixedTrait::ONE() + at * sqrt3_3);
shift = true;
}
let r10 = FixedTrait::new(15363, true) * at;
let r9 = (r10 + FixedTrait::new(392482, true)) * at;
let r8 = (r9 + FixedTrait::new(1629064, false)) * at;
let r7 = (r8 + FixedTrait::new(2197820, true)) * at;
let r6 = (r7 + FixedTrait::new(366693, false)) * at;
let r5 = (r6 + FixedTrait::new(1594324, false)) * at;
let r4 = (r5 + FixedTrait::new(11519, false)) * at;
let r3 = (r4 + FixedTrait::new(2797104, true)) * at;
let r2 = (r3 + FixedTrait::new(34, false)) * at;
let mut res = (r2 + FixedTrait::new(8388608, false)) * at;
if (shift) {
res = res + FixedTrait::new(4392265, false);
}
if (invert) {
res = res - FixedTrait::new(HALF_PI, false);
}
FixedTrait::new(res.mag, a.sign)
}
fn atan_fast(a: FP8x23W) -> FP8x23W {
let mut at = a.abs();
let mut shift = false;
let mut invert = false;
if (at.mag > ONE) {
at = FixedTrait::ONE() / at;
invert = true;
}
if (at.mag > 5872026) {
let sqrt3_3 = FixedTrait::new(4843165, false);
at = (at - sqrt3_3) / (FixedTrait::ONE() + at * sqrt3_3);
shift = true;
}
let (start, low, high) = lut::atan(at.mag);
let partial_step = FixedTrait::new(at.mag - start, false) / FixedTrait::new(58720, false);
let mut res = partial_step * FixedTrait::new(high - low, false) + FixedTrait::new(low, false);
if (shift) {
res = res + FixedTrait::new(4392265, false);
}
if (invert) {
res = res - FixedTrait::<FP8x23W>::new(HALF_PI, false);
}
FixedTrait::new(res.mag, a.sign)
}
fn cos(a: FP8x23W) -> FP8x23W {
sin(FixedTrait::new(HALF_PI, false) - a)
}
fn cos_fast(a: FP8x23W) -> FP8x23W {
sin_fast(FixedTrait::new(HALF_PI, false) - a)
}
fn sin(a: FP8x23W) -> FP8x23W {
let a1 = a.mag % TWO_PI;
let (whole_rem, partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI));
let a2 = FixedTrait::new(partial_rem, false);
let partial_sign = whole_rem == 1;
let loop_res = a2 * _sin_loop(a2, 7, FixedTrait::ONE());
FixedTrait::new(loop_res.mag, a.sign ^ partial_sign && loop_res.mag != 0)
}
fn sin_fast(a: FP8x23W) -> FP8x23W {
let a1 = a.mag % TWO_PI;
let (whole_rem, mut partial_rem) = integer::u64_safe_divmod(a1, integer::u64_as_non_zero(PI));
let partial_sign = whole_rem == 1;
if partial_rem >= HALF_PI {
partial_rem = PI - partial_rem;
}
let (start, low, high) = lut::sin(partial_rem);
let partial_step = FixedTrait::new(partial_rem - start, false) / FixedTrait::new(51472, false);
let res = partial_step * (FixedTrait::new(high, false) - FixedTrait::new(low, false))
+ FixedTrait::<FP8x23W>::new(low, false);
FixedTrait::new(res.mag, a.sign ^ partial_sign && res.mag != 0)
}
fn tan(a: FP8x23W) -> FP8x23W {
let sinx = sin(a);
let cosx = cos(a);
assert(cosx.mag != 0, 'tan undefined');
sinx / cosx
}
fn tan_fast(a: FP8x23W) -> FP8x23W {
let sinx = sin_fast(a);
let cosx = cos_fast(a);
assert(cosx.mag != 0, 'tan undefined');
sinx / cosx
}
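// Helper for `sin`: evaluates the Maclaurin series of sin(a)/a via Horner's rule,
// acc_i = 1 - a^2 * acc_(i+1) / ((2i + 2) * (2i + 3)), recursing from i = 7 down
// to i = 0; the caller multiplies the result by `a`.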
fn _sin_loop(a: FP8x23W, i: u64, acc: FP8x23W) -> FP8x23W {
let div = (2 * i + 2) * (2 * i + 3);
let term = a * a * acc / FixedTrait::new_unscaled(div, false);
let new_acc = FixedTrait::ONE() - term;
if (i == 0) {
return new_acc;
}
_sin_loop(a, i - 1, new_acc)
}
#[cfg(test)]
mod tests {
use orion::numbers::fixed_point::implementations::fp8x23wide::helpers::{
assert_precise, assert_relative
};
use super::{
FixedTrait, acos, HALF_PI, ONE, acos_fast, PI, atan_fast, atan, asin, cos, cos_fast, sin,
sin_fast, tan
};
#[test]
fn test_acos() {
let error = Option::Some(84);
let a = FixedTrait::ONE();
assert(acos(a).into() == 0, 'invalid one');
let a = FixedTrait::new(ONE / 2, false);
assert_relative(acos(a), 8784530, 'invalid half', error);
let a = FixedTrait::ZERO();
assert_relative(acos(a), HALF_PI.into(), 'invalid zero', Option::None(()));
let a = FixedTrait::new(ONE / 2, true);
assert_relative(acos(a), 17569060, 'invalid neg half', error);
let a = FixedTrait::new(ONE, true);
assert_relative(acos(a), PI.into(), 'invalid neg one', Option::None(()));
}
#[test]
fn test_acos_fast() {
let error = Option::Some(84);
let a = FixedTrait::ONE();
assert(acos_fast(a).into() == 0, 'invalid one');
let a = FixedTrait::new(ONE / 2, false);
assert_relative(acos_fast(a), 8784530, 'invalid half', error);
let a = FixedTrait::ZERO();
assert_relative(acos_fast(a), HALF_PI.into(), 'invalid zero', Option::None(()));
let a = FixedTrait::new(ONE / 2, true);
assert_relative(acos_fast(a), 17569060, 'invalid neg half', error);
let a = FixedTrait::new(ONE, true);
assert_relative(acos_fast(a), PI.into(), 'invalid neg one', Option::None(()));
}
#[test]
#[should_panic]
fn test_acos_fail() {
let a = FixedTrait::new(2 * ONE, true);
acos(a);
}
#[test]
fn test_atan_fast() {
let error = Option::Some(84);
let a = FixedTrait::new(2 * ONE, false);
assert_relative(atan_fast(a), 9287437, 'invalid two', error);
let a = FixedTrait::ONE();
assert_relative(atan_fast(a), 6588397, 'invalid one', error);
let a = FixedTrait::new(ONE / 2, false);
assert_relative(atan_fast(a), 3889358, 'invalid half', error);
let a = FixedTrait::ZERO();
assert(atan_fast(a).into() == 0, 'invalid zero');
let a = FixedTrait::new(ONE / 2, true);
assert_relative(atan_fast(a), -3889358, 'invalid neg half', error);
let a = FixedTrait::new(ONE, true);
assert_relative(atan_fast(a), -6588397, 'invalid neg one', error);
let a = FixedTrait::new(2 * ONE, true);
assert_relative(atan_fast(a), -9287437, 'invalid neg two', error);
}
#[test]
fn test_atan() {
let a = FixedTrait::new(2 * ONE, false);
assert_relative(atan(a), 9287437, 'invalid two', Option::None(()));
let a = FixedTrait::ONE();
assert_relative(atan(a), 6588397, 'invalid one', Option::None(()));
let a = FixedTrait::new(ONE / 2, false);
assert_relative(atan(a), 3889358, 'invalid half', Option::None(()));
let a = FixedTrait::ZERO();
assert(atan(a).into() == 0, 'invalid zero');
let a = FixedTrait::new(ONE / 2, true);
assert_relative(atan(a), -3889358, 'invalid neg half', Option::None(()));
let a = FixedTrait::new(ONE, true);
assert_relative(atan(a), -6588397, 'invalid neg one', Option::None(()));
let a = FixedTrait::new(2 * ONE, true);
assert_relative(atan(a), -9287437, 'invalid neg two', Option::None(()));
}
#[test]
fn test_asin() {
let error = Option::Some(84);
let a = FixedTrait::ONE();
assert_relative(asin(a), HALF_PI.into(), 'invalid one', Option::None(()));
let a = FixedTrait::new(ONE / 2, false);
assert_relative(asin(a), 4392265, 'invalid half', error);
let a = FixedTrait::ZERO();
assert_precise(asin(a), 0, 'invalid zero', Option::None(()));
let a = FixedTrait::new(ONE / 2, true);
assert_relative(asin(a), -4392265, 'invalid neg half', error);
let a = FixedTrait::new(ONE, true);
assert_relative(asin(a), -HALF_PI.into(), 'invalid neg one', Option::None(()));
}
#[test]
#[should_panic]
fn test_asin_fail() {
let a = FixedTrait::new(2 * ONE, false);
asin(a);
}
#[test]
fn test_cos() {
let a = FixedTrait::new(HALF_PI, false);
assert(cos(a).into() == 0, 'invalid half pi');
let a = FixedTrait::new(HALF_PI / 2, false);
assert_relative(
cos(a), 5931642, 'invalid quarter pi', Option::None(())
);
let a = FixedTrait::new(PI, false);
assert_relative(cos(a), -1 * ONE.into(), 'invalid pi', Option::None(()));
let a = FixedTrait::new(HALF_PI, true);
assert_precise(cos(a), 0, 'invalid neg half pi', Option::None(()));
let a = FixedTrait::new_unscaled(17, false);
assert_relative(cos(a), -2308239, 'invalid 17', Option::None(()));
let a = FixedTrait::new_unscaled(17, true);
assert_relative(cos(a), -2308236, 'invalid -17', Option::None(()));
}
#[test]
fn test_cos_fast() {
let error = Option::Some(84);
let a = FixedTrait::new(HALF_PI, false);
assert(cos_fast(a).into() == 0, 'invalid half pi');
let a = FixedTrait::new(HALF_PI / 2, false);
assert_precise(cos_fast(a), 5931642, 'invalid quarter pi', error);
let a = FixedTrait::new(PI, false);
assert_precise(cos_fast(a), -1 * ONE.into(), 'invalid pi', error);
let a = FixedTrait::new(HALF_PI, true);
assert_precise(cos_fast(a), 0, 'invalid neg half pi', Option::None(()));
let a = FixedTrait::new_unscaled(17, false);
assert_precise(cos_fast(a), -2308239, 'invalid 17', error);
}
#[test]
fn test_sin() {
let a = FixedTrait::new(HALF_PI, false);
assert_precise(sin(a), ONE.into(), 'invalid half pi', Option::None(()));
let a = FixedTrait::new(HALF_PI / 2, false);
assert_precise(
sin(a), 5931642, 'invalid quarter pi', Option::None(())
);
let a = FixedTrait::new(PI, false);
assert(sin(a).into() == 0, 'invalid pi');
let a = FixedTrait::new(HALF_PI, true);
assert_precise(
sin(a), -ONE.into(), 'invalid neg half pi', Option::None(())
);
let a = FixedTrait::new_unscaled(17, false);
assert_precise(sin(a), -8064787, 'invalid 17', Option::None(()));
let a = FixedTrait::new_unscaled(17, true);
assert_precise(sin(a), 8064787, 'invalid -17', Option::None(()));
}
#[test]
fn test_sin_fast() {
let error = Option::Some(84);
let a = FixedTrait::new(HALF_PI, false);
assert_precise(sin_fast(a), ONE.into(), 'invalid half pi', error);
let a = FixedTrait::new(HALF_PI / 2, false);
assert_precise(sin_fast(a), 5931642, 'invalid quarter pi', error);
let a = FixedTrait::new(PI, false);
assert(sin_fast(a).into() == 0, 'invalid pi');
let a = FixedTrait::new(HALF_PI, true);
assert_precise(
sin_fast(a), -ONE.into(), 'invalid neg half pi', error
);
let a = FixedTrait::new_unscaled(17, false);
assert_precise(sin_fast(a), -8064787, 'invalid 17', error);
let a = FixedTrait::new_unscaled(17, true);
assert_precise(sin_fast(a), 8064787, 'invalid -17', error);
}
#[test]
fn test_tan() {
let a = FixedTrait::new(HALF_PI / 2, false);
assert_precise(tan(a), ONE.into(), 'invalid quarter pi', Option::None(()));
let a = FixedTrait::new(PI, false);
assert_precise(tan(a), 0, 'invalid pi', Option::None(()));
let a = FixedTrait::new_unscaled(17, false);
assert_precise(tan(a), 29309069, 'invalid 17', Option::None(()));
let a = FixedTrait::new_unscaled(17, true);
assert_precise(tan(a), -29309106, 'invalid -17', Option::None(()));
}
}
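// Usage sketch (a minimal, hypothetical example assuming the FP8x23W impls above
// are in scope):
//   let quarter_pi = FixedTrait::new(HALF_PI / 2, false);
//   let s = sin(quarter_pi);      // ~0.7071, magnitude ~5931642
//   let c = cos_fast(quarter_pi); // LUT-based variant of cos
//   let t = tan(quarter_pi);      // ~ONE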
use core::integer;
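// HALF_PRIME = (P - 1) / 2, where P = 2^251 + 17 * 2^192 + 1 is the STARK prime;
// felts strictly greater than this threshold are interpreted as negative.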
const HALF_PRIME: felt252 =
1809251394333065606848661391547535052811553607665798349986546028067936010240;
// Returns the sign of a signed `felt252`, interpreted in signed magnitude representation:
// true = negative
// false = positive
fn felt_sign(a: felt252) -> bool {
integer::u256_from_felt252(a) > integer::u256_from_felt252(HALF_PRIME)
}
// Returns the absolute value of a signed `felt252`
fn felt_abs(a: felt252) -> felt252 {
let a_sign = felt_sign(a);
if a_sign {
return a * -1;
} else {
return a;
}
}
#[cfg(test)]
mod tests {
use super::{felt_sign, felt_abs};
#[test]
fn test_sign() {
let min = -1809251394333065606848661391547535052811553607665798349986546028067936010240;
let max = 1809251394333065606848661391547535052811553607665798349986546028067936010240;
assert(felt_sign(min), 'invalid result');
assert(felt_sign(-1), 'invalid result');
assert(!felt_sign(0), 'invalid result');
assert(!felt_sign(1), 'invalid result');
assert(!felt_sign(max), 'invalid result');
}
#[test]
fn test_abs() {
assert(felt_abs(5) == 5, 'abs of pos should be pos');
assert(felt_abs(-5) == 5, 'abs of neg should be pos');
assert(felt_abs(0) == 0, 'abs of 0 should be 0');
}
}
mod tensor;
mod nn;
mod ml;
mod matrix;
mod vec;
mod sequence;
use orion::numbers::NumberTrait;
use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl};
struct MutMatrix<T> {
data: NullableVec<T>,
rows: usize,
cols: usize,
}
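// Elements are stored row-major in the backing NullableVec:
// the entry at (row, col) lives at index row * cols + col.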
impl MutMatrixDestruct<T, +Drop<T>> of Destruct<MutMatrix<T>> {
fn destruct(self: MutMatrix<T>) nopanic {
self.data.destruct()
}
}
impl MutMatrixImpl<
T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +PartialOrd<T>
> of MutMatrixTrait<T> {
fn new(rows: usize, cols: usize) -> MutMatrix<T> {
MutMatrix { data: NullableVecImpl::new(), rows: rows, cols: cols }
}
fn get(ref self: MutMatrix<T>, row: usize, col: usize) -> Option<T> {
if row >= self.rows || col >= self.cols {
Option::None
} else {
self.data.get(row * self.cols + col)
}
}
fn at(ref self: MutMatrix<T>, row: usize, col: usize) -> T {
match self.get(row, col) {
Option::Some(val) => val,
Option::None => NumberTrait::zero(),
}
}
fn matrix_vector_product<+Mul<T>, +Add<T>, +Div<T>, +AddEq<T>>(
ref self: MutMatrix<T>, ref vec: NullableVec<T>
) -> NullableVec<T> {
assert(self.cols == vec.len, 'wrong matrix shape for dot');
let m = self.rows;
let n = self.cols;
let mut result_vec = VecTrait::new();
let mut i = 0_usize;
while i != m {
let mut sum: T = NumberTrait::zero();
let mut k = 0_usize;
while k != n {
sum += MutMatrixImpl::at(ref self, i, k) * VecTrait::at(ref vec, k);
k += 1;
};
VecTrait::set(ref result_vec, i, sum);
i += 1;
};
result_vec
}
fn set(ref self: MutMatrix<T>, row: usize, col: usize, value: T) {
if row < self.rows && col < self.cols {
let index = row * self.cols + col;
self.data.set(index, value)
}
}
fn shape(self: MutMatrix<T>) -> (usize, usize) {
(self.rows, self.cols)
}
fn argmax(ref self: MutMatrix<T>, axis: usize) -> Span<usize> {
assert(axis < 2, 'Invalid axis');
let mut result: Array<usize> = ArrayTrait::new();
if axis == 0 {
let mut col: usize = 0;
while col != self.cols {
let mut max_value = self.get(0, col);
let mut max_value = match max_value {
Option::Some(val) => val,
Option::None => NumberTrait::min_value(),
};
let mut max_index = 0;
let mut row: usize = 1;
while row != self.rows {
let mut value = self.get(row, col);
let mut value = match value {
Option::Some(val) => val,
Option::None => NumberTrait::min_value(),
};
if value > max_value {
max_value = value;
max_index = row;
}
row += 1;
};
result.append(max_index);
col += 1;
};
return result.span();
}
let mut row: usize = 0;
while row != self.rows {
let mut max_value = self.get(row, 0);
let mut max_value = match max_value {
Option::Some(val) => val,
Option::None => NumberTrait::min_value(),
};
let mut max_index = 0;
let mut col: usize = 1;
while col != self.cols {
let mut value = self.get(row, col);
let mut value = match value {
Option::Some(val) => val,
Option::None => NumberTrait::min_value(),
};
if value > max_value {
max_value = value;
max_index = col;
}
col += 1;
};
result.append(max_index);
row += 1;
};
result.span()
}
fn softmax<+AddEq<T>, +Div<T>>(ref self: MutMatrix<T>, axis: usize) -> MutMatrix<T> {
assert(axis < 2, 'Invalid axis');
let mut result = MutMatrixImpl::new(self.rows, self.cols);
if axis == 0 {
let mut col: usize = 0;
while col != self.cols {
let mut sum_exp = NumberTrait::zero();
let mut row: usize = 0;
while row != self.rows {
let value = self.get(row, col).unwrap().into();
sum_exp += value.exp();
row += 1;
};
row = 0;
while row != self.rows {
let value = self.get(row, col).unwrap().into();
let softmax_value = (value.exp() / sum_exp).into();
result.set(row, col, softmax_value);
row += 1;
};
col += 1;
};
} else {
let mut row: usize = 0;
while row != self.rows {
let mut sum_exp = NumberTrait::zero();
let mut col: usize = 0;
while col != self.cols {
let value = self.get(row, col).unwrap().into();
sum_exp += value.exp();
col += 1;
};
col = 0;
while col != self.cols {
let value = self.get(row, col).unwrap().into();
let softmax_value = (value.exp() / sum_exp).into();
result.set(row, col, softmax_value);
col += 1;
};
row += 1;
};
}
result
}
fn softmax_zero<+AddEq<T>, +Div<T>, +PartialEq<T>>(
ref self: MutMatrix<T>, axis: usize
) -> MutMatrix<T> {
assert(axis < 2, 'Invalid axis');
let mut result = MutMatrixImpl::new(self.rows, self.cols);
if axis == 0 {
let mut col: usize = 0;
while col != self.cols {
let mut sum_exp = NumberTrait::zero();
let mut row: usize = 0;
while row != self.rows {
let value = self.get(row, col).unwrap().into();
if value != NumberTrait::zero() {
sum_exp += value.exp();
}
row += 1;
};
row = 0;
while row != self.rows {
let value = self.get(row, col).unwrap().into();
if value != NumberTrait::zero() {
let softmax_value = (value.exp() / sum_exp).into();
result.set(row, col, softmax_value);
} else {
result.set(row, col, NumberTrait::zero());
}
row += 1;
};
col += 1;
};
} else {
let mut row: usize = 0;
while row != self.rows {
let mut sum_exp = NumberTrait::zero();
let mut col: usize = 0;
while col != self.cols {
let value = self.get(row, col).unwrap().into();
if value != NumberTrait::zero() {
sum_exp += value.exp();
}
col += 1;
};
col = 0;
while col != self.cols {
let value = self.get(row, col).unwrap().into();
if value != NumberTrait::zero() {
let softmax_value = (value.exp() / sum_exp).into();
result.set(row, col, softmax_value);
} else {
result.set(row, col, NumberTrait::zero());
}
col += 1;
};
row += 1;
};
}
result
}
fn sigmoid<+Mul<T>, +Add<T>, +Div<T>>(ref self: MutMatrix<T>) -> MutMatrix<T> {
let mut result = MutMatrixImpl::new(self.rows, self.cols);
let mut row: usize = 0;
while row != self.rows {
let mut col: usize = 0;
while col != self.cols {
let value = self.get(row, col);
if value.is_some() {
let value = NumberTrait::one()
/ (NumberTrait::one() + (value.unwrap() * NumberTrait::neg_one()).exp());
result.set(row, col, value);
}
col += 1;
};
row += 1;
};
result
}
}
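// Usage sketch (a minimal, hypothetical example assuming an FP16x16 NumberTrait
// impl is in scope):
//   let mut m: MutMatrix<FP16x16> = MutMatrixImpl::new(2, 2);
//   m.set(0, 0, FixedTrait::new_unscaled(1, false));
//   m.set(1, 1, FixedTrait::new_unscaled(3, false));
//   let winners = m.argmax(0); // per-column row index of the max value
//   let probs = m.softmax(1);  // row-wise softmax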
mod tree_ensemble;
mod linear;
mod svm;
mod normalizer;
use orion::operators::ml::tree_ensemble::core::{
TreeEnsemble, TreeEnsembleAttributes, TreeEnsembleImpl, NODE_MODES
};
use orion::operators::ml::tree_ensemble::tree_ensemble_classifier::{
TreeEnsembleClassifier, TreeEnsembleClassifierImpl, TreeEnsembleClassifierTrait
};
use orion::operators::ml::tree_ensemble::tree_ensemble_regressor::{
TreeEnsembleRegressor, TreeEnsembleRegressorImpl, TreeEnsembleRegressorTrait, AGGREGATE_FUNCTION
};
use orion::operators::ml::linear::linear_regressor::{
LinearRegressorTrait, LinearRegressorImpl, LinearRegressor
};
use orion::operators::ml::linear::linear_classifier::{
LinearClassifierTrait, LinearClassifierImpl, LinearClassifier
};
use orion::operators::ml::normalizer::normalizer::{NormalizerTrait, NORM};
#[derive(Copy, Drop)]
enum POST_TRANSFORM {
NONE,
SOFTMAX,
LOGISTIC,
SOFTMAXZERO,
PROBIT,
}
mod linear_regressor;
mod linear_classifier;
use core::array::ArrayTrait;
use core::array::SpanTrait;
use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait};
use orion::numbers::NumberTrait;
use orion::operators::tensor::{I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, FP16x16TensorAdd};
use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait};
use orion::operators::nn::{NNTrait, FP16x16NN};
use orion::operators::ml::POST_TRANSFORM;
struct LinearClassifier<T> {
classlabels: Option<Span<usize>>,
coefficients: Span<T>,
intercepts: Option<Span<T>>,
multi_class: usize,
post_transform: POST_TRANSFORM,
}
trait LinearClassifierTrait<T> {
fn predict(classifier: LinearClassifier<T>, X: Tensor<T>) -> (Span<usize>, Tensor<T>);
}
impl LinearClassifierImpl<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Add<T>,
+TensorTrait<usize>,
+TensorTrait<T>,
+AddEq<T>,
+Div<T>,
+Mul<T>,
+Add<Tensor<T>>,
+NNTrait<T>
> of LinearClassifierTrait<T> {
fn predict(classifier: LinearClassifier<T>, X: Tensor<T>) -> (Span<usize>, Tensor<T>) {
let n: usize = classifier.coefficients.len() / *(X.shape).at(1);
let mut shape = ArrayTrait::<usize>::new();
shape.append(n);
shape.append(*(X.shape).at(1));
let mut coefficients = TensorTrait::new(shape.span(), classifier.coefficients);
let coefficients = coefficients.transpose(array![1, 0].span());
let mut scores = X.matmul(@coefficients);
match classifier.intercepts {
Option::Some(intercepts) => {
let mut shape = ArrayTrait::<usize>::new();
shape.append(1);
shape.append(intercepts.len());
let intercepts = TensorTrait::new(shape.span(), intercepts);
scores = TensorTrait::add(scores, intercepts);
},
Option::None => {},
};
let (n_classes, classlabels) = match classifier.classlabels {
Option::Some(classlabels) => { (classlabels.len(), classlabels) },
Option::None => { (0, ArrayTrait::<usize>::new().span()) },
};
if *coefficients.shape.at(1) == 1 && n_classes == 2 {
let mut new_scores = array![];
loop {
match scores.data.pop_front() {
Option::Some(item) => {
new_scores.append(NumberTrait::neg(*item));
new_scores.append(*item);
},
Option::None => { break; },
}
};
scores = TensorTrait::new(array![*scores.shape.at(0), 2].span(), new_scores.span());
}
scores = match classifier.post_transform {
POST_TRANSFORM::NONE => { scores },
POST_TRANSFORM::SOFTMAX => { NNTrait::softmax(@scores, Option::Some(1)) },
POST_TRANSFORM::LOGISTIC => { NNTrait::sigmoid(@scores) },
POST_TRANSFORM::SOFTMAXZERO => { NNTrait::softmax_zero(@scores, 1) },
POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'),
};
let mut labels_list = array![];
if *scores.shape.at(1) > 1 {
let mut labels = scores.argmax(1, Option::None, Option::None);
loop {
match labels.data.pop_front() {
Option::Some(i) => {
labels_list.append(*classlabels[(*i).try_into().unwrap()]);
},
Option::None => { break; }
};
};
} else {
let mut i = 0;
match classifier.post_transform {
POST_TRANSFORM::NONE => {
while i != scores
.data
.len() {
if *scores.data.at(i) >= NumberTrait::zero() {
labels_list.append(*classlabels[0]);
} else {
labels_list.append(0);
}
i += 1;
};
},
POST_TRANSFORM::SOFTMAX => {
while i != scores
.data
.len() {
if *scores.data.at(i) >= NumberTrait::half() {
labels_list.append(*classlabels[0]);
} else {
labels_list.append(0);
}
i += 1;
};
},
POST_TRANSFORM::LOGISTIC => {
while i != scores
.data
.len() {
if *scores.data.at(i) >= NumberTrait::half() {
labels_list.append(*classlabels[0]);
} else {
labels_list.append(0);
}
i += 1;
};
},
POST_TRANSFORM::SOFTMAXZERO => {
while i != scores
.data
.len() {
if *scores.data.at(i) >= NumberTrait::half() {
labels_list.append(*classlabels[0]);
} else {
labels_list.append(0);
}
i += 1;
};
},
POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'),
};
}
(labels_list.span(), scores)
}
}
fn max(a: usize, b: usize) -> usize {
if a > b {
a
} else {
b
}
}
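// Usage sketch (hypothetical spans, assuming FP16x16 tensor and NN impls in scope):
//   let classifier = LinearClassifier {
//       classlabels: Option::Some(array![0, 1].span()),
//       coefficients: coefs,              // n_classes * n_features values
//       intercepts: Option::Some(biases), // one per class
//       multi_class: 0,
//       post_transform: POST_TRANSFORM::SOFTMAX,
//   };
//   let (labels, scores) = LinearClassifierTrait::predict(classifier, X);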
use core::array::ArrayTrait;
use core::clone::Clone;
use core::traits::Into;
use core::array::SpanTrait;
use core::dict::Felt252DictTrait;
use core::dict::Felt252DictEntryTrait;
use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait};
use orion::numbers::NumberTrait;
use orion::operators::tensor::{I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, FP16x16TensorAdd};
use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait};
use core::debug::PrintTrait;
use orion::operators::nn::{NNTrait, FP16x16NN};
use orion::operators::ml::POST_TRANSFORM;
struct LinearRegressor<T> {
coefficients: Span<T>,
intercepts: Option<Span<T>>,
target: usize,
post_transform: POST_TRANSFORM,
}
trait LinearRegressorTrait<T> {
fn predict(regressor: LinearRegressor<T>, X: Tensor<T>) -> Tensor<T>;
}
impl LinearRegressorImpl<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Add<T>,
+TensorTrait<usize>,
+TensorTrait<T>,
+PrintTrait<T>,
+AddEq<T>,
+Div<T>,
+Mul<T>,
+Add<Tensor<T>>,
+NNTrait<T>,
> of LinearRegressorTrait<T> {
fn predict(regressor: LinearRegressor<T>, X: Tensor<T>) -> Tensor<T> {
let n: usize = regressor.coefficients.len() / regressor.target;
let mut shape = ArrayTrait::<usize>::new();
shape.append(regressor.target);
shape.append(n);
let mut coefficients = TensorTrait::new(shape.span(), regressor.coefficients);
let coefficients = coefficients.transpose(array![1, 0].span());
let mut score = X.matmul(@coefficients);
match regressor.intercepts {
Option::Some(intercepts) => {
let mut shape: Array<usize> = array![];
shape.append(1);
shape.append(intercepts.len());
let intercepts = TensorTrait::new(shape.span(), intercepts);
score = TensorTrait::add(score, intercepts);
},
Option::None => {},
};
let score = match regressor.post_transform {
POST_TRANSFORM::NONE => score,
POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@score, Option::Some(1)),
POST_TRANSFORM::LOGISTIC => NNTrait::sigmoid(@score),
POST_TRANSFORM::SOFTMAXZERO => NNTrait::softmax_zero(@score, 1),
POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'),
};
score
}
}
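// Usage sketch (hypothetical spans; predict computes X . coefficients^T
// + intercepts, then applies post_transform):
//   let regressor = LinearRegressor {
//       coefficients: coefs, // target * n_features values
//       intercepts: Option::Some(biases),
//       target: 1,
//       post_transform: POST_TRANSFORM::NONE,
//   };
//   let scores = LinearRegressorTrait::predict(regressor, X);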
mod normalizer;
use core::array::ArrayTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{TensorTrait, Tensor};
enum NORM {
MAX,
L1,
L2,
}
trait NormalizerTrait<T> {
fn predict(X: Tensor<T>, norm: NORM) -> Tensor<T>;
}
impl NormalizerImpl<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+TensorTrait<T>,
+AddEq<T>,
+Div<Tensor<T>>,
+Mul<T>
> of NormalizerTrait<T> {
fn predict(X: Tensor<T>, norm: NORM) -> Tensor<T> {
assert(X.shape.len() == 2, 'input should be 2D: NxC');
let normalized_tensor = match norm {
NORM::MAX => { norm_max(X) },
NORM::L1 => { norm_l1(X) },
NORM::L2 => { norm_l2(X) },
};
return normalized_tensor;
}
}
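// Row-wise normalization of the NxC input, one divisor per row:
//   MAX: x / max(|x|)   L1: x / sum(|x|)   L2: x / sqrt(sum(x^2))
// Each divisor is clamped below by a one-ulp epsilon (from_felt(1)) to avoid
// division by zero.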
fn norm_max<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+TensorTrait<T>,
+PartialOrd<T>,
+Div<Tensor<T>>,
>(
X: Tensor<T>
) -> Tensor<T> {
let div_data = reduce_max_2D_axis_1(X.abs());
let div = TensorTrait::new(
array![*X.shape.at(0), (div_data.len() / *X.shape.at(0))].span(), div_data
);
let epsilon = TensorTrait::new(array![1, 1].span(), array![NumberTrait::from_felt(1)].span());
let safe_div = TensorTrait::max(tensors: array![div, epsilon].span());
return X / safe_div;
}
fn norm_l1<
T,
MAG,
+Drop<T>,
+Copy<T>,
+AddEq<T>,
+NumberTrait<T, MAG>,
+TensorTrait<T>,
+PartialOrd<T>,
+Div<Tensor<T>>,
>(
X: Tensor<T>
) -> Tensor<T> {
let div_data = reduce_sum_2D_axis_1(X.abs());
let div = TensorTrait::new(
array![*X.shape.at(0), (div_data.len() / *X.shape.at(0))].span(), div_data
);
let epsilon = TensorTrait::new(array![1, 1].span(), array![NumberTrait::from_felt(1)].span());
let safe_div = TensorTrait::max(tensors: array![div, epsilon].span());
return X / safe_div;
}
fn norm_l2<
T,
MAG,
+Drop<T>,
+Copy<T>,
+AddEq<T>,
+NumberTrait<T, MAG>,
+TensorTrait<T>,
+PartialOrd<T>,
+Div<Tensor<T>>,
+Mul<T>
>(
X: Tensor<T>
) -> Tensor<T> {
let div_data = reduce_sum_2D_axis_1(square(X));
let div = TensorTrait::new(
array![*X.shape.at(0), (div_data.len() / *X.shape.at(0))].span(), div_data
);
let epsilon = TensorTrait::new(array![1, 1].span(), array![NumberTrait::from_felt(1)].span());
let safe_div = TensorTrait::max(tensors: array![div.sqrt(), epsilon].span());
return X / safe_div;
}
fn reduce_max_2D_axis_1<
T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +PartialOrd<T>,
>(
X: Tensor<T>
) -> Span<T> {
let mut new_data = ArrayTrait::new();
let N = *X.shape.at(0);
let C = *X.shape.at(1);
let mut i = 0;
while i != N {
let max = max(SpanTrait::slice(X.data, i * C, C));
new_data.append(max);
i += 1;
};
return new_data.span();
}
fn max<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +PartialOrd<T>,>(
mut a: Span<T>
) -> T {
assert(a.len() > 0, 'span cannot be empty');
let mut max = *a.at(0);
loop {
match a.pop_front() {
Option::Some(v) => { if *v > max {
max = *v;
}; },
Option::None => { break max; }
};
}
}
fn sum<T, MAG, +Drop<T>, +Copy<T>, +AddEq<T>, +NumberTrait<T, MAG>,>(mut a: Span<T>) -> T {
assert(a.len() > 0, 'span cannot be empty');
let mut sum = NumberTrait::zero();
loop {
match a.pop_front() {
Option::Some(v) => { sum += *v; },
Option::None => { break sum; }
};
}
}
fn square<
T,
MAG,
+Drop<T>,
+Copy<T>,
+AddEq<T>,
+NumberTrait<T, MAG>,
+TensorTrait<T>,
+PartialOrd<T>,
+Mul<T>
>(
mut a: Tensor<T>
) -> Tensor<T> {
let mut arr = ArrayTrait::new();
loop {
match a.data.pop_front() {
Option::Some(v) => { arr.append(*v * *v); },
Option::None => { break TensorTrait::new(a.shape, arr.span()); }
};
}
}
fn reduce_sum_2D_axis_1<
T, MAG, +Drop<T>, +Copy<T>, +AddEq<T>, +NumberTrait<T, MAG>, +TensorTrait<T>,
>(
X: Tensor<T>
) -> Span<T> {
let mut new_data = ArrayTrait::new();
let N = *X.shape.at(0);
let C = *X.shape.at(1);
let mut i = 0;
while i != N {
let sum = sum(SpanTrait::slice(X.data, i * C, C));
new_data.append(sum);
i += 1;
};
return new_data.span();
}
mod core;
mod svm_regressor;
mod svm_classifier;
use orion::numbers::NumberTrait;
use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait};
use orion::operators::tensor::{
TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor
};
use orion::utils::get_row;
#[derive(Copy, Drop)]
enum KERNEL_TYPE {
LINEAR,
POLY,
RBF,
SIGMOID,
}
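// With kernel_params = [gamma, coef0, degree], kernel_dot computes:
//   LINEAR:  <A, B>
//   POLY:    (gamma * <A, B> + coef0) ^ degree
//   RBF:     exp(-gamma * ||A - B||^2)
//   SIGMOID: tanh(gamma * <A, B> + coef0)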
fn kernel_dot<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+Add<T>,
+TensorTrait<T>,
+AddEq<T>,
+Mul<T>,
+Neg<T>,
+Sub<T>,
>(
kernel_params: Span<T>, pA: Span<T>, pB: Span<T>, kernel: KERNEL_TYPE
) -> T {
let s = match kernel {
KERNEL_TYPE::LINEAR => sv_dot(pA, pB),
KERNEL_TYPE::POLY => {
let mut s = sv_dot(pA, pB);
s = s * *kernel_params.at(0) + *kernel_params.at(1);
s.pow(*kernel_params.at(2))
},
KERNEL_TYPE::RBF => {
let mut s = squared_diff(pA, pB);
NumberTrait::exp(-*kernel_params.at(0) * s)
},
KERNEL_TYPE::SIGMOID => {
let mut s = sv_dot(pA, pB);
s = s * *kernel_params.at(0) + *kernel_params.at(1);
NumberTrait::tanh(s)
},
};
s
}
fn sv_dot<
T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Mul<T>,
>(
pA: Span<T>, pB: Span<T>
) -> T {
let mut i = 0;
let mut sum = NumberTrait::zero();
while i != pA.len() {
sum = sum + *pA.at(i) * *pB.at(i);
i += 1;
};
sum
}
fn squared_diff<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+Add<T>,
+TensorTrait<T>,
+AddEq<T>,
+Mul<T>,
+Sub<T>,
>(
pA: Span<T>, pB: Span<T>
) -> T {
let mut i = 0;
let mut sum = NumberTrait::zero();
while i != pA.len() {
sum = sum + (*pA.at(i) - *pB.at(i)).pow(NumberTrait::one() + NumberTrait::one());
i += 1;
};
sum
}
use core::array::ArrayTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{
TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor
};
use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait};
use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl};
use orion::operators::matrix::{MutMatrix, MutMatrixImpl};
use orion::numbers::{FP64x64, FP64x64Impl};
use orion::operators::tensor::implementations::tensor_fp64x64::{FP64x64Tensor};
use orion::operators::nn::{NNTrait, FP16x16NN, FP64x64NN};
use orion::utils::get_row;
use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE};
use orion::operators::ml::POST_TRANSFORM;
struct SVMClassifier<T> {
classlabels: Span<usize>,
coefficients: Span<T>,
kernel_params: Span<T>,
kernel_type: KERNEL_TYPE,
post_transform: POST_TRANSFORM,
prob_a: Span<T>,
prob_b: Span<T>,
rho: Span<T>,
support_vectors: Span<T>,
vectors_per_class: Option<Span<usize>>,
}
enum MODE {
SVM_LINEAR,
SVM_SVC,
}
trait SVMClassifierTrait<T> {
fn predict(ref self: SVMClassifier<T>, X: Tensor<T>) -> (Span<usize>, Tensor<T>);
}
impl SVMClassifierImpl<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+Into<usize, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Add<T>,
+TensorTrait<T>,
+AddEq<T>,
+Div<T>,
+Mul<T>,
+Neg<T>,
+Sub<T>,
+NNTrait<T>,
> of SVMClassifierTrait<T> {
fn predict(ref self: SVMClassifier<T>, X: Tensor<T>) -> (Span<usize>, Tensor<T>) {
let mut vector_count_ = 0;
let class_count_ = max(self.classlabels.len(), 1);
let mut starting_vector_: Array<usize> = array![];
let (vectors_per_class_, starting_vector_) = match self.vectors_per_class {
Option::Some(vectors_per_class) => {
let mut i = 0;
while i != vectors_per_class.len() {
starting_vector_.append(vector_count_);
vector_count_ += *vectors_per_class.at(i);
i += 1;
};
(vectors_per_class, starting_vector_.span())
},
Option::None => { (array![].span(), array![].span()) },
};
let (mode, kernel_type_, sv, coefs) = if vector_count_ > 0 {
let mode = MODE::SVM_SVC;
let kernel_type_ = self.kernel_type;
let sv = TensorTrait::new(
array![vector_count_, self.support_vectors.len() / vector_count_].span(),
self.support_vectors
);
let coefs = TensorTrait::new(
array![self.coefficients.len() / vector_count_, vector_count_].span(),
self.coefficients
);
(mode, kernel_type_, sv, coefs)
} else {
let mode = MODE::SVM_LINEAR;
let kernel_type_ = KERNEL_TYPE::LINEAR;
let sv = TensorTrait::new(
array![self.support_vectors.len()].span(), self.support_vectors
);
let coefs = TensorTrait::new(
array![class_count_, self.coefficients.len() / class_count_].span(),
self.coefficients
);
(mode, kernel_type_, sv, coefs)
};
let weights_are_all_positive_ = (min(self.coefficients) >= NumberTrait::zero());
let (res, votes) = match mode {
MODE::SVM_LINEAR => {
let mut res: Array<T> = array![];
let mut n = 0;
while n != *X.shape.at(0) {
let mut x_n = get_row(@X, n);
let scores = run_linear(ref self, x_n, coefs, class_count_, kernel_type_);
let mut i = 0;
while i != scores.len() {
res.append(*scores.at(i));
i += 1;
};
n += 1;
};
(
TensorTrait::new(array![*X.shape.at(0), class_count_].span(), res.span()),
Option::None
)
},
MODE::SVM_SVC => {
let mut res: Array<T> = array![];
let mut votes: Array<T> = array![];
let mut n = 0;
while n != *X.shape.at(0) {
let mut x_n = get_row(@X, n);
let (scores, mut vote) = run_svm(
ref self,
x_n,
sv,
vector_count_,
kernel_type_,
class_count_,
starting_vector_,
coefs,
vectors_per_class_
);
let mut i = 0;
while i != scores.len() {
res.append(*scores.at(i));
i += 1;
};
let mut i = 0;
while i != vote.len() {
votes.append(vote.at(i));
i += 1;
};
n += 1;
};
(
TensorTrait::new(
array![*X.shape.at(0), class_count_ * (class_count_ - 1) / 2].span(),
res.span()
),
Option::Some(
TensorTrait::new(array![*X.shape.at(0), class_count_].span(), votes.span())
)
)
},
};
let (scores, has_proba) = match mode {
MODE::SVM_LINEAR => { (res, false) },
MODE::SVM_SVC => {
let (scores, has_proba) = if self.prob_a.len() > 0 {
let mut scores: Array<T> = array![];
let mut n = 0;
while n != *res.shape.at(0) {
let res_n = get_row(@res, n);
let mut s = probabilities(ref self, res_n, class_count_);
let mut i = 0;
while i != s.len() {
scores.append(s.at(i));
i += 1;
};
n += 1;
};
(
TensorTrait::new(
array![*res.shape.at(0), scores.len() / *res.shape.at(0)].span(),
scores.span()
),
true
)
} else {
(res, false)
};
(scores, has_proba)
},
};
let mut labels: Array<usize> = array![];
let mut final_scores: Array<T> = array![];
let mut n = 0;
while n != *scores.shape.at(0) {
let mut scores_n = get_row(@scores, n);
match votes {
Option::Some(votes) => {
let mut votes_n = get_row(@votes, n);
let (label, new_scores) = compute_final_scores(
ref self,
votes_n,
scores_n,
weights_are_all_positive_,
has_proba,
self.classlabels
);
let mut i = 0;
while i != new_scores.data.len() {
final_scores.append(*new_scores.data.at(i));
i += 1;
};
labels.append(label);
},
Option::None => {
let (label, new_scores) = compute_final_scores(
ref self,
array![].span(),
scores_n,
weights_are_all_positive_,
has_proba,
self.classlabels
);
let mut i = 0;
while i != new_scores.data.len() {
final_scores.append(*new_scores.data.at(i));
i += 1;
};
labels.append(label);
},
}
n += 1;
};
let labels = labels.span();
if self.classlabels.len() > 0 {
let mut class_labels: Array<usize> = array![];
let mut i = 0;
while i != labels.len() {
class_labels.append(*self.classlabels.at(*labels.at(i)));
i += 1;
};
return (
class_labels.span(),
TensorTrait::new(
array![*X.shape.at(0), final_scores.len() / *X.shape.at(0)].span(),
final_scores.span()
)
);
}
(
labels,
TensorTrait::new(
array![*X.shape.at(0), final_scores.len() / *X.shape.at(0)].span(),
final_scores.span()
)
)
}
}
fn run_svm<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+Add<T>,
+TensorTrait<T>,
+AddEq<T>,
+Mul<T>,
+Neg<T>,
+Sub<T>,
+PartialOrd<T>,
>(
ref self: SVMClassifier<T>,
X: Span<T>,
sv: Tensor<T>,
vector_count_: usize,
kernel: KERNEL_TYPE,
class_count_: usize,
starting_vector_: Span<usize>,
coefs: Tensor<T>,
vectors_per_class_: Span<usize>
) -> (Array<T>, NullableVec<T>) {
let mut evals = 0;
let mut kernels: Array<T> = array![];
let mut j = 0;
while j != vector_count_ {
let sv_j = get_row(@sv, j);
kernels.append(kernel_dot(self.kernel_params, X, sv_j, kernel));
j += 1;
};
let kernels = kernels.span();
let mut scores: Array<T> = array![];
let mut votes = VecTrait::new();
VecTrait::set(ref votes, class_count_ - 1, NumberTrait::zero());
let mut i = 0;
while i != class_count_ {
let si_i = *starting_vector_.at(i);
let class_i_sc = *vectors_per_class_.at(i);
let mut j = i + 1;
while j != class_count_ {
let si_j = *starting_vector_.at(j);
let class_j_sc = *vectors_per_class_.at(j);
let s1 = dot_start_end(
coefs.data,
kernels,
(j - 1) * *coefs.shape.at(0) + si_i,
(j - 1) * *coefs.shape.at(0) + si_i + class_i_sc,
si_i,
si_i + class_i_sc
);
let s2 = dot_start_end(
coefs.data,
kernels,
i * *coefs.shape.at(0) + si_j,
i * *coefs.shape.at(0) + si_j + class_j_sc,
si_j,
si_j + class_j_sc
);
let s = *self.rho.at(evals) + s1 + s2;
scores.append(s);
if s > NumberTrait::zero() {
VecTrait::set(ref votes, i, VecTrait::at(ref votes, i) + NumberTrait::one());
} else {
VecTrait::set(ref votes, j, VecTrait::at(ref votes, j) + NumberTrait::one());
}
evals += 1;
j += 1;
};
i += 1;
};
(scores, votes)
}
fn run_linear<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+Add<T>,
+TensorTrait<T>,
+AddEq<T>,
+Mul<T>,
+Neg<T>,
+Sub<T>,
>(
ref self: SVMClassifier<T>,
X: Span<T>,
coefs: Tensor<T>,
class_count_: usize,
kernel: KERNEL_TYPE
) -> Array<T> {
let mut scores: Array<T> = array![];
let mut j = 0;
while j != class_count_ {
let coefs_j = get_row(@coefs, j);
let d = kernel_dot(self.kernel_params, X, coefs_j, kernel);
let score = *self.rho.at(0) + d;
scores.append(score);
j += 1;
};
scores
}
fn compute_final_scores<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+NNTrait<T>,
+Into<usize, MAG>,
+Add<T>,
+TensorTrait<T>,
+AddEq<T>,
+Mul<T>,
+Neg<T>,
+Sub<T>,
+Div<T>,
+PartialOrd<T>,
>(
ref self: SVMClassifier<T>,
votes: Span<T>,
scores: Span<T>,
weights_are_all_positive_: bool,
has_proba: bool,
classlabels: Span<usize>
) -> (usize, Tensor<T>) {
let (max_class, max_weight) = if votes.len() > 0 {
let max_class = argmax_span(votes);
let max_weight = *votes.at(max_class);
(max_class, max_weight)
} else {
let max_class = argmax_span(scores);
let max_weight = *scores.at(max_class);
(max_class, max_weight)
};
let (label, write_additional_scores) = if self.rho.len() == 1 {
let (label, write_additional_scores) = set_score_svm(
max_weight, max_class, weights_are_all_positive_, has_proba, classlabels, 1, 0
);
(label, write_additional_scores)
} else if classlabels.len() > 0 {
let label = *classlabels.at(max_class);
(label, 4)
} else {
(max_class, 4)
};
let new_scores = write_scores(
scores.len(),
TensorTrait::new(array![scores.len()].span(), scores),
self.post_transform,
write_additional_scores
);
(label, new_scores)
}
fn write_scores<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+TensorTrait<T>,
+PartialOrd<T>,
+NNTrait<T>,
+Neg<T>,
+Sub<T>,
>(
n_classes: usize, scores: Tensor<T>, post_transform: POST_TRANSFORM, add_second_class: usize
) -> Tensor<T> {
let new_scores = if n_classes >= 2 {
let new_scores = match post_transform {
POST_TRANSFORM::NONE => scores,
POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@scores, Option::Some(0)),
POST_TRANSFORM::LOGISTIC => NNTrait::sigmoid(@scores),
POST_TRANSFORM::SOFTMAXZERO => NNTrait::softmax_zero(@scores, 0),
POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'),
};
new_scores
} else {
let new_scores = match post_transform {
POST_TRANSFORM::NONE => {
let scores = if add_second_class == 0 || add_second_class == 1 {
TensorTrait::new(
array![2].span(),
array![NumberTrait::one() - *scores.data.at(0), *scores.data.at(0)].span()
)
} else if add_second_class == 2 || add_second_class == 3 {
TensorTrait::new(
array![2].span(), array![-*scores.data.at(0), *scores.data.at(0)].span()
)
} else {
TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span())
};
scores
},
POST_TRANSFORM::SOFTMAX => {
let scores = if add_second_class == 0 || add_second_class == 1 {
TensorTrait::new(
array![2].span(),
array![NumberTrait::one() - *scores.data.at(0), *scores.data.at(0)].span()
)
} else if add_second_class == 2 || add_second_class == 3 {
NNTrait::softmax(
@TensorTrait::new(
array![2].span(), array![-*scores.data.at(0), *scores.data.at(0)].span()
),
Option::Some(0)
)
} else {
TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span())
};
scores
},
POST_TRANSFORM::LOGISTIC => {
let scores = if add_second_class == 0 || add_second_class == 1 {
TensorTrait::new(
array![2].span(),
array![NumberTrait::one() - *scores.data.at(0), *scores.data.at(0)].span()
)
} else if add_second_class == 2 || add_second_class == 3 {
NNTrait::sigmoid(
@TensorTrait::new(
array![2].span(), array![-*scores.data.at(0), *scores.data.at(0)].span()
)
)
} else {
TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span())
};
scores
},
POST_TRANSFORM::SOFTMAXZERO => {
let scores = if add_second_class == 0 || add_second_class == 1 {
TensorTrait::new(
array![2].span(),
array![NumberTrait::one() - *scores.data.at(0), *scores.data.at(0)].span()
)
} else if add_second_class == 2 || add_second_class == 3 {
NNTrait::softmax_zero(
@TensorTrait::new(
array![2].span(), array![-*scores.data.at(0), *scores.data.at(0)].span()
),
0
)
} else {
TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span())
};
scores
},
POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not applicable here.'),
};
new_scores
};
new_scores
}
fn set_score_svm<
T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +PartialOrd<T>,
>(
max_weight: T,
maxclass: usize,
weights_are_all_positive_: bool,
has_proba: bool,
classlabels: Span<usize>,
posclass: usize,
negclass: usize
) -> (usize, usize) {
let mut write_additional_scores = 0;
if classlabels.len() == 2 {
write_additional_scores = 2;
if !has_proba {
if weights_are_all_positive_ && max_weight >= NumberTrait::half() {
return (*classlabels.at(1), write_additional_scores);
};
};
return (*classlabels.at(maxclass), write_additional_scores);
}
if max_weight >= NumberTrait::zero() {
return (posclass, write_additional_scores);
};
(negclass, write_additional_scores)
}
fn argmax_span<T, +Drop<T>, +Copy<T>, +PartialOrd<T>,>(span: Span<T>) -> usize {
let mut max = 0;
let mut i = 0;
while i != span.len() {
if *span.at(i) > *span.at(max) {
max = i;
}
i += 1;
};
max
}
fn probabilities<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+Into<usize, MAG>,
+Add<T>,
+TensorTrait<T>,
+AddEq<T>,
+Mul<T>,
+Neg<T>,
+Sub<T>,
+Div<T>,
+PartialOrd<T>,
>(
ref self: SVMClassifier<T>, scores: Span<T>, class_count_: usize
) -> NullableVec<T> {
let mut probsp2: MutMatrix<T> = MutMatrixImpl::new(class_count_, class_count_);
let mut index = 0;
let mut i = 0;
while i != class_count_ {
let mut j = i + 1;
while j != class_count_ {
let val1 = sigmoid_probability(
*scores.at(index), *self.prob_a.at(index), *self.prob_b.at(index)
);
let mut val2 = NumberTrait::min(
val1, NumberTrait::one()
);
probsp2.set(i, j, val2);
probsp2.set(j, i, NumberTrait::one() - val2);
j += 1;
index += 1;
};
i += 1;
};
multiclass_probability(class_count_, ref probsp2)
}
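// Pairwise coupling: given the matrix R of pairwise class probabilities, solves
// for the class-probability vector P by fixed-point iteration, stopping once
// max_t |(QP)_t - P^T Q P| < eps or after max_iter passes (the same scheme
// LIBSVM uses for multiclass probability estimates).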
fn multiclass_probability<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+Add<T>,
+Mul<T>,
+Div<T>,
+Sub<T>,
+Neg<T>,
+AddEq<T>,
+Into<usize, MAG>,
>(
k: usize, ref R: MutMatrix<T>
) -> NullableVec<T> {
let max_iter = max(100, k);
let k_fp = NumberTrait::<T>::new_unscaled(k.into(), false);
let mut Q: MutMatrix<T> = MutMatrixImpl::new(k, k);
MutMatrixImpl::set(ref Q, k - 1, k - 1, NumberTrait::zero());
let mut P = VecTrait::new();
VecTrait::set(ref P, k - 1, NumberTrait::zero());
let a: usize = 100;
let eps = (NumberTrait::half() / NumberTrait::new_unscaled(a.into(), false)) / k_fp;
let mut t = 0;
while t != k {
VecTrait::set(ref P, t, NumberTrait::one() / k_fp);
let mut i = 0;
let mut acc1 = NumberTrait::zero();
while i != t {
let r_i = MutMatrixImpl::at(ref R, i, t);
acc1 += r_i * r_i;
i += 1;
};
MutMatrixImpl::set(ref Q, t, t, acc1);
let mut i = 0;
while i != t {
MutMatrixImpl::set(ref Q, t, i, MutMatrixImpl::at(ref Q, i, t));
i += 1;
};
let mut i = t + 1;
let mut acc2 = NumberTrait::zero();
while i != k {
let r_i = MutMatrixImpl::at(ref R, i, t);
acc2 += r_i * r_i;
i += 1;
};
MutMatrixImpl::set(ref Q, t, t, acc1 + acc2);
let mut i = t + 1;
let mut acc = NumberTrait::zero();
while i != k {
acc += -MutMatrixImpl::at(ref R, i, t) * MutMatrixImpl::at(ref R, t, i);
i += 1;
};
let mut i = t + 1;
while i != k {
MutMatrixImpl::set(ref Q, t, i, acc);
i += 1;
};
t += 1;
};
let mut i = 0;
while i != max_iter {
let mut Qp = MutMatrixImpl::matrix_vector_product(ref Q, ref P);
let mut pQp = dot(ref P, ref Qp);
let mut max_error = NumberTrait::zero();
let mut t = 0;
while t != k {
let error = NumberTrait::abs(Qp.at(t) - pQp);
if error > max_error {
max_error = error;
}
t += 1;
};
if max_error < eps {
break;
}
let mut t = 0;
while t != k {
let diff = (-VecTrait::at(ref Qp, t) + pQp) / MutMatrixImpl::at(ref Q, t, t);
VecTrait::set(ref P, t, VecTrait::at(ref P, t) + diff);
pQp =
(pQp
+ diff
* (diff * MutMatrixImpl::at(ref Q, t, t)
+ (NumberTrait::one() + NumberTrait::one()) * VecTrait::at(ref Qp, t)))
/ ((NumberTrait::one() + diff) * (NumberTrait::one() + diff));
div_element_wise(ref P, NumberTrait::one() + diff);
Qp_computation(ref Q, ref Qp, diff, t);
t += 1;
};
i += 1;
};
P
}
fn Qp_computation<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+Mul<T>,
+Add<T>,
+Div<T>,
+AddEq<T>
>(
ref Q: MutMatrix<T>, ref Qp: NullableVec<T>, diff: T, t: usize
) {
let m = Qp.len;
let mut i = 0_usize;
while i != m {
let elem = (VecTrait::at(ref Qp, i) + diff * MutMatrixImpl::at(ref Q, t, i))
/ (NumberTrait::one() + diff);
VecTrait::set(ref Qp, i, elem);
i += 1;
};
}
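// Platt scaling: maps a raw pairwise SVM score to a probability,
// returning 1 / (1 + exp(score * prob_a + prob_b)); the exponent is taken on
// -|val| so the computation stays stable for either sign of val.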
fn sigmoid_probability<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+Add<T>,
+Mul<T>,
+Div<T>,
+Sub<T>,
+Neg<T>,
>(
score: T, prob_a: T, prob_b: T
) -> T {
let val = score * prob_a + prob_b;
let mut v = NumberTrait::one()
/ (NumberTrait::one() + NumberTrait::exp(-NumberTrait::abs(val)));
v = if val < NumberTrait::zero() {
NumberTrait::one() - v
} else {
v
};
NumberTrait::one() - v
}
fn max(a: usize, b: usize) -> usize {
if a > b {
return a;
};
b
}
fn min<T, +Copy<T>, +Drop<T>, +PartialOrd<T>,>(a: Span<T>) -> T {
let mut min = *a.at(0);
let mut i = 0;
while i != a.len() {
if min > *a.at(i) {
min = *a.at(i);
}
i += 1;
};
min
}
fn dot_start_end<
T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Mul<T>,
>(
pA: Span<T>, pB: Span<T>, a_start: usize, a_end: usize, b_start: usize, b_end: usize
) -> T {
let mut sum = NumberTrait::zero();
let mut index_a = a_start;
let mut index_b = b_start;
while index_a != a_end && index_b != b_end {
sum = sum + *pA.at(index_a) * *pB.at(index_b);
index_a += 1;
index_b += 1;
};
sum
}
fn sv_dot<
T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Mul<T>,
>(
pA: Span<T>, pB: Span<T>
) -> T {
let mut i = 0;
let mut sum = NumberTrait::zero();
while i != pA.len() {
sum = sum + *pA.at(i) * *pB.at(i);
i += 1;
};
sum
}
fn squared_diff<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+Add<T>,
+TensorTrait<T>,
+AddEq<T>,
+Mul<T>,
+Sub<T>,
>(
pA: Span<T>, pB: Span<T>
) -> T {
let mut i = 0;
let mut sum = NumberTrait::zero();
while i != pA.len() {
sum = sum + (*pA.at(i) - *pB.at(i)).pow(NumberTrait::one() + NumberTrait::one());
i += 1;
};
sum
}
fn dot<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Mul<T>, +AddEq<T>, +Add<T>, +Div<T>>(
ref self: NullableVec<T>, ref vec: NullableVec<T>
) -> T {
assert(self.len == vec.len, 'wrong vec len for dot prod');
let n = self.len;
let mut sum: T = NumberTrait::zero();
let mut i = 0_usize;
while i != n {
sum += self.at(i) * vec.at(i);
i += 1;
};
sum
}
fn div_element_wise<T, MAG, +Mul<T>, +Add<T>, +Div<T>, +NumberTrait<T, MAG>, +Drop<T>, +Copy<T>>(
ref self: NullableVec<T>, elem: T
) {
let m = self.len;
let mut i = 0_usize;
while i != m {
VecTrait::set(ref self, i, VecTrait::at(ref self, i) / elem);
i += 1;
};
}
use core::traits::TryInto;
use core::array::ArrayTrait;
use core::array::SpanTrait;
use core::traits::Into;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{
TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor
};
use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait};
use core::debug::PrintTrait;
use orion::operators::nn::{NNTrait, FP16x16NN};
use orion::utils::get_row;
use orion::operators::ml::POST_TRANSFORM;
use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE};
struct SVMRegressor<T> {
coefficients: Span<T>,
kernel_params: Span<T>,
kernel_type: KERNEL_TYPE,
n_supports: usize,
one_class: usize,
post_transform: POST_TRANSFORM,
rho: Span<T>,
support_vectors: Span<T>,
}
enum MODE {
SVM_LINEAR,
SVM_SVC,
}
trait SVMRegressorTrait<T> {
fn predict(ref self: SVMRegressor<T>, X: Tensor<T>) -> Tensor<T>;
}
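// Decision function: with support vectors (SVM_SVC mode),
//   score(x) = sum_j coefficients[j] * K(x, sv_j) + rho[0];
// without them (SVM_LINEAR mode), score(x) = K(x, coefficients) + rho[0].
// When one_class == 1, each score is collapsed to +1/-1 before post_transform.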
impl SVMRegressorImpl<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Add<T>,
+TensorTrait<T>,
+PrintTrait<T>,
+AddEq<T>,
+Div<T>,
+Mul<T>,
+Neg<T>,
+Sub<T>,
+NNTrait<T>,
> of SVMRegressorTrait<T> {
fn predict(ref self: SVMRegressor<T>, X: Tensor<T>) -> Tensor<T> {
let (mode_, kernel_type_, sv) = if self.n_supports > 0 {
let mode_ = MODE::SVM_SVC;
let kernel_type_ = self.kernel_type;
let sv = TensorTrait::new(
array![self.n_supports, self.support_vectors.len() / self.n_supports].span(),
self.support_vectors
);
(mode_, kernel_type_, sv)
} else {
let mode_ = MODE::SVM_LINEAR;
let kernel_type_ = KERNEL_TYPE::LINEAR;
let sv = TensorTrait::new(
array![self.support_vectors.len()].span(), self.support_vectors
);
(mode_, kernel_type_, sv)
};
let mut z: Array<T> = array![];
let mut n = 0;
while n != *X.shape.at(0) {
let mut s = NumberTrait::zero();
match mode_ {
MODE::SVM_LINEAR => {
let mut x_n = get_row(@X, n);
s = kernel_dot(self.kernel_params, x_n, self.coefficients, kernel_type_);
s += *self.rho.at(0);
},
MODE::SVM_SVC => {
let mut x_n = get_row(@X, n);
let mut j = 0;
while j != self.n_supports {
let mut sv_j = get_row(@sv, j);
let d = kernel_dot(self.kernel_params, x_n, sv_j, kernel_type_);
s += *self.coefficients.at(j) * d;
j += 1;
};
s += *self.rho.at(0);
},
}
if self.one_class == 1 {
let elem = if s > NumberTrait::zero() {
NumberTrait::one()
} else {
-NumberTrait::one()
};
z.append(elem);
} else {
z.append(s);
};
n += 1;
};
let mut score = TensorTrait::new(array![*X.shape.at(0)].span(), z.span());
score = match self.post_transform {
POST_TRANSFORM::NONE => score,
POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@score, Option::Some(1)),
POST_TRANSFORM::LOGISTIC => NNTrait::sigmoid(@score),
POST_TRANSFORM::SOFTMAXZERO => NNTrait::softmax_zero(@score, 1),
POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'),
};
score
}
}
mod core;
mod tree_ensemble_classifier;
mod tree_ensemble_regressor;
use alexandria_data_structures::array_ext::SpanTraitExt;
use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl};
use alexandria_data_structures::array_ext::ArrayTraitExt;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor};
use orion::utils::get_row;
struct TreeEnsembleAttributes<T> {
nodes_falsenodeids: Span<usize>,
nodes_featureids: Span<usize>,
nodes_missing_value_tracks_true: Span<usize>,
nodes_modes: Span<NODE_MODES>,
nodes_nodeids: Span<usize>,
nodes_treeids: Span<usize>,
nodes_truenodeids: Span<usize>,
nodes_values: Span<T>,
}
struct TreeEnsemble<T> {
atts: TreeEnsembleAttributes<T>,
tree_ids: Span<usize>,
root_index: Felt252Dict<usize>,
node_index: Felt252Dict<usize>,
}
enum NODE_MODES {
BRANCH_LEQ,
BRANCH_LT,
BRANCH_GTE,
BRANCH_GT,
BRANCH_EQ,
BRANCH_NEQ,
LEAF
}
impl TreeEnsembleImpl<
T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +PartialOrd<T>, +PartialEq<T>
> of TreeEnsembleTrait<T> {
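// Walks one tree from its root: each branch node compares
// x[nodes_featureids[index]] against nodes_values[index] according to its mode
// (NaN inputs follow nodes_missing_value_tracks_true), then jumps to the
// true/false child by looking up node_index with a Pedersen hash of
// (tree_id, node_id), until a LEAF is reached.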
fn leaf_index_tree(ref self: TreeEnsemble<T>, x: Span<T>, tree_id: usize) -> usize {
let mut index: usize = self.root_index.get(tree_id.into());
loop {
match *self.atts.nodes_modes.at(index) {
NODE_MODES::BRANCH_LEQ => {},
NODE_MODES::BRANCH_LT => {},
NODE_MODES::BRANCH_GTE => {},
NODE_MODES::BRANCH_GT => {},
NODE_MODES::BRANCH_EQ => {},
NODE_MODES::BRANCH_NEQ => {},
NODE_MODES::LEAF => { break; },
};
let x_value = *x.at(*(self.atts.nodes_featureids).at(index));
let r = if x_value.is_nan() {
*self.atts.nodes_missing_value_tracks_true.at(index) >= 1
} else {
match *self.atts.nodes_modes.at(index) {
NODE_MODES::BRANCH_LEQ => x_value <= *self.atts.nodes_values[index],
NODE_MODES::BRANCH_LT => x_value < *self.atts.nodes_values[index],
NODE_MODES::BRANCH_GTE => x_value >= *self.atts.nodes_values[index],
NODE_MODES::BRANCH_GT => x_value > *self.atts.nodes_values[index],
NODE_MODES::BRANCH_EQ => x_value == *self.atts.nodes_values[index],
NODE_MODES::BRANCH_NEQ => x_value != *self.atts.nodes_values[index],
NODE_MODES::LEAF => {
panic(array!['Unexpected rule for node index ', index.into()])
},
}
};
let nid = if r {
*self.atts.nodes_truenodeids[index]
} else {
*self.atts.nodes_falsenodeids[index]
};
let mut key = PedersenHasherImpl::new();
let key: felt252 = key.hash(tree_id.into(), nid.into());
index = self.node_index.get(key);
};
index
}
fn leave_index_tree(ref self: TreeEnsemble<T>, x: Tensor<T>) -> Tensor<usize> {
let mut outputs: Array<usize> = array![];
let mut i: usize = 0;
let breaker: usize = *x.shape[0];
while i != breaker {
let row_data: Span<T> = get_row(@x, i);
let mut outs: Array<usize> = array![];
let mut tree_ids = self.tree_ids;
loop {
match tree_ids.pop_front() {
Option::Some(tree_id) => {
outs
.append(
TreeEnsembleImpl::<T>::leaf_index_tree(ref self, row_data, *tree_id)
)
},
Option::None => { break; }
};
};
outputs.append_all(ref outs);
i += 1;
};
TensorTrait::new(array![*x.shape[0], self.tree_ids.len()].span(), outputs.span())
}
}
use core::array::ArrayTrait;
use core::clone::Clone;
use core::box::BoxTrait;
use core::traits::Into;
use core::option::OptionTrait;
use orion::operators::matrix::MutMatrixTrait;
use core::array::SpanTrait;
use core::nullable::NullableTrait;
use core::dict::Felt252DictTrait;
use core::dict::Felt252DictEntryTrait;
use core::nullable::{match_nullable, FromNullableResult};
use orion::operators::tensor::{Tensor, TensorTrait};
use orion::operators::ml::tree_ensemble::core::{TreeEnsemble, TreeEnsembleImpl, TreeEnsembleTrait};
use orion::numbers::NumberTrait;
use orion::utils::get_row;
use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl};
use alexandria_data_structures::array_ext::{SpanTraitExt};
use orion::operators::matrix::{MutMatrix, MutMatrixImpl};
use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl};
use orion::operators::ml::POST_TRANSFORM;
use core::debug::PrintTrait;
struct TreeEnsembleClassifier<T> {
ensemble: TreeEnsemble<T>,
class_ids: Span<usize>,
class_nodeids: Span<usize>,
class_treeids: Span<usize>,
class_weights: Span<T>,
classlabels: Span<usize>,
base_values: Option<Span<T>>,
post_transform: POST_TRANSFORM,
}
trait TreeEnsembleClassifierTrait<T> {
fn predict(
classifier: TreeEnsembleClassifier<T>, X: Tensor<T>
) -> (Span<usize>, MutMatrix::<T>);
}
impl TreeEnsembleClassifierImpl<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Add<T>,
+TensorTrait<usize>,
+TensorTrait<T>,
+PrintTrait<T>,
+AddEq<T>,
+Div<T>,
+Mul<T>
> of TreeEnsembleClassifierTrait<T> {
fn predict(
classifier: TreeEnsembleClassifier<T>, X: Tensor<T>
) -> (Span<usize>, MutMatrix::<T>) {
let mut classifier = classifier;
let leaves_index = classifier.ensemble.leave_index_tree(X);
let n_classes = classifier.classlabels.len();
let mut res: MutMatrix<T> = MutMatrixImpl::new(*leaves_index.shape.at(0), n_classes);
if classifier.base_values.is_some() {
let mut base_values = classifier.base_values.unwrap();
let mut row: usize = 0;
loop {
if row == res.rows {
break;
}
let mut col: usize = 0;
loop {
if col == res.cols {
break;
}
let value = *base_values.pop_front().unwrap();
res.set(row, col, value);
col += 1
};
row += 1;
}
} else {
let mut row: usize = 0;
loop {
if row == res.rows {
break;
}
let mut col: usize = 0;
loop {
if col == res.cols {
break;
}
res.set(row, col, NumberTrait::zero());
col += 1
                };
row += 1;
}
}
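        // Map each (tree_id, node_id) leaf pair to the indices of its entries in the
        // class_* spans, keyed by the Pedersen hash of the pair.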
let mut class_index: Felt252Dict<Nullable<Span<usize>>> = Default::default();
let mut i: usize = 0;
loop {
if i == classifier.class_treeids.len() {
break;
}
let tid = *classifier.class_treeids[i];
let nid = *classifier.class_nodeids[i];
let mut key = PedersenHasherImpl::new();
let key: felt252 = key.hash(tid.into(), nid.into());
match match_nullable(class_index.get(key)) {
FromNullableResult::Null(()) => {
class_index.insert(key, NullableTrait::new(array![i].span()));
},
FromNullableResult::NotNull(val) => {
let mut new_val = val.unbox();
let new_val = new_val.concat(array![i].span());
class_index.insert(key, NullableTrait::new(new_val));
},
}
i += 1;
};
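        // For every input row, look up the leaf reached in each tree and accumulate
        // the corresponding class weights into the score matrix.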
let mut i: usize = 0;
loop {
if i == res.rows {
break;
}
let mut indices = get_row(@leaves_index, i);
let mut t_index: Array<Span<core::integer::u32>> = ArrayTrait::new();
loop {
match indices.pop_front() {
Option::Some(index) => {
let mut key = PedersenHasherImpl::new();
let key: felt252 = key
.hash(
(*classifier.ensemble.atts.nodes_treeids[*index]).into(),
(*classifier.ensemble.atts.nodes_nodeids[*index]).into()
);
t_index.append(class_index.get(key).deref());
},
Option::None => { break; }
};
};
let mut t_index = t_index.span();
loop {
match t_index.pop_front() {
                    Option::Some(its) => {
let mut its = *its;
loop {
match its.pop_front() {
Option::Some(it) => {
match res.get(i, *classifier.class_ids[*it]) {
Option::Some(val) => {
res
.set(
i,
*classifier.class_ids[*it],
val + *classifier.class_weights[*it]
);
},
Option::None => {
res
.set(
i,
*classifier.class_ids[*it],
*classifier.class_weights[*it]
);
},
};
},
Option::None => { break; }
};
};
},
Option::None => { break; }
};
};
i += 1;
};
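        // Binary detection: if every entry of class_ids names the same class, only one
        // score column was produced, so the complementary column is derived below.
        // Note this loop terminates when pop_front() returns None, not via `i`.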
let mut binary = false;
let mut i: usize = 0;
let mut class_ids = classifier.class_ids;
let mut class_id: usize = 0;
match class_ids.pop_front() {
Option::Some(c_id) => { class_id = *c_id; },
Option::None => { class_id = 0; }
};
loop {
if i == classifier.class_ids.len() {
break;
}
match class_ids.pop_front() {
                Option::Some(c_id) => {
if *c_id == class_id {
binary = true;
continue;
} else {
binary = false;
break;
}
},
Option::None => { break; }
};
};
if binary {
let mut new_res: MutMatrix<T> = MutMatrixImpl::new(res.rows, res.cols);
let mut i: usize = 0;
loop {
if i == res.rows {
break;
}
match res.get(i, 0) {
Option::Some(res_0) => { new_res.set(i, 1, res_0); },
Option::None => { new_res.set(i, 1, NumberTrait::zero()); },
};
i += 1;
};
match classifier.post_transform {
POST_TRANSFORM::NONE => {
let mut i: usize = 0;
loop {
if i == res.rows {
break;
}
match new_res.get(i, 1) {
Option::Some(res_1) => {
let value = NumberTrait::sub(NumberTrait::one(), res_1);
new_res.set(i, 0, value);
},
Option::None => { new_res.set(i, 0, NumberTrait::zero()); },
};
i += 1;
};
},
POST_TRANSFORM::SOFTMAX => {
let mut i: usize = 0;
loop {
if i == res.rows {
break;
}
match new_res.get(i, 1) {
Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); },
                            Option::None => { new_res.set(i, 0, NumberTrait::zero()); },
};
i += 1;
};
},
POST_TRANSFORM::LOGISTIC => {
let mut i: usize = 0;
loop {
if i == res.rows {
break;
}
match new_res.get(i, 1) {
Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); },
Option::None => { new_res.set(i, 0, NumberTrait::zero()); },
};
i += 1;
};
},
POST_TRANSFORM::SOFTMAXZERO => {
let mut i: usize = 0;
loop {
if i == res.rows {
break;
}
match new_res.get(i, 1) {
Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); },
Option::None => { new_res.set(i, 0, NumberTrait::zero()); },
};
i += 1;
};
},
POST_TRANSFORM::PROBIT => {
let mut i: usize = 0;
loop {
if i == res.rows {
break;
}
match new_res.get(i, 1) {
Option::Some(res_1) => {
let value = NumberTrait::sub(NumberTrait::one(), res_1);
new_res.set(i, 0, value);
},
Option::None => { new_res.set(i, 0, NumberTrait::zero()); },
};
i += 1;
};
},
};
res = new_res;
        }
let mut new_scores = match classifier.post_transform {
POST_TRANSFORM::NONE => res,
POST_TRANSFORM::SOFTMAX => res.softmax(1),
POST_TRANSFORM::LOGISTIC => res.sigmoid(),
POST_TRANSFORM::SOFTMAXZERO => res.softmax_zero(1),
POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'),
};
let mut labels = new_scores.argmax(1);
let mut labels_list = ArrayTrait::new();
loop {
match labels.pop_front() {
Option::Some(i) => { labels_list.append(*classifier.classlabels[*i]); },
Option::None => { break; }
};
};
return (labels_list.span(), new_scores);
}
}
use core::array::ArrayTrait;
use core::clone::Clone;
use core::box::BoxTrait;
use core::traits::Into;
use core::option::OptionTrait;
use orion::operators::matrix::MutMatrixTrait;
use core::array::SpanTrait;
use core::nullable::NullableTrait;
use core::dict::Felt252DictTrait;
use core::dict::Felt252DictEntryTrait;
use core::nullable::{match_nullable, FromNullableResult};
use orion::operators::tensor::{Tensor, TensorTrait};
use orion::operators::ml::tree_ensemble::core::{TreeEnsemble, TreeEnsembleImpl, TreeEnsembleTrait};
use orion::numbers::NumberTrait;
use orion::utils::get_row;
use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl};
use alexandria_data_structures::array_ext::{SpanTraitExt};
use orion::operators::matrix::{MutMatrix, MutMatrixImpl};
use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl};
use orion::operators::ml::POST_TRANSFORM;
use core::debug::PrintTrait;
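// TreeEnsembleRegressor mirrors the ONNX TreeEnsembleRegressor operator: the
// target_* spans map each leaf to a target index and weight, and aggregate_function
// selects how the per-tree contributions are combined.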
struct TreeEnsembleRegressor<T> {
ensemble: TreeEnsemble<T>,
target_ids: Span<usize>,
target_nodeids: Span<usize>,
target_treeids: Span<usize>,
target_weights: Span<T>,
base_values: Option<Span<T>>,
n_targets: usize,
aggregate_function: AGGREGATE_FUNCTION,
post_transform: POST_TRANSFORM,
}
enum AGGREGATE_FUNCTION {
SUM,
AVERAGE,
MIN,
MAX,
}
trait TreeEnsembleRegressorTrait<T> {
fn predict(regressor: TreeEnsembleRegressor<T>, X: Tensor<T>) -> MutMatrix::<T>;
}
impl TreeEnsembleRegressorImpl<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Add<T>,
+TensorTrait<usize>,
+TensorTrait<T>,
+PrintTrait<T>,
+AddEq<T>,
+Div<T>,
+Mul<T>,
> of TreeEnsembleRegressorTrait<T> {
fn predict(regressor: TreeEnsembleRegressor<T>, X: Tensor<T>) -> MutMatrix::<T> {
let mut regressor = regressor;
let leaves_index = regressor.ensemble.leave_index_tree(X);
let n_targets = regressor.n_targets;
let mut res: MutMatrix<T> = MutMatrixImpl::new(*leaves_index.shape.at(0), n_targets);
let n_trees = regressor.ensemble.tree_ids.len();
let mut target_index: Felt252Dict<Nullable<Span<usize>>> = Default::default();
let mut i: usize = 0;
loop {
if i == regressor.target_treeids.len() {
break;
}
let tid = *regressor.target_treeids[i];
let nid = *regressor.target_nodeids[i];
let mut key = PedersenHasherImpl::new();
let key: felt252 = key.hash(tid.into(), nid.into());
match match_nullable(target_index.get(key)) {
FromNullableResult::Null(()) => {
target_index.insert(key, NullableTrait::new(array![i].span()));
},
FromNullableResult::NotNull(val) => {
let mut new_val = val.unbox();
let new_val = new_val.concat(array![i].span());
target_index.insert(key, NullableTrait::new(new_val));
},
}
i += 1;
};
let mut i: usize = 0;
loop {
if i == res.rows {
break;
}
            let mut indices = get_row(@leaves_index, i);
let mut t_index: Array<Span<core::integer::u32>> = ArrayTrait::new();
loop {
match indices.pop_front() {
Option::Some(index) => {
let mut key = PedersenHasherImpl::new();
let key: felt252 = key
.hash(
(*regressor.ensemble.atts.nodes_treeids[*index]).into(),
(*regressor.ensemble.atts.nodes_nodeids[*index]).into()
);
t_index.append(target_index.get(key).deref());
},
Option::None => { break; }
};
};
let mut t_index = t_index.span();
match regressor.aggregate_function {
AGGREGATE_FUNCTION::SUM => {
compute_res_SUM(ref regressor, ref res, ref t_index, i);
},
AGGREGATE_FUNCTION::AVERAGE => {
compute_res_AVERAGE(ref regressor, ref res, ref t_index, n_trees, i);
},
AGGREGATE_FUNCTION::MIN => {
compute_res_MIN(ref regressor, ref res, ref t_index, i);
},
AGGREGATE_FUNCTION::MAX => {
compute_res_MAX(ref regressor, ref res, ref t_index, i);
},
};
i += 1;
};
if regressor.base_values.is_some() {
let mut base_values = regressor.base_values.unwrap();
let mut row: usize = 0;
loop {
if row == res.rows {
break;
}
let mut col: usize = 0;
loop {
if col == res.cols {
break;
}
let value = *base_values.pop_front().unwrap();
match res.get(row, col) {
                        Option::Some(val) => { res.set(row, col, val + value); },
Option::None => { res.set(row, col, value); },
};
col += 1
};
row += 1;
}
}
let mut new_scores = match regressor.post_transform {
POST_TRANSFORM::NONE => res,
POST_TRANSFORM::SOFTMAX => res.softmax(1),
POST_TRANSFORM::LOGISTIC => res.sigmoid(),
POST_TRANSFORM::SOFTMAXZERO => res.softmax_zero(1),
POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'),
};
return new_scores;
}
}
fn compute_res_SUM<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Add<T>,
+TensorTrait<usize>,
+TensorTrait<T>,
+PrintTrait<T>,
+AddEq<T>,
+Div<T>,
+Mul<T>,
>(
ref self: TreeEnsembleRegressor<T>,
ref res: MutMatrix<T>,
ref t_index: Span<Span<core::integer::u32>>,
i: usize
) {
loop {
match t_index.pop_front() {
Option::Some(its) => {
let mut its = *its;
loop {
match its.pop_front() {
Option::Some(it) => {
match res.get(i, *self.target_ids[*it]) {
Option::Some(val) => {
res
.set(
i,
*self.target_ids[*it],
val + *self.target_weights[*it]
);
},
Option::None => {
res.set(i, *self.target_ids[*it], *self.target_weights[*it]);
},
};
},
                        Option::None => { break; }
};
};
},
Option::None => { break; }
};
};
}
fn compute_res_AVERAGE<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Add<T>,
+TensorTrait<usize>,
+TensorTrait<T>,
+PrintTrait<T>,
+AddEq<T>,
+Div<T>,
+Mul<T>
>(
ref self: TreeEnsembleRegressor<T>,
ref res: MutMatrix<T>,
ref t_index: Span<Span<core::integer::u32>>,
n_trees: usize,
i: usize
) {
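    // NOTE: the divisor is built as a fixed-point felt: n_trees is scaled by 65536,
    // the ONE of an FP16x16-style representation, before NumberTrait::from_felt.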
let n_trees_felt: felt252 = (n_trees * 65536).into();
let n_trees: T = NumberTrait::from_felt(n_trees_felt);
loop {
match t_index.pop_front() {
Option::Some(its) => {
let mut its = *its;
loop {
match its.pop_front() {
Option::Some(it) => {
match res.get(i, *self.target_ids[*it]) {
Option::Some(val) => {
res
.set(
i,
*self.target_ids[*it],
val + (*self.target_weights[*it]) / n_trees
);
},
Option::None => {
res
.set(
i,
*self.target_ids[*it],
*self.target_weights[*it] / n_trees
);
},
};
},
Option::None => { break; }
};
};
},
Option::None => { break; }
};
};
}
fn compute_res_MIN<
T,
MAG,
    +Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Add<T>,
+TensorTrait<usize>,
+TensorTrait<T>,
+PrintTrait<T>,
+AddEq<T>,
+Div<T>,
+Mul<T>,
>(
ref self: TreeEnsembleRegressor<T>,
ref res: MutMatrix<T>,
ref t_index: Span<Span<core::integer::u32>>,
i: usize
) {
let mut j = 0;
loop {
if j == res.cols {
break;
}
res.set(i, j, NumberTrait::max_value());
j += 1;
};
loop {
match t_index.pop_front() {
Option::Some(its) => {
let mut its = *its;
loop {
match its.pop_front() {
Option::Some(it) => {
match res.get(i, *self.target_ids[*it]) {
Option::Some(val) => {
res
.set(
i,
*self.target_ids[*it],
NumberTrait::min(val, *self.target_weights[*it])
);
},
Option::None => {
res.set(i, *self.target_ids[*it], *self.target_weights[*it]);
},
};
},
Option::None => { break; }
};
};
},
Option::None => { break; }
};
};
}
fn compute_res_MAX<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Add<T>,
+TensorTrait<usize>,
+TensorTrait<T>,
+PrintTrait<T>,
+AddEq<T>,
+Div<T>,
+Mul<T>,
>(
ref self: TreeEnsembleRegressor<T>,
ref res: MutMatrix<T>,
ref t_index: Span<Span<core::integer::u32>>,
i: usize
) {
let mut j = 0;
    loop {
if j == res.cols {
break;
}
res.set(i, j, NumberTrait::min_value());
j += 1;
};
loop {
match t_index.pop_front() {
Option::Some(its) => {
let mut its = *its;
loop {
match its.pop_front() {
Option::Some(it) => {
match res.get(i, *self.target_ids[*it]) {
Option::Some(val) => {
res
.set(
i,
*self.target_ids[*it],
NumberTrait::max(val, *self.target_weights[*it])
);
},
Option::None => {
res.set(i, *self.target_ids[*it], *self.target_weights[*it]);
},
};
},
Option::None => { break; }
};
};
},
Option::None => { break; }
};
};
}
mod core;
mod implementations;
mod functional;
use orion::operators::nn::core::NNTrait;
use orion::operators::nn::implementations::nn_fp8x23::FP8x23NN;
use orion::operators::nn::implementations::nn_fp16x16::FP16x16NN;
use orion::operators::nn::implementations::nn_fp32x32::FP32x32NN;
use orion::operators::nn::implementations::nn_fp64x64::FP64x64NN;
use orion::operators::nn::implementations::nn_i8::I8NN;
use orion::operators::nn::implementations::nn_i32::I32NN;
use orion::operators::nn::implementations::nn_u32::U32NN;
use orion::operators::tensor::core::Tensor;
trait NNTrait<T> {
fn relu(tensor: @Tensor<T>) -> Tensor<T>;
fn softmax(tensor: @Tensor<T>, axis: Option<i32>) -> Tensor<T>;
fn softmax_zero(tensor: @Tensor<T>, axis: usize) -> Tensor<T>;
fn logsoftmax(tensor: @Tensor<T>, axis: usize) -> Tensor<T>;
fn sigmoid(tensor: @Tensor<T>) -> Tensor<T>;
    fn softsign(tensor: @Tensor<T>) -> Tensor<T>;
fn softplus(tensor: @Tensor<T>) -> Tensor<T>;
fn linear(inputs: Tensor<T>, weights: Tensor<T>, bias: Tensor<T>) -> Tensor<T>;
fn leaky_relu(inputs: @Tensor<T>, alpha: @T) -> Tensor<T>;
fn hard_sigmoid(tensor: @Tensor<T>, alpha: @T, beta: @T) -> Tensor<T>;
fn thresholded_relu(tensor: @Tensor<T>, alpha: @T) -> Tensor<T>;
    fn space_to_depth(tensor: @Tensor<T>, blocksize: usize) -> Tensor<T>;
fn depth_to_space(tensor: @Tensor<T>, blocksize: usize, mode: felt252) -> Tensor<T>;
fn gemm(
A: Tensor<T>,
B: Tensor<T>,
C: Option<Tensor<T>>,
alpha: Option<T>,
beta: Option<T>,
transA: bool,
transB: bool
) -> Tensor<T>;
fn conv(
X: @Tensor<T>,
W: @Tensor<T>,
B: Option<Span<T>>,
auto_pad: Option<orion::operators::nn::functional::conv::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
    ) -> Tensor<T>;
fn conv_transpose(
X: @Tensor<T>,
W: @Tensor<T>,
B: Option<@Tensor<T>>,
auto_pad: Option<orion::operators::nn::functional::conv_transpose::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<T>;
fn col2im(
data: @Tensor<T>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
    ) -> Tensor<T>;
fn grid_sample(
X: @Tensor<T>,
grid: @Tensor<T>,
align_corner: Option<usize>,
mode: Option<orion::operators::nn::functional::grid_sample::MODE>,
padding_mode: Option<orion::operators::nn::functional::grid_sample::PADDING_MODE>,
) -> Tensor<T>;
}
mod relu;
mod leaky_relu;
mod sigmoid;
mod softmax;
mod softmax_zero;
mod softsign;
mod softplus;
mod linear;
mod logsoftmax;
mod thresholded_relu;
mod hard_sigmoid;
mod gemm;
mod grid_sample;
mod col2im;
mod conv_transpose;
mod depth_to_space;
mod space_to_depth;
mod conv;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{stride};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,};
use orion::operators::vec::{NullableVec, NullableVecImpl};
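// col2im folds column blocks back into an image, the inverse of im2col: values from
// overlapping patches are summed into their original spatial positions.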
fn col2im<T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +Copy<T>, +Drop<T>, +Add<T>, +Mul<T>,>(
data: @Tensor<T>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<T> {
let dilations = match dilations {
Option::Some(dilations) => dilations,
Option::None => {
let mut dilations: Array<usize> = array![];
let mut i = 0;
while i != image_shape.len() {
dilations.append(1);
i += 1;
};
dilations.span()
},
};
let pads = match pads {
Option::Some(pads) => pads,
Option::None => {
let mut pads: Array<usize> = array![];
let mut i = 0;
while i != image_shape.len() {
pads.append(0);
pads.append(0);
i += 1;
};
pads.span()
},
};
let strides = match strides {
Option::Some(strides) => strides,
Option::None => {
let mut strides: Array<usize> = array![];
let mut i = 0;
while i != image_shape.len() {
strides.append(1);
i += 1;
};
strides.span()
},
};
let bl = prod(block_shape, 0);
let C = *(*data).shape.at(1) / bl;
let mut new_shape: Array<i32> = array![
(*(*data).shape.at(0)).try_into().unwrap(), C.try_into().unwrap(), bl.try_into().unwrap()
];
let mut i = 2;
    while i != (*data).shape.len() {
new_shape.append((*(*data).shape.at(i)).try_into().unwrap());
i += 1;
};
let data = data.reshape(new_shape.span(), false);
    let mut res: Array<T> = array![];
let data_stride = stride(data.shape);
let mut n = 0;
    while n != *data.shape.at(0) {
        let mut c = 0;
        while c != *data.shape.at(1) {
let data_n_c = TensorTrait::new(
SpanTrait::slice(data.shape, 2, data.shape.len() - 2),
SpanTrait::slice(
data.data,
n * *data_stride.at(0) + c * *data_stride.at(1),
*data_stride.at(1)
)
);
let mut out = col2im_naive_implementation(
@data_n_c, image_shape, block_shape, dilations, pads, strides
);
let mut i = 0;
while i != out.len() {
res.append(out.at(i));
i += 1;
};
c += 1;
};
n += 1;
};
let mut new_shape = array![*data.shape.at(0), *data.shape.at(1)];
let mut i = 0;
while i != image_shape.len() {
new_shape.append(*image_shape.at(i));
i += 1;
};
TensorTrait::new(new_shape.span(), res.span())
}
fn get_image<T, +Drop<T>, +Copy<T>>(self: @Tensor<T>, row: usize) -> Span<T> {
assert((*self).shape.len() == 2, 'Expected a 2D tensor');
let row_length = *self.shape[1];
let start = row * row_length;
(*self).data.slice(start, row_length)
}
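// Naive col2im over a single channel slice: for every kernel offset and output column
// position, compute the (padding-adjusted) image index and accumulate the value there.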
fn col2im_naive_implementation<
T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +Copy<T>, +Drop<T>, +Add<T>,
>(
data: @Tensor<T>,
image_shape: Span<usize>,
kernel_shape: Span<usize>,
dilations: Span<usize>,
pads: Span<usize>,
strides: Span<usize>,
) -> NullableVec<T> {
let n_dims = pads.len() / 2;
col2im_shape_check(data, image_shape, kernel_shape, dilations, pads, strides);
let mut dim_col: Array<usize> = array![];
let mut i = 0;
    while i != n_dims {
dim_col
.append(
(*image_shape.at(i)
+ (*pads.at(i) + *pads.at(i + n_dims))
- (*dilations.at(i) * (*kernel_shape.at(i) - 1) + 1))
/ *strides.at(i)
+ 1
);
i += 1;
};
let dim_col = dim_col.span();
let stride_img = stride(image_shape);
let mut data_im = NullableVecImpl::new();
data_im.set(*image_shape.at(0) * *stride_img.at(0) - 1, NumberTrait::zero());
let kernel_size = prod(kernel_shape, 0);
let col_size = prod(dim_col, 0);
let mut c_col = 0;
while c_col != kernel_size {
let offset = get_indices(c_col, kernel_shape).span();
let mut col = 0;
while col != col_size {
let ind_col = get_indices(col, dim_col).span();
let mut ind_im: Array<usize> = array![];
let mut i = 0;
while i != n_dims {
if (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)) < *pads
.at(i) {
let neg_index = *pads.at(i)
- (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i));
ind_im.append(*image_shape.at(i) + neg_index);
} else {
ind_im
.append(
*ind_col.at(i) * *strides.at(i)
+ *offset.at(i) * *dilations.at(i)
- *pads.at(i)
);
}
i += 1;
};
let ind_im = ind_im.span();
if !is_out(ind_im, image_shape) {
let mut index = 0;
let mut i = 0;
while i != image_shape.len() {
index += *stride_img.at(i) * *ind_im.at(i);
i += 1;
};
                data_im.set(index, data_im.at(index) + *(*data).data.at(c_col * col_size + col));
}
col += 1;
};
c_col += 1;
};
data_im
}
fn col2im_shape_check<T, +TensorTrait<T>, +Copy<T>, +Drop<T>,>(
X: @Tensor<T>,
output_shape: Span<usize>,
kernel_shape: Span<usize>,
dilations: Span<usize>,
pads: Span<usize>,
strides: Span<usize>,
) {
let n_input_plane = *(*X).shape.at(0);
let kernel_size = prod(kernel_shape, 0);
assert(n_input_plane % kernel_size == 0, 'wrong input dimension');
let input_length = *(*X).shape.at(1);
let n_dims = output_shape.len();
let mut n_blocks: Array<usize> = array![];
let mut i = 0;
while i != n_dims {
n_blocks
.append(
(*output_shape.at(i)
+ (*pads.at(i) + *pads.at(i + n_dims))
- *dilations.at(i) * (*kernel_shape.at(i) - 1)
- 1)
/ *strides.at(i)
+ 1
);
i += 1;
};
let block_size = prod(n_blocks.span(), 0);
assert(input_length == block_size, 'input_length != block_size');
}
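// Decomposes a flat index into n-dimensional indices for `shape`, row-major order.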
fn get_indices(index: usize, shape: Span<usize>,) -> Array<usize> {
let mut i = index;
let mut res: Array<usize> = array![];
let mut k = shape.len() - 1;
while k != 0 {
let m = i % *shape.at(k);
res.append(m);
i -= m;
i /= *shape.at(k);
k -= 1;
};
let mut new_res: Array<usize> = array![];
new_res.append(i);
let mut i = shape.len() - 1;
while i != 0 {
new_res.append(*res.at(i - 1));
i -= 1;
};
new_res
}
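// Returns true if any index falls outside the corresponding bound in `shape`.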
fn is_out(ind: Span<usize>, shape: Span<usize>,) -> bool {
let mut n = 0;
let is_out = loop {
if n == ind.len() {
break false;
}
let s = *shape.at(n);
let i = *ind.at(n);
if i < 0 {
break true;
}
if i >= s {
break true;
}
n += 1;
};
is_out
}
fn prod<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>,>(
pA: Span<T>, start: usize
) -> T {
let mut i = start;
let mut prod = NumberTrait::one();
while i != pA.len() {
prod = prod * (*pA.at(i));
i += 1;
};
prod
}
use core::debug::PrintTrait;
use orion::numbers::NumberTrait;
use orion::numbers::{U32IntoI32, I32IntoU32, I32Div, I32Number};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,};
use orion::operators::vec::{NullableVec, NullableVecImpl};
use orion::operators::tensor::core::{stride};
enum AUTO_PAD {
NOTSET,
SAME_UPPER,
SAME_LOWER,
VALID
}
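// Convolution following ONNX Conv semantics: optional attributes are resolved to
// defaults, grouped and dilated kernels are handled first, then the computation
// dispatches to rank-specialized paths (1D/2D/3D) or a generic n-D fallback.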
fn conv<
T,
MAG,
+TensorTrait<T>,
+NumberTrait<T, MAG>,
+Copy<T>,
+Drop<T>,
+Add<T>,
+Mul<T>,
+AddEq<T>,
+PrintTrait<T>,
>(
X: @Tensor<T>,
W: @Tensor<T>,
B: Option<Span<T>>,
auto_pad: Option<AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<T> {
let nd = (*X).shape.len() - 2;
assert((*X).shape.len() >= 3, 'X must have at least 3 dim');
let dilations = match dilations {
Option::Some(dilations) => dilations,
Option::None => {
let mut dilations: Array<usize> = array![];
let mut i = 2;
while i != (*X).shape.len() {
dilations.append(1);
i += 1;
};
dilations.span()
},
};
let kernel_shape = match kernel_shape {
Option::Some(kernel_shape) => kernel_shape,
Option::None => {
let mut kernel_shape: Array<usize> = array![];
let mut i = 2;
while i != (*W).shape.len() {
kernel_shape.append(*(*W).shape.at(i));
i += 1;
};
kernel_shape.span()
},
};
let pads = match pads {
Option::Some(pads) => pads,
Option::None => {
let mut pads: Array<usize> = array![];
let mut i = 2;
while i != (*X).shape.len() {
pads.append(0);
pads.append(0);
i += 1;
};
pads.span()
},
};
    let strides = match strides {
Option::Some(strides) => strides,
Option::None => {
let mut strides: Array<usize> = array![];
let mut i = 2;
while i != (*X).shape.len() {
strides.append(1);
i += 1;
};
strides.span()
},
};
let group = match group {
Option::Some(group) => group,
Option::None => { 1 },
};
let auto_pad = match auto_pad {
Option::Some(auto_pad) => auto_pad,
Option::None => { AUTO_PAD::NOTSET },
};
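    // Grouped convolution: slice X and W per group, recurse with group = 1, then
    // reassemble the per-group outputs (adding the bias B at the end if present).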
if group > 1 {
let sN = *(*X).shape.at(0);
let mut res_b: Array<usize> = array![];
let mut res_cv = array![];
let mut td = 0;
let mg = *(*W).shape.at(0) / group;
let dw = *(*W).shape.at(1);
let X_stride = stride((*X).shape);
let mut gx_shape = array![1, dw];
let mut i = 2;
while i != (*X).shape.len() {
gx_shape.append(*(*X).shape.at(i));
i += 1;
};
let gx_shape = gx_shape.span();
let W_stride = stride((*W).shape);
let mut gw_shape = array![mg];
let mut i = 1;
while i != (*W).shape.len() {
gw_shape.append(*(*W).shape.at(i));
i += 1;
};
let gw_shape = gw_shape.span();
let mut b = 0;
while b != sN {
let mut g = 0;
while g != group {
let gx = TensorTrait::new(
gx_shape,
SpanTrait::slice(
(*X).data,
b * *X_stride.at(0) + (g * dw) * *X_stride.at(1),
*X_stride.at(1) * dw
)
);
let gw = TensorTrait::new(
gw_shape,
SpanTrait::slice((*W).data, (g * mg) * *W_stride.at(0), *W_stride.at(0) * mg)
);
let cv = conv(
@gx,
@gw,
                    Option::None,
Option::Some(auto_pad),
Option::Some(dilations),
Option::Some(1),
Option::Some(kernel_shape),
Option::Some(pads),
Option::Some(strides)
);
if b == 0 {
td += *cv.shape.at(1);
}
res_b.append(b);
res_cv.append(cv);
g += 1;
};
b += 1;
};
let res_b = res_b.span();
let res_cv = res_cv.span();
let mut final_shape = array![sN, td];
let mut cv = *res_cv.at(0);
let mut i = 2;
while i != cv.shape.len() {
final_shape.append(*cv.shape.at(i));
i += 1;
};
let final_shape = final_shape.span();
let mut final: Array<T> = array![];
let mut p = 0;
let mut i = 0;
while i != res_b.len() {
let cv = *res_cv.at(i);
let mut n = 0;
while n != cv.data.len() {
final.append(*cv.data.at(n));
n += 1;
};
p += *cv.shape.at(1);
if p >= td {
p = 0;
}
i += 1;
};
let final = final.span();
let final = match B {
Option::Some(B) => {
let mut final_b: Array<T> = array![];
let final_stride = stride(final_shape);
let mut i = 0;
while i != *final_shape.at(0) {
let mut j = 0;
while j != B.len() {
let mut k = 0;
while k != *final_stride.at(1) {
final_b
.append(
*final.at(i * *final_stride.at(0) + j * *final_stride.at(1) + k)
+ *B.at(j)
                            );
k += 1;
};
j += 1;
};
i += 1;
};
final_b.span()
},
Option::None => { final },
};
return TensorTrait::new(final_shape, final);
}
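    // Non-uniform dilations: expand W into a dilated kernel by inserting zeros
    // between its entries, growing kernel_shape accordingly.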
if *dilations.at(0) != 1 || min(dilations.clone()) != max(dilations.clone()) {
let nd = dilations.len();
let mut new_kernel_shape: Array<usize> = array![];
let mut new_shape: Array<usize> = array![];
new_shape.append_span(SpanTrait::slice((*W).shape, 0, (*W).shape.len() - nd));
let mut i = 0;
while i != dilations.len() {
let d = *dilations.at(i);
let di = (*W).shape.len() - nd + i;
new_shape.append(*(*W).shape.at(di) + (*(*W).shape.at(di) - 1) * (d - 1));
new_kernel_shape.append(*kernel_shape.at(i) + (*kernel_shape.at(i) - 1) * (d - 1));
i += 1;
};
let new_shape = new_shape.span();
let new_w_strides = stride(new_shape);
let mut new_w = NullableVecImpl::new();
new_w.set(*new_shape.at(0) * *new_w_strides.at(0) - 1, NumberTrait::zero());
let mut indices = array![];
indices.append(arange(0, *new_shape.at(0), 1));
indices.append(arange(0, *new_shape.at(1), 1));
let mut i = 0;
while i != dilations.len() {
let d = *dilations.at(i);
let di = (*W).shape.len() - nd + i;
indices.append(arange(0, *new_shape.at(di), d));
i += 1;
};
let set_of_all_indices = cartesian(indices.span());
let mut new_w_arr: Array<T> = array![];
let mut i = 0;
let mut prev = 0;
while i != (*W).data.len() {
let nd_index = *set_of_all_indices.at(i);
let mut flatten_index = 0;
let mut j = 0;
while j != nd_index.len() {
                flatten_index += *nd_index.at(j) * *new_w_strides.at(j);
j += 1;
};
            // Dilation leaves gaps between consecutive kernel entries; zero-fill them.
            // The increment must live inside the while body, else the loop never ends.
            if flatten_index > prev + 1 {
                let mut j = prev + 1;
                while j != flatten_index {
                    new_w_arr.append(NumberTrait::zero());
                    j += 1;
                };
            }
new_w_arr.append(*(*W).data.at(i));
new_w.set(flatten_index, *(*W).data.at(i));
prev = flatten_index;
i += 1;
};
}
let pads = match auto_pad {
AUTO_PAD::NOTSET => { pads },
AUTO_PAD::SAME_UPPER => {
let mut head: Array<usize> = array![];
let mut tail: Array<usize> = array![];
let mut i = 0;
while i != nd {
let d = *(*X).shape.at(i);
let target_size = (d + *strides.at(i) - 1) / *strides.at(i);
let pad_needed = (target_size - 1) * *strides.at(i) + *kernel_shape.at(i) - d;
let pad_head = pad_needed / 2;
let pad_tail = pad_needed - pad_head;
head.append(pad_head);
tail.append(pad_tail);
i += 1;
};
head.append_span(tail.span());
let pads = head.span();
pads
},
AUTO_PAD::SAME_LOWER => {
let mut head: Array<usize> = array![];
let mut tail: Array<usize> = array![];
let mut i = 0;
while i != nd {
let d = *(*X).shape.at(i);
let target_size = (d + *strides.at(i) - 1) / *strides.at(i);
let pad_needed = (target_size - 1) * *strides.at(i) + *kernel_shape.at(i) - d;
let pad_head = (pad_needed + 1) / 2;
let pad_tail = pad_needed - pad_head;
head.append(pad_head);
tail.append(pad_tail);
i += 1;
};
head.append_span(tail.span());
let pads = head.span();
pads
},
AUTO_PAD::VALID => {
            let mut head: Array<usize> = array![];
let mut tail: Array<usize> = array![];
let mut i = 0;
while i != nd {
let d = *(*X).shape.at(i);
let target_size = (d + *strides.at(i) - 1) / *strides.at(i);
let pad_needed = (target_size - 1) * *strides.at(i) + *kernel_shape.at(i) - d;
let pad_head = pad_needed / 2;
let pad_tail = pad_needed - pad_head;
head.append(pad_head);
tail.append(pad_tail);
i += 1;
};
head.append_span(tail.span());
let pads = head.span();
pads
},
};
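    // Specialized path for rank-3 input (N, C, H): 1D convolution.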
if (*X).shape.len() == 3 {
let sN = *(*X).shape.at(0);
let sC = *(*X).shape.at(1);
let sH = *(*X).shape.at(2);
let sM = *(*W).shape.at(0);
let kh = *kernel_shape.at(0);
let sth = *strides.at(0);
let h_out = ((sH - kh + *pads.at(0) + *pads.at(1)) / sth) + 1;
let h0 = *pads.at(0);
let oh: i32 = -1 * (kh % 2).into();
let bh: i32 = -h0.into();
let eh = h_out * sth;
let mut res = NullableVecImpl::new();
let res_shape = array![sN, sM, h_out].span();
let res_strides = stride(res_shape);
res.set(sN * *res_strides.at(0) - 1, NumberTrait::zero());
match B {
Option::Some(B) => {
let mut i = 0;
while i != sN {
let mut j = 0;
while j != sM {
let b_j = *B.at(j);
let mut k = 0;
while k != h_out {
res.set(i * *res_strides.at(0) + j * *res_strides.at(1) + k, b_j);
k += 1;
};
j += 1;
};
i += 1;
};
},
Option::None => {},
}
let mut n = 0;
while n != sN {
        let mut nw = 0;
while nw != sM {
let mut c = 0;
while c != sC {
let w = SpanTrait::slice((*W).data, nw * sC * kh + c * kh, kh);
let mut io = bh;
while io < eh.into() {
let hr = (io - bh) / sth.into();
if hr < h_out.into() {
let i = io + (kh % 2).into();
let ih1 = I32Number::max(0, i + oh).into();
let ih2 = I32Number::min(i + oh + kh.into(), sH.into()).into();
                        // X is laid out as (N, C, H); index batch n, channel c at offset ih1.
                        let img = SpanTrait::slice(
                            (*X).data, n * (sC * sH) + c * sH + ih1, ih2 - ih1
                        );
let s = if w.len() != img.len() {
let jh1 = I32Number::max(0, -i - oh).into();
let jh2 = I32Number::min(sH.into() - (i + oh), kh.into()).into();
let w_ = SpanTrait::slice(w, jh1, jh2 - jh1);
assert(w_.len() == img.len(), 'unexpected w and img len');
dot(img, w_)
} else {
dot(img, w)
};
let hr = if hr < 0 {
*res_strides.at(1) - hr.into()
} else {
hr.into()
};
res
.set(
n * *res_strides.at(0) + nw * *res_strides.at(1) + hr,
res.at(n * *res_strides.at(0) + nw * *res_strides.at(1) + hr)
+ s
);
}
io += sth.into();
};
c += 1;
};
nw += 1;
};
n += 1;
};
        let mut res_data: Array<T> = array![];
let mut i = 0;
while i != res.len() {
res_data.append(res.at(i));
i += 1;
};
return TensorTrait::new(res_shape, res_data.span());
}
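    // Specialized path for rank-4 input (N, C, H, W): 2D convolution.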
if (*X).shape.len() == 4 {
let sN = *(*X).shape.at(0);
let sC = *(*X).shape.at(1);
let sH = *(*X).shape.at(2);
let sW = *(*X).shape.at(3);
let sM = *(*W).shape.at(0);
let kh = *kernel_shape.at(0);
let kw = *kernel_shape.at(1);
let sth = *strides.at(0);
let stw = *strides.at(1);
let h_out = ((sH - kh + *pads.at(0) + *pads.at(2)) / sth) + 1;
let w_out = ((sW - kw + *pads.at(1) + *pads.at(3)) / stw) + 1;
let h0 = *pads.at(0);
let w0 = *pads.at(1);
let oh: i32 = -1 * (kh % 2).into();
let ow: i32 = -1 * (kw % 2).into();
let bh: i32 = -h0.into();
let bw: i32 = -w0.into();
let eh = h_out * sth;
let ew = w_out * stw;
let mut res = NullableVecImpl::new();
let res_shape = array![sN, sM, h_out, w_out].span();
let res_strides = stride(res_shape);
res.set(sN * *res_strides.at(0) - 1, NumberTrait::zero());
match B {
Option::Some(B) => {
let mut i = 0;
while i != sN {
let mut j = 0;
while j != sM {
let b_j = *B.at(j);
let mut k = 0;
while k != h_out {
let mut l = 0;
while l != w_out {
res
.set(
i * *res_strides.at(0)
+ j * *res_strides.at(1)
+ k * *res_strides.at(2)
+ l,
b_j
                                        );
l += 1;
};
k += 1;
};
j += 1;
};
i += 1;
};
},
Option::None => {},
}
let mut n = 0;
while n != sN {
let mut nw = 0;
while nw != sM {
let mut c = 0;
while c != sC {
let w = SpanTrait::slice(
(*W).data, nw * (sC * kh * kw) + c * (kh * kw), kh * kw
);
let mut io = bh;
while io < eh.into() {
let hr = (io - bh) / sth.into();
if hr < h_out.into() {
let i = io + (kh % 2).into();
let ih1 = I32Number::max(0, i + oh).into();
let ih2 = I32Number::min(i + oh + kh.into(), sH.into()).into();
let mut jo = bw;
while jo < ew.into() {
let wr = (jo - bw) / stw.into();
if wr < w_out.into() {
let j = jo + (kw % 2).into();
let iw1 = I32Number::max(0, j + ow).into();
let iw2 = I32Number::min(j + ow + kw.into(), sW.into()).into();
let mut img: Array<T> = array![];
let mut ihi = ih1;
while ihi != ih2 {
img
.append_span(
SpanTrait::slice(
(*X).data,
n * (sC * sH * sW)
                                                + c * (sH * sW)
+ ihi * sW
+ iw1,
iw2 - iw1
)
);
ihi += 1;
};
let img = img.span();
let s = if w.len() != img.len() {
let jh1 = I32Number::max(0, -i - oh).into();
let jh2 = I32Number::min(sH.into() - (i + oh), kh.into())
.into();
let jw1 = I32Number::max(0, -j - ow).into();
let jw2 = I32Number::min(sW.into() - (j + ow), kw.into())
.into();
let mut w_: Array<T> = array![];
let mut jhj = jh1;
while jhj != jh2 {
w_
.append_span(
SpanTrait::slice(w, jhj * kw + jw1, jw2 - jw1)
);
jhj += 1;
};
let w_ = w_.span();
assert(w_.len() == img.len(), 'unexpected w and img len');
dot(img, w_)
} else {
dot(img, w)
};
let hr = if hr < 0 {
h_out - hr.into()
                                    } else {
hr.into()
};
let wr = if wr < 0 {
w_out - wr.into()
} else {
wr.into()
};
res
.set(
n * *res_strides.at(0)
+ nw * *res_strides.at(1)
+ hr * *res_strides.at(2)
+ wr,
res
.at(
n * *res_strides.at(0)
+ nw * *res_strides.at(1)
+ hr * *res_strides.at(2)
+ wr
)
+ s
);
}
jo += stw.into();
};
}
io += sth.into();
};
c += 1;
};
nw += 1;
};
n += 1;
};
let mut res_data: Array<T> = array![];
let mut i = 0;
while i != res.len() {
res_data.append(res.at(i));
i += 1;
};
return TensorTrait::new(res_shape, res_data.span());
}
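    // Specialized path for rank-5 input (N, C, H, W, Z): 3D convolution.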
if (*X).shape.len() == 5 {
let sN = *(*X).shape.at(0);
let sC = *(*X).shape.at(1);
let sH = *(*X).shape.at(2);
let sW = *(*X).shape.at(3);
let sZ = *(*X).shape.at(4);
let sM = *(*W).shape.at(0);
        let kh = *kernel_shape.at(0);
let kw = *kernel_shape.at(1);
let kz = *kernel_shape.at(2);
let sth = *strides.at(0);
let stw = *strides.at(1);
let stz = *strides.at(2);
let h_out = ((sH - kh + *pads.at(0) + *pads.at(3)) / sth) + 1;
let w_out = ((sW - kw + *pads.at(1) + *pads.at(4)) / stw) + 1;
let z_out = ((sZ - kz + *pads.at(2) + *pads.at(5)) / stz) + 1;
let h0 = *pads.at(0);
let w0 = *pads.at(1);
let z0 = *pads.at(2);
let oh: i32 = -1 * (kh % 2).into();
let ow: i32 = -1 * (kw % 2).into();
let oz: i32 = -1 * (kz % 2).into();
let bh: i32 = -h0.into();
let bw: i32 = -w0.into();
let bz: i32 = -z0.into();
let eh = h_out * sth;
let ew = w_out * stw;
let ez = z_out * stz;
let mut res = NullableVecImpl::new();
let res_shape = array![sN, sM, h_out, w_out, z_out].span();
let res_strides = stride(res_shape);
res.set(sN * *res_strides.at(0) - 1, NumberTrait::zero());
match B {
Option::Some(B) => {
let mut i = 0;
while i != sN {
let mut j = 0;
while j != sM {
let b_j = *B.at(j);
let mut k = 0;
while k != h_out {
let mut l = 0;
while l != w_out {
let mut m = 0;
while m != z_out {
res
.set(
i * *res_strides.at(0)
+ j * *res_strides.at(1)
+ k * *res_strides.at(2)
+ l * *res_strides.at(3)
+ m,
                                            b_j
);
m += 1;
};
l += 1;
};
k += 1;
};
j += 1;
};
i += 1;
};
},
Option::None => {},
}
let mut n = 0;
while n != sN {
let mut nw = 0;
while nw != sM {
let mut c = 0;
while c != sC {
let w = SpanTrait::slice(
(*W).data, nw * (sC * kh * kw * kz) + c * (kh * kw * kz), kh * kw * kz
);
let mut io = bh;
while io < eh.into() {
let hr = (io - bh) / sth.into();
if hr < h_out.into() {
let i = io + (kh % 2).into();
let ih1 = I32Number::max(0, i + oh).into();
let ih2 = I32Number::min(i + oh + kh.into(), sH.into()).into();
let mut jo = bw;
while jo < ew.into() {
let wr = (jo - bw) / stw.into();
if wr < w_out.into() {
let j = jo + (kw % 2).into();
let iw1 = I32Number::max(0, j + ow).into();
let iw2 = I32Number::min(j + ow + kw.into(), sW.into()).into();
let mut zo = bz;
while zo < ez.into() {
let zr = (zo - bz) / stz.into();
if zr < z_out.into() {
let z = zo + (kz % 2).into();
                                            let iz1 = I32Number::max(0, z + oz).into();
                                            // The z-window is bounded by sZ (depth), not sW.
                                            let iz2 = I32Number::min(z + oz + kz.into(), sZ.into())
                                                .into();
let mut img: Array<T> = array![];
let mut ihi = ih1;
while ihi != ih2 {
let mut iwi = iw1;
while iwi != iw2 {
img
.append_span(
SpanTrait::slice(
(*X).data,
n * (sC * sH * sW * sZ)
+ c * (sH * sW * sZ)
+ ihi * (sW * sZ)
+ iwi * sZ
+ iz1,
iz2 - iz1
)
);
iwi += 1;
};
ihi += 1;
};
let img = img.span();
let s = if w.len() != img.len() {
let jh1 = I32Number::max(0, -i - oh).into();
let jh2 = I32Number::min(
sH.into() - (i + oh), kh.into()
                                            )
.into();
let jw1 = I32Number::max(0, -j - ow).into();
let jw2 = I32Number::min(
sW.into() - (j + ow), kw.into()
)
.into();
let jz1 = I32Number::max(0, -z - oz).into();
let jz2 = I32Number::min(
sZ.into() - (z + oz), kz.into()
)
.into();
let mut w_: Array<T> = array![];
let mut jhj = jh1;
while jhj != jh2 {
let mut jwj = jw1;
while jwj != jw2 {
w_
.append_span(
SpanTrait::slice(
w,
jhj * kw * kz + jwj * kz + jz1,
jz2 - jz1
)
);
jwj += 1;
};
jhj += 1;
};
                                            let w_ = w_.span();
assert(
w_.len() == img.len(),
'unexpected w and img len'
);
dot(img, w_)
} else {
dot(img, w)
};
let hr = if hr < 0 {
h_out - hr.into()
} else {
hr.into()
};
let wr = if wr < 0 {
w_out - wr.into()
} else {
wr.into()
};
let zr = if zr < 0 {
z_out - zr.into()
} else {
zr.into()
};
res
.set(
n * *res_strides.at(0)
+ nw * *res_strides.at(1)
+ hr * *res_strides.at(2)
+ wr * *res_strides.at(3)
+ zr,
res
.at(
                                                n * *res_strides.at(0)
+ nw * *res_strides.at(1)
+ hr * *res_strides.at(2)
+ wr * *res_strides.at(3)
+ zr
)
+ s
);
}
zo += stz.into();
};
}
jo += stw.into();
};
}
io += sth.into();
};
c += 1;
};
nw += 1;
};
n += 1;
};
let mut res_data: Array<T> = array![];
let mut i = 0;
while i != res.len() {
res_data.append(res.at(i));
i += 1;
};
return TensorTrait::new(res_shape, res_data.span());
}
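    // Generic n-dimensional fallback: every output position is enumerated as a flat
    // index and decomposed with DivRem against the range strides.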
let sN = *(*X).shape.at(0);
let sC = *(*X).shape.at(1);
let sM = *(*W).shape.at(0);
let w_stride = stride((*W).shape);
let x_stride = stride((*X).shape);
let mut shape_out: Array<usize> = array![];
let mut o_index: Array<i32> = array![];
let mut b_index: Array<i32> = array![];
let mut e_index: Array<usize> = array![];
let mut range_len: Array<usize> = array![];
let mut i = 0;
while i != nd {
shape_out
.append(
((*(*X).shape.at(2 + i) - *kernel_shape.at(i) + *pads.at(i) + *pads.at(i + nd))
/ *strides.at(i))
+ 1
);
let k = *kernel_shape.at(i);
o_index.append(-1 * (k % 2).into());
b_index.append(-(*pads.at(i)).into());
e_index.append(*shape_out.at(i) * *strides.at(i));
        range_len.append((((*e_index.at(i)).into() - *b_index.at(i)).into()) / *strides.at(i));
i += 1;
};
let o_index = o_index.span();
let b_index = b_index.span();
let shape_out = shape_out.span();
let range_len = range_len.span();
let range_stride = stride(range_len);
let mut res_shape = array![sN, sM];
res_shape.append_span(shape_out);
let res_shape = res_shape.span();
let res_strides = stride(res_shape);
let mut res = NullableVecImpl::new();
res.set(sN * *res_strides.at(0) - 1, NumberTrait::zero());
match B {
Option::Some(B) => {
let mut i = 0;
while i != sN {
let mut j = 0;
while j != sM {
let b_j = *B.at(j);
let mut k = 0;
while k != *res_strides.at(1) {
res.set(i * *res_strides.at(0) + j * *res_strides.at(1) + k, b_j);
k += 1;
};
j += 1;
};
i += 1;
};
},
Option::None => {},
}
let mut n = 0;
while n != sN {
let mut nw = 0;
while nw != sM {
let mut c = 0;
while c != sC {
let w = SpanTrait::slice(
(*W).data, nw * *w_stride.at(0) + c * *w_stride.at(1), *w_stride.at(1)
);
let mut i = 0;
while i != *range_len.at(0) * *range_stride.at(0) {
let mut io_index: Array<i32> = array![];
let mut r_index: Array<i32> = array![];
let mut flatten_index = i;
let mut nx = 0;
while nx != nd {
let (n_index, rem) = DivRem::div_rem(
flatten_index, (*range_stride.at(nx)).try_into().unwrap()
);
flatten_index = rem;
io_index
.append(n |