author (int64) | date (string) | timezone (int64) | hash (string) | message (string) | mods (list) | language (string) | license (string) | repo (string) | original_message (string) |
---|---|---|---|---|---|---|---|---|---|
49,706 | 13.08.2020 15:33:48 | -7,200 | 3e9752c5d0049589e5b7c872bb9d4876b018f96a | [MINOR] Fix startLocalFedWorker | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"new_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"diff": "@@ -1307,10 +1307,9 @@ public abstract class AutomatedTestBase {\ntry {\nt = new Thread(() -> {\ntry {\n- DMLScript.main(finalArguments);\n+ main(finalArguments);\n}\ncatch(IOException e) {\n-\n}\n});\nt.start();\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix startLocalFedWorker |
49,706 | 13.08.2020 15:50:57 | -7,200 | f3144e818a5d1c5956b8ec3e04e03966009d79d1 | [MINOR] Fix deprecation warning in Python | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/utils/converters.py",
"new_path": "src/main/python/systemds/utils/converters.py",
"diff": "@@ -42,7 +42,7 @@ def numpy_to_matrix_block(jvm: JVMView, np_arr: np.array):\nelse:\narr = np_arr.ravel().astype(np.float64)\nvalue_type = jvm.org.apache.sysds.common.Types.ValueType.FP64\n- buf = bytearray(arr.tostring())\n+ buf = bytearray(arr.tobytes())\nconvert_method = jvm.org.apache.sysds.runtime.util.Py4jConverterUtils.convertPy4JArrayToMB\nreturn convert_method(buf, rows, cols, value_type)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix deprecation warning in Python |
49,746 | 13.08.2020 18:58:31 | -7,200 | 2c4bf8816a4b87d0a91dd02ff390bae87cef4b3e | New builtin for obtaining frame column names
New builtin colnames(X) for obtaining a single-row frame holding the
column names by position.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"new_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"diff": "@@ -64,6 +64,7 @@ public enum Builtins {\nCOLMAX(\"colMaxs\", false),\nCOLMEAN(\"colMeans\", false),\nCOLMIN(\"colMins\", false),\n+ COLNAMES(\"colnames\", false),\nCOLPROD(\"colProds\", false),\nCOLSD(\"colSds\", false),\nCOLSUM(\"colSums\", false),\n@@ -182,7 +183,7 @@ public enum Builtins {\nTANH(\"tanh\", false),\nTRACE(\"trace\", false),\nTO_ONE_HOT(\"toOneHot\", true),\n- TYPEOF(\"typeOf\", false),\n+ TYPEOF(\"typeof\", false),\nCOUNT_DISTINCT(\"countDistinct\",false),\nCOUNT_DISTINCT_APPROX(\"countDistinctApprox\",false),\nVAR(\"var\", false),\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Types.java",
"new_path": "src/main/java/org/apache/sysds/common/Types.java",
"diff": "@@ -195,7 +195,7 @@ public class Types\nABS, ACOS, ASIN, ASSERT, ATAN, CAST_AS_SCALAR, CAST_AS_MATRIX,\nCAST_AS_FRAME, CAST_AS_DOUBLE, CAST_AS_INT, CAST_AS_BOOLEAN,\nCEIL, CHOLESKY, COS, COSH, CUMMAX, CUMMIN, CUMPROD, CUMSUM,\n- CUMSUMPROD, DETECTSCHEMA, EIGEN, EXISTS, EXP, FLOOR, INVERSE,\n+ CUMSUMPROD, DETECTSCHEMA, COLNAMES, EIGEN, EXISTS, EXP, FLOOR, INVERSE,\nIQM, ISNA, ISNAN, ISINF, LENGTH, LINEAGE, LOG, NCOL, NOT, NROW,\nMEDIAN, PRINT, ROUND, SIN, SINH, SIGN, SOFTMAX, SQRT, STOP, SVD,\nTAN, TANH, TYPEOF,\n@@ -231,6 +231,7 @@ public class Types\ncase CUMPROD: return \"ucum*\";\ncase CUMSUM: return \"ucumk+\";\ncase CUMSUMPROD: return \"ucumk+*\";\n+ case COLNAMES: return \"colnames\";\ncase DETECTSCHEMA: return \"detectSchema\";\ncase MULT2: return \"*2\";\ncase NOT: return \"!\";\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/hops/UnaryOp.java",
"new_path": "src/main/java/org/apache/sysds/hops/UnaryOp.java",
"diff": "@@ -539,7 +539,8 @@ public class UnaryOp extends MultiThreadedHop\nsetDim1(input.getDim1());\nsetDim2(1);\n}\n- else if(_op == OpOp1.TYPEOF || _op == OpOp1.DETECTSCHEMA) {\n+ else if(_op == OpOp1.TYPEOF || _op == OpOp1.DETECTSCHEMA || _op == OpOp1.COLNAMES) {\n+ //TODO theses three builtins should rather be moved to unary aggregates\nsetDim1(1);\nsetDim2(input.getDim2());\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/parser/BuiltinFunctionExpression.java",
"new_path": "src/main/java/org/apache/sysds/parser/BuiltinFunctionExpression.java",
"diff": "@@ -716,6 +716,7 @@ public class BuiltinFunctionExpression extends DataIdentifier\nbreak;\ncase TYPEOF:\ncase DETECTSCHEMA:\n+ case COLNAMES:\ncheckNumParameters(1);\ncheckMatrixFrameParam(getFirstExpr());\noutput.setDataType(DataType.FRAME);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/parser/DMLTranslator.java",
"new_path": "src/main/java/org/apache/sysds/parser/DMLTranslator.java",
"diff": "@@ -2656,8 +2656,9 @@ public class DMLTranslator\ncase CHOLESKY:\ncase TYPEOF:\ncase DETECTSCHEMA:\n- currBuiltinOp = new UnaryOp(target.getName(), target.getDataType(), target.getValueType(),\n- OpOp1.valueOf(source.getOpCode().name()), expr);\n+ case COLNAMES:\n+ currBuiltinOp = new UnaryOp(target.getName(), target.getDataType(),\n+ target.getValueType(), OpOp1.valueOf(source.getOpCode().name()), expr);\nbreak;\ncase OUTER:\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/CPInstructionParser.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/CPInstructionParser.java",
"diff": "@@ -190,6 +190,7 @@ public class CPInstructionParser extends InstructionParser\nString2CPInstructionType.put( \"sigmoid\", CPType.Unary);\nString2CPInstructionType.put( \"typeOf\", CPType.Unary);\nString2CPInstructionType.put( \"detectSchema\", CPType.Unary);\n+ String2CPInstructionType.put( \"colnames\", CPType.Unary);\nString2CPInstructionType.put( \"isna\", CPType.Unary);\nString2CPInstructionType.put( \"isnan\", CPType.Unary);\nString2CPInstructionType.put( \"isinf\", CPType.Unary);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/SPInstructionParser.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/SPInstructionParser.java",
"diff": "@@ -250,6 +250,7 @@ public class SPInstructionParser extends InstructionParser\nString2SPInstructionType.put( \"sprop\", SPType.Unary);\nString2SPInstructionType.put( \"sigmoid\", SPType.Unary);\nString2SPInstructionType.put( \"detectSchema\", SPType.Unary);\n+ String2SPInstructionType.put( \"colnames\", SPType.Unary);\nString2SPInstructionType.put( \"isna\", SPType.Unary);\nString2SPInstructionType.put( \"isnan\", SPType.Unary);\nString2SPInstructionType.put( \"isinf\", SPType.Unary);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/UnaryFrameCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/UnaryFrameCPInstruction.java",
"diff": "package org.apache.sysds.runtime.instructions.cp;\nimport org.apache.sysds.lops.Lop;\n+import org.apache.sysds.runtime.DMLScriptException;\nimport org.apache.sysds.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysds.runtime.matrix.data.FrameBlock;\nimport org.apache.sysds.runtime.matrix.operators.Operator;\n@@ -37,12 +38,19 @@ public class UnaryFrameCPInstruction extends UnaryCPInstruction {\nec.releaseFrameInput(input1.getName());\nec.setFrameOutput(output.getName(), retBlock);\n}\n- else if(getOpcode().equals(\"detectSchema\"))\n- {\n+ else if(getOpcode().equals(\"detectSchema\")) {\nFrameBlock inBlock = ec.getFrameInput(input1.getName());\nFrameBlock retBlock = inBlock.detectSchemaFromRow(Lop.SAMPLE_FRACTION);\nec.releaseFrameInput(input1.getName());\nec.setFrameOutput(output.getName(), retBlock);\n}\n+ else if(getOpcode().equals(\"colnames\")) {\n+ FrameBlock inBlock = ec.getFrameInput(input1.getName());\n+ FrameBlock retBlock = inBlock.getColumnNamesAsFrame();\n+ ec.releaseFrameInput(input1.getName());\n+ ec.setFrameOutput(output.getName(), retBlock);\n+ }\n+ else\n+ throw new DMLScriptException(\"Opcode '\" + getOpcode() + \"' is not a valid UnaryFrameCPInstruction\");\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/spark/UnaryFrameSPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/spark/UnaryFrameSPInstruction.java",
"diff": "@@ -23,7 +23,9 @@ import org.apache.spark.api.java.JavaPairRDD;\nimport org.apache.spark.api.java.function.Function2;\nimport org.apache.spark.api.java.function.PairFunction;\nimport org.apache.sysds.common.Types;\n+import org.apache.sysds.common.Types.OpOp1;\nimport org.apache.sysds.lops.Lop;\n+import org.apache.sysds.runtime.DMLScriptException;\nimport org.apache.sysds.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysds.runtime.controlprogram.context.SparkExecutionContext;\nimport org.apache.sysds.runtime.instructions.InstructionUtils;\n@@ -47,6 +49,23 @@ public class UnaryFrameSPInstruction extends UnarySPInstruction {\n@Override\npublic void processInstruction(ExecutionContext ec) {\nSparkExecutionContext sec = (SparkExecutionContext) ec;\n+ if(getOpcode().equals(OpOp1.DETECTSCHEMA.toString()))\n+ detectSchema(sec);\n+ else if(getOpcode().equals(OpOp1.COLNAMES.toString()))\n+ columnNames(sec);\n+ else\n+ throw new DMLScriptException(\"Opcode '\" + getOpcode() + \"' is not a valid UnaryFrameSPInstruction\");\n+ }\n+\n+ private void columnNames(SparkExecutionContext sec) {\n+ // get input\n+ JavaPairRDD<Long, FrameBlock> in = sec.getFrameBinaryBlockRDDHandleForVariable(input1.getName());\n+ // get the first row block (frames are only blocked rowwise) and get its column names\n+ FrameBlock outFrame = in.lookup(1L).get(0).getColumnNamesAsFrame();\n+ sec.setFrameOutput(output.getName(), outFrame);\n+ }\n+\n+ public void detectSchema(SparkExecutionContext sec) {\n// get input\nJavaPairRDD<Long, FrameBlock> in = sec.getFrameBinaryBlockRDDHandleForVariable(input1.getName());\nJavaPairRDD<Long, FrameBlock> out = in.mapToPair(new DetectSchemaUsingRows());\n@@ -56,6 +75,7 @@ public class UnaryFrameSPInstruction extends UnarySPInstruction {\nprivate static class DetectSchemaUsingRows implements PairFunction<Tuple2<Long, FrameBlock>, Long, FrameBlock> {\nprivate static final long serialVersionUID = 5850400295183766400L;\n+\n@Override\npublic Tuple2<Long, FrameBlock> call(Tuple2<Long, FrameBlock> arg0) throws Exception {\nFrameBlock resultBlock = new FrameBlock(arg0._2.detectSchemaFromRow(Lop.SAMPLE_FRACTION));\n@@ -65,6 +85,7 @@ public class UnaryFrameSPInstruction extends UnarySPInstruction {\nprivate static class MergeFrame implements Function2<FrameBlock, FrameBlock, FrameBlock> {\nprivate static final long serialVersionUID = 942744896521069893L;\n+\n@Override\npublic FrameBlock call(FrameBlock arg0, FrameBlock arg1) throws Exception {\nreturn new FrameBlock(FrameBlock.mergeSchema(arg0, arg1));\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/matrix/data/FrameBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/matrix/data/FrameBlock.java",
"diff": "@@ -177,6 +177,13 @@ public class FrameBlock implements CacheBlock, Externalizable\nreturn getColumnNames(true);\n}\n+\n+ public FrameBlock getColumnNamesAsFrame() {\n+ FrameBlock fb = new FrameBlock(getNumColumns(), ValueType.STRING);\n+ fb.appendRow(getColumnNames());\n+ return fb;\n+ }\n+\n/**\n* Returns the column names of the frame block. This method\n* allocates default column names if required.\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"new_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"diff": "package org.apache.sysds.test;\nimport static org.junit.Assert.assertEquals;\n-import static org.junit.Assert.assertFalse;\n-import static org.junit.Assert.assertTrue;\nimport static org.junit.Assert.fail;\nimport java.io.ByteArrayOutputStream;\n@@ -197,13 +195,6 @@ public abstract class AutomatedTestBase {\nprivate boolean isOutAndExpectedDeletionDisabled = false;\n- private int iExpectedStdOutState = 0;\n- private int iUnexpectedStdOutState = 0;\n- // private PrintStream originalPrintStreamStd = null;\n-\n- private int iExpectedStdErrState = 0;\n- // private PrintStream originalErrStreamStd = null;\n-\nprivate boolean outputBuffering = true;\n// Timestamp before test start.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/frame/FrameColumnNamesTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.frame;\n+\n+import java.util.Arrays;\n+import java.util.Collection;\n+import org.apache.sysds.api.DMLScript;\n+import org.apache.sysds.common.Types;\n+import org.apache.sysds.common.Types.FileFormat;\n+import org.apache.sysds.lops.LopProperties.ExecType;\n+import org.apache.sysds.runtime.io.FileFormatPropertiesCSV;\n+import org.apache.sysds.runtime.io.FrameWriter;\n+import org.apache.sysds.runtime.io.FrameWriterFactory;\n+import org.apache.sysds.runtime.matrix.data.FrameBlock;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.junit.runner.RunWith;\n+import org.junit.runners.Parameterized;\n+\n+import edu.emory.mathcs.backport.java.util.Collections;\n+\n+@RunWith(value = Parameterized.class)\[email protected]\n+public class FrameColumnNamesTest extends AutomatedTestBase {\n+ private final static String TEST_NAME = \"ColumnNames\";\n+ private final static String TEST_DIR = \"functions/frame/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + FrameColumnNamesTest.class.getSimpleName() + \"/\";\n+\n+ private final static int _rows = 10000;\n+ @Parameterized.Parameter()\n+ public String[] _columnNames;\n+\n+ @Parameterized.Parameters\n+ public static Collection<Object[]> data() {\n+ return Arrays.asList(new Object[][] {{new String[] {\"A\", \"B\", \"C\"}}, {new String[] {\"1\", \"2\", \"3\"}},\n+ {new String[] {\"Hello\", \"hello\", \"Hello\", \"hi\", \"u\", \"w\", \"u\"}},});\n+ }\n+\n+ @Override\n+ public void setUp() {\n+ addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] {\"B\"}));\n+ }\n+\n+ @Test\n+ public void testDetectSchemaDoubleCP() {\n+ runGetColNamesTest(_columnNames, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testDetectSchemaDoubleSpark() {\n+ runGetColNamesTest(_columnNames, ExecType.SPARK);\n+ }\n+\n+ @SuppressWarnings(\"unchecked\")\n+ private void runGetColNamesTest(String[] columnNames, ExecType et) {\n+ Types.ExecMode platformOld = setExecMode(et);\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ try {\n+ getAndLoadTestConfiguration(TEST_NAME);\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[] {\"-args\", input(\"A\"), String.valueOf(_rows),\n+ Integer.toString(columnNames.length), output(\"B\")};\n+\n+ Types.ValueType[] schema = (Types.ValueType[]) Collections\n+ .nCopies(columnNames.length, Types.ValueType.FP64).toArray(new Types.ValueType[0]);\n+ FrameBlock frame1 = new FrameBlock(schema);\n+ 
frame1.setColumnNames(columnNames);\n+ FrameWriter writer = FrameWriterFactory.createFrameWriter(FileFormat.CSV,\n+ new FileFormatPropertiesCSV(true, \",\", false));\n+\n+ double[][] A = getRandomMatrix(_rows, schema.length, Double.MIN_VALUE, Double.MAX_VALUE, 0.7, 14123);\n+ TestUtils.initFrameData(frame1, A, schema, _rows);\n+ writer.writeFrameToHDFS(frame1, input(\"A\"), _rows, schema.length);\n+\n+ runTest(true, false, null, -1);\n+ FrameBlock frame2 = readDMLFrameFromHDFS(\"B\", FileFormat.BINARY);\n+\n+ // verify output schema\n+ for(int i = 0; i < schema.length; i++) {\n+ Assert\n+ .assertEquals(\"Wrong result: \" + columnNames[i] + \".\", columnNames[i], frame2.get(0, i).toString());\n+ }\n+ }\n+ catch(Exception ex) {\n+ throw new RuntimeException(ex);\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/frame/ColumnNames.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read($1, rows=$2, cols=$3, data_type=\"frame\", format=\"csv\", header=TRUE);\n+R = colnames(X);\n+write(R, $4, format=\"binary\");\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/frame/TypeOf.dml",
"new_path": "src/test/scripts/functions/frame/TypeOf.dml",
"diff": "#-------------------------------------------------------------\nX = read($1, rows=$2, cols=$3, data_type=\"frame\", format=\"csv\");\n-R = typeOf(X);\n+R = typeof(X);\nprint(toString(R))\nwrite(R, $4, format=\"binary\");\n\\ No newline at end of file\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2617] New builtin for obtaining frame column names
New builtin colnames(X) for obtaining a single-row frame holding the
column names by position.
Closes #1020. |
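
A minimal DML usage sketch for the new colnames() builtin, mirroring the test script ColumnNames.dml above; the file name data.csv is a hypothetical placeholder:

```r
# read a CSV with a header row into a frame, then extract its column names
F = read("data.csv", data_type="frame", format="csv", header=TRUE);
R = colnames(F);  # 1 x ncol(F) frame of type STRING
print(toString(R));
```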
49,738 | 15.08.2020 13:20:08 | -7,200 | 41685600ba8370646301ef0997f170c300101cea | New pca builtin function (principal component analysis) | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/pca.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# Principal Component Analysis (PCA) for dimensionality reduction\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# X Matrix --- Input feature matrix\n+# K Int --- Number of reduced dimensions (i.e., columns)\n+# Center Boolean TRUE Indicates whether or not to center the feature matrix\n+# Scale Boolean TRUE Indicates whether or not to scale the feature matrix\n+# ---------------------------------------------------------------------------------------------\n+# Xout Matrix --- Output feature matrix with K columns\n+# Mout Matrix --- Output dominant eigen vectors (can be used for projections)\n+# ---------------------------------------------------------------------------------------------\n+\n+m_pca = function(Matrix[Double] X, Integer K=2, Boolean center=TRUE, Boolean scale=TRUE)\n+ return (Matrix[Double] Xout, Matrix[Double] Mout)\n+{\n+ N = nrow(X);\n+ D = ncol(X);\n+\n+ # perform z-scoring (centering and scaling)\n+ X = scale(X, center, scale);\n+\n+ # co-variance matrix\n+ mu = colSums(X)/N;\n+ C = (t(X) %*% X)/(N-1) - (N/(N-1))*t(mu) %*% mu;\n+\n+ # compute eigen vectors and values\n+ [evalues, evectors] = eigen(C);\n+\n+ decreasing_Idx = order(target=evalues,by=1,decreasing=TRUE,index.return=TRUE);\n+ diagmat = table(seq(1,D),decreasing_Idx);\n+ # sorts eigenvalues by decreasing order\n+ evalues = diagmat %*% evalues;\n+ # sorts eigenvectors column-wise in the order of decreasing eigenvalues\n+ evectors = evectors %*% diagmat;\n+\n+ eval_dominant = evalues[1:K, 1];\n+ evec_dominant = evectors[,1:K];\n+\n+ # Construct new data set by treating computed dominant eigenvectors as the basis vectors\n+ Xout = X %*% evec_dominant;\n+ Mout = evec_dominant;\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"new_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"diff": "@@ -145,6 +145,7 @@ public enum Builtins {\nOUTLIER(\"outlier\", true, false), //TODO parameterize opposite\nOUTLIER_SD(\"outlierBySd\", true),\nOUTLIER_IQR(\"outlierByIQR\", true),\n+ PCA(\"pca\", true),\nPNMF(\"pnmf\", true),\nPPRED(\"ppred\", false),\nPROD(\"prod\", false),\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2619] New pca builtin function (principal component analysis) |
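
A short, hedged usage sketch of the new pca builtin, following the signature in scripts/builtin/pca.dml above; the random input matrix is illustrative only:

```r
X = rand(rows=1000, cols=10, min=0, max=1, seed=7);
# project onto the top-2 principal components;
# Mout holds the dominant eigenvectors for projecting new data
[Xout, Mout] = pca(X=X, K=2, center=TRUE, scale=TRUE);
print(nrow(Xout) + " x " + ncol(Xout));
```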
49,738 | 15.08.2020 14:39:45 | -7,200 | a3e3ea949c6af02914356c430756477b948965ce | Federated tsmm operations (e.g., PCA, lmDS, cor)
* Federated tsmm: support for left federated tsmm over row-partitioned
federated matrices.
* Performance: aggAdd (e.g., in ba+*, uack+, and tsmm) via nary instead
of binary operations. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederationUtils.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederationUtils.java",
"diff": "@@ -29,13 +29,13 @@ import org.apache.sysds.runtime.DMLRuntimeException;\nimport org.apache.sysds.runtime.controlprogram.federated.FederatedRequest.RequestType;\nimport org.apache.sysds.runtime.controlprogram.parfor.util.IDSequence;\nimport org.apache.sysds.runtime.functionobjects.KahanFunction;\n-import org.apache.sysds.runtime.instructions.InstructionUtils;\n+import org.apache.sysds.runtime.functionobjects.Plus;\nimport org.apache.sysds.runtime.instructions.cp.CPOperand;\nimport org.apache.sysds.runtime.instructions.cp.DoubleObject;\nimport org.apache.sysds.runtime.instructions.cp.ScalarObject;\nimport org.apache.sysds.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysds.runtime.matrix.operators.AggregateUnaryOperator;\n-import org.apache.sysds.runtime.matrix.operators.BinaryOperator;\n+import org.apache.sysds.runtime.matrix.operators.SimpleOperator;\npublic class FederationUtils {\nprivate static final IDSequence _idSeq = new IDSequence();\n@@ -58,13 +58,11 @@ public class FederationUtils {\npublic static MatrixBlock aggAdd(Future<FederatedResponse>[] ffr) {\ntry {\n- BinaryOperator bop = InstructionUtils.parseBinaryOperator(\"+\");\n- MatrixBlock ret = (MatrixBlock) (ffr[0].get().getData()[0]);\n- for (int i=1; i<ffr.length; i++) {\n- MatrixBlock tmp = (MatrixBlock) (ffr[i].get().getData()[0]);\n- ret.binaryOperationsInPlace(bop, tmp);\n- }\n- return ret;\n+ SimpleOperator op = new SimpleOperator(Plus.getPlusFnObject());\n+ MatrixBlock[] in = new MatrixBlock[ffr.length];\n+ for(int i=0; i<ffr.length; i++)\n+ in[i] = (MatrixBlock) ffr[i].get().getData()[0];\n+ return MatrixBlock.naryOperations(op, in, new ScalarObject[0], new MatrixBlock());\n}\ncatch(Exception ex) {\nthrow new DMLRuntimeException(ex);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/ComputationFEDInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/ComputationFEDInstruction.java",
"diff": "@@ -37,9 +37,8 @@ public abstract class ComputationFEDInstruction extends FEDInstruction implement\npublic final CPOperand output;\npublic final CPOperand input1, input2, input3;\n- protected ComputationFEDInstruction(FEDType type, Operator op, CPOperand in1, CPOperand in2, CPOperand out,\n- String opcode,\n- String istr) {\n+ protected ComputationFEDInstruction(FEDType type, Operator op,\n+ CPOperand in1, CPOperand in2, CPOperand out, String opcode, String istr) {\nsuper(type, op, opcode, istr);\ninput1 = in1;\ninput2 = in2;\n@@ -47,9 +46,8 @@ public abstract class ComputationFEDInstruction extends FEDInstruction implement\noutput = out;\n}\n- protected ComputationFEDInstruction(FEDType type, Operator op, CPOperand in1, CPOperand in2, CPOperand in3,\n- CPOperand out,\n- String opcode, String istr) {\n+ protected ComputationFEDInstruction(FEDType type, Operator op,\n+ CPOperand in1, CPOperand in2, CPOperand in3, CPOperand out, String opcode, String istr) {\nsuper(type, op, opcode, istr);\ninput1 = in1;\ninput2 = in2;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/FEDInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/FEDInstruction.java",
"diff": "@@ -32,7 +32,8 @@ public abstract class FEDInstruction extends Instruction {\nAppend,\nBinary,\nInit,\n- MultiReturnParameterizedBuiltin\n+ MultiReturnParameterizedBuiltin,\n+ Tsmm,\n}\nprotected final FEDType _fedType;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/FEDInstructionUtils.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/FEDInstructionUtils.java",
"diff": "@@ -76,6 +76,12 @@ public class FEDInstructionUtils {\n}\n}\n}\n+ else if( inst instanceof MMTSJCPInstruction ) {\n+ MMTSJCPInstruction linst = (MMTSJCPInstruction) inst;\n+ MatrixObject mo = ec.getMatrixObject(linst.input1);\n+ if( mo.isFederated() )\n+ return TsmmFEDInstruction.parseInstruction(linst.toString());\n+ }\nreturn inst;\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/TsmmFEDInstruction.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.runtime.instructions.fed;\n+\n+import org.apache.sysds.lops.MMTSJ.MMTSJType;\n+import org.apache.sysds.runtime.DMLRuntimeException;\n+import org.apache.sysds.runtime.controlprogram.caching.MatrixObject;\n+import org.apache.sysds.runtime.controlprogram.context.ExecutionContext;\n+import org.apache.sysds.runtime.controlprogram.federated.FederatedRequest;\n+import org.apache.sysds.runtime.controlprogram.federated.FederatedResponse;\n+import org.apache.sysds.runtime.controlprogram.federated.FederationUtils;\n+import org.apache.sysds.runtime.controlprogram.federated.FederatedRequest.RequestType;\n+import org.apache.sysds.runtime.instructions.InstructionUtils;\n+import org.apache.sysds.runtime.instructions.cp.CPOperand;\n+import org.apache.sysds.runtime.matrix.data.MatrixBlock;\n+\n+import java.util.concurrent.Future;\n+\n+public class TsmmFEDInstruction extends BinaryFEDInstruction {\n+ private final MMTSJType _type;\n+ @SuppressWarnings(\"unused\")\n+ private final int _numThreads;\n+\n+ public TsmmFEDInstruction(CPOperand in, CPOperand out, MMTSJType type, int k, String opcode, String istr) {\n+ super(FEDType.Tsmm, null, in, null, out, opcode, istr);\n+ _type = type;\n+ _numThreads = k;\n+ }\n+\n+ public static TsmmFEDInstruction parseInstruction(String str) {\n+ String[] parts = InstructionUtils.getInstructionPartsWithValueType(str);\n+ String opcode = parts[0];\n+ if(!opcode.equalsIgnoreCase(\"tsmm\"))\n+ throw new DMLRuntimeException(\"TsmmFedInstruction.parseInstruction():: Unknown opcode \" + opcode);\n+\n+ InstructionUtils.checkNumFields(parts, 4);\n+ CPOperand in = new CPOperand(parts[1]);\n+ CPOperand out = new CPOperand(parts[2]);\n+ MMTSJType type = MMTSJType.valueOf(parts[3]);\n+ int k = Integer.parseInt(parts[4]);\n+ return new TsmmFEDInstruction(in, out, type, k, opcode, str);\n+ }\n+\n+ @Override\n+ public void processInstruction(ExecutionContext ec) {\n+ MatrixObject mo1 = ec.getMatrixObject(input1);\n+\n+ if(mo1.isFederated() && _type.isLeft()) { // left tsmm\n+ //construct commands: fed tsmm, retrieve results\n+ FederatedRequest fr1 = FederationUtils.callInstruction(instString, output,\n+ new CPOperand[]{input1}, new long[]{mo1.getFedMapping().getID()});\n+ FederatedRequest fr2 = new FederatedRequest(RequestType.GET_VAR, fr1.getID());\n+\n+ //execute federated operations and aggregate\n+ Future<FederatedResponse>[] tmp = mo1.getFedMapping().execute(fr1, fr2);\n+ MatrixBlock ret = FederationUtils.aggAdd(tmp);\n+ mo1.getFedMapping().cleanup(fr1.getID());\n+ ec.setMatrixOutput(output.getName(), ret);\n+ }\n+ else { //other combinations\n+ throw new DMLRuntimeException(\"Federated Tsmm not supported with the \"\n+ + 
\"following federated objects: \"+mo1.isFederated()+\" \"+_fedType);\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/federated/FederatedPCATest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.federated;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.junit.runner.RunWith;\n+import org.junit.runners.Parameterized;\n+\n+import org.apache.sysds.common.Types;\n+import org.apache.sysds.common.Types.ExecMode;\n+import org.apache.sysds.runtime.meta.MatrixCharacteristics;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+\n+import java.util.Arrays;\n+import java.util.Collection;\n+\n+@RunWith(value = Parameterized.class)\[email protected]\n+public class FederatedPCATest extends AutomatedTestBase {\n+\n+ private final static String TEST_DIR = \"functions/federated/\";\n+ private final static String TEST_NAME = \"FederatedPCATest\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + FederatedPCATest.class.getSimpleName() + \"/\";\n+\n+ private final static int blocksize = 1024;\n+ @Parameterized.Parameter()\n+ public int rows;\n+ @Parameterized.Parameter(1)\n+ public int cols;\n+ @Parameterized.Parameter(2)\n+ public boolean scaleAndShift;\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] {\"Z\"}));\n+ }\n+\n+ @Parameterized.Parameters\n+ public static Collection<Object[]> data() {\n+ // rows have to be even and > 1\n+ return Arrays.asList(new Object[][] {\n+ {10000, 10, false}, {2000, 50, false}, {1000, 100, false},\n+ //TODO support for federated uacmean, uacvar\n+ //{10000, 10, true}, {2000, 50, true}, {1000, 100, true}\n+ });\n+ }\n+\n+ @Test\n+ public void federatedPCASinglenode() {\n+ federatedL2SVM(Types.ExecMode.SINGLE_NODE);\n+ }\n+\n+ @Test\n+ public void federatedPCAHybrid() {\n+ federatedL2SVM(Types.ExecMode.HYBRID);\n+ }\n+\n+ public void federatedL2SVM(Types.ExecMode execMode) {\n+ ExecMode platformOld = setExecMode(execMode);\n+\n+ getAndLoadTestConfiguration(TEST_NAME);\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+\n+ // write input matrices\n+ int halfRows = rows / 2;\n+ // We have two matrices handled by a single federated worker\n+ double[][] X1 = getRandomMatrix(halfRows, cols, 0, 1, 1, 3);\n+ double[][] X2 = getRandomMatrix(halfRows, cols, 0, 1, 1, 7);\n+ writeInputMatrixWithMTD(\"X1\", X1, false, new MatrixCharacteristics(halfRows, cols, blocksize, halfRows * cols));\n+ writeInputMatrixWithMTD(\"X2\", X2, false, new MatrixCharacteristics(halfRows, cols, blocksize, halfRows * cols));\n+\n+ // empty script name because we don't execute any script, just start the worker\n+ fullDMLScriptName = \"\";\n+ int port1 = getRandomAvailablePort();\n+ int 
port2 = getRandomAvailablePort();\n+ Thread t1 = startLocalFedWorker(port1);\n+ Thread t2 = startLocalFedWorker(port2);\n+\n+ TestConfiguration config = availableTestConfigurations.get(TEST_NAME);\n+ loadTestConfiguration(config);\n+ setOutputBuffering(false);\n+\n+ // Run reference dml script with normal matrix\n+ fullDMLScriptName = HOME + TEST_NAME + \"Reference.dml\";\n+ programArgs = new String[] {\"-args\", input(\"X1\"), input(\"X2\"),\n+ String.valueOf(scaleAndShift).toUpperCase(), expected(\"Z\")};\n+ runTest(true, false, null, -1);\n+\n+ // Run actual dml script with federated matrix\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[] {\"-stats\",\n+ \"-nvargs\", \"in_X1=\" + TestUtils.federatedAddress(port1, input(\"X1\")),\n+ \"in_X2=\" + TestUtils.federatedAddress(port2, input(\"X2\")), \"rows=\" + rows, \"cols=\" + cols,\n+ \"scaleAndShift=\" + String.valueOf(scaleAndShift).toUpperCase(), \"out=\" + output(\"Z\")};\n+ runTest(true, false, null, -1);\n+\n+ // compare via files\n+ compareResults(1e-9);\n+ TestUtils.shutdownThreads(t1, t2);\n+\n+ // check for federated operations\n+ Assert.assertTrue(heavyHittersContainsString(\"fed_ba+*\"));\n+ Assert.assertTrue(heavyHittersContainsString(\"fed_uack+\"));\n+ Assert.assertTrue(heavyHittersContainsString(\"fed_tsmm\"));\n+ if( scaleAndShift ) {\n+ Assert.assertTrue(heavyHittersContainsString(\"fed_uacmean\"));\n+ Assert.assertTrue(heavyHittersContainsString(\"fed_uacvar\"));\n+ }\n+\n+ resetExecMode(platformOld);\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/federated/FederatedPCATest.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = federated(addresses=list($in_X1, $in_X2),\n+ ranges=list(list(0, 0), list($rows / 2, $cols), list($rows / 2, 0), list($rows, $cols)))\n+[X2,M] = pca(X=X, K=2, scale=$scaleAndShift, center=$scaleAndShift)\n+write(X2, $out)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/federated/FederatedPCATestReference.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = rbind(read($1), read($2))\n+[X2,M] = pca(X=X, K=2, scale=$3, center=$3)\n+write(X2, $4)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2620] Federated tsmm operations (e.g., PCA, lmDS, cor)
* Federated tsmm: support for left federated tsmm over row-partitioned
federated matrices.
* Performance: aggAdd (e.g., in ba+*, uack+, and tsmm) via nary instead
of binary operations. |
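
A hedged DML sketch of the code path this commit enables, adapted from FederatedPCATest.dml above; worker addresses, paths, and dimensions are placeholders:

```r
# row-partitioned federated matrix across two (hypothetical) workers
X = federated(addresses=list("localhost:8001/tmp/X1", "localhost:8002/tmp/X2"),
  ranges=list(list(0, 0), list(500, 10), list(500, 0), list(1000, 10)));
# t(X) %*% X should compile to a left tsmm, which TsmmFEDInstruction executes
# per worker and aggregates via the nary-plus FederationUtils.aggAdd
R = t(X) %*% X;
print(toString(R));
```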
49,722 | 15.08.2020 14:56:44 | -7,200 | fc0e21059e06d1928020657b3882e986363f7fda | DBScan and dist builtin
Implemented dist for Euclidean distance matrix.
Implemented DBSCAN
Added R dbscan package
Fixed error for similar points and all noise points
Fixed DBSCAN
Updated DBSCAN and added documentation
Added tests for dist and DBScan.
Closes | [
{
"change_type": "MODIFY",
"old_path": "docs/site/builtins-reference.md",
"new_path": "docs/site/builtins-reference.md",
"diff": "@@ -29,7 +29,9 @@ limitations under the License.\n* [DML-Bodied Built-In functions](#dml-bodied-built-in-functions)\n* [`confusionMatrix`-Function](#confusionmatrix-function)\n* [`cvlm`-Function](#cvlm-function)\n+ * [`DBSCAN`-Function](#DBSCAN-function)\n* [`discoverFD`-Function](#discoverFD-function)\n+ * [`dist`-Function](#dist-function)\n* [`glm`-Function](#glm-function)\n* [`gridSearch`-Function](#gridSearch-function)\n* [`hyperband`-Function](#hyperband-function)\n@@ -212,6 +214,37 @@ y = X %*% rand(rows = ncol(X), cols = 1)\n[predict, beta] = cvlm(X = X, y = y, k = 4)\n```\n+## `DBSCAN`-Function\n+\n+The dbscan() implements the DBSCAN Clustering algorithm using Euclidian distance.\n+\n+### Usage\n+\n+```r\n+Y = dbscan(X = X, eps = 2.5, minPts = 5)\n+```\n+\n+### Arguments\n+\n+| Name | Type | Default | Description |\n+| :--------- | :-------------- | :--------- | :---------- |\n+| X | Matrix[Double] | required | The input Matrix to do DBSCAN on. |\n+| eps | Double | `0.5` | Maximum distance between two points for one to be considered reachable for the other. |\n+| minPts | Int | `5` | Number of points in a neighborhood for a point to be considered as a core point (includes the point itself). |\n+\n+### Returns\n+\n+| Type | Description |\n+| :-----------| :---------- |\n+| Matrix[Integer] | The mapping of records to clusters |\n+\n+### Example\n+\n+```r\n+X = rand(rows=1780, cols=180, min=1, max=20)\n+dbscan(X = X, eps = 2.5, minPts = 360)\n+```\n+\n## `discoverFD`-Function\nThe `discoverFD`-function finds the functional dependencies.\n@@ -236,6 +269,34 @@ discoverFD(X, Mask, threshold)\n| :----- | :---------- |\n| Double | matrix of functional dependencies |\n+## `dist`-Function\n+\n+The `dist`-function is used to compute Euclidian distances between N d-dimensional points.\n+\n+### Usage\n+\n+```r\n+dist(X)\n+```\n+\n+### Arguments\n+\n+| Name | Type | Default | Description |\n+| :--- | :------------- | :------- | :---------- |\n+| X | Matrix[Double] | required | (n x d) matrix of d-dimensional points |\n+\n+### Returns\n+\n+| Type | Description |\n+| :------------- | :---------- |\n+| Matrix[Double] | (n x n) symmetric matrix of Euclidian distances |\n+\n+### Example\n+\n+```r\n+X = rand (rows = 5, cols = 5)\n+Y = dist(X)\n+```\n## `glm`-Function\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/dbscan.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+#\n+# Implements the DBSCAN clustering algorithm using Euclidian distance matrix\n+#\n+# INPUT PARAMETERS:\n+# ----------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ----------------------------------------------------------------------------\n+# X Matrix[Double] --- The input Matrix to do DBSCAN on.\n+# eps Double 0.5 Maximum distance between two points for one to be considered reachable for the other.\n+# minPts Int 5 Number of points in a neighborhood for a point to be considered as a core point (includes the point itself).\n+#\n+\n+m_dbscan = function (Matrix[double] X, Double eps = 0.5, Integer minPts = 5)\n+ return (Matrix[double] clusterMembers)\n+{\n+ #check input parameter assertions\n+ if(minPts < 0) { stop(\"DBSCAN: Stopping due to invalid inputs: minPts should be greater than 0\"); }\n+ if(eps < 0) { stop(\"DBSCAN: Stopping due to invalid inputs: Epsilon (eps) should be greater than 0\"); }\n+\n+ UNASSIGNED = 0;\n+\n+ num_records = nrow(X);\n+ num_features = ncol(X);\n+\n+ neighbors = dist(X);\n+\n+ #find same pts and set their distance to the smallest double representation\n+ neighbors = replace(target = neighbors, pattern = 0, replacement = 2.225e-307)\n+ neighbors = neighbors - diag(diag(neighbors));\n+\n+ # neighbors within eps\n+ withinEps = ((neighbors <= eps) * (0 < neighbors));\n+ corePts = rowSums(withinEps) + 1 >= minPts;\n+\n+ clusterMembers = matrix(UNASSIGNED, num_records, 1);\n+\n+ if (sum(corePts) != 0) {\n+ # leave only density reachable pts\n+ neighbors = (neighbors * corePts * withinEps) > 0;\n+\n+ # border pts of multiple clusters\n+ border = neighbors * (t(corePts) == 0 & colSums(neighbors) > 1) * seq(num_records, 1);\n+ border = (border - colMaxs(border)) == 0;\n+ neighbors = neighbors * border;\n+\n+ adjacency = (neighbors + t(neighbors)) > 0;\n+\n+ clusterMembers = components(G=adjacency, verbose=FALSE);\n+ # noise to 0\n+ clusterMembers = clusterMembers * (rowSums(adjacency) > 0);\n+ }\n+}\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/dist.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+\n+# Returns Euclidian distance matrix (distances between N n-dimensional points)\n+\n+m_dist = function(Matrix[Double] X) return (Matrix[Double] Y) {\n+ G = X %*% t(X);\n+ I = matrix(1, rows = nrow(G), cols = ncol(G));\n+ Y = -2 * (G) + t(I %*% diag(diag(G))) + t(diag(diag(G)) %*% I);\n+ Y = sqrt(Y);\n+ Y = replace(target = Y, pattern=0/0, replacement = 0);\n+}\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"new_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"diff": "@@ -84,9 +84,11 @@ public enum Builtins {\nCUMSUMPROD(\"cumsumprod\", false),\nCONFUSIONMATRIX(\"confusionMatrix\", true),\nCOR(\"cor\", true),\n+ DBSCAN(\"dbscan\", true),\nDETECTSCHEMA(\"detectSchema\", false),\nDIAG(\"diag\", false),\nDISCOVER_FD(\"discoverFD\", true),\n+ DIST(\"dist\", true),\nDROP_INVALID_TYPE(\"dropInvalidType\", false),\nDROP_INVALID_LENGTH(\"dropInvalidLength\", false),\nEIGEN(\"eigen\", false, ReturnType.MULTI_RETURN),\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinDBSCANTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.builtin;\n+\n+import com.google.common.collect.BiMap;\n+import com.google.common.collect.HashBiMap;\n+import org.apache.sysds.common.Types.ExecMode;\n+import org.apache.sysds.lops.LopProperties.ExecType;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue.CellIndex;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Test;\n+import java.util.HashMap;\n+\n+public class BuiltinDBSCANTest extends AutomatedTestBase\n+{\n+ private final static String TEST_NAME = \"dbscan\";\n+ private final static String TEST_DIR = \"functions/builtin/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + BuiltinDBSCANTest.class.getSimpleName() + \"/\";\n+\n+ private final static double eps = 1e-3;\n+ private final static int rows = 1700;\n+ private final static double spDense = 0.99;\n+\n+ private final static double epsDBSCAN = 1;\n+ private final static int minPts = 5;\n+\n+ @Override\n+ public void setUp() { addTestConfiguration(TEST_NAME,new TestConfiguration(TEST_CLASS_DIR, TEST_NAME,new String[]{\"B\"})); }\n+\n+ @Test\n+ public void testDBSCANDefaultCP() { runDBSCAN(true, ExecType.CP); }\n+\n+ @Test\n+ public void testDBSCANDefaultSP() { runDBSCAN(true, ExecType.SPARK); }\n+\n+ private void runDBSCAN(boolean defaultProb, ExecType instType)\n+ {\n+ ExecMode platformOld = setExecMode(instType);\n+\n+ try\n+ {\n+ loadTestConfiguration(getTestConfiguration(TEST_NAME));\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[]{\"-nvargs\", \"X=\" + input(\"A\"), \"Y=\" + output(\"B\"), \"eps=\" + epsDBSCAN, \"minPts=\" + minPts};\n+ fullRScriptName = HOME + TEST_NAME + \".R\";\n+ rCmd = getRCmd(inputDir(), Double.toString(epsDBSCAN), Integer.toString(minPts), expectedDir());\n+\n+ //generate actual dataset\n+ double[][] A = getNonZeroRandomMatrix(rows, 3, -10, 10, 7);\n+ writeInputMatrixWithMTD(\"A\", A, true);\n+\n+ runTest(true, false, null, -1);\n+ runRScript(true);\n+\n+ //compare matrices\n+ HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(\"B\");\n+ HashMap<CellIndex, Double> rfile = readRMatrixFromFS(\"B\");\n+\n+ //map cluster ids\n+ //NOTE: border points that are reachable from more than 1 cluster\n+ // are assigned to lowest point id, not cluster id -> can fail in this case, but it's still correct\n+ BiMap<Double, Double> merged = HashBiMap.create();\n+ rfile.forEach((key, value) -> merged.put(value, dmlfile.get(key)));\n+ dmlfile.replaceAll((k, v) -> merged.inverse().get(v));\n+\n+ TestUtils.compareMatrices(dmlfile, rfile, 
eps, \"Stat-DML\", \"Stat-R\");\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ }\n+ }\n+}\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinDistTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.builtin;\n+\n+import org.apache.sysds.common.Types.ExecMode;\n+import org.apache.sysds.lops.LopProperties.ExecType;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue.CellIndex;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Test;\n+\n+import java.util.HashMap;\n+\n+public class BuiltinDistTest extends AutomatedTestBase\n+{\n+ private final static String TEST_NAME = \"dist\";\n+ private final static String TEST_DIR = \"functions/builtin/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + BuiltinDistTest.class.getSimpleName() + \"/\";\n+\n+ private final static double eps = 1e-3;\n+ private final static int rows = 1765;\n+ private final static double spDense = 0.99;\n+\n+ @Override\n+ public void setUp() {\n+ addTestConfiguration(TEST_NAME,new TestConfiguration(TEST_CLASS_DIR, TEST_NAME,new String[]{\"B\"}));\n+ }\n+\n+ @Test\n+ public void testDistDefaultCP() { runDist(true, ExecType.CP); }\n+\n+ @Test\n+ public void testDistSP() {\n+ runDist(true, ExecType.SPARK);\n+ }\n+\n+ private void runDist(boolean defaultProb, ExecType instType)\n+ {\n+ ExecMode platformOld = setExecMode(instType);\n+\n+ try\n+ {\n+ loadTestConfiguration(getTestConfiguration(TEST_NAME));\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[]{\"-args\", input(\"A\"), output(\"B\") };\n+ fullRScriptName = HOME + TEST_NAME + \".R\";\n+ rCmd = \"Rscript\" + \" \" + fullRScriptName + \" \" + inputDir() + \" \" + expectedDir();\n+\n+ //generate actual dataset\n+ double[][] A = getRandomMatrix(rows, 10, -1, 1, spDense, 7);\n+ writeInputMatrixWithMTD(\"A\", A, true);\n+\n+ runTest(true, false, null, -1);\n+ runRScript(true);\n+\n+ //compare matrices\n+ HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(\"B\");\n+ HashMap<CellIndex, Double> rfile = readRMatrixFromFS(\"B\");\n+ TestUtils.compareMatrices(dmlfile, rfile, eps, \"Stat-DML\", \"Stat-R\");\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/dbscan.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+args<-commandArgs(TRUE)\n+library(\"Matrix\")\n+options(digits=22)\n+library(\"dbscan\")\n+\n+X = as.matrix(readMM(paste(args[1], \"A.mtx\", sep=\"\")));\n+eps = as.double(args[2]);\n+minPts = as.integer(args[3]);\n+Ys = dbscan(X, eps, minPts);\n+Y = as.matrix(Ys$cluster, FALSE);\n+writeMM(as(Y, \"CsparseMatrix\"), paste(args[4], \"B\", sep=\"\"));\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/dbscan.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read($X);\n+eps = as.double($eps);\n+minPts = as.integer($minPts);\n+Y = dbscan(X, eps, minPts);\n+write(Y, $Y);\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/dist.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+X = as.matrix(readMM(paste(args[1], \"A.mtx\", sep=\"\")));\n+R = round(as.matrix(dist(X)), 3);\n+diag(R) = 0;\n+writeMM(as(R, \"CsparseMatrix\"), paste(args[2], \"B\", sep=\"\"));\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/dist.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read($1);\n+Y = dist(X);\n+write(Y, $2);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2621] DBScan and dist builtin
- Implemented dist for Euclidean distance matrix.
- Implemented DBSCAN
- Added R dbscan package
- Fixed error for similar points and all noise points
- Fixed DBSCAN
- Updated DBSCAN and added documentation
- Added tests for dist and DBScan.
Closes #1003. |
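A note on the dist builtin above: it produces a symmetric Euclidean distance matrix with a zero diagonal, matching the R reference (`round(as.matrix(dist(X)), 3)` with `diag(R) = 0`). A minimal stand-alone Java sketch of that computation (not the SystemDS kernel, just the definition it implements):

```java
// Euclidean distance matrix over the row vectors of X; diagonal stays 0.
// Plain O(n^2 * d) sketch of what a dist(X) builtin computes.
public class DistSketch {
    public static double[][] dist(double[][] X) {
        int n = X.length;
        double[][] D = new double[n][n];
        for (int i = 0; i < n; i++) {
            for (int j = i + 1; j < n; j++) {
                double s = 0;
                for (int k = 0; k < X[i].length; k++) {
                    double d = X[i][k] - X[j][k];
                    s += d * d;
                }
                D[i][j] = D[j][i] = Math.sqrt(s); // symmetric by construction
            }
        }
        return D;
    }
    public static void main(String[] args) {
        double[][] X = {{0, 0}, {3, 4}};
        System.out.println(dist(X)[0][1]); // prints 5.0
    }
}
```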
49,706 | 15.08.2020 15:08:10 | -7,200 | 6029c07a3ddce313e27d914a9c7dffb53b48b4cc | [MINOR] Federated ops count in stats
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedData.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedData.java",
"diff": "@@ -36,6 +36,7 @@ import org.apache.sysds.common.Types;\nimport org.apache.sysds.conf.DMLConfig;\nimport org.apache.sysds.runtime.DMLRuntimeException;\nimport org.apache.sysds.runtime.controlprogram.federated.FederatedRequest.RequestType;\n+import org.apache.sysds.utils.Statistics;\nimport java.net.InetSocketAddress;\nimport java.util.concurrent.Future;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedRequest.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedRequest.java",
"diff": "@@ -25,6 +25,7 @@ import java.util.Arrays;\nimport java.util.List;\nimport org.apache.sysds.api.DMLScript;\n+import org.apache.sysds.utils.Statistics;\npublic class FederatedRequest implements Serializable {\nprivate static final long serialVersionUID = 5946781306963870394L;\n@@ -57,6 +58,7 @@ public class FederatedRequest implements Serializable {\n}\npublic FederatedRequest(RequestType method, long id, List<Object> data) {\n+ Statistics.incFederated(method);\n_method = method;\n_id = id;\n_data = data;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/utils/Statistics.java",
"new_path": "src/main/java/org/apache/sysds/utils/Statistics.java",
"diff": "@@ -37,6 +37,7 @@ import org.apache.sysds.conf.ConfigurationManager;\nimport org.apache.sysds.hops.OptimizerUtils;\nimport org.apache.sysds.runtime.controlprogram.caching.CacheStatistics;\nimport org.apache.sysds.runtime.controlprogram.context.SparkExecutionContext;\n+import org.apache.sysds.runtime.controlprogram.federated.FederatedRequest.RequestType;\nimport org.apache.sysds.runtime.instructions.Instruction;\nimport org.apache.sysds.runtime.instructions.InstructionUtils;\nimport org.apache.sysds.runtime.instructions.cp.FunctionCallCPInstruction;\n@@ -135,6 +136,13 @@ public class Statistics\nprivate static final LongAdder lTotalLix = new LongAdder();\nprivate static final LongAdder lTotalLixUIP = new LongAdder();\n+ // Federated stats\n+ private static final LongAdder federatedReadCount = new LongAdder();\n+ private static final LongAdder federatedPutCount = new LongAdder();\n+ private static final LongAdder federatedGetCount = new LongAdder();\n+ private static final LongAdder federatedExecuteInstructionCount = new LongAdder();\n+ private static final LongAdder federatedExecuteUDFCount = new LongAdder();\n+\nprivate static LongAdder numNativeFailures = new LongAdder();\npublic static LongAdder numNativeLibMatrixMultCalls = new LongAdder();\npublic static LongAdder numNativeConv2dCalls = new LongAdder();\n@@ -376,6 +384,28 @@ public class Statistics\nparforMergeTime += time;\n}\n+ public static synchronized void incFederated(RequestType rqt){\n+ switch (rqt) {\n+ case READ_VAR:\n+ federatedReadCount.increment();\n+ break;\n+ case PUT_VAR:\n+ federatedPutCount.increment();\n+ break;\n+ case GET_VAR:\n+ federatedGetCount.increment();\n+ break;\n+ case EXEC_INST:\n+ federatedExecuteInstructionCount.increment();\n+ break;\n+ case EXEC_UDF:\n+ federatedExecuteUDFCount.increment();\n+ break;\n+ default:\n+ break;\n+ }\n+ }\n+\npublic static void startCompileTimer() {\nif( DMLScript.STATISTICS )\ncompileStartTime = System.nanoTime();\n@@ -989,6 +1019,15 @@ public class Statistics\nsb.append(\"ParFor result merge time:\\t\" + String.format(\"%.3f\", ((double)getParforMergeTime())/1000) + \" sec.\\n\");\nsb.append(\"ParFor total update in-place:\\t\" + lTotalUIPVar + \"/\" + lTotalLixUIP + \"/\" + lTotalLix + \"\\n\");\n}\n+ if( federatedReadCount.longValue() > 0){\n+ sb.append(\"Federated (Reads,Puts,Gets) :\\t(\" +\n+ federatedReadCount.longValue() + \",\" +\n+ federatedPutCount.longValue() + \",\" +\n+ federatedGetCount.longValue() + \")\\n\");\n+ sb.append(\"Federated Execute (In,UDF) :\\t(\" +\n+ federatedExecuteInstructionCount.longValue() + \",\" +\n+ federatedExecuteUDFCount.longValue() + \")\\n\");\n+ }\nsb.append(\"Total JIT compile time:\\t\\t\" + ((double)getJITCompileTime())/1000 + \" sec.\\n\");\nsb.append(\"Total JVM GC count:\\t\\t\" + getJVMgcCount() + \".\\n\");\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Federated ops count in stats
Closes #1022. |
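The Statistics change above keeps one `LongAdder` per federated request type so concurrent request handlers can count without lock contention; `LongAdder` maintains per-thread cells and folds them on `sum()`. A self-contained sketch of the pattern, where the enum is a simplified stand-in, not the actual `RequestType`:

```java
import java.util.concurrent.atomic.LongAdder;

public class FedStatsSketch {
    enum RequestType { READ_VAR, PUT_VAR, GET_VAR, EXEC_INST, EXEC_UDF }

    // one contention-free counter per request type
    private static final LongAdder[] COUNTS = new LongAdder[RequestType.values().length];
    static {
        for (int i = 0; i < COUNTS.length; i++)
            COUNTS[i] = new LongAdder();
    }

    public static void inc(RequestType rqt) {
        COUNTS[rqt.ordinal()].increment(); // no lock, scales with many threads
    }

    public static long get(RequestType rqt) {
        return COUNTS[rqt.ordinal()].sum(); // folds the per-thread cells
    }

    public static void main(String[] args) {
        inc(RequestType.GET_VAR);
        inc(RequestType.GET_VAR);
        System.out.println(get(RequestType.GET_VAR)); // 2
    }
}
```

The patch itself dispatches through a `synchronized` switch; the array indexing above is only a compact way to show the same counting idea.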
49,738 | 15.08.2020 16:01:47 | -7,200 | 351a2e470a94d44409f6c9c69e77ab55721677fa | [MINOR] Various cleanups (replace correctness, imports, warnings) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/hops/codegen/SpoofCompiler.java",
"new_path": "src/main/java/org/apache/sysds/hops/codegen/SpoofCompiler.java",
"diff": "@@ -30,8 +30,6 @@ import java.util.Map.Entry;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n-import org.apache.log4j.Level;\n-import org.apache.log4j.Logger;\nimport org.apache.sysds.api.DMLScript;\nimport org.apache.sysds.common.Types.ExecMode;\nimport org.apache.sysds.common.Types.OpOp1;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/hops/cost/CostEstimationWrapper.java",
"new_path": "src/main/java/org/apache/sysds/hops/cost/CostEstimationWrapper.java",
"diff": "@@ -23,8 +23,6 @@ import java.util.HashMap;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n-import org.apache.log4j.Level;\n-import org.apache.log4j.Logger;\nimport org.apache.sysds.runtime.DMLRuntimeException;\nimport org.apache.sysds.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysds.runtime.controlprogram.Program;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/hops/rewrite/ProgramRewriter.java",
"new_path": "src/main/java/org/apache/sysds/hops/rewrite/ProgramRewriter.java",
"diff": "@@ -22,8 +22,6 @@ package org.apache.sysds.hops.rewrite;\nimport java.util.ArrayList;\nimport java.util.List;\n-import org.apache.log4j.Level;\n-import org.apache.log4j.Logger;\nimport org.apache.sysds.api.DMLScript;\nimport org.apache.sysds.conf.ConfigurationManager;\nimport org.apache.sysds.conf.CompilerConfig.ConfigType;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedData.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedData.java",
"diff": "@@ -36,7 +36,6 @@ import org.apache.sysds.common.Types;\nimport org.apache.sysds.conf.DMLConfig;\nimport org.apache.sysds.runtime.DMLRuntimeException;\nimport org.apache.sysds.runtime.controlprogram.federated.FederatedRequest.RequestType;\n-import org.apache.sysds.utils.Statistics;\nimport java.net.InetSocketAddress;\nimport java.util.concurrent.Future;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRewriteReuse.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRewriteReuse.java",
"diff": "@@ -26,8 +26,6 @@ import java.util.Map;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n-import org.apache.log4j.Level;\n-import org.apache.log4j.Logger;\nimport org.apache.sysds.api.DMLScript;\nimport org.apache.sysds.common.Types.AggOp;\nimport org.apache.sysds.common.Types.Direction;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/matrix/data/LibMatrixCUDA.java",
"new_path": "src/main/java/org/apache/sysds/runtime/matrix/data/LibMatrixCUDA.java",
"diff": "@@ -21,8 +21,6 @@ package org.apache.sysds.runtime.matrix.data;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n-import org.apache.log4j.Level;\n-import org.apache.log4j.Logger;\nimport org.apache.sysds.api.DMLScript;\nimport org.apache.sysds.runtime.DMLRuntimeException;\nimport org.apache.sysds.runtime.controlprogram.caching.MatrixObject;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/matrix/data/LibMatrixCountDistinct.java",
"new_path": "src/main/java/org/apache/sysds/runtime/matrix/data/LibMatrixCountDistinct.java",
"diff": "@@ -28,8 +28,6 @@ import java.util.Set;\nimport org.apache.commons.lang.NotImplementedException;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n-import org.apache.log4j.Level;\n-import org.apache.log4j.Logger;\nimport org.apache.sysds.api.DMLException;\nimport org.apache.sysds.runtime.DMLRuntimeException;\nimport org.apache.sysds.runtime.matrix.operators.CountDistinctOperator;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/matrix/data/MatrixBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/matrix/data/MatrixBlock.java",
"diff": "@@ -637,15 +637,16 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n}\npublic boolean containsValue(double pattern) {\n+ //fast paths: infer from meta data only\nif(isEmptyBlock(true))\nreturn pattern==0;\n+ if( nonZeros < getLength() && pattern == 0 )\n+ return true;\n//make a pass over the data to determine if it includes the\n//pattern, with early abort as soon as the pattern is found\nboolean NaNpattern = Double.isNaN(pattern);\nif( isInSparseFormat() ) {\n- if( nonZeros < getLength() && pattern == 0 )\n- return true;\nSparseBlock sb = getSparseBlock();\nfor(int i=0; i<rlen; i++) {\nif( sb.isEmpty(i) ) continue;\n@@ -661,7 +662,8 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nDenseBlock db = getDenseBlock();\nfor(int i=0; i<rlen; i++) {\ndouble[] vals = db.values(i);\n- for(int j=0; j<clen; j++)\n+ int pos = db.pos(i);\n+ for(int j=pos; j<pos+clen; j++)\nif(vals[j]==pattern || (NaNpattern && Double.isNaN(vals[j])))\nreturn true;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/utils/NativeHelper.java",
"new_path": "src/main/java/org/apache/sysds/utils/NativeHelper.java",
"diff": "@@ -23,8 +23,6 @@ import java.io.IOException;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n-import org.apache.log4j.Level;\n-import org.apache.log4j.Logger;\nimport org.apache.sysds.conf.ConfigurationManager;\nimport org.apache.sysds.conf.DMLConfig;\nimport org.apache.sysds.hops.OptimizerUtils;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinDBSCANTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinDBSCANTest.java",
"diff": "@@ -38,7 +38,7 @@ public class BuiltinDBSCANTest extends AutomatedTestBase\nprivate final static double eps = 1e-3;\nprivate final static int rows = 1700;\n- private final static double spDense = 0.99;\n+ //private final static double spDense = 0.99;\nprivate final static double epsDBSCAN = 1;\nprivate final static int minPts = 5;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Various cleanups (replace correctness, imports, warnings) |
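The `containsValue` fix in the MatrixBlock diff above hoists a metadata shortcut ahead of both the sparse and dense scans: if the tracked non-zero count is smaller than the number of cells, at least one cell is zero, so searching for the pattern 0 needs no scan at all; it also makes the dense scan honor the row offset `db.pos(i)`. A stand-alone sketch of the shortcut, with a plain array standing in for the block:

```java
import java.util.Arrays;

public class ContainsValueSketch {
    final double[] values; // dense cells
    final long nonZeros;   // maintained meta data

    ContainsValueSketch(double[] values) {
        this.values = values;
        this.nonZeros = Arrays.stream(values).filter(v -> v != 0).count();
    }

    boolean containsValue(double pattern) {
        // fast path: infer from meta data only
        if (nonZeros < values.length && pattern == 0)
            return true; // some cell must be zero
        boolean nan = Double.isNaN(pattern);
        for (double v : values) // early abort on first match
            if (v == pattern || (nan && Double.isNaN(v)))
                return true;
        return false;
    }

    public static void main(String[] args) {
        // true, decided without scanning the data
        System.out.println(new ContainsValueSketch(new double[]{1, 0, 3}).containsValue(0));
    }
}
```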
49,738 | 16.08.2020 17:30:03 | -7,200 | 854b4e94f0e8f4c8b8e0f2867558cb90e4e8e552 | [SYSTEMDS-2549,2624] Fix federated binary matrix-vector, var cleanup
This patch fixes two correctness issues related to (1) cleanup of
federated matrices, and (2) federated binary matrix-row vector
operators. Furthermore, this also includes a new federated Kmeans test
and some minor fixes for row aggregates, and improvements of federated
matrix multiplications. | [
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/kmeans.dml",
"new_path": "scripts/builtin/kmeans.dml",
"diff": "@@ -160,7 +160,7 @@ m_kmeans = function(Matrix[Double] X, Integer k = 10, Integer runs = 10, Integer\nC_old = C; C = C_new;\n}\n- if(is_verbose == TRUE)\n+ if(is_verbose)\nprint (\"Run \" + run_index + \", Iteration \" + iter_count + \": Terminated with code = \"\n+ term_code + \", Centroid WCSS = \" + wcss);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/context/ExecutionContext.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/context/ExecutionContext.java",
"diff": "@@ -59,9 +59,7 @@ import org.apache.sysds.utils.Statistics;\nimport java.util.ArrayList;\nimport java.util.Arrays;\n-import java.util.HashSet;\nimport java.util.List;\n-import java.util.Set;\nimport java.util.stream.Collectors;\npublic class ExecutionContext {\n@@ -73,7 +71,6 @@ public class ExecutionContext {\n//symbol table\nprotected LocalVariableMap _variables;\nprotected boolean _autoCreateVars;\n- protected Set<String> _guardedFiles = new HashSet<>();\n//lineage map, cache, prepared dedup blocks\nprotected Lineage _lineage;\n@@ -135,10 +132,6 @@ public class ExecutionContext {\n_autoCreateVars = flag;\n}\n- public void addGuardedFilename(String fname) {\n- _guardedFiles.add(fname);\n- }\n-\n/**\n* Get the i-th GPUContext\n* @param index index of the GPUContext\n@@ -758,7 +751,7 @@ public class ExecutionContext {\n//compute ref count only if matrix cleanup actually necessary\nif ( mo.isCleanupEnabled() && !getVariables().hasReferences(mo) ) {\nmo.clearData(); //clean cached data\n- if( fileExists && !_guardedFiles.contains(mo.getFileName()) ) {\n+ if( fileExists ) {\nHDFSTool.deleteFileIfExistOnHDFS(mo.getFileName());\nHDFSTool.deleteFileIfExistOnHDFS(mo.getFileName()+\".mtd\");\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedRange.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedRange.java",
"diff": "@@ -41,8 +41,12 @@ public class FederatedRange implements Comparable<FederatedRange> {\n* @param other the <code>FederatedRange</code> to copy\n*/\npublic FederatedRange(FederatedRange other) {\n- _beginDims = other._beginDims.clone();\n- _endDims = other._endDims.clone();\n+ this(other._beginDims.clone(), other._endDims.clone());\n+ }\n+\n+ public FederatedRange(FederatedRange other, long clen) {\n+ this(other._beginDims.clone(), other._endDims.clone());\n+ _endDims[1] = clen;\n}\npublic void setBeginDim(int dim, long value) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"diff": "@@ -181,7 +181,7 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\n//TODO spawn async load of data, otherwise on first access\n_ec.setVariable(String.valueOf(id), cd);\n- _ec.addGuardedFilename(filename);\n+ cd.enableCleanup(false); //guard against deletion\nif (dataType == Types.DataType.FRAME) {\nFrameObject frameObject = (FrameObject) cd;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederationMap.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederationMap.java",
"diff": "@@ -150,6 +150,14 @@ public class FederationMap\nreturn new FederationMap(id, map);\n}\n+ public FederationMap copyWithNewID(long id, long clen) {\n+ Map<FederatedRange, FederatedData> map = new TreeMap<>();\n+ //TODO handling of file path, but no danger as never written\n+ for( Entry<FederatedRange, FederatedData> e : _fedMap.entrySet() )\n+ map.put(new FederatedRange(e.getKey(), clen), new FederatedData(e.getValue(), id));\n+ return new FederationMap(id, map);\n+ }\n+\npublic FederationMap rbind(long offset, FederationMap that) {\nfor( Entry<FederatedRange, FederatedData> e : that._fedMap.entrySet() ) {\n_fedMap.put(\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/AggregateBinaryFEDInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/AggregateBinaryFEDInstruction.java",
"diff": "@@ -66,13 +66,22 @@ public class AggregateBinaryFEDInstruction extends BinaryFEDInstruction {\nFederatedRequest fr1 = mo1.getFedMapping().broadcast(mo2);\nFederatedRequest fr2 = FederationUtils.callInstruction(instString, output,\nnew CPOperand[]{input1, input2}, new long[]{mo1.getFedMapping().getID(), fr1.getID()});\n+ if( mo2.getNumColumns() == 1 ) { //MV\nFederatedRequest fr3 = new FederatedRequest(RequestType.GET_VAR, fr2.getID());\n//execute federated operations and aggregate\nFuture<FederatedResponse>[] tmp = mo1.getFedMapping().execute(fr1, fr2, fr3);\nMatrixBlock ret = FederationUtils.rbind(tmp);\nmo1.getFedMapping().cleanup(fr1.getID(), fr2.getID());\nec.setMatrixOutput(output.getName(), ret);\n- //TODO should remain federated matrix (no need for agg)\n+ }\n+ else { //MM\n+ //execute federated operations and aggregate\n+ mo1.getFedMapping().execute(fr1, fr2);\n+ mo1.getFedMapping().cleanup(fr1.getID());\n+ MatrixObject out = ec.getMatrixObject(output);\n+ out.getDataCharacteristics().set(mo1.getNumRows(), mo2.getNumColumns(), (int)mo1.getBlocksize());\n+ out.setFedMapping(mo1.getFedMapping().copyWithNewID(fr2.getID(), mo2.getNumColumns()));\n+ }\n}\n//#2 vector - federated matrix multiplication\nelse if (mo2.isFederated()) {// VM + MM\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/BinaryMatrixMatrixFEDInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/BinaryMatrixMatrixFEDInstruction.java",
"diff": "@@ -45,13 +45,23 @@ public class BinaryMatrixMatrixFEDInstruction extends BinaryFEDInstruction\n}\n//matrix-matrix binary operations -> lhs fed input -> fed output\n+ FederatedRequest fr2 = null;\n+ if(mo2.getNumRows() > 1 && mo2.getNumColumns() == 1 ) { //MV row vector\n+ FederatedRequest[] fr1 = mo1.getFedMapping().broadcastSliced(mo2, false);\n+ fr2 = FederationUtils.callInstruction(instString, output, new CPOperand[]{input1, input2},\n+ new long[]{mo1.getFedMapping().getID(), fr1[0].getID()});\n+ //execute federated instruction and cleanup intermediates\n+ mo1.getFedMapping().execute(fr1, fr2);\n+ mo1.getFedMapping().cleanup(fr1[0].getID());\n+ }\n+ else { //MM or MV col vector\nFederatedRequest fr1 = mo1.getFedMapping().broadcast(mo2);\n- FederatedRequest fr2 = FederationUtils.callInstruction(instString, output,\n- new CPOperand[]{input1, input2}, new long[]{mo1.getFedMapping().getID(), fr1.getID()});\n-\n+ fr2 = FederationUtils.callInstruction(instString, output, new CPOperand[]{input1, input2},\n+ new long[]{mo1.getFedMapping().getID(), fr1.getID()});\n//execute federated instruction and cleanup intermediates\nmo1.getFedMapping().execute(fr1, fr2);\nmo1.getFedMapping().cleanup(fr1.getID());\n+ }\n//derive new fed mapping for output\nMatrixObject out = ec.getMatrixObject(output);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/meta/DataCharacteristics.java",
"new_path": "src/main/java/org/apache/sysds/runtime/meta/DataCharacteristics.java",
"diff": "@@ -31,7 +31,7 @@ public abstract class DataCharacteristics implements Serializable {\nprotected int _blocksize;\n- public DataCharacteristics set(long nr, long nc, int len) {\n+ public DataCharacteristics set(long nr, long nc, int blen) {\nthrow new DMLRuntimeException(\"DataCharacteristics.set(long, long, int): should never get called in the base class\");\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/federated/FederatedKmeansTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.federated;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.junit.runner.RunWith;\n+import org.junit.runners.Parameterized;\n+\n+import org.apache.sysds.common.Types;\n+import org.apache.sysds.common.Types.ExecMode;\n+import org.apache.sysds.runtime.meta.MatrixCharacteristics;\n+import org.apache.sysds.runtime.util.HDFSTool;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+\n+import java.util.Arrays;\n+import java.util.Collection;\n+\n+@RunWith(value = Parameterized.class)\[email protected]\n+public class FederatedKmeansTest extends AutomatedTestBase {\n+\n+ private final static String TEST_DIR = \"functions/federated/\";\n+ private final static String TEST_NAME = \"FederatedKMeansTest\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + FederatedKmeansTest.class.getSimpleName() + \"/\";\n+\n+ private final static int blocksize = 1024;\n+ @Parameterized.Parameter()\n+ public int rows;\n+ @Parameterized.Parameter(1)\n+ public int cols;\n+ @Parameterized.Parameter(2)\n+ public int runs;\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] {\"Z\"}));\n+ }\n+\n+ @Parameterized.Parameters\n+ public static Collection<Object[]> data() {\n+ // rows have to be even and > 1\n+ return Arrays.asList(new Object[][] {\n+ {10000, 10, 1}, {2000, 50, 1}, {1000, 100, 1},\n+ //TODO support for multi-threaded federated interactions\n+ //{10000, 10, 16}, {2000, 50, 16}, {1000, 100, 16}, //concurrent requests\n+ });\n+ }\n+\n+ @Test\n+ public void federatedKmeansSinglenode() {\n+ federatedKmeans(Types.ExecMode.SINGLE_NODE);\n+ }\n+\n+ @Test\n+ public void federatedKmeansHybrid() {\n+ federatedKmeans(Types.ExecMode.HYBRID);\n+ }\n+\n+ public void federatedKmeans(Types.ExecMode execMode) {\n+ ExecMode platformOld = setExecMode(execMode);\n+\n+ getAndLoadTestConfiguration(TEST_NAME);\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+\n+ // write input matrices\n+ int halfRows = rows / 2;\n+ // We have two matrices handled by a single federated worker\n+ double[][] X1 = getRandomMatrix(halfRows, cols, 0, 1, 1, 3);\n+ double[][] X2 = getRandomMatrix(halfRows, cols, 0, 1, 1, 7);\n+ writeInputMatrixWithMTD(\"X1\", X1, false, new MatrixCharacteristics(halfRows, cols, blocksize, halfRows * cols));\n+ writeInputMatrixWithMTD(\"X2\", X2, false, new MatrixCharacteristics(halfRows, cols, blocksize, halfRows * cols));\n+\n+ // empty script name because we don't execute any script, just start the worker\n+ 
fullDMLScriptName = \"\";\n+ int port1 = getRandomAvailablePort();\n+ int port2 = getRandomAvailablePort();\n+ Thread t1 = startLocalFedWorker(port1);\n+ Thread t2 = startLocalFedWorker(port2);\n+\n+ TestConfiguration config = availableTestConfigurations.get(TEST_NAME);\n+ loadTestConfiguration(config);\n+ setOutputBuffering(false);\n+\n+ // Run reference dml script with normal matrix\n+ fullDMLScriptName = HOME + TEST_NAME + \"Reference.dml\";\n+ programArgs = new String[] {\"-args\", input(\"X1\"), input(\"X2\"),\n+ String.valueOf(runs), expected(\"Z\")};\n+ runTest(true, false, null, -1);\n+\n+ // Run actual dml script with federated matrix\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[] {\"-stats\",\n+ \"-nvargs\", \"in_X1=\" + TestUtils.federatedAddress(port1, input(\"X1\")),\n+ \"in_X2=\" + TestUtils.federatedAddress(port2, input(\"X2\")), \"rows=\" + rows, \"cols=\" + cols,\n+ \"runs=\" + String.valueOf(runs), \"out=\" + output(\"Z\")};\n+ runTest(true, false, null, -1);\n+\n+ // compare via files\n+ //compareResults(1e-9); --> randomized\n+ TestUtils.shutdownThreads(t1, t2);\n+\n+ // check for federated operations\n+ Assert.assertTrue(heavyHittersContainsString(\"fed_ba+*\"));\n+ Assert.assertTrue(heavyHittersContainsString(\"fed_uasqk+\"));\n+ Assert.assertTrue(heavyHittersContainsString(\"fed_uarmin\"));\n+ Assert.assertTrue(heavyHittersContainsString(\"fed_*\"));\n+ Assert.assertTrue(heavyHittersContainsString(\"fed_+\"));\n+ Assert.assertTrue(heavyHittersContainsString(\"fed_<=\"));\n+ Assert.assertTrue(heavyHittersContainsString(\"fed_/\"));\n+\n+ //check that federated input files are still existing\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"X1\")));\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"X2\")));\n+\n+ resetExecMode(platformOld);\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/federated/FederatedPCATest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/federated/FederatedPCATest.java",
"diff": "@@ -27,6 +27,7 @@ import org.junit.runners.Parameterized;\nimport org.apache.sysds.common.Types;\nimport org.apache.sysds.common.Types.ExecMode;\nimport org.apache.sysds.runtime.meta.MatrixCharacteristics;\n+import org.apache.sysds.runtime.util.HDFSTool;\nimport org.apache.sysds.test.AutomatedTestBase;\nimport org.apache.sysds.test.TestConfiguration;\nimport org.apache.sysds.test.TestUtils;\n@@ -129,6 +130,10 @@ public class FederatedPCATest extends AutomatedTestBase {\nAssert.assertTrue(heavyHittersContainsString(\"fed_replace\"));\n}\n+ //check that federated input files are still existing\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"X1\")));\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"X2\")));\n+\nresetExecMode(platformOld);\n}\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/federated/FederatedKmeansTest.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = federated(addresses=list($in_X1, $in_X2),\n+ ranges=list(list(0, 0), list($rows / 2, $cols), list($rows / 2, 0), list($rows, $cols)))\n+[C,Y] = kmeans(X=X, k=4, runs=$runs)\n+write(C, $out)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/federated/FederatedKmeansTestReference.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = rbind(read($1), read($2))\n+[C,Y] = kmeans(X=X, k=4, runs=$3)\n+write(C, $4)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2549,2624] Fix federated binary matrix-vector, var cleanup
This patch fixes two correctness issues related to (1) cleanup of
federated matrices, and (2) federated binary matrix-row vector
operators. Furthermore, this also includes a new federated Kmeans test
and some minor fixes for row aggregates, and improvements of federated
matrix multiplications. |
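The `BinaryMatrixMatrixFEDInstruction` change above is worth unpacking: for a row-partitioned federated matrix, an n-by-1 vector carries one value per row and therefore has to be sliced to each worker's row range (`broadcastSliced`), whereas a full matrix or a 1-by-m vector is valid for every partition and is broadcast whole. A toy illustration with plain arrays standing in for the federation map:

```java
import java.util.Arrays;

public class BroadcastSlicedSketch {
    public static void main(String[] args) {
        long[][] rowRanges = {{0, 2}, {2, 5}};     // [begin, end) rows per worker
        double[] colVector = {10, 11, 12, 13, 14}; // one value per global row

        // n-by-1 input: slice to the rows each worker owns
        for (long[] r : rowRanges) {
            double[] slice = Arrays.copyOfRange(colVector, (int) r[0], (int) r[1]);
            System.out.println("rows " + r[0] + ".." + (r[1] - 1) + " get " + Arrays.toString(slice));
        }

        // 1-by-m input: applies to every row, so every worker gets it whole
        double[] rowVector = {1, 2, 3};
        System.out.println("all workers get " + Arrays.toString(rowVector));
    }
}
```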
49,738 | 16.08.2020 18:32:52 | -7,200 | 0f54db9c79cd768f76c04007bd9fc5069fed5060 | [MINOR] Fix case-sensitive dml script name (federated kmeans)
While Windows allows case-insensitive filenames, this is not true on
Linux and macOS. | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/federated/FederatedKmeansTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/federated/FederatedKmeansTest.java",
"diff": "@@ -40,7 +40,7 @@ import java.util.Collection;\npublic class FederatedKmeansTest extends AutomatedTestBase {\nprivate final static String TEST_DIR = \"functions/federated/\";\n- private final static String TEST_NAME = \"FederatedKMeansTest\";\n+ private final static String TEST_NAME = \"FederatedKmeansTest\";\nprivate final static String TEST_CLASS_DIR = TEST_DIR + FederatedKmeansTest.class.getSimpleName() + \"/\";\nprivate final static int blocksize = 1024;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix case-sensitive dml script name (federated kmeans)
While Windows allows case-insensitive filenames, this is not true on
Linux and macOS. |
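A hedged aside on catching this class of bug early: on most case-insensitive filesystems, `File.getCanonicalFile()` resolves a name to its stored spelling, so a reference with the wrong case can be flagged even though `exists()` succeeds. Sketch with an illustrative path:

```java
import java.io.File;
import java.io.IOException;

public class CaseCheckSketch {
    // true if the name matches the spelling stored on disk
    static boolean caseMatches(File f) throws IOException {
        return f.getCanonicalFile().getName().equals(f.getName());
    }

    public static void main(String[] args) throws IOException {
        File f = new File("src/test/scripts/functions/federated/FederatedKMeansTest.dml");
        if (f.exists() && !caseMatches(f))
            System.err.println("case mismatch, stored as: " + f.getCanonicalFile().getName());
    }
}
```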
49,706 | 17.08.2020 13:30:55 | -7,200 | cacee1131458c97927a23242e9760c038c0e5ddb | Parallel detect schema
Changes the implementation of detect schema to run in
parallel across all columns.
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/BinaryFrameFrameCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/BinaryFrameFrameCPInstruction.java",
"diff": "@@ -39,7 +39,7 @@ public class BinaryFrameFrameCPInstruction extends BinaryCPInstruction\nif(getOpcode().equals(\"dropInvalidType\")) {\n// Perform computation using input frames, and produce the result frame\n- FrameBlock retBlock = inBlock1.dropInvalid(inBlock2);\n+ FrameBlock retBlock = inBlock1.dropInvalidType(inBlock2);\n// Attach result frame with FrameBlock associated with output_name\nec.setFrameOutput(output.getName(), retBlock);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/spark/BinaryFrameFrameSPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/spark/BinaryFrameFrameSPInstruction.java",
"diff": "@@ -95,7 +95,7 @@ public class BinaryFrameFrameSPInstruction extends BinarySPInstruction {\n@Override\npublic FrameBlock call(FrameBlock arg0) throws Exception {\n- return arg0.dropInvalid(schema_frame);\n+ return arg0.dropInvalidType(schema_frame);\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/matrix/data/FrameBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/matrix/data/FrameBlock.java",
"diff": "@@ -32,10 +32,17 @@ import java.util.Arrays;\nimport java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.Iterator;\n+import java.util.List;\nimport java.util.Map;\n+import java.util.concurrent.Callable;\n+import java.util.concurrent.ExecutionException;\n+import java.util.concurrent.ExecutorService;\n+import java.util.concurrent.Future;\nimport java.util.concurrent.ThreadLocalRandom;\nimport org.apache.commons.lang.ArrayUtils;\n+import org.apache.commons.logging.Log;\n+import org.apache.commons.logging.LogFactory;\nimport org.apache.hadoop.io.Writable;\nimport org.apache.sysds.api.DMLException;\nimport org.apache.sysds.common.Types.ValueType;\n@@ -46,12 +53,14 @@ import org.apache.sysds.runtime.instructions.cp.*;\nimport org.apache.sysds.runtime.io.IOUtilFunctions;\nimport org.apache.sysds.runtime.matrix.operators.BinaryOperator;\nimport org.apache.sysds.runtime.transform.encode.EncoderRecode;\n+import org.apache.sysds.runtime.util.CommonThreadPool;\nimport org.apache.sysds.runtime.util.IndexRange;\nimport org.apache.sysds.runtime.util.UtilFunctions;\n@SuppressWarnings({\"rawtypes\",\"unchecked\"}) //allow generic native arrays\n-public class FrameBlock implements CacheBlock, Externalizable\n-{\n+public class FrameBlock implements CacheBlock, Externalizable {\n+ private static final Log LOG = LogFactory.getLog(FrameBlock.class.getName());\n+\nprivate static final long serialVersionUID = -3993450030207130665L;\npublic static final int BUFFER_SIZE = 1 * 1000 * 1000; //1M elements, size of default matrix block\n@@ -1869,9 +1878,21 @@ public class FrameBlock implements CacheBlock, Externalizable\nval = val.trim().toLowerCase().replaceAll(\"\\\"\", \"\");\nif (val.matches(\"(true|false|t|f|0|1)\"))\nreturn ValueType.BOOLEAN;\n- else if (val.matches(\"[-+]?\\\\d+\"))\n+ else if (val.matches(\"[-+]?\\\\d+\")){\n+ long maxValue = Long.parseLong(val);\n+ if ((maxValue >= Integer.MIN_VALUE) && (maxValue <= Integer.MAX_VALUE))\n+ return ValueType.INT32;\n+ else\nreturn ValueType.INT64;\n- else if (val.matches(\"[-+]?[0-9]+\\\\.?[0-9]*([e]?[-+]?[0-9]+)\") || val.equals(\"infinity\") || val.equals(\"-infinity\") || val.equals(\"nan\"))\n+ }\n+ else if (val.matches(\"[-+]?[0-9]+\\\\.?[0-9]*([e]?[-+]?[0-9]+)\")){\n+ double maxValue = Double.parseDouble(val);\n+ if ((maxValue >= (-Float.MAX_VALUE)) && (maxValue <= Float.MAX_VALUE))\n+ return ValueType.FP32;\n+ else\n+ return ValueType.FP64;\n+ }\n+ else if (val.equals(\"infinity\") || val.equals(\"-infinity\") || val.equals(\"nan\"))\nreturn ValueType.FP64;\nelse return ValueType.STRING;\n}\n@@ -1880,54 +1901,76 @@ public class FrameBlock implements CacheBlock, Externalizable\nint rows = this.getNumRows();\nint cols = this.getNumColumns();\nString[] schemaInfo = new String[cols];\n- int sample = (int)Math.min(Math.max(sampleFraction*rows, 1024), rows);\n+ int sample = (int)Math.min(Math.max(sampleFraction*rows, 256), rows);\n+\n+ ExecutorService pool = CommonThreadPool.get(cols);\n+ ArrayList<DetectValueTypeTask> tasks = new ArrayList<>();\nfor (int i = 0; i < cols; i++) {\n- ValueType state = ValueType.UNKNOWN;\n- Array obj = this.getColumn(i);\n- for (int j = 0; j < sample; j++)\n+ FrameBlock.Array obj = this.getColumn(i);\n+ tasks.add(new DetectValueTypeTask(obj,rows, sample));\n+ }\n+\n+ List<Future<String>> ret;\n+\n+ try {\n+ ret = pool.invokeAll(tasks);\n+ pool.shutdown();\n+ for(int i = 0; i < cols; i++){\n+ schemaInfo[i] = ret.get(i).get();\n+ }\n+ } catch (ExecutionException | InterruptedException e) {\n+ throw new 
DMLRuntimeException(\"Exception Interupted or Exception thrown in Detect Schema\", e);\n+ }\n+\n+ //create output block one row representing the schema as strings\n+ FrameBlock fb = new FrameBlock(UtilFunctions.nCopies(cols, ValueType.STRING));\n+ fb.appendRow(schemaInfo);\n+ return fb;\n+ }\n+\n+ private static class DetectValueTypeTask implements Callable<String>\n{\n- String dataValue = null;\n- //read a not null sample value\n- while (dataValue == null) {\n- int randomIndex = ThreadLocalRandom.current().nextInt(0, rows - 1);\n- dataValue = ((obj.get(randomIndex) != null)?obj.get(randomIndex).toString().trim().replace(\"\\\"\", \"\").toLowerCase():null);\n+ private final Array _obj;\n+ private final int _rows;\n+ private final int _sampleSize;\n+\n+\n+ protected DetectValueTypeTask(Array obj, int rows, int sampleSize ) {\n+ _obj = obj;\n+ _rows = rows;\n+ _sampleSize = sampleSize;\n}\n- if (isType(dataValue) == ValueType.STRING) {\n+ @Override\n+ public String call() {\n+ ValueType state = ValueType.UNKNOWN;\n+ for (int j = 0; j < _sampleSize; j++) {\n+ int randomIndex = ThreadLocalRandom.current().nextInt(0, _rows - 1);\n+ String dataValue = ((_obj.get(randomIndex) != null)?_obj.get(randomIndex).toString().trim().replace(\"\\\"\", \"\").toLowerCase():null);\n+ if(dataValue != null){\n+ ValueType current = isType(dataValue);\n+ if (current == ValueType.STRING) {\nstate = ValueType.STRING;\nbreak;\n}\n- else if (isType(dataValue) == ValueType.FP64) {\n- if (dataValue.equals(\"infinity\") || dataValue.equals(\"-infinity\") || dataValue.equals(\"nan\")) {\n+ else if (current== ValueType.FP64) {\nstate = ValueType.FP64;\n}\n- else {\n- double maxValue = Double.parseDouble(dataValue);\n- if ((maxValue >= (-Float.MAX_VALUE)) && (maxValue <= Float.MAX_VALUE))\n+ else if (current== ValueType.FP32) {\nstate = (state == ValueType.FP64 ? state : ValueType.FP32);\n- else\n- state = ValueType.FP64;\n}\n+ else if (current == ValueType.INT64) {\n+ state = ((state == ValueType.FP64 || state == ValueType.FP32) ? state : ValueType.INT64);\n}\n- else if (isType(dataValue) == ValueType.INT64) {\n- long maxValue = Long.parseLong(dataValue);\n- if ((maxValue >= Integer.MIN_VALUE) && (maxValue <= Integer.MAX_VALUE))\n+ else if (current == ValueType.INT32) {\nstate = ((state == ValueType.FP64 || state == ValueType.FP32 || state == ValueType.INT64) ? state : ValueType.INT32);\n- else\n- state = ((state == ValueType.FP64 || state == ValueType.FP32) ? state : ValueType.INT64);\n}\n- else if (isType(dataValue) == ValueType.BOOLEAN)\n- state = ((new ArrayList<>(Arrays.asList(ValueType.FP64, ValueType.FP32, ValueType.INT64, ValueType.INT32)).contains(state)) ? state : ValueType.BOOLEAN);\n- else if (isType(dataValue) == ValueType.STRING)\n- state = ((new ArrayList<>(Arrays.asList(ValueType.FP64, ValueType.FP32, ValueType.INT64, ValueType.INT32, ValueType.BOOLEAN)).contains(state)) ? state : ValueType.STRING);\n+ else if (current == ValueType.BOOLEAN)\n+ state = ((state == ValueType.FP64 || state == ValueType.FP32 || state == ValueType.INT64 || state == ValueType.INT32) ? 
state : ValueType.BOOLEAN);\n}\n- schemaInfo[i] = state.name();\n}\n-\n- //create output block one row representing the schema as strings\n- FrameBlock fb = new FrameBlock(UtilFunctions.nCopies(cols, ValueType.STRING));\n- fb.appendRow(schemaInfo);\n- return fb;\n+ return state.name();\n+ }\n}\n/**\n@@ -1935,7 +1978,7 @@ public class FrameBlock implements CacheBlock, Externalizable\n* @param schema of the frame\n* @return original frame where invalid values are replaced with null\n*/\n- public FrameBlock dropInvalid(FrameBlock schema) {\n+ public FrameBlock dropInvalidType(FrameBlock schema) {\n//sanity checks\nif(this.getNumColumns() != schema.getNumColumns())\nthrow new DMLException(\"mismatch in number of columns in frame and its schema \");\n@@ -1943,6 +1986,19 @@ public class FrameBlock implements CacheBlock, Externalizable\nString[] schemaString = schema.getStringRowIterator().next(); // extract the schema in String array\nfor (int i = 0; i < this.getNumColumns(); i++) {\nArray obj = this.getColumn(i);\n+ String schemaCol = schemaString[i];\n+ String type;\n+ if(schemaCol.contains(\"FP\")){\n+ type = \"FP\";\n+ } else if (schemaCol.contains(\"INT\")){\n+ type = \"INT\";\n+ } else if (schemaCol.contains(\"STRING\")){\n+ // In case of String columns, don't do any verification or replacements.\n+ break;\n+ } else{\n+ type = schemaCol;\n+ }\n+\nfor (int j = 0; j < this.getNumRows(); j++)\n{\nif(obj.get(j) == null)\n@@ -1950,21 +2006,11 @@ public class FrameBlock implements CacheBlock, Externalizable\nString dataValue = obj.get(j).toString().trim().replace(\"\\\"\", \"\").toLowerCase() ;\nValueType dataType = isType(dataValue);\n- if (dataType== ValueType.FP64 && schemaString[i].trim().equals(\"FP32\")) {\n- double maxValue = Double.parseDouble(dataValue);\n- if ((maxValue < (-Float.MAX_VALUE)) || (maxValue > Float.MAX_VALUE))\n- this.set(j,i,null);\n- }\n- else if (dataType== ValueType.INT64 && schemaString[i].trim().equals(\"INT32\")) {\n- long maxValue = Long.parseLong(dataValue);\n- if ((maxValue < Integer.MIN_VALUE) || (maxValue > Integer.MAX_VALUE))\n+ if(!dataType.toString().contains(type) && !(dataType == ValueType.BOOLEAN && type == \"INT\")){\n+ LOG.warn(\"Datatype detected: \" + dataType + \" where expected: \" + schemaString[i] + \" index: \" + i + \",\" +j);\n+\nthis.set(j,i,null);\n}\n- else if(dataType == ValueType.BOOLEAN && schemaString[i].trim().equals(\"INT32\")\n- && ((Integer.parseInt(dataValue) == 1 || Integer.parseInt(dataValue) == 0)))\n- continue;\n- else if (!dataType.toString().equals(schemaString[i].trim()))\n- this.set(j,i,null);\n}\n}\nreturn this;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/frame/FrameDropInvalidTypeTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/frame/FrameDropInvalidTypeTest.java",
"diff": "@@ -68,45 +68,59 @@ public class FrameDropInvalidTypeTest extends AutomatedTestBase\n@Test\npublic void testDoubleinStringCP() {\n- runIsCorrectTest(schemaStrings, rows, schemaStrings.length, 3, 1, LopProperties.ExecType.CP);\n+ // This test now verifies floating points are okay in string columns\n+ runIsCorrectTest(schemaStrings, rows, schemaStrings.length, 3, 1, LopProperties.ExecType.CP, true);\n}\n@Test\npublic void testDoubleinStringSpark() {\n- runIsCorrectTest(schemaStrings, rows, schemaStrings.length, 3, 1, LopProperties.ExecType.SPARK);\n+ // This test now verifies floating points are okay in string columns\n+ runIsCorrectTest(schemaStrings, rows, schemaStrings.length, 3, 1, LopProperties.ExecType.SPARK, true);\n}\n@Test\npublic void testStringInDouble() {\n+ // This test now verifies strings are removed in float columns\nrunIsCorrectTest(schemaStrings, rows, schemaStrings.length, 3, 2, LopProperties.ExecType.CP);\n}\n@Test\npublic void testStringInDoubleSpark() {\n+ // This test now verifies strings are removed in float columns\nrunIsCorrectTest(schemaStrings, rows, schemaStrings.length, 3, 2, LopProperties.ExecType.SPARK);\n}\n@Test\npublic void testDoubleInFloat() {\n- runIsCorrectTest(schemaStrings, rows, schemaStrings.length, 5, 3, LopProperties.ExecType.CP);\n+ // This test now verifies that changing from FP64 to FP32 is okay.\n+ runIsCorrectTest(schemaStrings, rows, schemaStrings.length, 5, 3, LopProperties.ExecType.CP,true);\n}\n@Test\npublic void testDoubleInFloatSpark() {\n- runIsCorrectTest(schemaStrings, rows, schemaStrings.length, 5, 3, LopProperties.ExecType.SPARK);\n+ // This test now verifies that changing from FP64 to FP32 is okay.\n+ runIsCorrectTest(schemaStrings, rows, schemaStrings.length, 5, 3, LopProperties.ExecType.SPARK, true);\n}\n@Test\npublic void testLongInInt() {\n- runIsCorrectTest(schemaStrings, rows, schemaStrings.length, 5, 4, LopProperties.ExecType.CP);\n+ // This test now verifies that changing from INT32 to INT64 is okay.\n+ runIsCorrectTest(schemaStrings, rows, schemaStrings.length, 5, 4, LopProperties.ExecType.CP, true);\n}\n@Test\npublic void testLongInIntSpark() {\n- runIsCorrectTest(schemaStrings, rows, schemaStrings.length, 5, 4, LopProperties.ExecType.SPARK);\n+ // This test now verifies that changing from INT32 to INT64 is okay.\n+ runIsCorrectTest(schemaStrings, rows, schemaStrings.length, 5, 4, LopProperties.ExecType.SPARK, true);\n}\n+\n+ private void runIsCorrectTest(ValueType[] schema, int rows, int cols,\n+ int badValues, int test, LopProperties.ExecType et){\n+ runIsCorrectTest(schema, rows, cols, badValues, test, et, false);\n+ }\n+\nprivate void runIsCorrectTest(ValueType[] schema, int rows, int cols,\n- int badValues, int test, LopProperties.ExecType et)\n+ int badValues, int test, LopProperties.ExecType et, boolean ignore)\n{\nTypes.ExecMode platformOld = setExecMode(et);\nboolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n@@ -182,7 +196,7 @@ public class FrameDropInvalidTypeTest extends AutomatedTestBase\nint nullNum = Math.toIntExact(data.stream().filter(s -> s == null).count());\n//verify output schema\n- Assert.assertEquals(\"Wrong result: \" + nullNum + \".\", badValues, nullNum);\n+ Assert.assertEquals(\"Wrong result: \" + nullNum + \".\", ignore ? 0 : badValues, nullNum);\n}\ncatch (Exception ex) {\nthrow new RuntimeException(ex);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2616] Parallel detect schema
Changes the implementation of detect schema to run in
parallel across all columns.
Closes #1012 |
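The `detectSchema` rewrite above follows the standard fan-out shape: one `Callable` per column, `invokeAll` on a pool, then collect the futures in column order so the output schema stays aligned. A self-contained sketch with the type inference reduced to a trivial stand-in:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class DetectSchemaSketch {
    public static void main(String[] args) throws Exception {
        String[][] columns = {{"1", "2"}, {"1.5", "2.5"}, {"a", "b"}};
        ExecutorService pool = Executors.newFixedThreadPool(columns.length);
        List<Callable<String>> tasks = new ArrayList<>();
        for (String[] col : columns)
            tasks.add(() -> inferType(col)); // one task per column
        List<Future<String>> results = pool.invokeAll(tasks); // result order == task order
        pool.shutdown();
        for (Future<String> f : results)
            System.out.println(f.get()); // INT64, FP64, STRING
    }

    static String inferType(String[] col) {
        boolean allInt = true, allNum = true;
        for (String v : col) {
            if (!v.matches("[-+]?\\d+")) allInt = false;
            if (!v.matches("[-+]?\\d*\\.?\\d+")) allNum = false;
        }
        return allInt ? "INT64" : (allNum ? "FP64" : "STRING");
    }
}
```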
49,698 | 17.08.2020 18:25:54 | -19,080 | 961d4dc80a8a6cde7210fb2bbd7d1c091a0092ff | [MINOR] Remove println in NNTest | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/applications/NNTest.java",
"new_path": "src/test/java/org/apache/sysds/test/applications/NNTest.java",
"diff": "@@ -38,7 +38,6 @@ public class NNTest extends MLContextTestBase {\npublic void testNNLibrary() {\nScript script = dmlFromFile(TEST_SCRIPT);\nString stdOut = executeAndCaptureStdOut(ml, script).getRight();\n- System.out.println(stdOut);\nassertTrue(stdOut, !stdOut.contains(ERROR_STRING));\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Remove println in NNTest |
49,738 | 20.08.2020 01:23:49 | -7,200 | bcd4ead93b7927aed647024ed6e91ce95e72ffaf | [MINOR] Fix federated component tests and frame csv reader robustness | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/io/FrameReaderTextCSV.java",
"new_path": "src/main/java/org/apache/sysds/runtime/io/FrameReaderTextCSV.java",
"diff": "@@ -48,10 +48,11 @@ import org.apache.sysds.runtime.util.UtilFunctions;\n*\n*/\npublic class FrameReaderTextCSV extends FrameReader {\n- protected FileFormatPropertiesCSV _props;\n+ protected final FileFormatPropertiesCSV _props;\npublic FrameReaderTextCSV(FileFormatPropertiesCSV props) {\n- _props = props;\n+ //if unspecified use default properties for robustness\n+ _props = props != null ? props : new FileFormatPropertiesCSV();\n}\n@Override\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix federated component tests and frame csv reader robustness |
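The one-line reader change above is a small defaulting idiom: normalize a possibly-null constructor argument into a `final` field once, so the rest of the class never needs a null check. Sketch with a hypothetical `CsvProps` standing in for `FileFormatPropertiesCSV`:

```java
public class CsvReaderSketch {
    static class CsvProps {
        final String delim;
        CsvProps() { this(","); }                      // default properties
        CsvProps(String delim) { this.delim = delim; }
    }

    private final CsvProps props;

    public CsvReaderSketch(CsvProps props) {
        // if unspecified, fall back to defaults for robustness
        this.props = (props != null) ? props : new CsvProps();
    }

    public String delimiter() {
        return props.delim; // safe: props is never null here
    }

    public static void main(String[] args) {
        System.out.println(new CsvReaderSketch(null).delimiter()); // ","
    }
}
```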
49,698 | 20.08.2020 13:21:54 | -19,080 | 2f5d3fb3ac024bf8581cfc0369eddf56e8235a27 | [WIP][DOC] DML to R translation guide
* some common differences between R and DML
* The examples should be runnable to make the
guide more interesting. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "dev/docs/dml-vs-r-guide.md",
"diff": "+<!--\n+{% comment %}\n+Licensed to the Apache Software Foundation (ASF) under one or more\n+contributor license agreements. See the NOTICE file distributed with\n+this work for additional information regarding copyright ownership.\n+The ASF licenses this file to you under the Apache License, Version 2.0\n+(the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+{% endcomment %}\n+-->\n+\n+#### `dml` to `R` translation recipes\n+\n+To ease the prototyping of the `dml` scripts from its `R` counterparts, this\n+guide covers various practical functions or operations.\n+\n+NOTE: This document is still a work in progress.\n+\n+## Table of Contents\n+\n+ * [Multiple outputs](#multiple-outputs)\n+ * [Order function](#order-function)\n+ * [Read function](#read-function)\n+ * [`Write` function](#write-function)\n+\n+##### Multiple-outputs\n+\n+```dml\n+# dml\n+\n+A = rand(rows=10, cols=10)\n+C = t(A) %*% A\n+[evalues, evectors] = eigen(C);\n+```\n+\n+```R\n+# R\n+\n+A = rand(rows=10, cols=10)\n+C = t(A) %*% A\n+R <- eigen(C);\n+evalues = R$values;\n+evectors = R$vectors;\n+```\n+\n+##### `order`-function\n+\n+```dml\n+# dml\n+\n+decreasing_Idx = order(target=evalues,by=1,decreasing=TRUE,index.return=TRUE);\n+diagmat = table(seq(1,D),decreasing_Idx);\n+```\n+\n+```R\n+# R\n+\n+decreasing_Idx = order(as.vector(evalues), decreasing=TRUE);\n+diagmat = table(seq(1,D), decreasing_Idx);\n+```\n+\n+##### `Read`-function\n+\n+```dml\n+# dml\n+\n+A = read(\"\")\n+# A = read($INPUT)\n+```\n+\n+```R\n+# R\n+\n+# args[1] will the relative directory path\n+A = readMM(paste(args[1], \"A.mtx\", sep=\"\"))\n+```\n+\n+##### `Write`-function\n+\n+```dml\n+# dml\n+ofmt = \"TEXT\"\n+\n+write(evalues, \"evalues\", format=ofmt)\n+```\n+\n+```R\n+# R\n+\n+# Here args[2] will be a string denoting output directory\n+writeMM(as(evalues, \"CsparseMatrix\"), paste(args[2],\"evalues\", sep=\"\"));\n+```\n"
}
] | Java | Apache License 2.0 | apache/systemds | [WIP][DOC] DML to R translation guide (#1007)
* some common differences between R and DML
* The examples should be runnable to make the
guide more interesting. |
49,706 | 20.08.2020 18:09:14 | -7,200 | 260f44001844a7b231e52eb36f064f4d9808058e | [MINOR] Debugging Update for FederatedWorkerHandler | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"diff": "@@ -72,7 +72,9 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\n@Override\npublic void channelRead(ChannelHandlerContext ctx, Object msg) {\n+ if( log.isDebugEnabled() ){\nlog.debug(\"Received: \" + msg.getClass().getSimpleName());\n+ }\nif (!(msg instanceof FederatedRequest[]))\nthrow new DMLRuntimeException(\"FederatedWorkerHandler: Received object no instance of 'FederatedRequest[]'.\");\nFederatedRequest[] requests = (FederatedRequest[]) msg;\n@@ -80,15 +82,20 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\nfor( int i=0; i<requests.length; i++ ) {\nFederatedRequest request = requests[i];\n- if( log.isDebugEnabled() )\n- log.debug(\"Executing command \"+(i+1)+\"/\"+requests.length + \": \" + request.getType().name());\n+ if( log.isInfoEnabled() ){\n+ log.info(\"Executing command \" + (i+1) + \"/\" + requests.length + \": \" + request.getType().name());\n+ if( log.isDebugEnabled() ){\n+ log.debug(\"full command: \" + request.toString());\n+ }\n+ }\nPrivacyMonitor.setCheckPrivacy(request.checkPrivacy());\nPrivacyMonitor.clearCheckedConstraints();\nresponse = executeCommand(request);\nconditionalAddCheckedConstraints(request, response);\n- if (!response.isSuccessful())\n- log.error(\"Command \" + request.getType() + \" failed: \" + response.getErrorMessage());\n+ if (!response.isSuccessful()){\n+ log.error(\"Command \" + request.getType() + \" failed: \" + response.getErrorMessage() + \"full command: \\n\" + request.toString());\n+ }\n}\nctx.writeAndFlush(response).addListener(new CloseListener());\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Debugging Update for FederatedWorkerHandler |
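The guards added above are the classic cheap-logging idiom: the cost of building a log message string is only paid when the level is actually enabled, which matters on a worker's hot request path. Minimal sketch against the same commons-logging API (assumes commons-logging on the classpath):

```java
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class GuardedLogSketch {
    private static final Log LOG = LogFactory.getLog(GuardedLogSketch.class);

    public static void handle(Object msg, int i, int n) {
        if (LOG.isInfoEnabled()) {
            LOG.info("Executing command " + (i + 1) + "/" + n);
            if (LOG.isDebugEnabled())
                LOG.debug("full command: " + msg); // potentially expensive toString()
        }
    }
}
```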
49,706 | 20.08.2020 18:31:52 | -7,200 | abacd66ecd69ef8f8daa83e555e26f26b33c8e03 | [MINOR] New error object written in case of failing Channel | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"diff": "@@ -293,11 +293,20 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\nprivate static class CloseListener implements ChannelFutureListener {\n@Override\n- public void operationComplete(ChannelFuture channelFuture) throws InterruptedException, DMLRuntimeException {\n- if (!channelFuture.isSuccess())\n- throw new DMLRuntimeException(\"Federated Worker Write failed\");\n+ public void operationComplete(ChannelFuture channelFuture) throws InterruptedException {\n+ if (!channelFuture.isSuccess()){\n+ log.fatal(\"Federated Worker Write failed\");\n+ channelFuture\n+ .channel()\n+ .writeAndFlush(\n+ new FederatedResponse(ResponseType.ERROR,\n+ new FederatedWorkerHandlerException(\"Error while sending response.\")))\n+ .channel().close().sync();\n+ }\n+ else {\nPrivacyMonitor.clearCheckedConstraints();\nchannelFuture.channel().close().sync();\n}\n}\n}\n+}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] New error object written in case of failing Channel |
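The revised `CloseListener` replaces a throw inside the Netty callback (which the remote caller would never observe) with a best-effort error write before closing the channel. A sketch of that shape; `ErrorResponse` is a hypothetical serializable message standing in for `FederatedResponse`:

```java
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;

public class CloseListenerSketch implements ChannelFutureListener {
    static class ErrorResponse implements java.io.Serializable {
        final String msg;
        ErrorResponse(String msg) { this.msg = msg; }
    }

    @Override
    public void operationComplete(ChannelFuture f) throws InterruptedException {
        if (!f.isSuccess()) {
            // first write failed: try to tell the client before closing
            f.channel()
             .writeAndFlush(new ErrorResponse("Error while sending response."))
             .channel().close().sync();
        }
        else {
            f.channel().close().sync(); // normal path: just close
        }
    }
}
```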
49,706 | 20.08.2020 19:48:46 | -7,200 | c9a02a2e9fa3ec50a2a8c3611d62a2a334b5bab3 | [MINOR] Negative test of federatedError | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedResponse.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedResponse.java",
"diff": "@@ -91,6 +91,10 @@ public class FederatedResponse implements Serializable {\nthrow (Exception) potentialException;\n}\n}\n+ String errorMessage = getErrorMessage();\n+ if (getErrorMessage() != \"No readable error message\")\n+ throw new DMLRuntimeException(errorMessage);\n+ else\nthrow new DMLRuntimeException(\"Unknown runtime exception in handling of federated request by federated worker.\");\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"diff": "@@ -295,7 +295,7 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\n@Override\npublic void operationComplete(ChannelFuture channelFuture) throws InterruptedException {\nif (!channelFuture.isSuccess()){\n- log.fatal(\"Federated Worker Write failed\");\n+ log.error(\"Federated Worker Write failed\");\nchannelFuture\n.channel()\n.writeAndFlush(\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"new_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"diff": "@@ -1273,7 +1273,7 @@ public abstract class AutomatedTestBase {\nargs.add(\"-gpu\");\n}\n- protected int getRandomAvailablePort() {\n+ public static int getRandomAvailablePort() {\ntry(ServerSocket availableSocket = new ServerSocket(0)) {\nreturn availableSocket.getLocalPort();\n}\n@@ -1312,6 +1312,26 @@ public abstract class AutomatedTestBase {\nreturn t;\n}\n+ public static Thread startLocalFedWorkerWithArgs(String[] args) {\n+ Thread t = null;\n+\n+ try {\n+ t = new Thread(() -> {\n+ try {\n+ main(args);\n+ }\n+ catch(IOException e) {\n+ }\n+ });\n+ t.start();\n+ java.util.concurrent.TimeUnit.MILLISECONDS.sleep(FED_WORKER_WAIT);\n+ }\n+ catch(InterruptedException e) {\n+ // Should happen at closing of the worker so don't print\n+ }\n+ return t;\n+ }\n+\nprivate boolean rCompareException(boolean exceptionExpected, String errMessage, Throwable e, boolean result) {\nif(e.getCause() != null) {\nresult |= rCompareException(exceptionExpected, errMessage, e.getCause(), result);\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/federated/FederatedNegativeTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.federated;\n+\n+import org.apache.log4j.Level;\n+import org.apache.log4j.Logger;\n+import org.apache.sysds.common.Types;\n+import org.apache.sysds.runtime.controlprogram.federated.*;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Test;\n+\n+import java.net.InetSocketAddress;\n+import java.util.HashMap;\n+import java.util.Map;\n+import java.util.concurrent.ExecutionException;\n+import java.util.concurrent.Future;\n+\n+import static org.junit.Assert.assertFalse;\n+import static org.junit.Assert.assertTrue;\n+\[email protected]\n+public class FederatedNegativeTest {\n+ protected static Logger log = Logger.getLogger(FederatedNegativeTest.class);\n+\n+ static {\n+ Logger.getLogger(\"org.apache.sysds\").setLevel(Level.OFF);\n+ }\n+\n+ @Test\n+ public void NegativeTest1() {\n+ int port = AutomatedTestBase.getRandomAvailablePort();\n+ String[] args = {\"-w\", Integer.toString(port)};\n+ Thread t = AutomatedTestBase.startLocalFedWorkerWithArgs(args);\n+ Map<FederatedRange, FederatedData> fedMap = new HashMap<>();\n+ FederatedRange r = new FederatedRange(new long[]{0,0}, new long[]{1,1});\n+ FederatedData d = new FederatedData(\n+ Types.DataType.SCALAR,\n+ new InetSocketAddress(\"localhost\", port),\n+ \"Nowhere\");\n+ fedMap.put(r,d);\n+ FederationMap fedM = new FederationMap(fedMap);\n+ FederatedRequest fr = new FederatedRequest(FederatedRequest.RequestType.GET_VAR);\n+ Future<FederatedResponse>[] res = fedM.execute(0, fr);\n+ try {\n+ FederatedResponse fres = res[0].get();\n+ assertFalse(fres.isSuccessful());\n+ assertTrue(fres.getErrorMessage().contains(\"Variable 0 does not exist at federated worker\"));\n+\n+ } catch (InterruptedException e) {\n+ e.printStackTrace();\n+ } catch (ExecutionException e) {\n+ e.printStackTrace();\n+ } catch (Exception e) {\n+ e.printStackTrace();\n+ }\n+\n+ TestUtils.shutdownThread(t);\n+ }\n+\n+}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Negative test of federated error handling
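The negative test in this record doubles as end-to-end usage documentation for the two utilities the commit exposes (`getRandomAvailablePort`, `startLocalFedWorkerWithArgs`). A minimal sketch of the same probe outside JUnit — using only the calls that appear in the diff, with a made-up wrapper class name `FedErrorProbe` and printing instead of asserting — might look as follows:

```java
// Minimal sketch (not part of the commit): start an in-process federated
// worker and request a variable that was never registered, which should
// yield an unsuccessful FederatedResponse carrying an error message.
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Future;

import org.apache.sysds.common.Types;
import org.apache.sysds.runtime.controlprogram.federated.*;
import org.apache.sysds.test.AutomatedTestBase;
import org.apache.sysds.test.TestUtils;

public class FedErrorProbe { // hypothetical wrapper class
  public static void main(String[] args) throws Exception {
    int port = AutomatedTestBase.getRandomAvailablePort();
    // spin up a local worker thread via the helper added in this commit
    Thread t = AutomatedTestBase.startLocalFedWorkerWithArgs(
      new String[] {"-w", Integer.toString(port)});

    // a 1x1 federated range backed by a non-existent file on the worker
    Map<FederatedRange, FederatedData> fedMap = new HashMap<>();
    fedMap.put(new FederatedRange(new long[] {0, 0}, new long[] {1, 1}),
      new FederatedData(Types.DataType.SCALAR,
        new InetSocketAddress("localhost", port), "Nowhere"));

    // GET_VAR for an ID the worker has never seen -> error response
    Future<FederatedResponse>[] res = new FederationMap(fedMap)
      .execute(0, new FederatedRequest(FederatedRequest.RequestType.GET_VAR));
    FederatedResponse fres = res[0].get();
    System.out.println("successful: " + fres.isSuccessful());
    System.out.println("error: " + fres.getErrorMessage());

    TestUtils.shutdownThread(t);
  }
}
```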
49,746 | 20.08.2020 22:53:56 | -7,200 | 7eccbfeb047dd029f8e110e65d208543c3d60ee5 | [SYSTEMDS-2554,2558,2561] Federated transform decode (recoding)
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"diff": "@@ -768,7 +768,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\nnew Path(_hdfsFileName), new Path(fName));\n//actual export (note: no direct transfer of local copy in order to ensure blocking (and hence, parallelism))\n- if( isDirty() || !eqScheme ||\n+ if( isDirty() || !eqScheme || isFederated() ||\n(pWrite && !isEqualOutputFormat(outputFormat)) )\n{\n// CASE 1: dirty in-mem matrix or pWrite w/ different format (write matrix to fname; load into memory if evicted)\n@@ -781,12 +781,14 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n{\nif( getRDDHandle()==null || getRDDHandle().allowsShortCircuitRead() )\n_data = readBlobFromHDFS( _hdfsFileName );\n- else\n+ else if( getRDDHandle() != null )\n_data = readBlobFromRDD( getRDDHandle(), new MutableBoolean() );\n+ else\n+ _data = readBlobFromFederated( getFedMapping() );\n+\nsetDirty(false);\n}\n- catch (IOException e)\n- {\n+ catch (IOException e) {\nthrow new DMLRuntimeException(\"Reading of \" + _hdfsFileName + \" (\"+hashCode()+\") failed.\", e);\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedData.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedData.java",
"diff": "@@ -119,6 +119,7 @@ public class FederatedData {\n/**\n* Executes an federated operation on a federated worker.\n*\n+ * @param address socket address (incl host and port)\n* @param request the requested operation\n* @return the response\n*/\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"diff": "@@ -256,7 +256,8 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\npb.execute(ec); //execute single instruction\n}\ncatch(Exception ex) {\n- return new FederatedResponse(ResponseType.ERROR, ex.getMessage());\n+ return new FederatedResponse(ResponseType.ERROR, new FederatedWorkerHandlerException(\n+ \"Exception of type \" + ex.getClass() + \" thrown when processing EXEC_INST request\", ex));\n}\nreturn new FederatedResponse(ResponseType.SUCCESS_EMPTY);\n}\n@@ -276,12 +277,19 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\nreturn udf.execute(ec, inputs);\n}\ncatch(Exception ex) {\n- return new FederatedResponse(ResponseType.ERROR, ex.getMessage());\n+ return new FederatedResponse(ResponseType.ERROR, new FederatedWorkerHandlerException(\n+ \"Exception of type \" + ex.getClass() + \" thrown when processing EXEC_UDF request\", ex));\n}\n}\nprivate FederatedResponse execClear() {\n+ try {\n_ecm.clear();\n+ }\n+ catch(Exception ex) {\n+ return new FederatedResponse(ResponseType.ERROR, new FederatedWorkerHandlerException(\n+ \"Exception of type \" + ex.getClass() + \" thrown when processing CLEAR request\", ex));\n+ }\nreturn new FederatedResponse(ResponseType.SUCCESS_EMPTY);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/ParameterizedBuiltinCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/ParameterizedBuiltinCPInstruction.java",
"diff": "@@ -445,7 +445,7 @@ public class ParameterizedBuiltinCPInstruction extends ComputationCPInstruction\n}\nelse if (opcode.equalsIgnoreCase(\"transformdecode\")) {\nCPOperand target = getTargetOperand();\n- CPOperand meta = getLiteral(params.get(\"meta\"), DataType.FRAME);\n+ CPOperand meta = getLiteral(\"meta\", ValueType.UNKNOWN, DataType.FRAME);\nCPOperand spec = getStringLiteral(\"spec\");\nreturn Pair.of(output.getName(), new LineageItem(getOpcode(),\nLineageItemUtils.getLineage(ec, target, meta, spec)));\n@@ -477,11 +477,11 @@ public class ParameterizedBuiltinCPInstruction extends ComputationCPInstruction\nreturn getLiteral(name, ValueType.BOOLEAN);\n}\n- private CPOperand getLiteral(String name, DataType dt) {\n- return new CPOperand(name, ValueType.UNKNOWN, DataType.FRAME);\n- }\n-\nprivate CPOperand getLiteral(String name, ValueType vt) {\nreturn new CPOperand(params.get(name), vt, DataType.SCALAR, true);\n}\n+\n+ private CPOperand getLiteral(String name, ValueType vt, DataType dt) {\n+ return new CPOperand(params.get(name), vt, dt);\n+ }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/FEDInstructionUtils.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/FEDInstructionUtils.java",
"diff": "@@ -79,6 +79,9 @@ public class FEDInstructionUtils {\nif(pinst.getOpcode().equals(\"replace\") && pinst.getTarget(ec).isFederated()) {\nfedinst = ParameterizedBuiltinFEDInstruction.parseInstruction(pinst.getInstructionString());\n}\n+ else if(pinst.getOpcode().equals(\"transformdecode\") && pinst.getTarget(ec).isFederated()) {\n+ return ParameterizedBuiltinFEDInstruction.parseInstruction(pinst.getInstructionString());\n+ }\n}\nelse if (inst instanceof MultiReturnParameterizedBuiltinCPInstruction) {\nMultiReturnParameterizedBuiltinCPInstruction minst = (MultiReturnParameterizedBuiltinCPInstruction) inst;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/ParameterizedBuiltinFEDInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/ParameterizedBuiltinFEDInstruction.java",
"diff": "package org.apache.sysds.runtime.instructions.fed;\n+import java.util.Arrays;\nimport java.util.HashMap;\nimport java.util.LinkedHashMap;\n+\n+import org.apache.sysds.common.Types;\nimport org.apache.sysds.common.Types.DataType;\nimport org.apache.sysds.common.Types.ValueType;\n+import org.apache.sysds.hops.OptimizerUtils;\nimport org.apache.sysds.lops.Lop;\nimport org.apache.sysds.runtime.DMLRuntimeException;\n+import org.apache.sysds.runtime.controlprogram.caching.FrameObject;\nimport org.apache.sysds.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysds.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysds.runtime.controlprogram.federated.FederatedRequest;\n+import org.apache.sysds.runtime.controlprogram.federated.FederatedResponse;\n+import org.apache.sysds.runtime.controlprogram.federated.FederatedUDF;\n+import org.apache.sysds.runtime.controlprogram.federated.FederationMap;\n+import org.apache.sysds.runtime.controlprogram.federated.FederationUtils;\nimport org.apache.sysds.runtime.functionobjects.ParameterizedBuiltin;\nimport org.apache.sysds.runtime.functionobjects.ValueFunction;\n-import org.apache.sysds.runtime.controlprogram.federated.FederationUtils;\nimport org.apache.sysds.runtime.instructions.InstructionUtils;\nimport org.apache.sysds.runtime.instructions.cp.CPOperand;\n+import org.apache.sysds.runtime.instructions.cp.Data;\n+import org.apache.sysds.runtime.matrix.data.FrameBlock;\n+import org.apache.sysds.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysds.runtime.matrix.operators.Operator;\nimport org.apache.sysds.runtime.matrix.operators.SimpleOperator;\n+import org.apache.sysds.runtime.meta.MatrixCharacteristics;\n+import org.apache.sysds.runtime.meta.MetaDataFormat;\n+import org.apache.sysds.runtime.privacy.PrivacyMonitor;\n+import org.apache.sysds.runtime.transform.decode.Decoder;\n+import org.apache.sysds.runtime.transform.decode.DecoderFactory;\npublic class ParameterizedBuiltinFEDInstruction extends ComputationFEDInstruction {\n-\nprotected final LinkedHashMap<String, String> params;\n- protected ParameterizedBuiltinFEDInstruction(Operator op,\n- LinkedHashMap<String, String> paramsMap, CPOperand out, String opcode, String istr)\n- {\n+ protected ParameterizedBuiltinFEDInstruction(Operator op, LinkedHashMap<String, String> paramsMap, CPOperand out,\n+ String opcode, String istr) {\nsuper(FEDType.ParameterizedBuiltin, op, null, null, out, opcode, istr);\nparams = paramsMap;\n}\n@@ -80,11 +94,13 @@ public class ParameterizedBuiltinFEDInstruction extends ComputationFEDInstructio\nLinkedHashMap<String, String> paramsMap = constructParameterMap(parts);\n// determine the appropriate value function\n- ValueFunction func = null;\nif( opcode.equalsIgnoreCase(\"replace\") ) {\n- func = ParameterizedBuiltin.getParameterizedBuiltinFnObject(opcode);\n+ ValueFunction func = ParameterizedBuiltin.getParameterizedBuiltinFnObject(opcode);\nreturn new ParameterizedBuiltinFEDInstruction(new SimpleOperator(func), paramsMap, out, opcode, str);\n}\n+ else if(opcode.equals(\"transformapply\") || opcode.equals(\"transformdecode\")) {\n+ return new ParameterizedBuiltinFEDInstruction(null, paramsMap, out, opcode, str);\n+ }\nelse {\nthrow new DMLRuntimeException(\"Unsupported opcode (\" + opcode + \") for ParameterizedBuiltinFEDInstruction.\");\n}\n@@ -106,11 +122,62 @@ public class ParameterizedBuiltinFEDInstruction extends 
ComputationFEDInstructio\nout.getDataCharacteristics().set(mo.getDataCharacteristics());\nout.setFedMapping(mo.getFedMapping().copyWithNewID(fr1.getID()));\n}\n+ else if(opcode.equalsIgnoreCase(\"transformdecode\"))\n+ transformDecode(ec);\nelse {\nthrow new DMLRuntimeException(\"Unknown opcode : \" + opcode);\n}\n}\n+ private void transformDecode(ExecutionContext ec) {\n+ // acquire locks\n+ MatrixObject mo = ec.getMatrixObject(params.get(\"target\"));\n+ FrameBlock meta = ec.getFrameInput(params.get(\"meta\"));\n+ String spec = params.get(\"spec\");\n+\n+ FederationMap fedMapping = mo.getFedMapping();\n+\n+ ValueType[] schema = new ValueType[(int) mo.getNumColumns()];\n+ long varID = FederationUtils.getNextFedDataID();\n+ FederationMap decodedMapping = fedMapping.mapParallel(varID, (range, data) -> {\n+ int columnOffset = (int) range.getBeginDims()[1] + 1;\n+\n+ FrameBlock subMeta = new FrameBlock();\n+ synchronized(meta) {\n+ meta.slice(0, meta.getNumRows() - 1, columnOffset - 1, (int) range.getEndDims()[1] - 1, subMeta);\n+ }\n+\n+ FederatedResponse response;\n+ try {\n+ response = data.executeFederatedOperation(new FederatedRequest(FederatedRequest.RequestType.EXEC_UDF,\n+ varID, new DecodeMatrix(data.getVarID(), varID, subMeta, spec, columnOffset))).get();\n+ if(!response.isSuccessful())\n+ response.throwExceptionFromResponse();\n+\n+ ValueType[] subSchema = (ValueType[]) response.getData()[0];\n+ synchronized(schema) {\n+ // It would be possible to assert that different federated workers don't give different value\n+ // types for the same columns, but the performance impact is not worth the effort\n+ System.arraycopy(subSchema, 0, schema, columnOffset - 1, subSchema.length);\n+ }\n+ }\n+ catch(Exception e) {\n+ throw new DMLRuntimeException(e);\n+ }\n+ return null;\n+ });\n+\n+ // construct a federated matrix with the encoded data\n+ FrameObject decodedFrame = ec.getFrameObject(output);\n+ decodedFrame.setSchema(schema);\n+ decodedFrame.getDataCharacteristics().set(mo.getDataCharacteristics());\n+ // set the federated mapping for the matrix\n+ decodedFrame.setFedMapping(decodedMapping);\n+\n+ // release locks\n+ ec.releaseFrameInput(params.get(\"meta\"));\n+ }\n+\npublic MatrixObject getTarget(ExecutionContext ec) {\nreturn ec.getMatrixObject(params.get(\"target\"));\n}\n@@ -118,4 +185,47 @@ public class ParameterizedBuiltinFEDInstruction extends ComputationFEDInstructio\nprivate CPOperand getTargetOperand() {\nreturn new CPOperand(params.get(\"target\"), ValueType.FP64, DataType.MATRIX);\n}\n+\n+ public static class DecodeMatrix extends FederatedUDF {\n+ private static final long serialVersionUID = 2376756757742169692L;\n+ private final long _outputID;\n+ private final FrameBlock _meta;\n+ private final String _spec;\n+ private final int _globalOffset;\n+\n+ public DecodeMatrix(long input, long outputID, FrameBlock meta, String spec, int globalOffset) {\n+ super(new long[]{input});\n+ _outputID = outputID;\n+ _meta = meta;\n+ _spec = spec;\n+ _globalOffset = globalOffset;\n+ }\n+\n+ @Override\n+ public FederatedResponse execute(ExecutionContext ec, Data... 
data) {\n+ MatrixObject mo = (MatrixObject) PrivacyMonitor.handlePrivacy(data[0]);\n+ MatrixBlock mb = mo.acquireRead();\n+ String[] colNames = _meta.getColumnNames();\n+\n+ // compute transformdecode\n+ Decoder decoder = DecoderFactory.createDecoder(_spec, colNames, null,\n+ _meta, mb.getNumColumns(), _globalOffset, _globalOffset + mb.getNumColumns());\n+ FrameBlock fbout = decoder.decode(mb, new FrameBlock(decoder.getSchema()));\n+ fbout.setColumnNames(Arrays.copyOfRange(colNames, 0, fbout.getNumColumns()));\n+\n+ // copy characteristics\n+ MatrixCharacteristics mc = new MatrixCharacteristics(mo.getDataCharacteristics());\n+ FrameObject fo = new FrameObject(OptimizerUtils.getUniqueTempFileName(),\n+ new MetaDataFormat(mc, Types.FileFormat.BINARY));\n+ // set the encoded data\n+ fo.acquireModify(fbout);\n+ fo.release();\n+ mo.release();\n+\n+ // add it to the list of variables\n+ ec.setVariable(String.valueOf(_outputID), fo);\n+ // return schema\n+ return new FederatedResponse(FederatedResponse.ResponseType.SUCCESS, new Object[] {fo.getSchema()});\n+ }\n+ }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/transform/decode/DecoderFactory.java",
"new_path": "src/main/java/org/apache/sysds/runtime/transform/decode/DecoderFactory.java",
"diff": "@@ -37,24 +37,33 @@ import static org.apache.sysds.runtime.util.CollectionUtils.unionDistinct;\npublic class DecoderFactory\n{\npublic static Decoder createDecoder(String spec, String[] colnames, ValueType[] schema, FrameBlock meta) {\n- return createDecoder(spec, colnames, schema, meta, meta.getNumColumns());\n+ return createDecoder(spec, colnames, schema, meta, meta.getNumColumns(), -1, -1);\n}\n- public static Decoder createDecoder(String spec, String[] colnames, ValueType[] schema, FrameBlock meta, int clen)\n+ public static Decoder createDecoder(String spec, String[] colnames, ValueType[] schema, FrameBlock meta, int clen) {\n+ return createDecoder(spec, colnames, schema, meta, clen, -1, -1);\n+ }\n+\n+ public static Decoder createDecoder(String spec, String[] colnames, ValueType[] schema, FrameBlock meta, int minCol,\n+ int maxCol) {\n+ return createDecoder(spec, colnames, schema, meta, meta.getNumColumns(), minCol, maxCol);\n+ }\n+\n+ public static Decoder createDecoder(String spec, String[] colnames, ValueType[] schema,\n+ FrameBlock meta, int clen, int minCol, int maxCol)\n{\nDecoder decoder = null;\n- try\n- {\n+ try {\n//parse transform specification\nJSONObject jSpec = new JSONObject(spec);\nList<Decoder> ldecoders = new ArrayList<>();\n//create decoders 'recode', 'dummy' and 'pass-through'\nList<Integer> rcIDs = Arrays.asList(ArrayUtils.toObject(\n- TfMetaUtils.parseJsonIDList(jSpec, colnames, TfMethod.RECODE.toString())));\n+ TfMetaUtils.parseJsonIDList(jSpec, colnames, TfMethod.RECODE.toString(), minCol, maxCol)));\nList<Integer> dcIDs = Arrays.asList(ArrayUtils.toObject(\n- TfMetaUtils.parseJsonIDList(jSpec, colnames, TfMethod.DUMMYCODE.toString())));\n+ TfMetaUtils.parseJsonIDList(jSpec, colnames, TfMethod.DUMMYCODE.toString(), minCol, maxCol)));\nrcIDs = unionDistinct(rcIDs, dcIDs);\nint len = dcIDs.isEmpty() ? Math.min(meta.getNumColumns(), clen) : meta.getNumColumns();\nList<Integer> ptIDs = except(UtilFunctions.getSeqList(1, len, 1), rcIDs);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/transform/encode/Encoder.java",
"new_path": "src/main/java/org/apache/sysds/runtime/transform/encode/Encoder.java",
"diff": "@@ -134,7 +134,7 @@ public abstract class Encoder implements Serializable\n*/\npublic Encoder subRangeEncoder(int colStart, int colEnd) {\nthrow new DMLRuntimeException(\n- this.getClass().getName() + \" does not support the creation of a sub-range encoder\");\n+ this.getClass().getSimpleName() + \" does not support the creation of a sub-range encoder\");\n}\n/**\n@@ -145,7 +145,7 @@ public abstract class Encoder implements Serializable\n*/\nprotected void mergeColumnInfo(Encoder other, int col) {\n// update number of columns\n- _clen = Math.max(_colList.length, col - 1 + other.getNumCols());\n+ _clen = Math.max(_clen, col - 1 + other._clen);\n// update the new columns that this encoder operates on\nSet<Integer> colListAgg = new HashSet<>(); // for dedup\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/transform/encode/EncoderFactory.java",
"new_path": "src/main/java/org/apache/sysds/runtime/transform/encode/EncoderFactory.java",
"diff": "@@ -85,7 +85,7 @@ public class EncoderFactory\n//create individual encoders\nif( !rcIDs.isEmpty() ) {\n- EncoderRecode ra = new EncoderRecode(jSpec, colnames, clen);\n+ EncoderRecode ra = new EncoderRecode(jSpec, colnames, clen, minCol, maxCol);\nra.setColList(ArrayUtils.toPrimitive(rcIDs.toArray(new Integer[0])));\nlencoders.add(ra);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/transform/encode/EncoderRecode.java",
"new_path": "src/main/java/org/apache/sysds/runtime/transform/encode/EncoderRecode.java",
"diff": "@@ -43,11 +43,11 @@ public class EncoderRecode extends Encoder\nprivate HashMap<Integer, HashMap<String, Long>> _rcdMaps = new HashMap<>();\nprivate HashMap<Integer, HashSet<Object>> _rcdMapsPart = null;\n- public EncoderRecode(JSONObject parsedSpec, String[] colnames, int clen)\n+ public EncoderRecode(JSONObject parsedSpec, String[] colnames, int clen, int minCol, int maxCol)\nthrows JSONException\n{\nsuper(null, clen);\n- _colList = TfMetaUtils.parseJsonIDList(parsedSpec, colnames, TfMethod.RECODE.toString());\n+ _colList = TfMetaUtils.parseJsonIDList(parsedSpec, colnames, TfMethod.RECODE.toString(), minCol, maxCol);\n}\nprivate EncoderRecode(int[] colList, int clen) {\n@@ -58,6 +58,11 @@ public class EncoderRecode extends Encoder\nthis(new int[0], 0);\n}\n+ private EncoderRecode(int[] colList, int clen, HashMap<Integer, HashMap<String, Long>> rcdMaps) {\n+ super(colList, clen);\n+ _rcdMaps = rcdMaps;\n+ }\n+\npublic HashMap<Integer, HashMap<String,Long>> getCPRecodeMaps() {\nreturn _rcdMaps;\n}\n@@ -180,9 +185,7 @@ public class EncoderRecode extends Encoder\nreturn null;\nint[] colList = cols.stream().mapToInt(i -> i).toArray();\n- EncoderRecode subRangeEncoder = new EncoderRecode(colList, colEnd - colStart);\n- subRangeEncoder._rcdMaps = rcdMaps;\n- return subRangeEncoder;\n+ return new EncoderRecode(colList, colEnd - colStart, rcdMaps);\n}\n@Override\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/transform/meta/TfMetaUtils.java",
"new_path": "src/main/java/org/apache/sysds/runtime/transform/meta/TfMetaUtils.java",
"diff": "@@ -135,7 +135,7 @@ public class TfMetaUtils\n}\nif(ix <= 0) {\nif (minCol == -1 && maxCol == -1) {\n- // only if we cut of some columns, ix -1 is expected\n+ // only if we remove some columns, ix -1 is expected\nthrow new RuntimeException(\"Specified column '\"\n+ attrs.get(i)+\"' does not exist.\");\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/federated/FederatedNegativeTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/federated/FederatedNegativeTest.java",
"diff": "package org.apache.sysds.test.functions.federated;\n-import org.apache.log4j.Level;\n-import org.apache.log4j.Logger;\nimport org.apache.sysds.common.Types;\nimport org.apache.sysds.runtime.controlprogram.federated.*;\nimport org.apache.sysds.test.AutomatedTestBase;\n@@ -30,7 +28,6 @@ import org.junit.Test;\nimport java.net.InetSocketAddress;\nimport java.util.HashMap;\nimport java.util.Map;\n-import java.util.concurrent.ExecutionException;\nimport java.util.concurrent.Future;\nimport static org.junit.Assert.assertFalse;\n@@ -38,23 +35,16 @@ import static org.junit.Assert.assertTrue;\[email protected]\npublic class FederatedNegativeTest {\n- protected static Logger log = Logger.getLogger(FederatedNegativeTest.class);\n-\n- static {\n- Logger.getLogger(\"org.apache.sysds\").setLevel(Level.OFF);\n- }\n-\n@Test\npublic void NegativeTest1() {\nint port = AutomatedTestBase.getRandomAvailablePort();\nString[] args = {\"-w\", Integer.toString(port)};\nThread t = AutomatedTestBase.startLocalFedWorkerWithArgs(args);\n+ FederationUtils.resetFedDataID(); //ensure expected ID when tests run in single JVM\nMap<FederatedRange, FederatedData> fedMap = new HashMap<>();\nFederatedRange r = new FederatedRange(new long[]{0,0}, new long[]{1,1});\n- FederatedData d = new FederatedData(\n- Types.DataType.SCALAR,\n- new InetSocketAddress(\"localhost\", port),\n- \"Nowhere\");\n+ FederatedData d = new FederatedData(Types.DataType.SCALAR,\n+ new InetSocketAddress(\"localhost\", port), \"Nowhere\");\nfedMap.put(r,d);\nFederationMap fedM = new FederationMap(fedMap);\nFederatedRequest fr = new FederatedRequest(FederatedRequest.RequestType.GET_VAR);\n@@ -62,17 +52,11 @@ public class FederatedNegativeTest {\ntry {\nFederatedResponse fres = res[0].get();\nassertFalse(fres.isSuccessful());\n- assertTrue(fres.getErrorMessage().contains(\"Variable 0 does not exist at federated worker\"));\n-\n- } catch (InterruptedException e) {\n- e.printStackTrace();\n- } catch (ExecutionException e) {\n- e.printStackTrace();\n- } catch (Exception e) {\n+ assertTrue(fres.getErrorMessage().contains(\"Variable 1 does not exist at federated worker\"));\n+ }\n+ catch (Exception e) {\ne.printStackTrace();\n}\n-\nTestUtils.shutdownThread(t);\n}\n-\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/transform/TransformFederatedEncodeDecodeTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/transform/TransformFederatedEncodeDecodeTest.java",
"diff": "@@ -49,7 +49,8 @@ public class TransformFederatedEncodeDecodeTest extends AutomatedTestBase {\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- addTestConfiguration(TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] {\"FO\"}));\n+ addTestConfiguration(TEST_NAME1,\n+ new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] {\"FO1\", \"FO2\"}));\n}\n@Test\n@@ -126,14 +127,26 @@ public class TransformFederatedEncodeDecodeTest extends AutomatedTestBase {\n\"in_AL=\" + TestUtils.federatedAddress(\"localhost\", port2, input(\"AL\")),\n\"in_BU=\" + TestUtils.federatedAddress(\"localhost\", port3, input(\"BU\")),\n\"in_BL=\" + TestUtils.federatedAddress(\"localhost\", port4, input(\"BL\")), \"rows=\" + rows, \"cols=\" + cols,\n- \"spec_file=\" + SCRIPT_DIR + TEST_DIR + SPEC, \"out=\" + output(\"FO\"), \"format=\" + format.toString()};\n+ \"spec_file=\" + SCRIPT_DIR + TEST_DIR + SPEC, \"out1=\" + output(\"FO1\"), \"out2=\" + output(\"FO2\"),\n+ \"format=\" + format.toString()};\n// run test\nrunTest(true, false, null, -1);\n- // compare matrices (values recoded to identical codes)\n+ // compare frame before and after encode and decode\nFrameReader reader = FrameReaderFactory.createFrameReader(format);\n- FrameBlock FO = reader.readFrameFromHDFS(output(\"FO\"), 15, 2);\n+ FrameBlock OUT = reader.readFrameFromHDFS(output(\"FO2\"), rows, cols);\n+ for(int r = 0; r < rows; r++) {\n+ for(int c = 0; c < cols; c++) {\n+ String expected = c < cols / 2 ? Double.toString(A[r][c]) : \"Str\" + B[r][c - cols / 2];\n+ String val = (String) OUT.get(r, c);\n+ Assert.assertEquals(\"Enc- and Decoded frame does not match the source frame: \" + expected + \" vs \"\n+ + val, expected, val);\n+ }\n+ }\n+ // TODO federate the aggregated result so that the decode is applied in a federated environment\n+ // compare matrices (values recoded to identical codes)\n+ FrameBlock FO = reader.readFrameFromHDFS(output(\"FO1\"), 15, 2);\nHashMap<String, Long> cFA = getCounts(A, B);\nIterator<String[]> iterFO = FO.getStringRowIterator();\nwhile(iterFO.hasNext()) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/transform/TransformFederatedEncodeDecode.dml",
"new_path": "src/test/scripts/functions/transform/TransformFederatedEncodeDecode.dml",
"diff": "#\n#-------------------------------------------------------------\n-F1 = federated(type=\"frame\", addresses=list($in_AU, $in_AL, $in_BU, $in_BL), ranges=\n+F = federated(type=\"frame\", addresses=list($in_AU, $in_AL, $in_BU, $in_BL), ranges=\nlist(list(0,0), list($rows / 2, $cols / 2), # AUpper range\nlist($rows / 2, 0), list($rows, $cols / 2), # ALower range\nlist(0, $cols / 2), list($rows / 2, $cols), # BUpper range\nlist($rows / 2, $cols / 2), list($rows, $cols))); # BLower range\njspec = read($spec_file, data_type=\"scalar\", value_type=\"string\");\n-[X, M] = transformencode(target=F1, spec=jspec);\n+[X, M] = transformencode(target=F, spec=jspec);\nA = aggregate(target=X[,1], groups=X[,2], fn=\"count\");\nAg = cbind(A, seq(1,nrow(A)));\n-F2 = transformdecode(target=Ag, spec=jspec, meta=M);\n-\n-write(F2, $out, format=$format);\n+FO1 = transformdecode(target=Ag, spec=jspec, meta=M);\n+FO2 = transformdecode(target=X, spec=jspec, meta=M);\n+write(FO1, $out1, format=$format);\n+write(FO2, $out2, format=$format);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2554,2558,2561] Federated transform decode (recoding)
Closes #1027. |
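The `DecodeMatrix` inner class in the diff above is an instance of the general `FederatedUDF` pattern: the coordinator ships a serializable function to each worker via an `EXEC_UDF` request, the worker resolves the pinned input IDs into `Data` objects, and the UDF returns a `FederatedResponse`. Below is a stripped-down sketch of a custom UDF, assuming only the contract visible in the diff (a constructor taking the worker-side input IDs and `execute(ExecutionContext, Data...)`); the class `ColCountUDF` and its behavior are illustrative, not part of the commit:

```java
// Hypothetical FederatedUDF: returns the column count of one pinned
// worker-side matrix. Mirrors the structure of DecodeMatrix above.
import org.apache.sysds.runtime.controlprogram.caching.MatrixObject;
import org.apache.sysds.runtime.controlprogram.context.ExecutionContext;
import org.apache.sysds.runtime.controlprogram.federated.FederatedResponse;
import org.apache.sysds.runtime.controlprogram.federated.FederatedUDF;
import org.apache.sysds.runtime.instructions.cp.Data;

public class ColCountUDF extends FederatedUDF {
  private static final long serialVersionUID = 1L;

  public ColCountUDF(long inputID) {
    super(new long[] {inputID}); // IDs of worker variables to resolve
  }

  @Override
  public FederatedResponse execute(ExecutionContext ec, Data... data) {
    // data[0] is the resolved worker-side object for inputID
    MatrixObject mo = (MatrixObject) data[0];
    long ncol = mo.getNumColumns();
    // ship a single result object back to the coordinator
    return new FederatedResponse(
      FederatedResponse.ResponseType.SUCCESS, new Object[] {ncol});
  }
}
```

On the coordinator side such a UDF would be submitted with `new FederatedRequest(FederatedRequest.RequestType.EXEC_UDF, varID, new ColCountUDF(data.getVarID()))`, following the same call shape that `transformDecode` uses for `DecodeMatrix` above.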
49,706 | 21.08.2020 17:11:18 | -7,200 | cf5dc8d3b0e0b87b282413d93c4de2e973f887a8 | [MINOR] Fix federated test
This commit fixes the test added in #19b4929b3b23e01c3d62e09875891e4f6cfb6e5b
- fix enabling execution of sum square federated operations.
- fix test name in federated LogRegTest.
- fix asserted federated operations in LogRegTest |
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederationUtils.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederationUtils.java",
"diff": "@@ -136,7 +136,7 @@ public class FederationUtils {\n}\npublic static ScalarObject aggScalar(AggregateUnaryOperator aop, Future<FederatedResponse>[] ffr) {\n- if(!(aop.aggOp.increOp.fn instanceof KahanPlus || (aop.aggOp.increOp.fn instanceof Builtin &&\n+ if(!(aop.aggOp.increOp.fn instanceof KahanFunction || (aop.aggOp.increOp.fn instanceof Builtin &&\n(((Builtin) aop.aggOp.increOp.fn).getBuiltinCode() == BuiltinCode.MIN ||\n((Builtin) aop.aggOp.increOp.fn).getBuiltinCode() == BuiltinCode.MAX)))) {\nthrow new DMLRuntimeException(\"Unsupported aggregation operator: \"\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/federated/FederatedGLMTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/federated/FederatedGLMTest.java",
"diff": "@@ -99,7 +99,7 @@ public class FederatedGLMTest extends AutomatedTestBase {\nTestConfiguration config = availableTestConfigurations.get(TEST_NAME);\nloadTestConfiguration(config);\n- setOutputBuffering(false);\n+ // setOutputBuffering(false);\n// Run reference dml script with normal matrix\nfullDMLScriptName = HOME + TEST_NAME + \"Reference.dml\";\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/federated/FederatedLogRegTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/federated/FederatedLogRegTest.java",
"diff": "@@ -40,7 +40,7 @@ public class FederatedLogRegTest extends AutomatedTestBase {\nprivate final static String TEST_DIR = \"functions/federated/\";\nprivate final static String TEST_NAME = \"FederatedLogRegTest\";\n- private final static String TEST_CLASS_DIR = TEST_DIR + FederatedGLMTest.class.getSimpleName() + \"/\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + FederatedLogRegTest.class.getSimpleName() + \"/\";\nprivate final static int blocksize = 1024;\[email protected]()\n@@ -108,7 +108,7 @@ public class FederatedLogRegTest extends AutomatedTestBase {\n// Run actual dml script with federated matrix\nfullDMLScriptName = HOME + TEST_NAME + \".dml\";\n- programArgs = new String[] {\"-stats\",\n+ programArgs = new String[] {\"-stats\", \"30\",\n\"-nvargs\", \"in_X1=\" + TestUtils.federatedAddress(port1, input(\"X1\")),\n\"in_X2=\" + TestUtils.federatedAddress(port2, input(\"X2\")), \"rows=\" + rows, \"cols=\" + cols,\n\"in_Y=\" + input(\"Y\"), \"out=\" + output(\"Z\")};\n@@ -120,11 +120,9 @@ public class FederatedLogRegTest extends AutomatedTestBase {\nTestUtils.shutdownThreads(t1, t2);\n// check for federated operations\n- Assert.assertTrue(heavyHittersContainsString(\"fed_ba+*\"));\n- Assert.assertTrue(heavyHittersContainsString(\"fed_uark+\",\"fed_uarsqk+\"));\n- Assert.assertTrue(heavyHittersContainsString(\"fed_uack+\"));\n- Assert.assertTrue(heavyHittersContainsString(\"fed_uak+\"));\n- Assert.assertTrue(heavyHittersContainsString(\"fed_mmchain\"));\n+ Assert.assertTrue(\"contains federated matrix mult\",heavyHittersContainsString(\"fed_ba+*\"));\n+ Assert.assertTrue(\"contains federated row unary aggregate\",heavyHittersContainsString(\"fed_uark+\",\"fed_uarsqk+\"));\n+ Assert.assertTrue(\"contains federated matrix mult chain or transpose\",heavyHittersContainsString(\"fed_mmchain\", \"fed_r'\"));\n//check that federated input files are still existing\nAssert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"X1\")));\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/federated/FederatedLogRegTest.dml",
"new_path": "src/test/scripts/functions/federated/FederatedLogRegTest.dml",
"diff": "X = federated(addresses=list($in_X1, $in_X2),\nranges=list(list(0, 0), list($rows / 2, $cols), list($rows / 2, 0), list($rows, $cols)))\nY = read($in_Y)\n-model = multiLogReg(X=X, Y=Y, tol=1e-5, maxi=30)\n+model = multiLogReg(X=X, Y=Y, tol=1e-5, maxi=30, icpt = 0)\nwrite(model, $out)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/federated/FederatedLogRegTestReference.dml",
"new_path": "src/test/scripts/functions/federated/FederatedLogRegTestReference.dml",
"diff": "X = rbind(read($1), read($2))\nY = read($3)\n-model = multiLogReg(X=X, Y=Y, tol=1e-5, maxi=30)\n+model = multiLogReg(X=X, Y=Y, tol=1e-5, maxi=30, icpt = 0)\nwrite(model, $4)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix federated test
This commit fixes the test added in #19b4929b3b23e01c3d62e09875891e4f6cfb6e5b
- fix enabling execution of sum square federated operations.
- fix test name in federated LogRegTest.
- fix asserted federated operations in LogRegTest |
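The pivotal change in FederationUtils above is widening the guard from `KahanPlus` to its superclass `KahanFunction`, which admits the Kahan sum-of-squares function as well (in SystemDS that class is presumably `KahanPlusSq`, matching the `fed_uarsqk+` opcode asserted in the test). A self-contained analogue with local stand-in classes, just to make the subtype dispatch concrete:

```java
// Stand-in hierarchy mirroring the shape of SystemDS' Kahan functions;
// these local classes are illustrative, not the real implementation.
abstract class KahanFunction {}
class KahanPlus extends KahanFunction {}   // plain sum (uak+)
class KahanPlusSq extends KahanFunction {} // sum of squares (uasqk+)

public class GuardDemo {
  // before: only the plain Kahan sum passed the check
  static boolean supportedOld(Object fn) { return fn instanceof KahanPlus; }
  // after: any Kahan aggregate passes, including sum of squares
  static boolean supportedNew(Object fn) { return fn instanceof KahanFunction; }

  public static void main(String[] args) {
    Object sumSq = new KahanPlusSq();
    System.out.println("old guard admits sum-of-squares: " + supportedOld(sumSq)); // false
    System.out.println("new guard admits sum-of-squares: " + supportedNew(sumSq)); // true
  }
}
```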
49,722 | 24.08.2020 10:59:37 | -7200 | 3f05aa9f59f663df105307da3483d0de8f046c08 | [SYSTEMDS-2636] Built-in function for univariate statistics
Added univar and bivar builtins, federated tests, fixed fed matrix max
Closes | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/univar.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+#\n+# Computes univariate statistics for all attributes in a given data set\n+#\n+# INPUT PARAMETERS:\n+# -------------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# -------------------------------------------------------------------------------------------------\n+# X Matrix[Double] --- Input matrix of the shape (N, D)\n+# TYPES Matrix[Integer] --- Matrix of the shape (1, D) with features types:\n+# 1 for scale, 2 for nominal, 3 for ordinal\n+# -------------------------------------------------------------------------------------------------\n+# OUTPUT: Matrix of summary statistics\n+\n+m_univar = function(Matrix[Double] X, Matrix[Double] types)\n+return(Matrix[Double] univarStats)\n+{\n+ max_kind = max(types);\n+ N = nrow(X);\n+ D = ncol(X);\n+\n+ # Number of statistics (14 scale, 3 categorical)\n+ numBaseStats = 17;\n+ univarStats = matrix(0, rows=numBaseStats, cols=D);\n+\n+ # Compute max domain size among all categorical attributes\n+ maxDomain = as.integer(max((types > 1) * colMaxs(X)));\n+\n+ parfor(i in 1:D, check=0) {\n+ F = X[,i];\n+\n+ type = as.scalar(types[1,i]);\n+ minF = min(F);\n+ maxF = max(F);\n+\n+ if (type == 1) {\n+ # compute SCALE statistics on the projected column\n+ rng = maxF - minF;\n+\n+ mu = mean(F);\n+ m2 = moment(F, 2);\n+ m3 = moment(F, 3);\n+ m4 = moment(F, 4);\n+\n+ var = N/(N-1.0)*m2;\n+ std_dev = sqrt(var);\n+ se = std_dev/sqrt(N);\n+ cv = std_dev/mu;\n+\n+ g1 = m3/(std_dev^3);\n+ g2 = m4/(std_dev^4) - 3;\n+ se_g1=sqrt( (6/(N-2.0)) * (N/(N+1.0)) * ((N-1.0)/(N+3.0)) );\n+ se_g2=sqrt( (4/(N+5.0)) * ((N^2-1)/(N-3.0)) * se_g1^2 );\n+\n+ md = median(F);\n+ iqm = interQuartileMean(F);\n+\n+ univarStats[1:14,i] = as.matrix(list(minF, maxF, rng,\n+ mu, var, std_dev, se, cv, g1, g2, se_g1, se_g2, md, iqm));\n+ }\n+\n+ if (type == 2 | type == 3) {\n+ # check if the categorical column has valid values\n+ if( minF <= 0 ) {\n+ print(\"ERROR: Categorical attributes can only take values starting from 1. Encountered a value \" + minF + \" in attribute \" + i);\n+ }\n+\n+ # compute CATEGORICAL statistics on the projected column\n+ cat_counts = table(F, 1, maxDomain, 1);\n+ mode = as.scalar(rowIndexMax(t(cat_counts)));\n+ numModes = sum(cat_counts == max(cat_counts));\n+ univarStats[15:17,i] = as.matrix(list(maxF, mode, numModes));\n+ }\n+ }\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"new_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"diff": "@@ -191,6 +191,7 @@ public enum Builtins {\nCOUNT_DISTINCT_APPROX(\"countDistinctApprox\",false),\nVAR(\"var\", false),\nXOR(\"xor\", false),\n+ UNIVAR(\"univar\", true),\nWINSORIZE(\"winsorize\", true, false), //TODO parameterize w/ prob, min/max val\n//parameterized builtin functions\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederationUtils.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederationUtils.java",
"diff": "@@ -109,6 +109,21 @@ public class FederationUtils {\n}\n}\n+ public static DoubleObject aggMinMax(Future<FederatedResponse>[] ffr, boolean isMin, boolean isScalar) {\n+ try {\n+ double res = isMin ? Double.MAX_VALUE : - Double.MAX_VALUE;\n+ for (Future<FederatedResponse> fr: ffr){\n+ double v = isScalar ? ((ScalarObject)fr.get().getData()[0]).getDoubleValue() :\n+ isMin ? ((MatrixBlock) fr.get().getData()[0]).min() : ((MatrixBlock) fr.get().getData()[0]).max();\n+ res = isMin ? Math.min(res, v) : Math.max(res, v);\n+ }\n+ return new DoubleObject(res);\n+ }\n+ catch (Exception ex) {\n+ throw new DMLRuntimeException(ex);\n+ }\n+ }\n+\npublic static MatrixBlock[] getResults(Future<FederatedResponse>[] ffr) {\ntry {\nMatrixBlock[] ret = new MatrixBlock[ffr.length];\n@@ -146,13 +161,7 @@ public class FederationUtils {\nif(aop.aggOp.increOp.fn instanceof Builtin){\n// then we know it is a Min or Max based on the previous check.\nboolean isMin = ((Builtin) aop.aggOp.increOp.fn).getBuiltinCode() == BuiltinCode.MIN;\n- double res = isMin ? Double.MAX_VALUE: - Double.MAX_VALUE;\n- double v;\n- for (Future<FederatedResponse> fr: ffr){\n- v = ((ScalarObject)fr.get().getData()[0]).getDoubleValue();\n- res = isMin ? Math.min(res, v) : Math.max(res, v);\n- }\n- return new DoubleObject(res);\n+ return aggMinMax(ffr, isMin, true);\n}\nelse {\ndouble sum = 0; //uak+\n@@ -172,12 +181,17 @@ public class FederationUtils {\n//independent of aggregation function for row-partitioned federated matrices\nreturn rbind(ffr);\n}\n-\n// handle col aggregate\nif( aop.aggOp.increOp.fn instanceof KahanFunction )\nreturn aggAdd(ffr);\nelse if( aop.aggOp.increOp.fn instanceof Mean )\nreturn aggMean(ffr, map);\n+ else if (aop.aggOp.increOp.fn instanceof Builtin &&\n+ (((Builtin) aop.aggOp.increOp.fn).getBuiltinCode() == BuiltinCode.MIN ||\n+ ((Builtin) aop.aggOp.increOp.fn).getBuiltinCode() == BuiltinCode.MAX)) {\n+ boolean isMin = ((Builtin) aop.aggOp.increOp.fn).getBuiltinCode() == BuiltinCode.MIN;\n+ return new MatrixBlock(1,1,aggMinMax(ffr, isMin, false).getDoubleValue());\n+ }\nelse\nthrow new DMLRuntimeException(\"Unsupported aggregation operator: \"\n+ aop.aggOp.increOp.fn.getClass().getSimpleName());\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/federated/FederatedBivarTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.federated;\n+\n+import org.apache.sysds.common.Types;\n+import org.apache.sysds.runtime.meta.MatrixCharacteristics;\n+import org.apache.sysds.runtime.util.HDFSTool;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Assert;\n+import org.junit.Ignore;\n+import org.junit.Test;\n+import org.junit.runner.RunWith;\n+import org.junit.runners.Parameterized;\n+\n+import java.util.Arrays;\n+import java.util.Collection;\n+\n+@RunWith(value = Parameterized.class)\[email protected]\n+public class FederatedBivarTest extends AutomatedTestBase {\n+ private final static String TEST_DIR = \"functions/federated/\";\n+ private final static String TEST_NAME = \"FederatedBivarTest\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + FederatedUnivarTest.class.getSimpleName() + \"/\";\n+ private final static int blocksize = 1024;\n+ @Parameterized.Parameter()\n+ public int rows;\n+ @Parameterized.Parameter(1)\n+ public int cols;\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] {\"B\"}));\n+ }\n+\n+ @Parameterized.Parameters\n+ public static Collection<Object[]> data() {\n+ return Arrays.asList(new Object[][] {\n+ {10000, 10},\n+// {2000, 50}, {1000, 10},\n+// {10000, 10}, {2000, 50}, {1000, 100}\n+ });\n+ }\n+\n+ @Test\n+ @Ignore\n+ public void federatedBivarSinglenode() {\n+ federatedL2SVM(Types.ExecMode.SINGLE_NODE);\n+ }\n+\n+ @Test\n+ @Ignore\n+ public void federatedBivarHybrid() {\n+ federatedL2SVM(Types.ExecMode.HYBRID);\n+ }\n+\n+ public void federatedL2SVM(Types.ExecMode execMode) {\n+ Types.ExecMode platformOld = setExecMode(execMode);\n+\n+ getAndLoadTestConfiguration(TEST_NAME);\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+\n+ // write input matrices\n+ int quarterRows = rows / 4;\n+ // We have two matrices handled by a single federated worker\n+ double[][] X1 = getRandomMatrix(quarterRows, cols, 1, 5, 1, 3);\n+ double[][] X2 = getRandomMatrix(quarterRows, cols, 1, 5, 1, 7);\n+ double[][] X3 = getRandomMatrix(quarterRows, cols, 1, 5, 1, 8);\n+ double[][] X4 = getRandomMatrix(quarterRows, cols, 1, 5, 1, 9);\n+\n+ // write types matrix shape of (1, D)\n+ double [][] T1 = getRandomMatrix(1, cols, 0, 2, 1, 9);\n+ Arrays.stream(T1[0]).forEach(n -> Math.ceil(n));\n+\n+ double [][] T2 = getRandomMatrix(1, cols, 0, 2, 1, 9);\n+ Arrays.stream(T2[0]).forEach(n -> Math.ceil(n));\n+\n+ double [][] S1 = getRandomMatrix(1, (int) cols/5, 1, cols, 1, 3);\n+ Arrays.stream(S1[0]).forEach(n -> 
Math.ceil(n));\n+\n+ double [][] S2 = getRandomMatrix(1, (int) cols/4, 1, cols, 1, 9);\n+ Arrays.stream(S2[0]).forEach(n -> Math.ceil(n));\n+\n+ MatrixCharacteristics mc= new MatrixCharacteristics(quarterRows, cols, blocksize, quarterRows * cols);\n+ writeInputMatrixWithMTD(\"X1\", X1, false, mc);\n+ writeInputMatrixWithMTD(\"X2\", X2, false, mc);\n+ writeInputMatrixWithMTD(\"X3\", X3, false, mc);\n+ writeInputMatrixWithMTD(\"X4\", X4, false, mc);\n+ writeInputMatrixWithMTD(\"S1\", S1, false);\n+ writeInputMatrixWithMTD(\"S2\", S2, false);\n+ writeInputMatrixWithMTD(\"T1\", T1, false);\n+ writeInputMatrixWithMTD(\"T2\", T2, false);\n+\n+ // empty script name because we don't execute any script, just start the worker\n+ fullDMLScriptName = \"\";\n+ int port1 = getRandomAvailablePort();\n+ int port2 = getRandomAvailablePort();\n+ int port3 = getRandomAvailablePort();\n+ int port4 = getRandomAvailablePort();\n+ Thread t1 = startLocalFedWorker(port1);\n+ Thread t2 = startLocalFedWorker(port2);\n+ Thread t3 = startLocalFedWorker(port3);\n+ Thread t4 = startLocalFedWorker(port4);\n+\n+ TestConfiguration config = availableTestConfigurations.get(TEST_NAME);\n+ loadTestConfiguration(config);\n+ setOutputBuffering(false);\n+\n+ // Run reference dml script with normal matrix\n+ fullDMLScriptName = HOME + TEST_NAME + \"Reference.dml\";\n+ programArgs = new String[] {\"-stats\", \"-args\", input(\"X1\"), input(\"X2\"), input(\"X3\"), input(\"X4\"), input(\"S1\"), input(\"S2\"), input(\"T1\"), input(\"T2\"), expected(\"B\")};\n+ runTest(true, false, null, -1);\n+\n+ // Run actual dml script with federated matrix\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[] {\"-stats\", \"-nvargs\",\n+ \"in_X1=\" + TestUtils.federatedAddress(port1, input(\"X1\")),\n+ \"in_X2=\" + TestUtils.federatedAddress(port2, input(\"X2\")),\n+ \"in_X3=\" + TestUtils.federatedAddress(port3, input(\"X3\")),\n+ \"in_X4=\" + TestUtils.federatedAddress(port4, input(\"X4\")),\n+ \"in_S1=\" + input(\"S1\"),\n+ \"in_S2=\" + input(\"S2\"),\n+ \"in_T1=\" + input(\"T1\"),\n+ \"in_T2=\" + input(\"T2\"),\n+ \"rows=\" + rows, \"cols=\" + cols,\n+ \"out=\" + output(\"B\")};\n+ runTest(true, false, null, -1);\n+\n+ // compare via files\n+// compareResults(1e-9);\n+ TestUtils.shutdownThreads(t1, t2, t3, t4);\n+\n+ // check for federated operations\n+// Assert.assertTrue(heavyHittersContainsString(\"fed_ba+*\"));\n+// Assert.assertTrue(heavyHittersContainsString(\"fed_uack+\"));\n+// Assert.assertTrue(heavyHittersContainsString(\"fed_tsmm\"));\n+// if( scaleAndShift ) {\n+// Assert.assertTrue(heavyHittersContainsString(\"fed_uacsqk+\"));\n+// Assert.assertTrue(heavyHittersContainsString(\"fed_uacmean\"));\n+// Assert.assertTrue(heavyHittersContainsString(\"fed_-\"));\n+// Assert.assertTrue(heavyHittersContainsString(\"fed_/\"));\n+// Assert.assertTrue(heavyHittersContainsString(\"fed_replace\"));\n+// }\n+\n+ //check that federated input files are still existing\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"X1\")));\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"X2\")));\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"X3\")));\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"X4\")));\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"S1\")));\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"S2\")));\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"T1\")));\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"T2\")));\n+\n+ resetExecMode(platformOld);\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/federated/FederatedUnivarTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.federated;\n+\n+import org.apache.sysds.common.Types;\n+import org.apache.sysds.runtime.meta.MatrixCharacteristics;\n+import org.apache.sysds.runtime.util.HDFSTool;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.junit.runner.RunWith;\n+import org.junit.runners.Parameterized;\n+\n+import java.util.Arrays;\n+import java.util.Collection;\n+\n+@RunWith(value = Parameterized.class)\[email protected]\n+public class FederatedUnivarTest extends AutomatedTestBase {\n+ private final static String TEST_DIR = \"functions/federated/\";\n+ private final static String TEST_NAME = \"FederatedUnivarTest\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + FederatedUnivarTest.class.getSimpleName() + \"/\";\n+\n+ private final static int blocksize = 1024;\n+ @Parameterized.Parameter()\n+ public int rows;\n+ @Parameterized.Parameter(1)\n+ public int cols;\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] {\"B\"}));\n+ }\n+\n+ @Parameterized.Parameters\n+ public static Collection<Object[]> data() {\n+ return Arrays.asList(new Object[][] {\n+ {10000, 16},\n+ {2000, 32}, {1000, 64}, {10000, 128}\n+ });\n+ }\n+\n+ @Test\n+ public void federatedUnivarSinglenode() {\n+ federatedL2SVM(Types.ExecMode.SINGLE_NODE);\n+ }\n+\n+ @Test\n+ public void federatedUnivarHybrid() {\n+ federatedL2SVM(Types.ExecMode.HYBRID);\n+ }\n+\n+ public void federatedL2SVM(Types.ExecMode execMode) {\n+ Types.ExecMode platformOld = setExecMode(execMode);\n+\n+ getAndLoadTestConfiguration(TEST_NAME);\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+\n+ // write input matrices\n+ int quarterCols = cols / 4;\n+ // We have two matrices handled by a single federated worker\n+ double[][] X1 = getRandomMatrix(rows, quarterCols, 1, 5, 1, 3);\n+ double[][] X2 = getRandomMatrix(rows, quarterCols, 1, 5, 1, 7);\n+ double[][] X3 = getRandomMatrix(rows, quarterCols, 1, 5, 1, 8);\n+ double[][] X4 = getRandomMatrix(rows, quarterCols, 1, 5, 1, 9);\n+\n+ // write types matrix shape of (1, D)\n+ double [][] Y = getRandomMatrix(1, cols, 0, 3, 1, 9);\n+ Arrays.stream(Y[0]).forEach(Math::ceil);\n+\n+ MatrixCharacteristics mc= new MatrixCharacteristics(rows, quarterCols, blocksize, rows * quarterCols);\n+ writeInputMatrixWithMTD(\"X1\", X1, false, mc);\n+ writeInputMatrixWithMTD(\"X2\", X2, false, mc);\n+ writeInputMatrixWithMTD(\"X3\", X3, false, mc);\n+ writeInputMatrixWithMTD(\"X4\", X4, false, 
mc);\n+ writeInputMatrixWithMTD(\"Y\", Y, false);\n+\n+ // empty script name because we don't execute any script, just start the worker\n+ fullDMLScriptName = \"\";\n+ int port1 = getRandomAvailablePort();\n+ int port2 = getRandomAvailablePort();\n+ int port3 = getRandomAvailablePort();\n+ int port4 = getRandomAvailablePort();\n+ Thread t1 = startLocalFedWorker(port1);\n+ Thread t2 = startLocalFedWorker(port2);\n+ Thread t3 = startLocalFedWorker(port3);\n+ Thread t4 = startLocalFedWorker(port4);\n+\n+ TestConfiguration config = availableTestConfigurations.get(TEST_NAME);\n+ loadTestConfiguration(config);\n+ setOutputBuffering(false);\n+\n+ // Run reference dml script with normal matrix\n+ fullDMLScriptName = HOME + TEST_NAME + \"Reference.dml\";\n+ programArgs = new String[] {\"-stats\", \"100\", \"-args\", input(\"X1\"), input(\"X2\"), input(\"X3\"), input(\"X4\"), input(\"Y\"), expected(\"B\")};\n+ runTest(true, false, null, -1);\n+\n+ // Run actual dml script with federated matrix\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[] {\"-stats\", \"100\", \"-nvargs\",\n+ \"in_X1=\" + TestUtils.federatedAddress(port1, input(\"X1\")),\n+ \"in_X2=\" + TestUtils.federatedAddress(port2, input(\"X2\")),\n+ \"in_X3=\" + TestUtils.federatedAddress(port3, input(\"X3\")),\n+ \"in_X4=\" + TestUtils.federatedAddress(port4, input(\"X4\")),\n+ \"in_Y=\" + input(\"Y\"), // types\n+ \"rows=\" + rows, \"cols=\" + cols,\n+ \"out=\" + output(\"B\")};\n+ runTest(true, false, null, -1);\n+\n+ // compare via files\n+ compareResults(1e-9);\n+ TestUtils.shutdownThreads(t1, t2, t3, t4);\n+\n+ // check for federated operations\n+ Assert.assertTrue(heavyHittersContainsString(\"fed_uacmax\"));\n+\n+ //check that federated input files are still existing\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"X1\")));\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"X2\")));\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"X3\")));\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"X4\")));\n+ Assert.assertTrue(HDFSTool.existsFileOnHDFS(input(\"Y\")));\n+\n+ resetExecMode(platformOld);\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/federated/FederatedBivarTest.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = federated(addresses=list($in_X1, $in_X2, $in_X3, $in_X4),\n+ ranges=list(list(0, 0), list($rows/4, $cols), list($rows/4, 0), list(2*$rows/4, $cols),\n+ list(2*$rows/4, 0), list(3*$rows/4, $cols), list(3*$rows/4, 0), list($rows, $cols)));\n+S1 = read($in_S1);\n+S2 = read($in_S2);\n+T1 = read($in_T1);\n+T2 = read($in_T2);\n+B = bivar(X=X, S1=S1, S2=S2, T1=T1, T2=T2);\n+write(B, $out);\n+\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/federated/FederatedBivarTestReference.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = rbind(read($1), read($2), read($3), read($4));\n+S1 = read($5);\n+S2 = read($6);\n+T1 = read($7);\n+T2 = read($8);\n+B = bivar(X=X, S1=S1, S2=S2, T1=T1, T2=T2);\n+write(B, $9);\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/federated/FederatedUnivarTest.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+#X = federated(addresses=list($in_X1, $in_X2, $in_X3, $in_X4),\n+# ranges=list(list(0, 0), list($rows/4, $cols), list($rows/4, 0), list(2*$rows/4, $cols),\n+# list(2*$rows/4, 0), list(3*$rows/4, $cols), list(3*$rows/4, 0), list($rows, $cols)));\n+\n+X = federated(addresses=list($in_X1, $in_X2, $in_X3, $in_X4),\n+ ranges=list(list(0, 0), list($rows, $cols/4), list(0,$cols/4), list($rows, $cols/2), list(0,$cols/2), list($rows, 3*($cols/4)), list(0, 3*($cols/4)), list($rows, $cols)));\n+Y = read($in_Y);\n+B = univar(X=X, types=Y);\n+write(B, $out);\n+\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/federated/FederatedUnivarTestReference.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+#X = rbind(read($1), read($2), read($3), read($4));\n+X = cbind(read($1), read($2), read($3), read($4));\n+types = read($5);\n+B = univar(X=X, types=types);\n+write(B, $6);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2636] Built-in function for univariate statistics
Added univar and bivar builtins, federated tests, fixed fed matrix max
Closes #1035. |
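The new `FederationUtils.aggMinMax` in the diff above folds per-worker partial minima/maxima into a global result, which is what lets `univar`'s column max over a federated matrix work (the `fed_uacmax` heavy hitter asserted in the test). Below is a self-contained analogue of that fold, with plain futures standing in for `Future<FederatedResponse>` and hard-coded values standing in for worker partials:

```java
// Self-contained sketch of the min/max fold in aggMinMax: start from the
// identity element and combine each worker's partial with min/max.
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class MinMaxFold {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    // partial maxima as they might arrive from two federated workers
    List<Future<Double>> partials =
      List.of(pool.submit(() -> 4.2), pool.submit(() -> 7.9));

    boolean isMin = false; // false -> global max, true -> global min
    double res = isMin ? Double.MAX_VALUE : -Double.MAX_VALUE;
    for (Future<Double> fr : partials) {
      double v = fr.get(); // blocks until the worker's partial arrives
      res = isMin ? Math.min(res, v) : Math.max(res, v);
    }
    System.out.println("global " + (isMin ? "min" : "max") + ": " + res); // 7.9
    pool.shutdown();
  }
}
```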
49,684 | 17.08.2020 18:20:06 | -7,200 | c0643923206616b7503d8b74884b3e8d40ccfae3 | Leaky relu NN layer, incl grad check and 2NN example
Closes | [
{
"change_type": "RENAME",
"old_path": "scripts/nn/examples/Example-MNIST_2NN_ReLu_Softmax.dml",
"new_path": "scripts/nn/examples/Example-MNIST_2NN_Leaky_ReLu_Softmax.dml",
"diff": "#-------------------------------------------------------------\n/*\n+ * This Example trains a feed forward neural network with one input layer, two hidden affine layers (200 neurons) with\n+ * leaky relu activations and one affine output layer with a softmax activation\n+ *\n+ * The reason for this example is to test the performance differences between single threaded, with a parameter server\n+ * for parallelization and finally federated across multiple SystemDS instances\n+ *\n+ * Inputs:\n+ * - train: The file containing the training data\n+ * - test: the file containing the test data\n+ *\n* The MNIST Data can be downloaded as follows:\n* mkdir -p data/mnist/\n* cd data/mnist/\n* curl -O https://pjreddie.com/media/files/mnist_train.csv\n* curl -O https://pjreddie.com/media/files/mnist_test.csv\n+ *\n+ * Sample Invocation\n+ *\n+ * systemds \"<path to systemds repo>/systemds/scripts/nn/examples/Example-MNIST_2NN_Leaky_ReLu_Softmax.dml\"\n+ * -nvargs train=<path to data>/mnist_data/mnist_train.csv test=<path to data>/mnist_data/mnist_test.csv\n*/\nsource(\"nn/examples/mnist_2NN.dml\") as mnist_2NN\n# Read training data\n-data = read(\"mnist_data/mnist_train.csv\", format=\"csv\")\n+data = read($train, format=\"csv\")\nn = nrow(data)\n# Extract images and labels\n@@ -48,11 +63,11 @@ y = labels[5001:nrow(images),]\ny_val = labels[1:5000,]\n# Train\n-epochs = 5\n+epochs = 1\n[W_1, b_1, W_2, b_2, W_3, b_3] = mnist_2NN::train(X, y, X_val, y_val, epochs)\n# Read test data\n-data = read(\"mnist_data/mnist_test.csv\", format=\"csv\")\n+data = read($test, format=\"csv\")\nn = nrow(data)\n# Extract images and labels\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/examples/mnist_2NN.dml",
"new_path": "scripts/nn/examples/mnist_2NN.dml",
"diff": "#-------------------------------------------------------------\n/*\n- * MNIST 2NN Relu Example\n+ * MNIST 2NN Leaky Relu Example\n*/\n# Imports\nsource(\"nn/layers/affine.dml\") as affine\nsource(\"nn/layers/cross_entropy_loss.dml\") as cross_entropy_loss\n-source(\"nn/layers/relu.dml\") as relu\n+source(\"nn/layers/leaky_relu.dml\") as leaky_relu\nsource(\"nn/layers/softmax.dml\") as softmax\nsource(\"nn/optim/sgd_nesterov.dml\") as sgd_nesterov\n-train = function(matrix[double] X, matrix[double] Y,\n- matrix[double] X_val, matrix[double] Y_val,\n- int epochs)\n- return (matrix[double] W_1, matrix[double] b_1,\n- matrix[double] W_2, matrix[double] b_2,\n- matrix[double] W_3, matrix[double] b_3) {\n+train = function(matrix[double] X, matrix[double] Y, matrix[double] X_val,\n+ matrix[double] Y_val, int epochs)\n+ return (matrix[double] W_1, matrix[double] b_1, matrix[double] W_2,\n+ matrix[double] b_2, matrix[double] W_3, matrix[double] b_3)\n+{\n/*\n- * Trains a 2NN relu softmax classifier.\n+ * Trains a 2 hidden layer leaky relu softmax classifier.\n*\n* The input matrix, X, has N examples, each with D features.\n* The targets, Y, have K classes, and are one-hot encoded.\n@@ -53,12 +52,13 @@ train = function(matrix[double] X, matrix[double] Y,\n* - W: Weights (parameters) matrix, of shape (D, M, 3).\n* - b: Biases vector, of shape (1, M, 3).\n*/\n+\nN = nrow(X) # num examples\nD = ncol(X) # num features\nK = ncol(Y) # num classes\n# Create the network:\n- # input -> 200 neuron affine -> relu -> 200 neuron affine -> relu -> K neurons affine -> softmax\n+ # input -> 200 neuron affine -> leaky_relu -> 200 neuron affine -> leaky_relu -> K neurons affine -> softmax\n[W_1, b_1] = affine::init(D, 200)\n[W_2, b_2] = affine::init(200, 200)\n[W_3, b_3] = affine::init(200, K)\n@@ -87,12 +87,12 @@ train = function(matrix[double] X, matrix[double] Y,\ny_batch = Y[beg:end,]\n# Compute forward pass\n- ## input D -> 200 neuron affine -> relu -> 200 neuron affine -> relu -> K neurons affine -> softmax\n+ ## input D -> 200 neuron affine -> leaky_relu -> 200 neuron affine -> leaky_relu -> K neurons affine -> softmax\nout_1 = affine::forward(X_batch, W_1, b_1)\n- out_1_relu = relu::forward(out_1)\n- out_2 = affine::forward(out_1_relu, W_2, b_2)\n- out_2_relu = relu::forward(out_2)\n- out_3 = affine::forward(out_2_relu, W_3, b_3)\n+ out_1_leaky_relu = leaky_relu::forward(out_1)\n+ out_2 = affine::forward(out_1_leaky_relu, W_2, b_2)\n+ out_2_leaky_relu = leaky_relu::forward(out_2)\n+ out_3 = affine::forward(out_2_leaky_relu, W_3, b_3)\nprobs = softmax::forward(out_3)\n# Compute loss & accuracy for training & validation data\n@@ -108,10 +108,10 @@ train = function(matrix[double] X, matrix[double] Y,\n## loss:\ndprobs = cross_entropy_loss::backward(probs, y_batch)\ndout_3 = softmax::backward(dprobs, out_3)\n- [dout_2_relu, dW_3, db_3] = affine::backward(dout_3, out_2_relu, W_3, b_3)\n- dout_2 = relu::backward(dout_2_relu, out_2)\n- [dout_1_relu, dW_2, db_2] = affine::backward(dout_2, out_1_relu, W_2, b_2)\n- dout_1 = relu::backward(dout_1_relu, out_1)\n+ [dout_2_leaky_relu, dW_3, db_3] = affine::backward(dout_3, out_2_leaky_relu, W_3, b_3)\n+ dout_2 = leaky_relu::backward(dout_2_leaky_relu, out_2)\n+ [dout_1_leaky_relu, dW_2, db_2] = affine::backward(dout_2, out_1_leaky_relu, W_2, b_2)\n+ dout_1 = leaky_relu::backward(dout_1_leaky_relu, out_1)\n[dX_batch, dW_1, db_1] = affine::backward(dout_1, X_batch, W_1, b_1)\n# Optimize with SGD\n@@ -122,6 +122,7 @@ train = function(matrix[double] X, matrix[double] 
Y,\n[W_1, vW_1] = sgd_nesterov::update(W_1, dW_1, lr, mu, vW_1)\n[b_1, vb_1] = sgd_nesterov::update(b_1, db_1, lr, mu, vb_1)\n}\n+\n# Anneal momentum towards 0.999\nmu = mu + (0.999 - mu)/(1+epochs-e)\n# Decay learning rate\n@@ -148,10 +149,10 @@ predict = function(matrix[double] X,\n* - probs: Class probabilities, of shape (N, K).\n*/\n# Compute forward pass\n- ## input -> 200 neuron affine -> relu -> 200 neuron affine -> relu -> K neurons affine -> softmax\n- out_1_relu = relu::forward(affine::forward(X, W_1, b_1))\n- out_2_relu = relu::forward(affine::forward(out_1_relu, W_2, b_2))\n- probs = softmax::forward(affine::forward(out_2_relu, W_3, b_3))\n+ ## input -> 200 neuron affine -> leaky_relu -> 200 neuron affine -> leaky_relu -> K neurons affine -> softmax\n+ out_1_leaky_relu = leaky_relu::forward(affine::forward(X, W_1, b_1))\n+ out_2_leaky_relu = leaky_relu::forward(affine::forward(out_1_leaky_relu, W_2, b_2))\n+ probs = softmax::forward(affine::forward(out_2_leaky_relu, W_3, b_3))\n}\neval = function(matrix[double] probs, matrix[double] Y)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/layers/leaky_relu.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+/*\n+ * Leaky Rectified Linear Unit (ReLU) nonlinearity layer.\n+ */\n+\n+forward = function(matrix[double] X)\n+ return (matrix[double] out) {\n+ /*\n+ * Computes the forward pass for a Leaky ReLU nonlinearity layer.\n+ *\n+ * Inputs:\n+ * - X: Inputs, of shape (any, any).\n+ *\n+ * Outputs:\n+ * - out: Outputs, of same shape as `X`.\n+ */\n+ # compute ifelse(X > 0, X, 0.01 * X)\n+ out = max(X, 0.01 * X)\n+}\n+\n+backward = function(matrix[double] dout, matrix[double] X)\n+ return (matrix[double] dX) {\n+ /*\n+ * Computes the backward pass for a Leaky ReLU nonlinearity layer.\n+ *\n+ * Inputs:\n+ * - dout: Gradient wrt `out` from upstream, of same shape as `X`.\n+ * - X: Previous input data matrix, of shape (any, any).\n+ *\n+ * Outputs:\n+ * - dX: Gradient wrt `X`, of same shape as `X`.\n+ */\n+ dX = ifelse(X > 0, dout, 0.01 * dout)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/test/grad_check.dml",
"new_path": "scripts/nn/test/grad_check.dml",
"diff": "@@ -46,6 +46,7 @@ source(\"scripts/nn/layers/max_pool2d_builtin.dml\") as max_pool2d_builtin\nsource(\"scripts/nn/layers/avg_pool2d_builtin.dml\") as avg_pool2d_builtin\nsource(\"scripts/nn/layers/upsample2d.dml\") as upsample2d\nsource(\"scripts/nn/layers/relu.dml\") as relu\n+source(\"scripts/nn/layers/leaky_relu.dml\") as leaky_relu\nsource(\"scripts/nn/layers/rnn.dml\") as rnn\nsource(\"scripts/nn/layers/scale_shift1d.dml\") as scale_shift1d\nsource(\"scripts/nn/layers/scale_shift2d.dml\") as scale_shift2d\n@@ -1827,6 +1828,50 @@ relu = function() {\n}\n}\n+leaky_relu = function() {\n+ /*\n+ * Gradient check for the ReLU nonlinearity layer.\n+ *\n+ * NOTE: This could result in a false-negative in which the test\n+ * fails due to a kink being crossed in the nonlinearity. This\n+ * occurs when the tests, f(x-h) and f(x+h), end up on opposite\n+ * sides of the zero threshold of max(0, fx). For now, just run\n+ * the tests again. In the future, we can explicitly check for\n+ * this and rerun the test automatically.\n+ */\n+ print(\"Grad checking the Leaky ReLU nonlinearity layer with L2 loss.\")\n+\n+ # Generate data\n+ N = 3 # num examples\n+ M = 10 # num neurons\n+ X = rand(rows=N, cols=M, min=-5, max=5)\n+ y = rand(rows=N, cols=M)\n+ # Compute analytical gradients of loss wrt parameters\n+ out = leaky_relu::forward(X)\n+ dout = l2_loss::backward(out, y)\n+ dX = leaky_relu::backward(dout, X)\n+\n+ # Grad check\n+ h = 1e-5\n+ for (i in 1:nrow(X)) {\n+ for (j in 1:ncol(X)) {\n+ # Compute numerical derivative\n+ old = as.scalar(X[i,j])\n+ X[i,j] = old - h\n+ outmh = leaky_relu::forward(X)\n+ lossmh = l2_loss::forward(outmh, y)\n+ X[i,j] = old + h\n+ outph = leaky_relu::forward(X)\n+ lossph = l2_loss::forward(outph, y)\n+ X[i,j] = old # reset\n+ dX_num = (lossph-lossmh) / (2*h) # numerical derivative\n+\n+ # Check error\n+ rel_error = test_util::check_rel_grad_error(as.scalar(dX[i,j]), dX_num, lossph, lossmh)\n+ }\n+ }\n+}\n+\nrnn = function() {\n/*\n* Gradient check for the simple RNN layer.\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/test/run_tests.dml",
"new_path": "scripts/nn/test/run_tests.dml",
"diff": "@@ -60,6 +60,7 @@ grad_check::max_pool2d_simple()\ngrad_check::avg_pool2d_builtin()\ngrad_check::upsample2d()\ngrad_check::relu()\n+grad_check::leaky_relu()\ngrad_check::rnn()\ngrad_check::scale_shift1d()\ngrad_check::scale_shift2d()\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2637] Leaky relu NN layer, incl grad check and 2NN example
Closes #1026. |
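The math of the new layer is small enough to restate directly; a minimal NumPy sketch of the same forward/backward pair, with the 0.01 slope hard-coded exactly as in leaky_relu.dml above:

    import numpy as np

    def leaky_relu_forward(X: np.ndarray) -> np.ndarray:
        # DML: out = max(X, 0.01 * X)
        return np.maximum(X, 0.01 * X)

    def leaky_relu_backward(dout: np.ndarray, X: np.ndarray) -> np.ndarray:
        # DML: dX = ifelse(X > 0, dout, 0.01 * dout)
        return np.where(X > 0, dout, 0.01 * dout)

The max(X, 0.01*X) form equals the usual piecewise definition because 0.01*X exceeds X exactly when X is negative, which is also why the grad check only has the kink at zero to worry about.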
49,706 | 24.08.2020 13:43:48 | -7,200 | 8b0dff6a03c92e680c6fda3d551e599eed2c2f8e | [MINOR] Add arguments to worker in bin/systemds | [
{
"change_type": "MODIFY",
"old_path": "bin/systemds",
"new_path": "bin/systemds",
"diff": "@@ -156,6 +156,7 @@ elif echo \"$1\" | grep -q \"WORKER\"; then\necho \"error: Port is not a number\"\nprintUsageExit\nfi\n+ shift\nelse\n# handle optional '-f' before DML file (for consistency)\nif echo \"$1\" | grep -q \"\\-f\"; then\n@@ -291,7 +292,8 @@ if [ $WORKER == 1 ]; then\n-cp $CLASSPATH \\\n$LOG4JPROP \\\norg.apache.sysds.api.DMLScript \\\n- -w $PORT\"\n+ -w $PORT \\\n+ $*\"\nelif [ $SYSDS_DISTRIBUTED == 0 ]; then\nprint_out \"#\"\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Add arguments to worker in bin/systemds |
49,706 | 24.08.2020 14:36:41 | -7,200 | ce3c49e65a04e2dc5d807eafe70cc277c4cf657d | [MINOR] DML config enabled for Federated Worker | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/api/DMLScript.java",
"new_path": "src/main/java/org/apache/sysds/api/DMLScript.java",
"diff": "@@ -256,6 +256,7 @@ public class DMLScript\n}\nif (dmlOptions.fedWorker) {\n+ loadConfiguration(fnameOptConfig);\nnew FederatedWorker(dmlOptions.fedWorkerPort).run();\nreturn true;\n}\n@@ -355,6 +356,15 @@ public class DMLScript\n// (core compilation and execute)\n////////\n+ private static void loadConfiguration(String fnameOptConfig) throws IOException {\n+ DMLConfig dmlconf = DMLConfig.readConfigurationFile(fnameOptConfig);\n+ ConfigurationManager.setGlobalConfig(dmlconf);\n+ CompilerConfig cconf = OptimizerUtils.constructCompilerConfig(dmlconf);\n+ ConfigurationManager.setGlobalConfig(cconf);\n+ LOG.debug(\"\\nDML config: \\n\" + dmlconf.getConfigInfo());\n+ setGlobalFlags(dmlconf);\n+ }\n+\n/**\n* The running body of DMLScript execution. This method should be called after execution properties have been correctly set,\n* and customized parameters have been put into _argVals\n@@ -372,13 +382,7 @@ public class DMLScript\nprintStartExecInfo( dmlScriptStr );\n//Step 1: parse configuration files & write any configuration specific global variables\n- DMLConfig dmlconf = DMLConfig.readConfigurationFile(fnameOptConfig);\n- ConfigurationManager.setGlobalConfig(dmlconf);\n- CompilerConfig cconf = OptimizerUtils.constructCompilerConfig(dmlconf);\n- ConfigurationManager.setGlobalConfig(cconf);\n- LOG.debug(\"\\nDML config: \\n\" + dmlconf.getConfigInfo());\n-\n- setGlobalFlags(dmlconf);\n+ loadConfiguration(fnameOptConfig);\n//Step 3: parse dml script\nStatistics.startCompileTimer();\n@@ -392,7 +396,7 @@ public class DMLScript\ndmlt.constructHops(prog);\n//init working directories (before usage by following compilation steps)\n- initHadoopExecution( dmlconf );\n+ initHadoopExecution( ConfigurationManager.getDMLConfig() );\n//Step 5: rewrite HOP DAGs (incl IPA and memory estimates)\ndmlt.rewriteHopsDAG(prog);\n@@ -401,7 +405,7 @@ public class DMLScript\ndmlt.constructLops(prog);\n//Step 7: generate runtime program, incl codegen\n- Program rtprog = dmlt.getRuntimeProgram(prog, dmlconf);\n+ Program rtprog = dmlt.getRuntimeProgram(prog, ConfigurationManager.getDMLConfig());\n//Step 9: prepare statistics [and optional explain output]\n//count number compiled MR jobs / SP instructions\n@@ -421,14 +425,14 @@ public class DMLScript\nExecutionContext ec = null;\ntry {\nec = ExecutionContextFactory.createContext(rtprog);\n- ScriptExecutorUtils.executeRuntimeProgram(rtprog, ec, dmlconf, STATISTICS ? STATISTICS_COUNT : 0, null);\n+ ScriptExecutorUtils.executeRuntimeProgram(rtprog, ec, ConfigurationManager.getDMLConfig(), STATISTICS ? STATISTICS_COUNT : 0, null);\n}\nfinally {\nif(ec != null && ec instanceof SparkExecutionContext)\n((SparkExecutionContext) ec).close();\nLOG.info(\"END DML run \" + getDateTime() );\n//cleanup scratch_space and all working dirs\n- cleanupHadoopExecution( dmlconf );\n+ cleanupHadoopExecution( ConfigurationManager.getDMLConfig());\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] DML config enabled for Federated Worker |
49,706 | 24.08.2020 16:59:26 | -7,200 | 7d2e6bb7e6005637e9afc4c02d3fda5ae0bf34ec | [MINOR] Debug print reading
This commit adds a debug print when reading from file to show whether the
read is done in parallel.
Also in this commit the delay for worker startup in federated tests is
increased from 1 sec to 2 sec. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/io/MatrixReaderFactory.java",
"new_path": "src/main/java/org/apache/sysds/runtime/io/MatrixReaderFactory.java",
"diff": "package org.apache.sysds.runtime.io;\n-import org.apache.sysds.conf.ConfigurationManager;\n+import org.apache.commons.logging.Log;\n+import org.apache.commons.logging.LogFactory;\nimport org.apache.sysds.common.Types.FileFormat;\nimport org.apache.sysds.conf.CompilerConfig.ConfigType;\n+import org.apache.sysds.conf.ConfigurationManager;\nimport org.apache.sysds.runtime.DMLRuntimeException;\nimport org.apache.sysds.runtime.data.SparseBlock;\nimport org.apache.sysds.runtime.matrix.data.MatrixBlock;\n-public class MatrixReaderFactory\n-{\n+public class MatrixReaderFactory {\n+ private static final Log LOG = LogFactory.getLog(MatrixReaderFactory.class.getName());\npublic static MatrixReader createMatrixReader(FileFormat fmt) {\nMatrixReader reader = null;\nboolean par = ConfigurationManager.getCompilerConfigFlag(ConfigType.PARALLEL_CP_READ_TEXTFORMATS);\nboolean mcsr = MatrixBlock.DEFAULT_SPARSEBLOCK == SparseBlock.Type.MCSR;\n+ if (LOG.isDebugEnabled()){\n+ LOG.debug(\"reading parallel: \" + par + \" mcsr: \" + mcsr);\n+ }\n+\nswitch(fmt) {\ncase TEXT:\ncase MM:\n@@ -72,6 +78,10 @@ public class MatrixReaderFactory\nboolean par = ConfigurationManager.getCompilerConfigFlag(ConfigType.PARALLEL_CP_READ_TEXTFORMATS);\nboolean mcsr = MatrixBlock.DEFAULT_SPARSEBLOCK == SparseBlock.Type.MCSR;\n+ if (LOG.isDebugEnabled()){\n+ LOG.debug(\"reading parallel: \" + par + \" mcsr: \" + mcsr);\n+ }\n+\nswitch(fmt) {\ncase TEXT:\ncase MM:\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"new_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"diff": "@@ -101,7 +101,7 @@ public abstract class AutomatedTestBase {\npublic static final boolean TEST_GPU = false;\npublic static final double GPU_TOLERANCE = 1e-9;\n- public static final int FED_WORKER_WAIT = 1000; // in ms\n+ public static final int FED_WORKER_WAIT = 2000; // in ms\n// With OpenJDK 8u242 on Windows, the new changes in JDK are not allowing\n// to set the native library paths internally thus breaking the code.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Debug print reading
This commit adds a debug print when reading from file to show whether the
read is done in parallel.
Also in this commit the delay for worker startup in federated tests is
increased from 1 sec to 2 sec. |
49,684 | 25.08.2020 09:26:08 | -7,200 | 46bc56e259207d70ddb3f9252bf58c9172a9d05c | [MINOR] Bug Fix in /bin/systemds
This commit fixes a :bug: in /bin/systemds that caused the command for
executing java not to be printed correctly.
closes | [
{
"change_type": "MODIFY",
"old_path": "bin/systemds",
"new_path": "bin/systemds",
"diff": "@@ -283,10 +283,6 @@ if [ $WORKER == 1 ]; then\nprint_out \"#\"\nprint_out \"# starting Fedederated worker on port $PORT\"\nprint_out \"###############################################################################\"\n-\n- print_out \"Executing command: $CMD\"\n- print_out \"\"\n-\nCMD=\" \\\njava $SYSTEMDS_STANDALONE_OPTS \\\n-cp $CLASSPATH \\\n@@ -294,6 +290,8 @@ if [ $WORKER == 1 ]; then\norg.apache.sysds.api.DMLScript \\\n-w $PORT \\\n$*\"\n+ print_out \"Executing command: $CMD\"\n+ print_out \"\"\nelif [ $SYSDS_DISTRIBUTED == 0 ]; then\nprint_out \"#\"\n@@ -314,7 +312,6 @@ else\nprint_out \"#\"\nprint_out \"# Running script $SCRIPT_FILE distributed with opts: $*\"\nprint_out \"###############################################################################\"\n-\nexport SPARK_MAJOR_VERSION=2\nCMD=\" \\\nspark-submit $SYSTEMDS_DISTRIBUTED_OPTS \\\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Bug Fix in /bin/systemds
This commit fixes a :bug: in /bin/systemds that caused the command for
executing java not to be printed correctly.
closes #1038 |
49,706 | 25.08.2020 20:58:56 | -7,200 | 60d41a8a774f868812b8ca7cae5c987afc789b1b | [MINOR] Fix read of non-zeros from mtd file
In federated workers, the IO time was off compared to local execution.
This was because the number of non-zeros was never read from the metadata.
This commit adds this 1 LINE! | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"diff": "@@ -195,6 +195,7 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\nnew FederatedWorkerHandlerException(\"Could not parse metadata file\"));\nmc.setRows(mtd.getLong(DataExpression.READROWPARAM));\nmc.setCols(mtd.getLong(DataExpression.READCOLPARAM));\n+ mc.setNonZeros(mtd.getLong(DataExpression.READNNZPARAM));\ncd = (CacheableData<?>) PrivacyPropagator.parseAndSetPrivacyConstraint(cd, mtd);\nfmt = FileFormat.safeValueOf(mtd.getString(DataExpression.FORMAT_TYPE));\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix read of non-zeros from mtd file
In federated workers, the IO time was off compared to local execution.
This was because the number of non-zeros was never read from the metadata.
This commit adds this 1 LINE! |
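The fix reads the nnz entry of the .mtd metadata JSON that accompanies each input file; the key names rows/cols/nnz mirror the DataExpression.READROWPARAM, READCOLPARAM and READNNZPARAM constants used in the diff. A sketch of writing such a file from Python, where the remaining fields are typical mtd content but assumed here:

    import json

    mtd = {
        "data_type": "matrix",
        "value_type": "double",
        "rows": 10000,
        "cols": 100,
        "nnz": 250000,  # the entry the worker now picks up
        "format": "csv",
    }
    with open("X.csv.mtd", "w") as f:
        json.dump(mtd, f, indent=2)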
49,706 | 26.08.2020 16:43:02 | -7,200 | 9c0d0b731be06f3112661a9c04740b91a3870e7e | [MINOR] Check if NNZ is in mtd before parsing
This commit changes the behavior of a federated worker when the
metadata file does not contain the number of non-zeros.
It defaults to a dense representation when the input
is small, and to a sparse representation when it is large. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"diff": "@@ -195,7 +195,18 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\nnew FederatedWorkerHandlerException(\"Could not parse metadata file\"));\nmc.setRows(mtd.getLong(DataExpression.READROWPARAM));\nmc.setCols(mtd.getLong(DataExpression.READCOLPARAM));\n+ if(mtd.containsKey(DataExpression.READNNZPARAM)){\nmc.setNonZeros(mtd.getLong(DataExpression.READNNZPARAM));\n+ }\n+ else if (mc.getCols() * mc.getRows() < 8000000){\n+ // force dense allocation.\n+ mc.setNonZeros(mc.getCols() *mc.getRows());\n+ }\n+ else{\n+ // force sparse allocation\n+ mc.setNonZeros((long)(mc.getCols() * mc.getRows() * 0.35));\n+ }\n+\ncd = (CacheableData<?>) PrivacyPropagator.parseAndSetPrivacyConstraint(cd, mtd);\nfmt = FileFormat.safeValueOf(mtd.getString(DataExpression.FORMAT_TYPE));\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Check if NNZ is in mtd before parsing
This commit changes the behavior of a federated worker when the
metadata file does not contain the number of non-zeros.
It defaults to a dense representation when the input
is small, and to a sparse representation when it is large. |
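Restated outside Java, the fallback is a three-way decision; a sketch in Python with the constants taken directly from the diff above (the 8,000,000-cell cutoff and the 0.35 sparsity guess):

    from typing import Optional

    def estimate_nnz(rows: int, cols: int, nnz_from_mtd: Optional[int] = None) -> int:
        cells = rows * cols
        if nnz_from_mtd is not None:
            return nnz_from_mtd    # metadata wins when present
        if cells < 8_000_000:
            return cells           # small input: force dense allocation
        return int(cells * 0.35)   # large input: force sparse allocation

For example, a 2000x1000 input (2M cells) is treated as fully dense, while a 10000x2000 input (20M cells) is assumed to carry 7M non-zeros.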
49,738 | 27.08.2020 21:55:33 | -7,200 | 5febadc75be76c4365f20e01364610f283881c1e | [MINOR] Enable multi-threaded I/O and operations for new Python API | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/api/PythonDMLScript.java",
"new_path": "src/main/java/org/apache/sysds/api/PythonDMLScript.java",
"diff": "@@ -22,6 +22,7 @@ package org.apache.sysds.api;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport org.apache.sysds.api.jmlc.Connection;\n+import org.apache.sysds.conf.CompilerConfig;\nimport py4j.GatewayServer;\nimport py4j.GatewayServerListener;\n@@ -56,7 +57,17 @@ public class PythonDMLScript {\n}\nprivate PythonDMLScript() {\n- _connection = new Connection();\n+ // we enable multi-threaded I/O and operations for a single JMLC\n+ // connection because the calling Python process is unlikely to run\n+ // multi-threaded streams of operations on the same shared context\n+ _connection = new Connection(\n+ CompilerConfig.ConfigType.PARALLEL_CP_READ_TEXTFORMATS,\n+ CompilerConfig.ConfigType.PARALLEL_CP_WRITE_TEXTFORMATS,\n+ CompilerConfig.ConfigType.PARALLEL_CP_READ_BINARYFORMATS,\n+ CompilerConfig.ConfigType.PARALLEL_CP_WRITE_BINARYFORMATS,\n+ CompilerConfig.ConfigType.PARALLEL_CP_MATRIX_OPERATIONS,\n+ CompilerConfig.ConfigType.PARALLEL_LOCAL_OR_REMOTE_PARFOR,\n+ CompilerConfig.ConfigType.ALLOW_DYN_RECOMPILATION);\n}\npublic Connection getConnection() {\n@@ -108,5 +119,4 @@ class DMLGateWayListener implements GatewayServerListener {\npublic void serverStopped() {\nSystem.out.println(\"GatewayServer Stopped\");\n}\n-\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Enable multi-threaded I/O and operations for new Python API |
49,706 | 28.08.2020 12:17:10 | -7,200 | 39e64c1996650af24963daea08289255cb8cea31 | Python API K-Means algorithm
This commit introduces the K-Means algorithm to the Python API.
Currently the Python interface only allows returning one value, not a
list, therefore only the clusters are returned from the call to kmeans. | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/context/systemds_context.py",
"new_path": "src/main/python/systemds/context/systemds_context.py",
"diff": "@@ -106,22 +106,24 @@ class SystemDSContext(object):\nself.java_gateway = JavaGateway(\ngateway_parameters=gateway_parameters, java_process=process)\n- def get_stdout(self, lines: int = 1):\n+ def get_stdout(self, lines: int = -1):\n\"\"\"Getter for the stdout of the java subprocess\nThe output is taken from the stdout queue and returned in a new list.\n- :param lines: The number of lines to try to read from the stdout queue\n+ :param lines: The number of lines to try to read from the stdout queue.\n+ default -1 prints all current lines in the queue.\n\"\"\"\n- if self.__stdout.qsize() < lines:\n+ if lines == -1 or self.__stdout.qsize() < lines:\nreturn [self.__stdout.get() for x in range(self.__stdout.qsize())]\nelse:\nreturn [self.__stdout.get() for x in range(lines)]\n- def get_stderr(self, lines: int = 1):\n+ def get_stderr(self, lines: int = -1):\n\"\"\"Getter for the stderr of the java subprocess\nThe output is taken from the stderr queue and returned in a new list.\n- :param lines: The number of lines to try to read from the stderr queue\n+ :param lines: The number of lines to try to read from the stderr queue.\n+ default -1 prints all current lines in the queue.\n\"\"\"\n- if self.__stderr.qsize() < lines:\n+ if lines == -1 or self.__stderr.qsize() < lines:\nreturn [self.__stderr.get() for x in range(self.__stderr.qsize())]\nelse:\nreturn [self.__stderr.get() for x in range(lines)]\n@@ -148,7 +150,6 @@ class SystemDSContext(object):\nfor line in iter(out.readline, b\"\"):\nqueue.put(line.decode(\"utf-8\").strip())\n-\ndef __get_open_port(self):\n\"\"\"Get a random available port.\"\"\"\n# TODO Verify that it is not taking some critical ports change to select a good port range.\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/operator/algorithm.py",
"new_path": "src/main/python/systemds/operator/algorithm.py",
"diff": "# -------------------------------------------------------------\nfrom typing import Dict\n+\n+from systemds.operator import OperationNode\nfrom systemds.script_building.dag import DAGNode\nfrom systemds.utils.consts import VALID_INPUT_TYPES\n-from systemds.operator import OperationNode\n__all__ = ['l2svm', 'lm']\n+\ndef l2svm(x: DAGNode, y: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\n\"\"\"\nPerform L2SVM on matrix with labels given.\n@@ -63,3 +65,30 @@ def lm(x :DAGNode, y: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> Operat\nparams_dict.update(kwargs)\nreturn OperationNode(x.sds_context, 'lm', named_input_nodes=params_dict)\n+\n+def kmeans(x: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\n+ \"\"\"\n+ Perfoms KMeans on matrix input.\n+\n+ :param x: Input dataset to perform K-Means on.\n+ :param k: The Number of centroids to use for the algorithm.\n+ :param runs: The Number of concurrent instances of K-Means to run (with different initial centroids).\n+ :param max_iter: The Maximum number of iterations to run the K-Means algorithm for.\n+ :param eps: Tolerance for the algorithm to declare convergence using WCSS change ratio.\n+ :param is_verbose: Boolean flag if the algorithm should be run in a verbose manner.\n+ :param avg_sample_size_per_centroid: The Average Number of records per centroid in the data samples.\n+ \"\"\"\n+\n+ x._check_matrix_op()\n+ if x._np_array.size == 0:\n+ raise ValueError(\"Found array with 0 feature(s) (shape={s}) while a minimum of 1 is required.\"\n+ .format(s=x._np_array.shape))\n+\n+ if 'k' in kwargs.keys() and kwargs.get('k') < 1:\n+ raise ValueError(\"Invalid number of clusters in K means, number must be integer above 0\")\n+\n+\n+\n+ params_dict = {'X': x}\n+ params_dict.update(kwargs)\n+ return OperationNode(x.sds_context, 'kmeans', named_input_nodes=params_dict)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/operator/operation_node.py",
"new_path": "src/main/python/systemds/operator/operation_node.py",
"diff": "#-------------------------------------------------------------\nfrom typing import Union, Optional, Iterable, Dict, Sequence, Tuple, TYPE_CHECKING\n+from multiprocessing import Process\nimport numpy as np\nfrom py4j.java_gateway import JVMView, JavaObject\n@@ -30,6 +31,8 @@ from systemds.utils.converters import matrix_block_to_numpy\nfrom systemds.script_building.script import DMLScript\nfrom systemds.script_building.dag import OutputType, DAGNode\n+\n+\nif TYPE_CHECKING:\n# to avoid cyclic dependencies during runtime\nfrom systemds.context import SystemDSContext\n@@ -70,6 +73,10 @@ class OperationNode(DAGNode):\ndef compute(self, verbose: bool = False, lineage: bool = False) -> \\\nUnion[float, np.array, Tuple[Union[float, np.array], str]]:\n+\n+\n+\n+\nif self._result_var is None or self._lineage_trace is None:\nself._script = DMLScript(self.sds_context)\nself._script.build_code(self)\n@@ -77,14 +84,21 @@ class OperationNode(DAGNode):\nresult_variables, self._lineage_trace = self._script.execute(lineage)\nelse:\nresult_variables = self._script.execute(lineage)\n+\n+ if verbose:\n+ print(\"SCRIPT:\")\n+ print(self._script.dml_script)\n+\nif self.output_type == OutputType.DOUBLE:\nself._result_var = result_variables.getDouble(self._script.out_var_name)\nelif self.output_type == OutputType.MATRIX:\nself._result_var = matrix_block_to_numpy(self.sds_context.java_gateway.jvm,\nresult_variables.getMatrixBlock(self._script.out_var_name))\nif verbose:\n- print(self._script.dml_script)\n- # TODO further info\n+ for x in self.sds_context.get_stdout():\n+ print(x)\n+ for y in self.sds_context.get_stderr():\n+ print(y)\nif lineage:\nreturn self._result_var, self._lineage_trace\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/python/tests/algorithms/__init__.py",
"diff": "+# -------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+# -------------------------------------------------------------\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/python/tests/algorithms/test_kmeans.py",
"diff": "+# -------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+# -------------------------------------------------------------\n+\n+import unittest\n+\n+import numpy as np\n+from systemds.context import SystemDSContext\n+from systemds.matrix import Matrix\n+from systemds.operator.algorithm import kmeans\n+\n+\n+class TestKMeans(unittest.TestCase):\n+\n+ sds: SystemDSContext = None\n+\n+ @classmethod\n+ def setUpClass(cls):\n+ cls.sds = SystemDSContext()\n+\n+ @classmethod\n+ def tearDownClass(cls):\n+ cls.sds.close()\n+\n+ def test_500x2(self):\n+ \"\"\"\n+ This test is based on statistics, that if we run kmeans, on a normal distributed dataset, centered around 0\n+ and use 4 clusters then they will be located in each one corner.\n+ \"\"\"\n+ features = self.generate_matrices_for_k_means((500, 2), seed=1304)\n+ res = kmeans(features, k=4).compute()\n+\n+ corners = set()\n+ for x in res:\n+ if x[0] > 0 and x[1] > 0:\n+ corners.add(\"pp\")\n+ elif x[0] > 0 and x[1] < 0:\n+ corners.add(\"pn\")\n+ elif x[0] < 0 and x[1] > 0:\n+ corners.add(\"np\")\n+ else:\n+ corners.add(\"nn\")\n+ self.assertTrue(len(corners) == 4)\n+\n+ def test_invalid_input_1(self):\n+ features = Matrix(self.sds, np.array([]))\n+ with self.assertRaises(ValueError) as context:\n+ kmeans(features)\n+\n+ def test_invalid_input_2(self):\n+ features = Matrix(self.sds, np.array([1]))\n+ with self.assertRaises(ValueError) as context:\n+ kmeans(features, k=-1)\n+\n+ def generate_matrices_for_k_means(self, dims: (int, int), seed: int = 1234):\n+ np.random.seed(seed)\n+ mu, sigma = 0, 0.1\n+ s = np.random.normal(mu, sigma, dims[0] * dims[1])\n+ m1 = np.array(s, dtype=np.double)\n+ m1 = np.reshape(m1, (dims[0], dims[1]))\n+\n+ return Matrix(self.sds, m1)\n+\n+\n+if __name__ == \"__main__\":\n+ unittest.main(exit=False)\n"
},
{
"change_type": "RENAME",
"old_path": "src/main/python/tests/test_l2svm.py",
"new_path": "src/main/python/tests/algorithms/test_l2svm.py",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "src/main/python/tests/test_lm.py",
"new_path": "src/main/python/tests/algorithms/test_lm.py",
"diff": ""
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/tests/onnx_systemds/__init__.py",
"new_path": "src/main/python/tests/onnx_systemds/__init__.py",
"diff": "+# -------------------------------------------------------------\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# specific language governing permissions and limitations\n# under the License.\n#\n+# -------------------------------------------------------------\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/tests/onnx_systemds/test_simple.py",
"new_path": "src/main/python/tests/onnx_systemds/test_simple.py",
"diff": "-# Licensed to the Apache Software Foundation (ASF) under one or more\n-# contributor license agreements. See the NOTICE file distributed with\n-# this work for additional information regarding copyright ownership.\n-# The ASF licenses this file to you under the Apache License, Version 2.0\n-# (the \"License\"); you may not use this file except in compliance with\n-# the License. You may obtain a copy of the License at\n+# -------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+# -------------------------------------------------------------\nimport unittest\nimport tests.onnx_systemds.util as util\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/tests/onnx_systemds/util.py",
"new_path": "src/main/python/tests/onnx_systemds/util.py",
"diff": "-# Licensed to the Apache Software Foundation (ASF) under one or more\n-# contributor license agreements. See the NOTICE file distributed with\n-# this work for additional information regarding copyright ownership.\n-# The ASF licenses this file to you under the Apache License, Version 2.0\n-# (the \"License\"); you may not use this file except in compliance with\n-# the License. You may obtain a copy of the License at\n+# -------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+# -------------------------------------------------------------\n+\nimport os\nimport subprocess\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2645] Python API K-Means algorithm
This commit introduces the K-Means algorithm to the Python API.
Currently the Python interface only allows returning one value, not a
list, therefore only the clusters are returned from the call to kmeans. |
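Usage mirrors the new test: build a context, wrap a NumPy array, call kmeans, compute. A minimal sketch, with kmeans returning a single matrix of centroids at this point:

    import numpy as np
    from systemds.context import SystemDSContext
    from systemds.matrix import Matrix
    from systemds.operator.algorithm import kmeans

    sds = SystemDSContext()
    X = np.random.normal(0, 0.1, (500, 2))  # 500 points around the origin
    centroids = kmeans(Matrix(sds, X), k=4).compute()
    print(centroids)                        # one centroid per row
    sds.close()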
49,706 | 28.08.2020 13:37:54 | -7,200 | 2f60b71dc43a2ec76ec99af2e2507cddabdc5fe8 | [MINOR] fix federated transform test | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"new_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"diff": "@@ -101,7 +101,7 @@ public abstract class AutomatedTestBase {\npublic static final boolean TEST_GPU = false;\npublic static final double GPU_TOLERANCE = 1e-9;\n- public static final int FED_WORKER_WAIT = 800; // in ms\n+ public static final int FED_WORKER_WAIT = 1000; // in ms\n// With OpenJDK 8u242 on Windows, the new changes in JDK are not allowing\n// to set the native library paths internally thus breaking the code.\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/transform/TransformFederatedEncodeDecodeTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/transform/TransformFederatedEncodeDecodeTest.java",
"diff": "@@ -87,18 +87,18 @@ public class TransformFederatedEncodeDecodeTest extends AutomatedTestBase {\nExecMode platformOld = rtplatform;\nrtplatform = ExecMode.SINGLE_NODE;\n- Process t1 = null, t2 = null, t3 = null, t4 = null;\n+ Thread t1 = null, t2 = null, t3 = null, t4 = null;\ntry {\ngetAndLoadTestConfiguration(TEST_NAME1);\nint port1 = getRandomAvailablePort();\n- t1 = startLocalFedWorker(port1);\n+ t1 = startLocalFedWorkerThread(port1);\nint port2 = getRandomAvailablePort();\n- t2 = startLocalFedWorker(port2);\n+ t2 = startLocalFedWorkerThread(port2);\nint port3 = getRandomAvailablePort();\n- t3 = startLocalFedWorker(port3);\n+ t3 = startLocalFedWorkerThread(port3);\nint port4 = getRandomAvailablePort();\n- t4 = startLocalFedWorker(port4);\n+ t4 = startLocalFedWorkerThread(port4);\n// schema\nTypes.ValueType[] schema = new Types.ValueType[cols / 2];\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] fix federated transform test |
49,706 | 28.08.2020 16:37:51 | -7,200 | c0b46b917971567a4bed984b744b46c1a2cf7f87 | Python API MultiLogReg Algorithm | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/operator/algorithm.py",
"new_path": "src/main/python/systemds/operator/algorithm.py",
"diff": "@@ -85,7 +85,8 @@ def kmeans(x: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\n.format(s=x._np_array.shape))\nif 'k' in kwargs.keys() and kwargs.get('k') < 1:\n- raise ValueError(\"Invalid number of clusters in K means, number must be integer above 0\")\n+ raise ValueError(\n+ \"Invalid number of clusters in K-Means, number must be integer above 0\")\nparams_dict = {'X': x}\nparams_dict.update(kwargs)\n@@ -108,7 +109,8 @@ def pca(x: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\n.format(s=x._np_array.shape))\nif 'K' in kwargs.keys() and kwargs.get('K') < 1:\n- raise ValueError(\"Invalid number of clusters in K means, number must be integer above 0\")\n+ raise ValueError(\n+ \"Invalid number of clusters in K means, number must be integer above 0\")\nif 'scale' in kwargs.keys():\nif kwargs.get('scale') == True:\n@@ -126,3 +128,34 @@ def pca(x: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\nparams_dict.update(kwargs)\nreturn OperationNode(x.sds_context, 'pca', named_input_nodes=params_dict)\n+\n+def multiLogReg(x: DAGNode, y: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\n+ \"\"\"\n+ Performs Multiclass Logistic Regression on the matrix input\n+ using Trust Region method.\n+\n+ See: Trust Region Newton Method for Logistic Regression, Lin, Weng and Keerthi, JMLR 9 (2008) 627-650)\n+\n+ :param x: Input dataset to perform logstic regression on\n+ :param y: Labels rowaligned with the input dataset\n+ :param icpt: Intercept, default 2, Intercept presence, shifting and rescaling X columns:\n+ 0 = no intercept, no shifting, no rescaling;\n+ 1 = add intercept, but neither shift nor rescale X;\n+ 2 = add intercept, shift & rescale X columns to mean = 0, variance = 1\n+ :param tol: float tolerance for the algorithm.\n+ :param reg: Regularization parameter (lambda = 1/C); intercept settings are not regularized.\n+ :param maxi: Maximum outer iterations of the algorithm\n+ :param maxii: Maximum inner iterations of the algorithm\n+ \"\"\"\n+\n+ x._check_matrix_op()\n+ if x._np_array.size == 0:\n+ raise ValueError(\"Found array with 0 feature(s) (shape={s}) while a minimum of 1 is required.\"\n+ .format(s=x._np_array.shape))\n+ if y._np_array.size == 0:\n+ raise ValueError(\"Found array with 0 feature(s) (shape={s}) while a minimum of 1 is required.\"\n+ .format(s=y._np_array.shape))\n+\n+ params_dict = {'X': x, 'Y': y}\n+ params_dict.update(kwargs)\n+ return OperationNode(x.sds_context, 'multiLogReg', named_input_nodes=params_dict)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/python/tests/algorithms/test_multiLogReg.py",
"diff": "+# -------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+# -------------------------------------------------------------\n+\n+import unittest\n+\n+import numpy as np\n+from systemds.context import SystemDSContext\n+from systemds.matrix import Matrix\n+from systemds.operator.algorithm import multiLogReg\n+\n+\n+class TestMultiLogReg(unittest.TestCase):\n+\n+ sds: SystemDSContext = None\n+\n+ @classmethod\n+ def setUpClass(cls):\n+ cls.sds = SystemDSContext()\n+\n+ @classmethod\n+ def tearDownClass(cls):\n+ cls.sds.close()\n+\n+ def test_simple(self):\n+ \"\"\"\n+ Test simple, if the log reg splits a dataset where everything over 1 is label 1 and under 1 is 0.\n+ \"\"\"\n+ # Generate data\n+ mu, sigma = 1, 0.1\n+ X = np.reshape(np.random.normal(mu, sigma, 500), (2,250))\n+ # All over 1 is true\n+ f = lambda x: x[0] > 1\n+ labels = f(X)\n+ # Y labels as double\n+ Y = np.array(labels, dtype=np.double)\n+ # Transpose X to fit input format.\n+ X = X.transpose()\n+\n+ # Call algorithm\n+ bias = multiLogReg(Matrix(self.sds,X),Matrix(self.sds,Y)).compute()\n+\n+ # Calculate result.\n+ res = np.reshape(np.dot(X, bias[:len(X[0])]) + bias[len(X[0])], (250))\n+\n+ f2 = lambda x: x > 0\n+ accuracy = np.sum(labels == f2(res)) / 250 * 100\n+\n+ self.assertTrue(accuracy > 98)\n+\n+\n+if __name__ == \"__main__\":\n+ unittest.main(exit=False)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2647] Python API MultiLogReg Algorithm |
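A minimal usage sketch along the lines of the new test, where labels are derived from the first feature and the trained bias matrix is applied as weights plus a trailing intercept row (the intercept interpretation follows the test, given the default icpt setting):

    import numpy as np
    from systemds.context import SystemDSContext
    from systemds.matrix import Matrix
    from systemds.operator.algorithm import multiLogReg

    sds = SystemDSContext()
    X = np.random.normal(1, 0.1, (250, 2))  # features
    y = (X[:, 0] > 1).astype(np.double)     # labels row-aligned with X
    bias = multiLogReg(Matrix(sds, X), Matrix(sds, y)).compute()
    scores = X @ bias[:2] + bias[2]         # weights, then intercept row
    accuracy = np.mean((scores.ravel() > 0) == (y > 0))
    sds.close()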
49,706 | 29.08.2020 14:21:32 | -7,200 | f60b790e3571f401be2626f222aeec4a5a9fe53d | Multi return for algorithms in python
This commit changes the Python interface to allow multi-return builtin
algorithms, such that when an algorithm has multiple returns, each of
them is parsed out into a list. | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/operator/algorithm.py",
"new_path": "src/main/python/systemds/operator/algorithm.py",
"diff": "from typing import Dict\nfrom systemds.operator import OperationNode\n-from systemds.script_building.dag import DAGNode\n+from systemds.script_building.dag import DAGNode, OutputType\nfrom systemds.utils.consts import VALID_INPUT_TYPES\n__all__ = ['l2svm', 'lm', 'kmeans', 'pca']\n@@ -77,6 +77,7 @@ def kmeans(x: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\n:param eps: Tolerance for the algorithm to declare convergence using WCSS change ratio.\n:param is_verbose: Boolean flag if the algorithm should be run in a verbose manner.\n:param avg_sample_size_per_centroid: The average number of records per centroid in the data samples.\n+ :return: `OperationNode` List containing two outputs 1. the clusters, 2 the cluster ID associated with each row in x.\n\"\"\"\nx._check_matrix_op()\n@@ -90,7 +91,7 @@ def kmeans(x: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\nparams_dict = {'X': x}\nparams_dict.update(kwargs)\n- return OperationNode(x.sds_context, 'kmeans', named_input_nodes=params_dict)\n+ return OperationNode(x.sds_context, 'kmeans', named_input_nodes=params_dict, output_type=OutputType.LIST, number_of_outputs=2)\ndef pca(x: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\n@@ -101,6 +102,7 @@ def pca(x: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\n:param K: The number of reduced dimensions.\n:param center: Boolean specifying if the input values should be centered.\n:param scale: Boolean specifying if the input values should be scaled.\n+ :return: `OperationNode` List containing two outputs 1. The dimensionality reduced X input, 2. A matrix to reduce dimensionality similarly on unseen data.\n\"\"\"\nx._check_matrix_op()\n@@ -126,7 +128,7 @@ def pca(x: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\nparams_dict = {'X': x}\nparams_dict.update(kwargs)\n- return OperationNode(x.sds_context, 'pca', named_input_nodes=params_dict)\n+ return OperationNode(x.sds_context, 'pca', named_input_nodes=params_dict, output_type=OutputType.LIST, number_of_outputs=2)\ndef multiLogReg(x: DAGNode, y: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\n@@ -146,6 +148,7 @@ def multiLogReg(x: DAGNode, y: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES])\n:param reg: Regularization parameter (lambda = 1/C); intercept settings are not regularized.\n:param maxi: Maximum outer iterations of the algorithm\n:param maxii: Maximum inner iterations of the algorithm\n+ :return: `OperationNode` of a matrix containing the regression parameters trained.\n\"\"\"\nx._check_matrix_op()\n@@ -159,3 +162,35 @@ def multiLogReg(x: DAGNode, y: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES])\nparams_dict = {'X': x, 'Y': y}\nparams_dict.update(kwargs)\nreturn OperationNode(x.sds_context, 'multiLogReg', named_input_nodes=params_dict)\n+\n+\n+def multiLogRegPredict(x: DAGNode, b: DAGNode, y: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\n+ \"\"\"\n+ Performs prediction on input data x using the model trained, b.\n+\n+ :param x: The data to perform classification on.\n+ :param b: The regression parameters trained from multiLogReg.\n+ :param y: The Labels expected to be contained in the X dataset, to calculate accuracy.\n+ :param verbose: Boolean specifying if the prediction should be verbose.\n+ :return: `OperationNode` List containing three outputs.\n+ 1. The predicted means / probabilities\n+ 2. The predicted response vector\n+ 3. 
The scalar value of accuracy\n+ \"\"\"\n+\n+ x._check_matrix_op()\n+ b._check_matrix_op()\n+ y._check_matrix_op()\n+ if x._np_array.size == 0:\n+ raise ValueError(\"Found array with 0 feature(s) (shape={s}) while a minimum of 1 is required.\"\n+ .format(s=x._np_array.shape))\n+ if b._np_array.size == 0:\n+ raise ValueError(\"Found array with 0 feature(s) (shape={s}) while a minimum of 1 is required.\"\n+ .format(s=y._np_array.shape))\n+ if y._np_array.size == 0:\n+ raise ValueError(\"Found array with 0 feature(s) (shape={s}) while a minimum of 1 is required.\"\n+ .format(s=y._np_array.shape))\n+\n+ params_dict = {'X': x, 'B': b, 'Y': y}\n+ params_dict.update(kwargs)\n+ return OperationNode(x.sds_context, 'multiLogRegPredict', named_input_nodes=params_dict, output_type=OutputType.LIST, number_of_outputs=3, output_types=[OutputType.MATRIX,OutputType.MATRIX,OutputType.DOUBLE])\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/operator/operation_node.py",
"new_path": "src/main/python/systemds/operator/operation_node.py",
"diff": "@@ -32,21 +32,25 @@ from systemds.script_building.script import DMLScript\nfrom systemds.script_building.dag import OutputType, DAGNode\n-\nif TYPE_CHECKING:\n# to avoid cyclic dependencies during runtime\nfrom systemds.context import SystemDSContext\n+\nclass OperationNode(DAGNode):\n\"\"\"A Node representing an operation in SystemDS\"\"\"\n_result_var: Optional[Union[float, np.array]]\n_lineage_trace: Optional[str]\n_script: Optional[DMLScript]\n+ _output_types: Optional[Iterable[VALID_INPUT_TYPES]]\ndef __init__(self, sds_context: 'SystemDSContext', operation: str,\nunnamed_input_nodes: Iterable[VALID_INPUT_TYPES] = None,\nnamed_input_nodes: Dict[str, VALID_INPUT_TYPES] = None,\n- output_type: OutputType = OutputType.MATRIX, is_python_local_data: bool = False):\n+ output_type: OutputType = OutputType.MATRIX,\n+ is_python_local_data: bool = False,\n+ number_of_outputs=1,\n+ output_types: Iterable[OutputType] = None):\n\"\"\"\nCreate general `OperationNode`\n@@ -55,7 +59,11 @@ class OperationNode(DAGNode):\n:param unnamed_input_nodes: inputs identified by their position, not name\n:param named_input_nodes: inputs with their respective parameter name\n:param output_type: type of the output in DML (double, matrix etc.)\n- :param is_python_local_data: if the data is local in python e.g. numpy arrays\n+ :param is_python_local_data: if the data is local in python e.g. Numpy arrays\n+ :param number_of_outputs: If set to other value than 1 then it is expected\n+ that this operation node returns multiple values. If set remember to set the output_types value as well.\n+ :param output_types: The types of output in a multi output scenario.\n+ Default is None, and means every multi output is a matrix.\n\"\"\"\nself.sds_context = sds_context\nif unnamed_input_nodes is None:\n@@ -70,30 +78,43 @@ class OperationNode(DAGNode):\nself._result_var = None\nself._lineage_trace = None\nself._script = None\n+ self._number_of_outputs = number_of_outputs\n+ self._output_types = output_types\ndef compute(self, verbose: bool = False, lineage: bool = False) -> \\\nUnion[float, np.array, Tuple[Union[float, np.array], str]]:\n-\n-\n-\nif self._result_var is None or self._lineage_trace is None:\nself._script = DMLScript(self.sds_context)\nself._script.build_code(self)\n- if lineage:\n- result_variables, self._lineage_trace = self._script.execute(lineage)\n- else:\n- result_variables = self._script.execute(lineage)\n-\nif verbose:\nprint(\"SCRIPT:\")\nprint(self._script.dml_script)\n+ if lineage:\n+ result_variables, self._lineage_trace = self._script.execute(\n+ lineage)\n+ else:\n+ result_variables = self._script.execute(lineage)\n+\nif self.output_type == OutputType.DOUBLE:\n- self._result_var = result_variables.getDouble(self._script.out_var_name)\n+ self._result_var = result_variables.getDouble(\n+ self._script.out_var_name[0])\nelif self.output_type == OutputType.MATRIX:\nself._result_var = matrix_block_to_numpy(self.sds_context.java_gateway.jvm,\n- result_variables.getMatrixBlock(self._script.out_var_name))\n+ result_variables.getMatrixBlock(self._script.out_var_name[0]))\n+ elif self.output_type == OutputType.LIST:\n+ self._result_var = []\n+ for idx, v in enumerate(self._script.out_var_name):\n+ if(self._output_types == None):\n+ self._result_var.append(matrix_block_to_numpy(self.sds_context.java_gateway.jvm,\n+ result_variables.getMatrixBlock(v)))\n+ elif(self._output_types[idx] == OutputType.MATRIX):\n+ self._result_var.append(matrix_block_to_numpy(self.sds_context.java_gateway.jvm,\n+ 
result_variables.getMatrixBlock(v)))\n+ else:\n+ self._result_var.append(result_variables.getDouble(\n+ self._script.out_var_name[idx]))\nif verbose:\nfor x in self.sds_context.get_stdout():\nprint(x)\n@@ -120,15 +141,27 @@ class OperationNode(DAGNode):\ndef code_line(self, var_name: str, unnamed_input_vars: Sequence[str],\nnamed_input_vars: Dict[str, str]) -> str:\nif self.operation in BINARY_OPERATIONS:\n- assert len(named_input_vars) == 0, 'Named parameters can not be used with binary operations'\n- assert len(unnamed_input_vars) == 2, 'Binary Operations need exactly two input variables'\n+ assert len(\n+ named_input_vars) == 0, 'Named parameters can not be used with binary operations'\n+ assert len(\n+ unnamed_input_vars) == 2, 'Binary Operations need exactly two input variables'\nreturn f'{var_name}={unnamed_input_vars[0]}{self.operation}{unnamed_input_vars[1]}'\n+\n+ inputs_comma_sep = create_params_string(\n+ unnamed_input_vars, named_input_vars)\n+\n+ if self.output_type == OutputType.LIST:\n+ output = \"[\"\n+ for idx in range(self._number_of_outputs):\n+ output += f'{var_name}_{idx},'\n+ output = output[:-1] + \"]\"\n+ return f'{output}={self.operation}({inputs_comma_sep});'\nelse:\n- inputs_comma_sep = create_params_string(unnamed_input_vars, named_input_vars)\nreturn f'{var_name}={self.operation}({inputs_comma_sep});'\ndef pass_python_data_to_prepared_script(self, jvm: JVMView, var_name: str, prepared_script: JavaObject) -> None:\n- raise NotImplementedError('Operation node has no python local data. Missing implementation in derived class?')\n+ raise NotImplementedError(\n+ 'Operation node has no python local data. Missing implementation in derived class?')\ndef _check_matrix_op(self):\n\"\"\"Perform checks to assure operation is allowed to be performed on data type of this `OperationNode`\n@@ -137,40 +170,40 @@ class OperationNode(DAGNode):\n\"\"\"\nassert self.output_type == OutputType.MATRIX, f'{self.operation} only supported for matrices'\n- def __add__(self, other: VALID_ARITHMETIC_TYPES):\n+ def __add__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '+', [self, other])\n- def __sub__(self, other: VALID_ARITHMETIC_TYPES):\n+ def __sub__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '-', [self, other])\n- def __mul__(self, other: VALID_ARITHMETIC_TYPES):\n+ def __mul__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '*', [self, other])\n- def __truediv__(self, other: VALID_ARITHMETIC_TYPES):\n+ def __truediv__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '/', [self, other])\n- def __floordiv__(self, other: VALID_ARITHMETIC_TYPES):\n+ def __floordiv__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '//', [self, other])\ndef __lt__(self, other) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '<', [self, other])\n- def __le__(self, other):\n+ def __le__(self, other) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '<=', [self, other])\n- def __gt__(self, other):\n+ def __gt__(self, other) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '>', [self, other])\n- def __ge__(self, other):\n+ def __ge__(self, other) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '>=', [self, other])\n- def __eq__(self, other):\n+ def __eq__(self, other) -> 'OperationNode':\nreturn 
OperationNode(self.sds_context, '==', [self, other])\n- def __ne__(self, other):\n+ def __ne__(self, other) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '!=', [self, other])\n- def __matmul__(self, other: VALID_ARITHMETIC_TYPES):\n+ def __matmul__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '%*%', [self, other])\ndef sum(self, axis: int = None) -> 'OperationNode':\n@@ -186,7 +219,8 @@ class OperationNode(DAGNode):\nreturn OperationNode(self.sds_context, 'rowSums', [self])\nelif axis is None:\nreturn OperationNode(self.sds_context, 'sum', [self], output_type=OutputType.DOUBLE)\n- raise ValueError(f\"Axis has to be either 0, 1 or None, for column, row or complete {self.operation}\")\n+ raise ValueError(\n+ f\"Axis has to be either 0, 1 or None, for column, row or complete {self.operation}\")\ndef mean(self, axis: int = None) -> 'OperationNode':\n\"\"\"Calculate mean of matrix.\n@@ -201,7 +235,8 @@ class OperationNode(DAGNode):\nreturn OperationNode(self.sds_context, 'rowMeans', [self])\nelif axis is None:\nreturn OperationNode(self.sds_context, 'mean', [self], output_type=OutputType.DOUBLE)\n- raise ValueError(f\"Axis has to be either 0, 1 or None, for column, row or complete {self.operation}\")\n+ raise ValueError(\n+ f\"Axis has to be either 0, 1 or None, for column, row or complete {self.operation}\")\ndef var(self, axis: int = None) -> 'OperationNode':\n\"\"\"Calculate variance of matrix.\n@@ -216,7 +251,8 @@ class OperationNode(DAGNode):\nreturn OperationNode(self.sds_context, 'rowVars', [self])\nelif axis is None:\nreturn OperationNode(self.sds_context, 'var', [self], output_type=OutputType.DOUBLE)\n- raise ValueError(f\"Axis has to be either 0, 1 or None, for column, row or complete {self.operation}\")\n+ raise ValueError(\n+ f\"Axis has to be either 0, 1 or None, for column, row or complete {self.operation}\")\ndef abs(self) -> 'OperationNode':\n\"\"\"Calculate absolute.\n@@ -287,14 +323,6 @@ class OperationNode(DAGNode):\n:return: `OperationNode` representing operation\n\"\"\"\nreturn OperationNode(self.sds_context, 'tanh', [self])\n- '''\n- def rev(self) -> 'OperationNode':\n- \"\"\"Calculate tan.\n-\n- :return: `OperationNode` representing operation\n- \"\"\"\n- return OperationNode(self.sds_context, 'rev', [self])\n- '''\ndef moment(self, moment, weights: DAGNode = None) -> 'OperationNode':\n# TODO write tests\n@@ -304,19 +332,3 @@ class OperationNode(DAGNode):\nunnamed_inputs.append(weights)\nunnamed_inputs.append(moment)\nreturn OperationNode(self.sds_context, 'moment', unnamed_inputs, output_type=OutputType.DOUBLE)\n-\n- def lm(self, y: DAGNode, **kwargs) -> 'OperationNode':\n- self._check_matrix_op()\n-\n- if self._np_array.size == 0:\n- raise ValueError(\"Found array with 0 feature(s) (shape={s}) while a minimum of 1 is required.\"\n- .format(s=self._np_array.shape))\n-\n- if y._np_array.size == 0:\n- raise ValueError(\"Found array with 0 feature(s) (shape={s}) while a minimum of 1 is required.\"\n- .format(s=y._np_array.shape))\n-\n- params_dict = {'X': self, 'y': y}\n- params_dict.update(kwargs)\n-\n- return OperationNode(self.sds_context, 'lm', named_input_nodes=params_dict)\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/script_building/dag.py",
"new_path": "src/main/python/systemds/script_building/dag.py",
"diff": "@@ -33,6 +33,7 @@ if TYPE_CHECKING:\nclass OutputType(Enum):\nMATRIX = auto()\nDOUBLE = auto()\n+ LIST = auto()\nclass DAGNode(ABC):\n@@ -42,6 +43,7 @@ class DAGNode(ABC):\n_named_input_nodes: Dict[str, Union['DAGNode', str, int, float, bool]]\n_output_type: OutputType\n_is_python_local_data: bool\n+ _number_of_outputs: int\ndef compute(self, verbose: bool = False, lineage: bool = False) -> Any:\n\"\"\"Get result of this operation. Builds the dml script and executes it in SystemDS, before this method is called\n@@ -90,3 +92,7 @@ class DAGNode(ABC):\n@property\ndef is_python_local_data(self):\nreturn self._is_python_local_data\n+\n+ @property\n+ def number_of_outputs(self):\n+ return self._number_of_outputs\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/script_building/script.py",
"new_path": "src/main/python/systemds/script_building/script.py",
"diff": "@@ -38,8 +38,6 @@ class DMLScript:\nTODO caching\n- TODO multiple outputs\n-\nTODO rerun with different inputs without recompilation\n\"\"\"\nsds_context: 'SystemDSContext'\n@@ -54,7 +52,7 @@ class DMLScript:\nself.dml_script = ''\nself.inputs = {}\nself.prepared_script = None\n- self.out_var_name = ''\n+ self.out_var_name = []\nself._variable_counter = 0\ndef add_code(self, code: str) -> None:\n@@ -88,7 +86,7 @@ class DMLScript:\nself.prepared_script = connection.prepareScript(\nself.dml_script,\n_list_to_java_array(gateway, input_names),\n- _list_to_java_array(gateway, [self.out_var_name]))\n+ _list_to_java_array(gateway, self.out_var_name))\nfor (name, input_node) in self.inputs.items():\ninput_node.pass_python_data_to_prepared_script(\ngateway.jvm, name, self.prepared_script)\n@@ -98,7 +96,13 @@ class DMLScript:\nret = self.prepared_script.executeScript()\nif lineage:\n- return ret, self.prepared_script.getLineageTrace(self.out_var_name)\n+ if len(self.out_var_name) == 1:\n+ return ret, self.prepared_script.getLineageTrace(self.out_var_name[0])\n+ else:\n+ traces = []\n+ for output in self.out_var_name:\n+ traces.append(self.prepared_script.getLineageTrace(output))\n+ return ret, traces\nreturn ret\n@@ -111,7 +115,7 @@ class DMLScript:\nself.prepared_script = connection.prepareScript(\nself.dml_script,\n_list_to_java_array(gateway, input_names),\n- _list_to_java_array(gateway, [self.out_var_name]))\n+ _list_to_java_array(gateway, self.out_var_name))\nfor (name, input_node) in self.inputs.items():\ninput_node.pass_python_data_to_prepared_script(\ngateway.jvm, name, self.prepared_script)\n@@ -119,16 +123,29 @@ class DMLScript:\nconnection.setLineage(True)\nself.prepared_script.executeScript()\n- lineage = self.prepared_script.getLineageTrace(self.out_var_name)\n- return lineage\n+ if len(self.out_var_name) == 1:\n+ return self.prepared_script.getLineageTrace(self.out_var_name[0])\n+ else:\n+ traces = []\n+ for output in self.out_var_name:\n+ traces.append(self.prepared_script.getLineageTrace(output))\n+ return traces\n+\ndef build_code(self, dag_root: DAGNode) -> None:\n\"\"\"Builds code from our DAG\n:param dag_root: the topmost operation of our DAG, result of operation will be output\n\"\"\"\n- self.out_var_name = self._dfs_dag_nodes(dag_root)\n- self.add_code(f'write({self.out_var_name}, \\'./tmp\\');')\n+ baseOutVarString = self._dfs_dag_nodes(dag_root)\n+ if(dag_root.number_of_outputs > 1):\n+ self.out_var_name = []\n+ for idx in range(dag_root.number_of_outputs):\n+ self.add_code(f'write({baseOutVarString}_{idx}, \\'./tmp_{idx}\\');')\n+ self.out_var_name.append(f'{baseOutVarString}_{idx}')\n+ else:\n+ self.out_var_name.append(baseOutVarString)\n+ self.add_code(f'write({baseOutVarString}, \\'./tmp\\');')\ndef _dfs_dag_nodes(self, dag_node: VALID_INPUT_TYPES) -> str:\n\"\"\"Uses Depth-First-Search to create code from DAG\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/tests/algorithms/test_kmeans.py",
"new_path": "src/main/python/tests/algorithms/test_kmeans.py",
"diff": "@@ -45,7 +45,7 @@ class TestKMeans(unittest.TestCase):\nand use 4 clusters then they will be located in each one corner.\n\"\"\"\nfeatures = self.generate_matrices_for_k_means((500, 2), seed=1304)\n- res = kmeans(features, k=4).compute()\n+ [res, classifications] = kmeans(features, k=4).compute()\ncorners = set()\nfor x in res:\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/tests/algorithms/test_multiLogReg.py",
"new_path": "src/main/python/tests/algorithms/test_multiLogReg.py",
"diff": "@@ -24,7 +24,7 @@ import unittest\nimport numpy as np\nfrom systemds.context import SystemDSContext\nfrom systemds.matrix import Matrix\n-from systemds.operator.algorithm import multiLogReg\n+from systemds.operator.algorithm import multiLogReg, multiLogRegPredict\nclass TestMultiLogReg(unittest.TestCase):\n@@ -41,30 +41,48 @@ class TestMultiLogReg(unittest.TestCase):\ndef test_simple(self):\n\"\"\"\n- Test simple, if the log reg splits a dataset where everything over 1 is label 1 and under 1 is 0.\n+ Test simple, if the log reg splits a dataset where everything over 1 is label 2 and under 1 is 1.\n+ With manual classification.\n\"\"\"\n- # Generate data\n- mu, sigma = 1, 0.1\n- X = np.reshape(np.random.normal(mu, sigma, 500), (2,250))\n- # All over 1 is true\n- f = lambda x: x[0] > 1\n- labels = f(X)\n- # Y labels as double\n- Y = np.array(labels, dtype=np.double)\n- # Transpose X to fit input format.\n- X = X.transpose()\n+ [X, labels, Y] = self.gen_data()\n# Call algorithm\nbias = multiLogReg(Matrix(self.sds,X),Matrix(self.sds,Y)).compute()\n# Calculate result.\nres = np.reshape(np.dot(X, bias[:len(X[0])]) + bias[len(X[0])], (250))\n-\n- f2 = lambda x: x > 0\n+ f2 = lambda x: (x < 0) + 1\naccuracy = np.sum(labels == f2(res)) / 250 * 100\nself.assertTrue(accuracy > 98)\n+ def test_using_predict(self):\n+ \"\"\"\n+ Test the algorithm using the predict function.\n+ With builtin classification\n+ \"\"\"\n+ [X, labels, Y] = self.gen_data()\n+ # Call algorithm\n+ bias = multiLogReg(Matrix(self.sds,X),Matrix(self.sds,Y)).compute()\n+\n+ [m, y_pred, acc] = multiLogRegPredict(Matrix(self.sds,X),Matrix(self.sds,bias), Matrix(self.sds,Y)).compute()\n+\n+ self.assertTrue(acc > 98)\n+\n+\n+ def gen_data(self):\n+ np.random.seed(13241)\n+ # Generate data\n+ mu, sigma = 1, 0.1\n+ X = np.reshape(np.random.normal(mu, sigma, 500), (2,250))\n+ # All over 1 is true\n+ f = lambda x: (x[0] > 1) + 1\n+ labels = f(X)\n+ # Y labels as double\n+ Y = np.array(labels, dtype=np.double)\n+ # Transpose X to fit input format.\n+ X = X.transpose()\n+ return X, labels, Y\nif __name__ == \"__main__\":\nunittest.main(exit=False)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/tests/algorithms/test_pca.py",
"new_path": "src/main/python/tests/algorithms/test_pca.py",
"diff": "@@ -49,7 +49,7 @@ class TestPCA(unittest.TestCase):\nm1 = self.generate_matrices_for_pca(30, seed=1304)\nX = Matrix(self.sds, m1)\n# print(features)\n- res = pca(X, K=1, scale=\"FALSE\", center=\"FALSE\").compute(verbose=True)\n+ [res, model] = pca(X, K=1, scale=\"FALSE\", center=\"FALSE\").compute()\nfor (x, y) in zip(m1, res):\nself.assertTrue((x[0] > 0 and y > 0) or (x[0] < 0 and y < 0))\n@@ -58,7 +58,7 @@ class TestPCA(unittest.TestCase):\nline of numbers. Here the pca should return values that are double or close to double of the last value\n\"\"\"\nm1 = np.array([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])\n- res = pca(Matrix(self.sds, m1), K=1,\n+ [res, model] = pca(Matrix(self.sds, m1), K=1,\nscale=False, center=False).compute()\nfor x in range(len(m1) - 1):\nself.assertTrue(abs(res[x + 1] - res[0] * (x + 2)) < 0.001)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2648] Multi return for algorithms in python
This commit changes the Python interface to allow multi-return builtin
algorithms, so that when a call has multiple returns, each of them is
parsed out into a list. |
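
For context, a minimal DML sketch of the script that the updated build_code would now emit for a two-output call such as kmeans; the variable names follow the {baseOutVarString}_{idx} pattern from script.py and are purely illustrative:

    [V0_0, V0_1] = kmeans(X=V1, k=4);   # two outputs, e.g. centroids and cluster assignments
    write(V0_0, './tmp_0');             # each output is written to its own file
    write(V0_1, './tmp_1');
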
49,720 | 01.09.2020 12:23:38 | -7,200 | b3ef333a164a279abeccad50ca0fab268a308a3e | [MINOR] cleanup map built-in | [
{
"change_type": "MODIFY",
"old_path": "docs/site/dml-language-reference.md",
"new_path": "docs/site/dml-language-reference.md",
"diff": "@@ -2026,13 +2026,13 @@ The following example uses <code>transformapply()</code> with the input matrix a\n### Processing Frames\n-Built-In functions <code>dml_map()</code> is supported for frames to execute any arbitrary Java code on a frame.\n+The built-in function <code>map()</code> provides support for the lambda expressions.\n-**Table F5**: Frame dml_map Built-In Function\n+**Table F5**: Frame map built-in function\nFunction | Description | Parameters | Example\n-------- | ----------- | ---------- | -------\n-dml_map() | It will execute the given java code on a frame (column-vector).| Input: (X <frame>, y <String>) <br/>Output: <frame>. <br/> X is a frame and y is a String containing the Java code to be executed on frame X. where X is a column vector. | X = read(\"file1\", data_type=\"frame\", rows=2, cols=3, format=\"binary\") <br/> y = \"Java code\" <br/> Z = dml_map(X, y) <br/> # Dimensions of Z = Dimensions of X; <br/> example: Z = dml_map(X, \"x.charAt(2)\")\n+map() | It will execute the given lambda expression on a frame.| Input: (X <frame>, y <String>) <br/>Output: <frame>. <br/> X is a frame and y is a String containing the lambda expression to be executed on frame X. | X = read(\"file1\", data_type=\"frame\", rows=2, cols=3, format=\"binary\") <br/> y = \"lambda expression\" <br/> Z = map(X, y) <br/> # Dimensions of Z = Dimensions of X; <br/> example: Z = map(X, \"x -> x.charAt(2)\")\nExample let X =\n##### FRAME: nrow = 10, ncol = 1 <br/>\n@@ -2049,7 +2049,7 @@ Example let X =\nwest\neast\n-Z = dml_map(X, \"x.toUpperCase()\") <br/>\n+Z = map(X, \"x -> x.toUpperCase()\") <br/>\nprint(toString(Z))\n##### FRAME: nrow = 10, ncol = 1 <br/>\n# C1\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Types.java",
"new_path": "src/main/java/org/apache/sysds/common/Types.java",
"diff": "@@ -317,7 +317,7 @@ public class Types\ncase BITWSHIFTR: return \"bitwShiftR\";\ncase DROP_INVALID_TYPE: return \"dropInvalidType\";\ncase DROP_INVALID_LENGTH: return \"dropInvalidLength\";\n- case MAP: return \"dml_map\";\n+ case MAP: return \"_map\";\ndefault: return name().toLowerCase();\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/functionobjects/Builtin.java",
"new_path": "src/main/java/org/apache/sysds/runtime/functionobjects/Builtin.java",
"diff": "@@ -50,7 +50,7 @@ public class Builtin extends ValueFunction\npublic enum BuiltinCode { SIN, COS, TAN, SINH, COSH, TANH, ASIN, ACOS, ATAN, LOG, LOG_NZ, MIN,\nMAX, ABS, SIGN, SQRT, EXP, PLOGP, PRINT, PRINTF, NROW, NCOL, LENGTH, LINEAGE, ROUND, MAXINDEX, MININDEX,\nSTOP, CEIL, FLOOR, CUMSUM, CUMPROD, CUMMIN, CUMMAX, CUMSUMPROD, INVERSE, SPROP, SIGMOID, EVAL, LIST,\n- TYPEOF, DETECTSCHEMA, ISNA, ISNAN, ISINF, DROP_INVALID_TYPE, DROP_INVALID_LENGTH, DML_MAP,\n+ TYPEOF, DETECTSCHEMA, ISNA, ISNAN, ISINF, DROP_INVALID_TYPE, DROP_INVALID_LENGTH, MAP,\nCOUNT_DISTINCT, COUNT_DISTINCT_APPROX}\n@@ -107,7 +107,7 @@ public class Builtin extends ValueFunction\nString2BuiltinCode.put( \"isinf\", BuiltinCode.ISINF);\nString2BuiltinCode.put( \"dropInvalidType\", BuiltinCode.DROP_INVALID_TYPE);\nString2BuiltinCode.put( \"dropInvalidLength\", BuiltinCode.DROP_INVALID_LENGTH);\n- String2BuiltinCode.put( \"dml_map\", BuiltinCode.DML_MAP);\n+ String2BuiltinCode.put( \"_map\", BuiltinCode.MAP);\n}\nprivate Builtin(BuiltinCode bf) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/CPInstructionParser.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/CPInstructionParser.java",
"diff": "@@ -154,7 +154,7 @@ public class CPInstructionParser extends InstructionParser\nString2CPInstructionType.put( \"min\" , CPType.Binary);\nString2CPInstructionType.put( \"dropInvalidType\" , CPType.Binary);\nString2CPInstructionType.put( \"dropInvalidLength\" , CPType.Binary);\n- String2CPInstructionType.put( \"dml_map\" , CPType.Binary);\n+ String2CPInstructionType.put( \"_map\" , CPType.Binary); // _map represents the operation map\nString2CPInstructionType.put( \"nmax\", CPType.BuiltinNary);\nString2CPInstructionType.put( \"nmin\", CPType.BuiltinNary);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/SPInstructionParser.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/SPInstructionParser.java",
"diff": "@@ -179,7 +179,7 @@ public class SPInstructionParser extends InstructionParser\nString2SPInstructionType.put( \"map-*\", SPType.Binary);\nString2SPInstructionType.put( \"dropInvalidType\", SPType.Binary);\nString2SPInstructionType.put( \"mapdropInvalidLength\", SPType.Binary);\n- String2SPInstructionType.put( \"dml_map\", SPType.Binary);\n+ String2SPInstructionType.put( \"_map\", SPType.Binary); // _map refers to the operation map\n// Relational Instruction Opcodes\nString2SPInstructionType.put( \"==\" , SPType.Binary);\nString2SPInstructionType.put( \"!=\" , SPType.Binary);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/binary/frame/FrameMapTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/binary/frame/FrameMapTest.java",
"diff": "@@ -33,7 +33,7 @@ import org.junit.BeforeClass;\nimport org.junit.Test;\npublic class FrameMapTest extends AutomatedTestBase {\n- private final static String TEST_NAME = \"dmlMap\";\n+ private final static String TEST_NAME = \"map\";\nprivate final static String TEST_DIR = \"functions/binary/frame/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + FrameMapTest.class.getSimpleName() + \"/\";\n"
},
{
"change_type": "RENAME",
"old_path": "src/test/scripts/functions/binary/frame/dmlMap.dml",
"new_path": "src/test/scripts/functions/binary/frame/map.dml",
"diff": ""
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] cleanup map built-in |
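
For quick reference, the usage sketch of the renamed built-in, taken from the updated language-reference entry in this commit (the read parameters are illustrative):

    X = read("file1", data_type="frame", rows=10, cols=1, format="binary")
    Z = map(X, "x -> x.toUpperCase()")   # applies the lambda expression to each cell
    print(toString(Z))
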
49,720 | 01.09.2020 22:47:36 | -7,200 | 36eaaeb961130471c7d8f19456a7848312ff25b5 | imputeByFD now accepts the matrix input
The initial version of imputeByFD accepts a frame input and then internally recodes the
frame before performing imputations. Now the method accepts a matrix input
(a recoded matrix for non-numeric data) and directly performs imputations on matrix values. | [
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/imputeByFD.dml",
"new_path": "scripts/builtin/imputeByFD.dml",
"diff": "# ---------------------------------------------------------------------------------------------\n# NAME TYPE DEFAULT MEANING\n# ---------------------------------------------------------------------------------------------\n-# F String -- Data frame\n+# X Double -- Matrix X\n# source Integer -- source attribute to use for imputation and error correction\n# target Integer -- attribute to be fixed\n# threshold Double -- threshold value in interval [0, 1] for robust FDs\n# ---------------------------------------------------------------------------------------------\n# NAME TYPE DEFAULT MEANING\n# ---------------------------------------------------------------------------------------------\n-# imputed_F String --- Frame with possible imputations\n+# X Double --- Matrix with possible imputations\n-s_imputeByFD = function(Frame[String] F, Integer sourceAttribute, Integer targetAttribute, Double threshold)\n- return(Frame[String] imputed_F)\n+m_imputeByFD = function(Matrix[Double] X, Integer sourceAttribute, Integer targetAttribute, Double threshold)\n+ return(Matrix[Double] X)\n{\n-\n# sanity checks\nif( threshold < 0 | threshold > 1 )\nstop(\"Stopping due to invalid input, threshold required in interval [0, 1] found \"+threshold)\n- if(sourceAttribute < 0 | sourceAttribute > ncol(F) | targetAttribute < 0 | targetAttribute > ncol(F))\n+ if(sourceAttribute < 0 | sourceAttribute > ncol(X) | targetAttribute < 0 | targetAttribute > ncol(X))\nstop(\"Stopping due to invalid source and target\")\n-\n- # detect schema for transformation\n- schema = detectSchema(F)\n- s=\"\"\n- for(i in 1: ncol(F)) {\n- if(as.scalar(schema[1,i]) == \"STRING\" | as.scalar(schema[1,i]) == \"BOOLEAN\" )\n- s = s+as.integer(i)+\",\";\n- }\n-\n- # recode data frame\n- jspecR = \"{ids:true, recode:[\"+s+\"]}\";\n- [X, M] = transformencode(target=F, spec=jspecR);\n-\n# impute missing values and fix errors\nX[,targetAttribute] = imputeAndCorrect(X[,sourceAttribute], X[,targetAttribute], threshold)\n-\n- # getting the actual data back\n- dF = transformdecode(target=X, spec=jspecR, meta=M);\n- imputed_F = dF;\n}\nimputeAndCorrect = function(Matrix[Double] X, Matrix[Double] Y, Double threshold)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/imputeFD.dml",
"new_path": "src/test/scripts/functions/builtin/imputeFD.dml",
"diff": "#\n#-------------------------------------------------------------\n-X = read($1, data_type=\"frame\", format=\"csv\", header=FALSE);\n+F = read($1, data_type=\"frame\", format=\"csv\", header=FALSE);\n+# as the method accepts the matrix so convert the non-numeric data into matrix\n+\n+# detect schema for transformation\n+schema = detectSchema(F)\n+s=\"\"\n+for(i in 1: ncol(F)) {\n+ if(as.scalar(schema[1,i]) == \"STRING\" | as.scalar(schema[1,i]) == \"BOOLEAN\" )\n+ s = s+as.integer(i)+\",\";\n+}\n+\n+# recode data frame\n+jspecR = \"{ids:true, recode:[\"+s+\"]}\";\n+[X, M] = transformencode(target=F, spec=jspecR);\n+# call the method\nY = imputeByFD(X, $2, $3, $4);\n-write(Y, $5, format=\"binary\")\n+\n+# getting the actual data back\n+dF = transformdecode(target=Y, spec=jspecR, meta=M);\n+\n+write(dF, $5, format=\"binary\")\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2659] imputeByFD now accepts the matrix input
The initial version of imputeByFD accepts a frame input and then internally recodes the
frame before performing imputations. Now the method accepts a matrix input
(a recoded matrix for non-numeric data) and directly performs imputations on matrix values. |
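
A minimal DML sketch of the new calling convention, mirroring the updated test script in this commit; the file name, recode spec, column indices, and threshold are illustrative:

    F = read("data.csv", data_type="frame", format="csv");   # raw frame with string columns
    jspecR = "{ids:true, recode:[1,2]}";                     # recode the non-numeric columns
    [X, M] = transformencode(target=F, spec=jspecR);         # frame -> numeric matrix
    Y = imputeByFD(X, 1, 2, 0.6);     # impute column 2 from column 1 with threshold 0.6
    dF = transformdecode(target=Y, spec=jspecR, meta=M);     # decode codes back to strings
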
49,720 | 02.09.2020 14:20:50 | -7,200 | f4cc41c8c90dc9787573af36db69cbd3b66a6bd2 | Synthetic Minority Over-sampling Technique (SMOTE)
Technique for handling unbalanced classes by oversampling the minority class
Date: Wed Sep 2 14:16:32 2020 +0200
Closes | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/smote.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+# Builtin function for handing class imbalance using Synthetic Minority Over-sampling Technique (SMOTE)\n+#\n+# INPUT PARAMETERS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# X Double --- Matrix of minority class samples\n+# s Integer 25 Amount of SMOTE (percentage of oversampling), integral multiple of 100\n+# k Integer 1 Number of nearest neighbour\n+# ---------------------------------------------------------------------------------------------\n+\n+\n+#Output(s)\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# Y Double --- Matrix of (N/100)-1 * nrow(X) synthetic minority class samples\n+\n+m_smote = function(Matrix[Double] X, Integer s = 200, Integer k = 1, Boolean verbose = FALSE)\n+return (Matrix[Double] Y) {\n+\n+ if(s < 100 | (s%%100) != 0)\n+ {\n+ print(\"the number of samples should be an integral multiple of 100. 
Setting s = 100\")\n+ s = 100\n+ }\n+ # matrix to keep the index of KNN for each minority sample\n+ knn_index = matrix(0,k,0)\n+ # find nearest neighbour\n+ for(i in 1:nrow(X))\n+ {\n+ knn = nn(X, X[i, ], k)\n+ knn_index = cbind(knn_index, knn)\n+ }\n+\n+ # number of synthetic samples from each minority class sample\n+ iter = (s/100)-1\n+ # matrix to store synthetic samples\n+ synthetic_samples = matrix(0, 0, ncol(X))\n+ while(iter > 0)\n+ {\n+ # generate a random number\n+ # TODO avoid duplicate random numbers\n+ rand_index = as.integer(as.scalar(Rand(rows=1, cols=1, min=1, max=k)))\n+ # pick the random NN\n+ knn_sample = knn_index[rand_index,]\n+ # generate sample\n+ for(i in 1:ncol(knn_index))\n+ {\n+ index = as.scalar(knn_sample[1,i])\n+ X_diff = X[index,] - X[i, ]\n+ gap = as.scalar(Rand(rows=1, cols=1, min=0, max=1))\n+ X_sys = X[i, ] + (gap*X_diff)\n+ synthetic_samples = rbind(synthetic_samples, X_sys)\n+ }\n+ iter = iter - 1\n+ }\n+\n+ Y = synthetic_samples\n+}\n+\n+\n+\n+nn = function(Matrix[Double] X, Matrix[Double] instance, Integer k )\n+return (Matrix[Double] knn_)\n+{\n+ if(nrow(X) < k)\n+ stop(\"can not pick \"+k+\" nearest neighbours from \"+nrow(X)+\" total instances\")\n+\n+ # compute the euclidean distance\n+ diff = X - instance\n+ square_diff = diff^2\n+ distance = sqrt(rowSums(square_diff))\n+ sort_dist = order(target = distance, by = 1, decreasing= FALSE, index.return = TRUE)\n+ knn_ = sort_dist[2:k+1,]\n+}\n+\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"new_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"diff": "@@ -177,6 +177,7 @@ public enum Builtins {\nSINH(\"sinh\", false),\nSTEPLM(\"steplm\",true, ReturnType.MULTI_RETURN),\nSLICEFINDER(\"slicefinder\", true),\n+ SMOTE(\"smote\", true),\nSOLVE(\"solve\", false),\nSQRT(\"sqrt\", false),\nSUM(\"sum\", false),\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinSmoteTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.builtin;\n+\n+import org.apache.sysds.api.DMLScript;\n+import org.apache.sysds.common.Types;\n+import org.apache.sysds.hops.OptimizerUtils;\n+import org.apache.sysds.lops.LopProperties;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Assert;\n+import org.junit.Test;\n+\n+import java.util.HashMap;\n+\n+public class BuiltinSmoteTest extends AutomatedTestBase {\n+\n+ private final static String TEST_NAME = \"smote\";\n+ private final static String TEST_DIR = \"functions/builtin/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + BuiltinSmoteTest.class.getSimpleName() + \"/\";\n+\n+ private final static int rows = 20;\n+ private final static int colsX = 20;\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] {\"C\"}));\n+ }\n+\n+ @Test\n+ public void testSmote1CP() {\n+ runSmoteTest(300, 3, LopProperties.ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testSmote2CP() {\n+ runSmoteTest(400, 5, LopProperties.ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testSmote1Spark() {\n+ runSmoteTest(300, 3, LopProperties.ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testSmote2Spark() { runSmoteTest(400, 5, LopProperties.ExecType.SPARK); }\n+\n+\n+ private void runSmoteTest(int sample, int nn, LopProperties.ExecType instType) {\n+ Types.ExecMode platformOld = setExecMode(instType);\n+\n+ boolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = false;\n+ try {\n+ loadTestConfiguration(getTestConfiguration(TEST_NAME));\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[] {\"-nvargs\", \"X=\" + input(\"X\"), \"S=\" + sample, \"K=\" + nn , \"Z=\"+output(\"Sum\"), \"T=\"+input(\"T\")};\n+\n+ double[][] X = getRandomMatrix(rows, colsX, 0, 1, 0.3, 1);\n+\n+ writeInputMatrixWithMTD(\"X\", X, true);\n+\n+ double[][] T = getRandomMatrix(rows, colsX, 2, 3.0, 0.3, 3);\n+\n+ writeInputMatrixWithMTD(\"T\", T, true);\n+\n+ runTest(true, false, null, -1);\n+ HashMap<MatrixValue.CellIndex, Double> value = readDMLMatrixFromHDFS(\"Sum\");\n+ Assert.assertEquals(\"synthetic samples does not fall into minority class cluster\",1,\n+ value.get(new MatrixValue.CellIndex(1,1)), 0.000001);\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ 
DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = oldFlag;\n+ OptimizerUtils.ALLOW_AUTO_VECTORIZATION = true;\n+ OptimizerUtils.ALLOW_OPERATOR_FUSION = true;\n+ }\n+ }\n+}\n+\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/smote.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+A = read($X);\n+B = smote(X = A, s=$S, k=$K);\n+\n+# test if all point fall in same cluster (closed to each other)\n+# read some new data T != A\n+T = read($T);\n+# bind all instanced of minority class\n+A_B = rbind(A, B)\n+n = nrow(A_B)\n+# group data into k=2 clusters\n+[C, Y] = kmeans(rbind(A_B, T), 2, 10, 100, 0.000001, FALSE, 50)\n+# check if the instances of A and B fall in same cluster\n+check = matrix(as.scalar(Y[1,1]), n, 1)\n+testSum = sum(check - Y[1:n,])\n+# hack for avoiding null pointer exception while reading a single zero in HashMap\n+testSum = ifelse(testSum == 0, 1, testSum)\n+testSum = as.matrix(testSum)\n+write(testSum, $Z);\n\\ No newline at end of file\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2658] Synthetic Minority Over-sampling Technique (SMOTE)
Technique for handling unbalanced classes by oversampling the minority class
Date: Wed Sep 2 14:16:32 2020 +0200
Closes #988 |
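
A minimal usage sketch of the new built-in, following the test script in this commit; the oversampling percentage and neighbour count are illustrative, and per the function header the result holds (s/100 - 1) * nrow(X) synthetic rows:

    A = read($X);                       # matrix of minority-class samples
    B = smote(X = A, s = 200, k = 3);   # 200% oversampling using 3 nearest neighbours
    write(B, $Z);
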
49,706 | 02.09.2020 14:38:58 | -7,200 | 0bdf9ac52e26bdb73d4a93e8294158bbe5ca070b | [DOCS] Fix typo in pca algorithm python | [
{
"change_type": "MODIFY",
"old_path": "docs/api/python/api/script_building/script.html",
"new_path": "docs/api/python/api/script_building/script.html",
"diff": "<dd><p>DMLScript is the class used to describe our intended behavior in DML. This script can be then executed to\nget the results.</p>\n<p>TODO caching</p>\n-<p>TODO multiple outputs</p>\n<p>TODO rerun with different inputs without recompilation</p>\n<dl class=\"py method\">\n<dt id=\"systemds.script_building.script.DMLScript.add_code\">\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/api/python/searchindex.js",
"new_path": "docs/api/python/searchindex.js",
"diff": "-Search.setIndex({docnames:[\"api/context/systemds_context\",\"api/matrix/data_gen\",\"api/matrix/federated\",\"api/matrix/matrix\",\"api/onnx_systemds/convert\",\"api/onnx_systemds/onnx_helper\",\"api/onnx_systemds/operator_gen\",\"api/onnx_systemds/render\",\"api/onnx_systemds/util\",\"api/operator/algorithms\",\"api/operator/operation_node\",\"api/script_building/dag\",\"api/script_building/script\",\"api/utils/converters\",\"api/utils/helpers\",\"getting_started/install\",\"getting_started/simple_examples\",\"guide/algorithms\",\"guide/federated\",\"index\",\"onnx_systemds/onnx_systemds\",\"onnx_systemds/onnx_systemds_design\"],envversion:{\"sphinx.domains.c\":2,\"sphinx.domains.changeset\":1,\"sphinx.domains.citation\":1,\"sphinx.domains.cpp\":3,\"sphinx.domains.index\":1,\"sphinx.domains.javascript\":2,\"sphinx.domains.math\":2,\"sphinx.domains.python\":2,\"sphinx.domains.rst\":2,\"sphinx.domains.std\":1,\"sphinx.ext.todo\":2,sphinx:56},filenames:[\"api/context/systemds_context.rst\",\"api/matrix/data_gen.rst\",\"api/matrix/federated.rst\",\"api/matrix/matrix.rst\",\"api/onnx_systemds/convert.rst\",\"api/onnx_systemds/onnx_helper.rst\",\"api/onnx_systemds/operator_gen.rst\",\"api/onnx_systemds/render.rst\",\"api/onnx_systemds/util.rst\",\"api/operator/algorithms.rst\",\"api/operator/operation_node.rst\",\"api/script_building/dag.rst\",\"api/script_building/script.rst\",\"api/utils/converters.rst\",\"api/utils/helpers.rst\",\"getting_started/install.rst\",\"getting_started/simple_examples.rst\",\"guide/algorithms.rst\",\"guide/federated.rst\",\"index.rst\",\"onnx_systemds/onnx_systemds.rst\",\"onnx_systemds/onnx_systemds_design.rst\"],objects:{\"systemds.context\":{SystemDSContext:[0,0,1,\"\"]},\"systemds.context.SystemDSContext\":{__init__:[0,1,1,\"\"],close:[0,1,1,\"\"],get_stderr:[0,1,1,\"\"],get_stdout:[0,1,1,\"\"]},\"systemds.matrix\":{Federated:[2,0,1,\"\"],Matrix:[3,0,1,\"\"],data_gen:[1,2,0,\"-\"]},\"systemds.matrix.Federated\":{__init__:[2,1,1,\"\"]},\"systemds.matrix.Matrix\":{__init__:[3,1,1,\"\"],cholesky:[3,1,1,\"\"],code_line:[3,1,1,\"\"],compute:[3,1,1,\"\"],order:[3,1,1,\"\"],pass_python_data_to_prepared_script:[3,1,1,\"\"],rev:[3,1,1,\"\"],t:[3,1,1,\"\"]},\"systemds.matrix.data_gen\":{full:[1,3,1,\"\"],rand:[1,3,1,\"\"],seq:[1,3,1,\"\"]},\"systemds.onnx_systemds\":{convert:[4,2,0,\"-\"],onnx_helper:[5,2,0,\"-\"],operator_gen:[6,2,0,\"-\"],render:[7,2,0,\"-\"],util:[8,2,0,\"-\"]},\"systemds.onnx_systemds.convert\":{onnx2systemds:[20,3,1,\"\"]},\"systemds.onnx_systemds.onnx_helper\":{NodeTree:[5,0,1,\"\"],PreparedValue:[5,0,1,\"\"],get_graph_inputs_with_initializers:[5,3,1,\"\"],get_graph_inputs_without_initializers:[5,3,1,\"\"],get_value_info:[5,3,1,\"\"],load_model:[5,3,1,\"\"]},\"systemds.onnx_systemds.onnx_helper.NodeTree\":{remove_end_node:[5,1,1,\"\"]},\"systemds.onnx_systemds.operator_gen\":{gen_1input_1output_mat_operator:[6,3,1,\"\"],gen_2input_1output_operator:[6,3,1,\"\"],gen_simple_function_call:[6,3,1,\"\"]},\"systemds.onnx_systemds.render\":{gen_graph_functions:[7,3,1,\"\"],gen_model_header:[7,3,1,\"\"],gen_node_script:[7,3,1,\"\"],gen_script:[7,3,1,\"\"],render_function:[7,3,1,\"\"]},\"systemds.onnx_systemds.util\":{generate_function_name:[8,3,1,\"\"],resolve_systemds_root:[8,3,1,\"\"]},\"systemds.operator\":{OperationNode:[10,0,1,\"\"],algorithm:[9,2,0,\"-\"]},\"systemds.operator.OperationNode\":{\"var\":[10,1,1,\"\"],__init__:[10,1,1,\"\"],abs:[10,1,1,\"\"],acos:[10,1,1,\"\"],asin:[10,1,1,\"\"],atan:[10,1,1,\"\"],code_line:[10,1,1,\"\"],compute:
[10,1,1,\"\"],cos:[10,1,1,\"\"],cosh:[10,1,1,\"\"],get_lineage_trace:[10,1,1,\"\"],mean:[10,1,1,\"\"],pass_python_data_to_prepared_script:[10,1,1,\"\"],sin:[10,1,1,\"\"],sinh:[10,1,1,\"\"],sum:[10,1,1,\"\"],tan:[10,1,1,\"\"],tanh:[10,1,1,\"\"]},\"systemds.operator.algorithm\":{kmeans:[9,3,1,\"\"],l2svm:[9,3,1,\"\"],lm:[9,3,1,\"\"],pca:[9,3,1,\"\"]},\"systemds.script_building\":{dag:[11,2,0,\"-\"],script:[12,2,0,\"-\"]},\"systemds.script_building.dag\":{DAGNode:[11,0,1,\"\"],OutputType:[11,0,1,\"\"]},\"systemds.script_building.dag.DAGNode\":{code_line:[11,1,1,\"\"],compute:[11,1,1,\"\"],get_lineage_trace:[11,1,1,\"\"],pass_python_data_to_prepared_script:[11,1,1,\"\"]},\"systemds.script_building.script\":{DMLScript:[12,0,1,\"\"]},\"systemds.script_building.script.DMLScript\":{add_code:[12,1,1,\"\"],add_input_from_python:[12,1,1,\"\"],build_code:[12,1,1,\"\"],execute:[12,1,1,\"\"]},\"systemds.utils\":{converters:[13,2,0,\"-\"],helpers:[14,2,0,\"-\"]},\"systemds.utils.converters\":{matrix_block_to_numpy:[13,3,1,\"\"],numpy_to_matrix_block:[13,3,1,\"\"]},\"systemds.utils.helpers\":{create_params_string:[14,3,1,\"\"],get_module_dir:[14,3,1,\"\"]}},objnames:{\"0\":[\"py\",\"class\",\"Python class\"],\"1\":[\"py\",\"method\",\"Python method\"],\"2\":[\"py\",\"module\",\"Python module\"],\"3\":[\"py\",\"function\",\"Python function\"]},objtypes:{\"0\":\"py:class\",\"1\":\"py:method\",\"2\":\"py:module\",\"3\":\"py:function\"},terms:{\"0011352\":16,\"0014692\":16,\"00324092\":16,\"00616902\":16,\"0095087\":16,\"01039221\":16,\"01686351\":16,\"02033445\":16,\"02649209\":16,\"03839821\":16,\"04078623\":9,\"09961844\":9,\"0_242\":15,\"0_xxx\":15,\"100\":16,\"11538199\":9,\"18954599\":9,\"20386541\":9,\"2205885\":9,\"242\":15,\"26812763\":9,\"37957689\":9,\"39956035\":9,\"4327084\":9,\"43386048\":9,\"49858968\":9,\"54638565\":9,\"55358873\":9,\"57000751\":9,\"5x10\":16,\"8001\":18,\"8002\":18,\"8003\":18,\"abstract\":19,\"boolean\":9,\"case\":15,\"class\":[0,2,3,5,7,10,11,12,21],\"default\":[0,3],\"export\":[20,21],\"float\":[1,2,3,9,10],\"function\":[3,6,7,8,10,14,16,21],\"import\":[0,6,7,9,15,16,18,20],\"int\":[0,1,2,3,9,10],\"new\":[0,16],\"return\":[0,1,2,3,5,6,7,8,9,10,11,12,14,16],\"true\":3,\"try\":0,\"var\":10,\"while\":10,For:[11,12,13,14,21],Not:21,OPS:[11,12,13,14],One:16,The:[0,5,6,7,8,9,10,13,15,16,18,19,21],Then:[15,20],There:21,These:[19,21],Using:18,__eq__:10,__init__:[0,2,3,10],__lt__:10,abl:[18,21],abs:10,absolut:10,access:21,aco:10,action:[3,10,11],activ:[3,10,11],actual:[3,10,11,16,21],acycl:11,adapt:21,add:[6,12,21],add_cod:12,add_input_from_python:12,adding:21,addit:[3,10,11],addition:[6,15],addr1:18,addr2:18,addr3:18,address:[2,18],ads:21,after:[15,18,21],again:[0,21],algorithm:16,all:[0,1,3,5,7,10,11,14,15,18,21],allow:21,alreadi:[12,21],also:[0,7,18,20,21],although:3,alwai:0,ani:[0,11,21],annoi:0,anoth:16,apach:[15,19],appli:3,arcco:10,arcsin:10,arctan:10,arg:[2,3],argument:[6,9,21],arrai:[3,10,11,13,16,19],asarrai:18,asin:10,associ:5,atan:10,attribut:21,automat:[0,16,21],avail:21,averag:9,avg_sample_size_per_centroid:9,axi:10,b08:15,base:21,basic:14,becaus:[0,15,21],becom:[18,21],befor:[3,10,11],begin:2,behavior:12,below:[3,21],besid:21,best:21,bin:21,bind:19,bit:15,block:[0,13],bodi:7,bool:[2,3,9,10,11,12],both:14,bottom:[18,21],box:9,buffer:21,build:[3,10,11,12,15,21],build_cod:12,built:16,builtin:[3,10,11],cach:12,calcul:[0,10,16],call:[3,6,10,11,14,16,18,20,21],can:[0,3,9,10,11,12,14,15,16,18,20,21],cell:1,center:9,centroid:9,chang:9,check:[3,5,15,18],choleski:3,clean:19,cle
anup:0,clone:15,close:[0,16],code:[3,10,11,12,15,16,21],code_lin:[3,10,11],col:[1,18],column:[1,3,9,10],com:15,combin:21,command:[15,18],comment:21,common:[5,21],compar:21,comparison:10,compat:[15,16],compil:19,complet:[1,16],complex:21,comput:[3,9,10,11,16,18,21],concurr:9,connect:0,consist:21,construct:[3,8,10],contain:[9,15],context:[0,1,2,9,10,12,16,18],contrast:19,conveni:21,convent:21,converg:9,convers:21,convert:[5,14,21],coordin:18,copi:15,correct:7,correctli:[7,18],correspond:[5,21],cos:10,cosh:10,cpu:19,creat:[0,1,2,3,10,11,12,14,16,18],create_params_str:14,csv:18,current:[0,13,18,20,21],dag:[3,9,10,12,21],dag_root:12,dagnod:[2,3,9,10,11,12],data:[3,9,10,11,12,16,18,19,21],data_gen:[1,16],dataset:[9,19],datatensor:19,deal:21,debug:21,declar:9,decomposit:3,decreas:3,dedic:21,defin:[1,3,11,21],definit:[3,21],delimit:18,depend:[15,21],deploy:19,deriv:3,describ:[12,21],develop:16,dict:[2,3,9,10,11,14],dictionari:[9,14,21],differ:[9,12,18,19,21],dim:18,dimens:[9,18,19],dimension:19,direct:11,directli:[6,16],directori:20,distribut:[1,15,19],dml:[3,4,5,7,10,11,12,14,20],dml_wrapper:21,dmlcode:12,dmlscript:12,doc:21,document:21,doe:[16,18,21],doesn:11,done:18,doubl:[10,16,18],dtype:16,each:[2,7,18,21],easili:16,echo:18,effici:19,either:[3,10,19],element:[16,21],enabl:[18,21],end:[2,5,19],engin:[19,21],ensur:[0,3],entir:19,enumer:11,env:[6,7],environ:[6,7,8,15,20,21],eps:9,equal:[3,10,11],equival:10,error:[0,10],essenti:21,etc:10,evalu:[3,10,11],even:10,exactli:21,exampl:[9,16,20],except:5,exchang:16,execut:[0,3,10,11,12,15,16,18,19],exist:[5,19,21],expens:0,expertis:19,extens:21,extra:9,facilit:19,fail:21,fals:[3,10,11,12,18],featur:[9,16,19],fed_a:18,fed_b:18,few:15,file:[3,4,5,6,7,18,20],fill:[0,1,16],find:21,finish:0,first:[15,16,18,19,20,21],fit:9,flag:[3,9],folder:21,follow:[0,9,15,18],form:[0,20,21],format:[14,18,21],found:5,fraction:1,free:21,from:[0,1,3,4,5,8,9,10,11,12,15,16,18,19,20,21],full:[1,16],function_cal:21,fundament:3,gen_1input_1output_mat_oper:6,gen_2input_1output_oper:6,gen_graph_funct:[7,21],gen_model_head:7,gen_node_script:7,gen_script:7,gen_simple_function_cal:6,gener:[3,4,6,7,9,10,11,16,20,21],generate_function_nam:[8,21],generated_node_script:7,generatedscriptpart:[6,7],get:[0,3,10,11,12,16],get_graph_inputs_with_initi:5,get_graph_inputs_without_initi:5,get_lineage_trac:[10,11],get_module_dir:14,get_stderr:0,get_stdout:0,get_value_info:5,getter:0,git:15,github:15,give:14,given:[1,3,4,5,7,8,9,13,20],googl:21,gpu:19,graph:[5,6,7,8,11,20],graph_nam:8,graphproto:[5,6,7],guid:18,handi:21,handl:[0,21],has:[0,12],have:[0,4,5,6,15,18,19,20,21],header:[7,18],held:2,help:[19,21],helper:21,here:18,heterogen:19,high:[16,19],homogen:19,howev:21,http:15,human:21,hybrid:19,idea:21,identifi:10,immedi:10,implement:0,improv:21,includ:21,increas:3,increment:1,independ:21,index:[2,3],index_return:3,indic:18,info:5,inform:[3,10,11,21],initi:[5,9,21],input:[3,4,5,6,9,10,12,20,21],input_onnx_fil:[4,20],input_var:12,insert:[7,21],instal:[17,20],instanc:[0,9,13,16],instead:[15,21],instruct:[0,18,20],integr:19,intend:[3,10,11,12],interfac:19,intern:[11,12,13,14,21],interpret:1,invok:20,involv:15,is_python_local_data:10,is_verbos:9,iter:[2,9,10,14],its:[18,21],itself:21,jar:15,java:[0,3,10,11,15],java_gatewai:[3,10,11,12,13],javaobject:[3,10,11,12,13],jinja2:[6,7,15],jinja:[6,7,21],just:15,jvm:[0,3,10,11,13],jvmview:[3,10,11,13],kmean:9,kwarg:[2,3,9],l2svm:[9,16],label:[9,16],lambd:1,lamda:1,languag:19,larg:21,last:18,lazi:[3,10,11],lazili:10,learn:9,least:0,leav:0,left:[0,6,18,21],let:16,
level:[16,19],leverag:18,librari:15,lifecycl:19,like:[6,10,18,21],line:[0,3,10,11,12,15],lineag:[3,10,11,12],list:[0,5,7,18,21],load:[4,5,6,7,20],load_model:5,local:[10,12,19],localhost:18,locat:[18,21],logic:21,look:[15,16,18,21],m_re:16,m_res_np:16,machin:[3,9,10,11],mai:19,main:[7,15,20,21],main_graph:7,make:[15,16,18],manag:0,mani:21,manner:9,mat:3,matmul:21,matric:[3,18],matrix:[1,2,9,10,13,18],matrix_block_to_numpi:13,matrixblock:13,maven:15,max:1,max_it:9,maximum:[1,9],mean:[9,10],member:21,memori:19,merg:21,metadata:18,method:[3,10,11],might:[0,15],min:1,mix:15,mode:15,model:[4,5,7,9,16,19,20],model_gener:[20,21],modelproto:[5,7],modul:14,more:21,most:[3,10],mtd:18,multi:19,multipl:[12,16],multipli:[16,18],mvn:15,name:[2,3,4,5,8,10,11,12,14,20,21],named_input_nod:10,named_input_var:[3,10,11],named_paramet:14,necessari:[0,5,15,16],need:[0,3,15,18,20,21],nest:19,neutron:21,newer:15,next:18,nice:21,node:[3,5,6,7,11,21],nodeproto:[5,6,7],nodetre:5,non:[1,5,15],none:[1,3,4,5,7,10,11,12,20],normal:1,note:[15,16,18],noth:[10,16],np_arr:13,number:[0,1,3,9,15,16,21],numpi:[3,9,10,11,13,15,16,18],numpy_to_matrix_block:13,object:[0,2,3,10,11,12,13],older:15,onc:[0,15,18],one:[0,1,6,9,16,18,21],ones:18,onli:[0,1,3,10,11,12,16,21],onnx2systemd:[4,20],onnx:[4,6,7,15],onnx_fil:5,onnx_help:[5,21],onnx_onnx_rel_1_7_ml_pb2:[5,6,7],onnx_systemd:[4,5,6,7,8,20],open:[0,15],openjdk:15,oper:[0,1,2,3,9,11,12,19],operation_nod:[1,2,3,9,10],operationnod:[1,2,3,9,10],operator_gen:[6,7],operator_gener:21,option:[4,7,20],order:[3,7,18,19,21],other:[15,18,21],our:[12,14,16,18],out:[0,9,21],output:[0,3,4,6,9,10,11,12,15,16,20,21],output_dml_fil:[4,20],output_fil:7,output_refer:21,output_test:21,output_typ:[3,10],outputtyp:[10,11],over:19,overload:10,own:21,packag:[15,16,19],pair:[2,3,10,11],param:[0,2,5,6,8],paramet:[1,2,3,4,5,6,7,9,10,11,12,13,14,20],pars:0,part:[6,7,15,21],pass:[0,3,10,11,12],pass_python_data_to_prepared_script:[3,10,11],path:[3,8,14,18],pathlik:[3,14],pca:9,pdf:1,per:9,perfom:[],perform:[9,10],pip3:15,plan:19,pleas:15,point:[3,15],pointer:13,poison:1,port:[0,18],posit:[3,10],possibl:[3,20,21],pre_setup:15,precis:21,prepar:[3,5,10,11,12],prepared_script:[3,10,11],preparedscript:12,preparedvalu:5,prerequisit:17,print:[0,3,9,10,11,16,18],procedur:0,process:[0,16,21],produc:21,profession:21,program:0,project:15,proto3:21,proto:21,protobuf:21,protocol:[0,21],provid:[6,14,16,19,21],py4j:[3,10,11,12,13,15,16],pycharm:21,python:[0,3,10,11,12,15,16,18,19,20,21],queue:0,quick:16,quit:15,rais:5,rand:[1,9],randint:16,random:[0,1,9,16],rang:[2,16],ratio:9,read:0,readabl:21,readi:15,recogn:[3,10],recommend:3,recompil:12,record:9,recurs:21,reduc:9,refer:[18,21],rememb:16,remov:[5,21],remove_end_nod:5,render_funct:7,replac:21,repositori:[15,18,21],repres:[1,2,3,5,10,11,14],represent:[13,21],requir:[6,7,18,21],rerun:12,res:18,resolv:21,resolve_systemds_root:8,respect:10,respons:9,restart:21,result:[3,7,10,11,12,16,18],resultvari:12,rev:3,revers:[3,21],right:[6,21],root:15,row:[1,3,10,18],run:[0,9,13,16,18,21],runtim:15,safe:3,same:[4,20,21],sampl:9,sample_model:21,sample_model_refer:21,sample_model_wrapp:21,save:[3,10,11],savetxt:18,scalar:16,scale:9,schema:[19,21],scienc:19,scientist:16,scope:21,script:[3,5,6,7,10,11,19,20],script_build:[9,10,11,12],sds:[0,9,16,18],sds_context:[1,2,3,10],search:[5,8],second:18,see:21,seed:[1,9,16],sent:3,separ:[0,21],seq:1,sequenc:[3,10,11],serv:19,server:15,set:[9,16,18,20,21],setup:18,setw:21,sever:21,shall:[5,6,7],shape:[1,9,16],should:[3,9,10,11,12,15,16,18,21],similar:[9
,15,16],simpl:[5,6,15,21],simple_mat_add:20,simpli:[10,18,21],simplifi:16,simul:18,sin:10,sinc:[0,21],singl:[1,18,21],sinh:10,size:[1,16],skip:3,snip:21,snippet:[7,21],some:[16,18,21],someth:0,sort:3,spark:19,sparsiti:1,specif:21,specifi:[3,9,18,21],src:[15,20],stack:18,stai:21,standard:[0,21],start:[0,1,16,21],statement:0,stderr:0,stdout:0,step:[1,15],stick:21,stop:[1,16],store:[11,21],str:[1,2,3,4,5,7,8,9,10,11,12,14,20],strategi:21,string:[3,7,10,11,14,21],structur:[5,7,21],sub:[0,7,21],subprocess:[0,16],sum:[10,18],support:[0,9,18,21],sure:15,symmetr:3,syntax:[0,21],system:[15,19,21],systemd:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,16,17,18,20,21],systemds_context:[1,2],systemds_root:[8,21],systemdscontext:[1,2,3,9,10,12,16,18],take:[8,16,21],taken:0,tan:10,tanh:10,task:19,tcp:0,temp:18,templat:[6,7,21],tensor:[1,19],tensorproto:5,termin:[15,16,18],test:18,test_model:[20,21],test_simpl:20,thei:[10,21],them:[5,7],therebi:10,therefor:[0,10,21],thi:[0,1,2,3,4,10,11,12,15,16,18,19,20,21],three:[15,18,21],till:[3,10,11],todo:[1,12,17],togeth:[7,21],toler:9,tool:20,top:18,topmost:12,trace:[3,10,11],train:19,translat:21,transpos:3,travers:7,tree:[5,7,21],treenod:5,tupl:[1,2,5,7,10,12,18],two:[6,18],type:[3,10,11,21],unclutt:21,underli:[19,21],understand:21,uniform:1,uniniti:5,union:[1,2,3,9,10,12],unit:21,unittest:20,unlik:11,unnam:[2,3,10,11,14],unnamed_input_nod:10,unnamed_input_var:[3,10,11],unnamed_paramet:14,use:[3,9,11,12,13,14,15,16,18,21],used:[9,12,14,18,21],user:19,uses:21,using:[0,9,15,18,21],util:[13,14,21],valid:[8,21],valu:[1,3,5,9,10,11,16,21],value_info:5,valueinfo:5,valueinfoproto:5,var_nam:[3,10,11,12],variabl:[3,8,10,11,12,14,18,21],varianc:10,vector:[1,9],verbos:[3,9,10,11],veri:21,verifi:15,versatil:19,version:[5,15,21],via:15,view:21,virtual:[3,10,11],wai:[3,21],wcss:9,weight:9,well:[7,19,21],were:21,wheel:15,when:[0,21],which:[0,3,6,7,12,14,16,21],whose:19,why:21,wise:16,within:20,without:[3,10,11,12],work:[18,19,21],worker:2,would:[0,21],wrapper:21,write:5,written:7,yet:16,you:[10,15,18,20,21],your:[0,15,18],zero:[1,16]},titles:[\"SystemDSContext\",\"Data Generators\",\"Federated\",\"Matrix\",\"Convert\",\"Onnx Helper\",\"Operator Gen\",\"Render\",\"Util\",\"Algorithms\",\"Operation Node\",\"Dag\",\"Script\",\"Converters\",\"Helpers\",\"Install SystemDS\",\"QuickStart\",\"Built-in Algorithms\",\"Federated Environment\",\"SystemDS\",\"QuickStart Onnx\",\"Design\"],titleterms:{\"final\":21,\"import\":21,\"new\":21,aggreg:18,algorithm:[9,17],built:17,complex:16,convert:[4,13,20],creat:21,dag:11,data:1,dataset:17,design:21,dml:21,dml_script:21,environ:18,exampl:[18,21],feder:[2,18],file:21,gen:6,gener:1,get:17,goal:21,graph:21,helper:[5,14],implement:21,instal:15,limit:21,matrix:[3,16],model:[17,21],more:16,multipl:18,node:10,onnx:[5,20,21],oper:[6,10,16,21],pip:15,prerequisit:20,quickstart:[16,20],render:[7,21],run:20,script:[12,21],simpl:18,sourc:15,start:18,step:[17,21],sub_graph:21,systemd:[15,19],systemdscontext:0,test:[20,21],testcas:21,tool:21,train:17,travers:21,usag:20,util:8,valid:17,worker:18}})\n\\ No newline at end of 
file\n+Search.setIndex({docnames:[\"api/context/systemds_context\",\"api/matrix/data_gen\",\"api/matrix/federated\",\"api/matrix/matrix\",\"api/onnx_systemds/convert\",\"api/onnx_systemds/onnx_helper\",\"api/onnx_systemds/operator_gen\",\"api/onnx_systemds/render\",\"api/onnx_systemds/util\",\"api/operator/algorithms\",\"api/operator/operation_node\",\"api/script_building/dag\",\"api/script_building/script\",\"api/utils/converters\",\"api/utils/helpers\",\"getting_started/install\",\"getting_started/simple_examples\",\"guide/algorithms\",\"guide/federated\",\"index\",\"onnx_systemds/onnx_systemds\",\"onnx_systemds/onnx_systemds_design\"],envversion:{\"sphinx.domains.c\":2,\"sphinx.domains.changeset\":1,\"sphinx.domains.citation\":1,\"sphinx.domains.cpp\":3,\"sphinx.domains.index\":1,\"sphinx.domains.javascript\":2,\"sphinx.domains.math\":2,\"sphinx.domains.python\":2,\"sphinx.domains.rst\":2,\"sphinx.domains.std\":1,\"sphinx.ext.todo\":2,sphinx:56},filenames:[\"api/context/systemds_context.rst\",\"api/matrix/data_gen.rst\",\"api/matrix/federated.rst\",\"api/matrix/matrix.rst\",\"api/onnx_systemds/convert.rst\",\"api/onnx_systemds/onnx_helper.rst\",\"api/onnx_systemds/operator_gen.rst\",\"api/onnx_systemds/render.rst\",\"api/onnx_systemds/util.rst\",\"api/operator/algorithms.rst\",\"api/operator/operation_node.rst\",\"api/script_building/dag.rst\",\"api/script_building/script.rst\",\"api/utils/converters.rst\",\"api/utils/helpers.rst\",\"getting_started/install.rst\",\"getting_started/simple_examples.rst\",\"guide/algorithms.rst\",\"guide/federated.rst\",\"index.rst\",\"onnx_systemds/onnx_systemds.rst\",\"onnx_systemds/onnx_systemds_design.rst\"],objects:{\"systemds.context\":{SystemDSContext:[0,0,1,\"\"]},\"systemds.context.SystemDSContext\":{__init__:[0,1,1,\"\"],close:[0,1,1,\"\"],get_stderr:[0,1,1,\"\"],get_stdout:[0,1,1,\"\"]},\"systemds.matrix\":{Federated:[2,0,1,\"\"],Matrix:[3,0,1,\"\"],data_gen:[1,2,0,\"-\"]},\"systemds.matrix.Federated\":{__init__:[2,1,1,\"\"]},\"systemds.matrix.Matrix\":{__init__:[3,1,1,\"\"],cholesky:[3,1,1,\"\"],code_line:[3,1,1,\"\"],compute:[3,1,1,\"\"],order:[3,1,1,\"\"],pass_python_data_to_prepared_script:[3,1,1,\"\"],rev:[3,1,1,\"\"],t:[3,1,1,\"\"]},\"systemds.matrix.data_gen\":{full:[1,3,1,\"\"],rand:[1,3,1,\"\"],seq:[1,3,1,\"\"]},\"systemds.onnx_systemds\":{convert:[4,2,0,\"-\"],onnx_helper:[5,2,0,\"-\"],operator_gen:[6,2,0,\"-\"],render:[7,2,0,\"-\"],util:[8,2,0,\"-\"]},\"systemds.onnx_systemds.convert\":{onnx2systemds:[20,3,1,\"\"]},\"systemds.onnx_systemds.onnx_helper\":{NodeTree:[5,0,1,\"\"],PreparedValue:[5,0,1,\"\"],get_graph_inputs_with_initializers:[5,3,1,\"\"],get_graph_inputs_without_initializers:[5,3,1,\"\"],get_value_info:[5,3,1,\"\"],load_model:[5,3,1,\"\"]},\"systemds.onnx_systemds.onnx_helper.NodeTree\":{remove_end_node:[5,1,1,\"\"]},\"systemds.onnx_systemds.operator_gen\":{gen_1input_1output_mat_operator:[6,3,1,\"\"],gen_2input_1output_operator:[6,3,1,\"\"],gen_simple_function_call:[6,3,1,\"\"]},\"systemds.onnx_systemds.render\":{gen_graph_functions:[7,3,1,\"\"],gen_model_header:[7,3,1,\"\"],gen_node_script:[7,3,1,\"\"],gen_script:[7,3,1,\"\"],render_function:[7,3,1,\"\"]},\"systemds.onnx_systemds.util\":{generate_function_name:[8,3,1,\"\"],resolve_systemds_root:[8,3,1,\"\"]},\"systemds.operator\":{OperationNode:[10,0,1,\"\"],algorithm:[9,2,0,\"-\"]},\"systemds.operator.OperationNode\":{\"var\":[10,1,1,\"\"],__init__:[10,1,1,\"\"],abs:[10,1,1,\"\"],acos:[10,1,1,\"\"],asin:[10,1,1,\"\"],atan:[10,1,1,\"\"],code_line:[10,1,1,\"\"],compute:[10
,1,1,\"\"],cos:[10,1,1,\"\"],cosh:[10,1,1,\"\"],get_lineage_trace:[10,1,1,\"\"],mean:[10,1,1,\"\"],pass_python_data_to_prepared_script:[10,1,1,\"\"],sin:[10,1,1,\"\"],sinh:[10,1,1,\"\"],sum:[10,1,1,\"\"],tan:[10,1,1,\"\"],tanh:[10,1,1,\"\"]},\"systemds.operator.algorithm\":{kmeans:[9,3,1,\"\"],l2svm:[9,3,1,\"\"],lm:[9,3,1,\"\"],pca:[9,3,1,\"\"]},\"systemds.script_building\":{dag:[11,2,0,\"-\"],script:[12,2,0,\"-\"]},\"systemds.script_building.dag\":{DAGNode:[11,0,1,\"\"],OutputType:[11,0,1,\"\"]},\"systemds.script_building.dag.DAGNode\":{code_line:[11,1,1,\"\"],compute:[11,1,1,\"\"],get_lineage_trace:[11,1,1,\"\"],pass_python_data_to_prepared_script:[11,1,1,\"\"]},\"systemds.script_building.script\":{DMLScript:[12,0,1,\"\"]},\"systemds.script_building.script.DMLScript\":{add_code:[12,1,1,\"\"],add_input_from_python:[12,1,1,\"\"],build_code:[12,1,1,\"\"],execute:[12,1,1,\"\"]},\"systemds.utils\":{converters:[13,2,0,\"-\"],helpers:[14,2,0,\"-\"]},\"systemds.utils.converters\":{matrix_block_to_numpy:[13,3,1,\"\"],numpy_to_matrix_block:[13,3,1,\"\"]},\"systemds.utils.helpers\":{create_params_string:[14,3,1,\"\"],get_module_dir:[14,3,1,\"\"]}},objnames:{\"0\":[\"py\",\"class\",\"Python class\"],\"1\":[\"py\",\"method\",\"Python method\"],\"2\":[\"py\",\"module\",\"Python module\"],\"3\":[\"py\",\"function\",\"Python function\"]},objtypes:{\"0\":\"py:class\",\"1\":\"py:method\",\"2\":\"py:module\",\"3\":\"py:function\"},terms:{\"0011352\":16,\"0014692\":16,\"00324092\":16,\"00616902\":16,\"0095087\":16,\"01039221\":16,\"01686351\":16,\"02033445\":16,\"02649209\":16,\"03839821\":16,\"04078623\":9,\"09961844\":9,\"0_242\":15,\"0_xxx\":15,\"100\":16,\"11538199\":9,\"18954599\":9,\"20386541\":9,\"2205885\":9,\"242\":15,\"26812763\":9,\"37957689\":9,\"39956035\":9,\"4327084\":9,\"43386048\":9,\"49858968\":9,\"54638565\":9,\"55358873\":9,\"57000751\":9,\"5x10\":16,\"8001\":18,\"8002\":18,\"8003\":18,\"abstract\":19,\"boolean\":9,\"case\":15,\"class\":[0,2,3,5,7,10,11,12,21],\"default\":[0,3,10],\"export\":[20,21],\"float\":[1,2,3,9,10],\"function\":[3,6,7,8,10,14,16,21],\"import\":[0,6,7,9,15,16,18,20],\"int\":[0,1,2,3,9,10],\"new\":[0,16],\"return\":[0,1,2,3,5,6,7,8,9,10,11,12,14,16],\"true\":3,\"try\":0,\"var\":10,\"while\":10,For:[11,12,13,14,21],Not:21,OPS:[11,12,13,14],One:16,The:[0,5,6,7,8,9,10,13,15,16,18,19,21],Then:[15,20],There:21,These:[19,21],Using:18,__eq__:10,__init__:[0,2,3,10],__lt__:10,abl:[18,21],abs:10,absolut:10,access:21,aco:10,action:[3,10,11],activ:[3,10,11],actual:[3,10,11,16,21],acycl:11,adapt:21,add:[6,12,21],add_cod:12,add_input_from_python:12,adding:21,addit:[3,10,11],addition:[6,15],addr1:18,addr2:18,addr3:18,address:[2,18],ads:21,after:[15,18,21],again:[0,21],algorithm:16,all:[0,1,3,5,7,10,11,14,15,18,21],allow:21,alreadi:[12,21],also:[0,7,18,20,21],although:3,alwai:0,analysi:9,ani:[0,11,21],annoi:0,anoth:16,apach:[15,19],appli:3,arcco:10,arcsin:10,arctan:10,arg:[2,3],argument:[6,9,21],arrai:[3,10,11,13,16,19],asarrai:18,asin:10,associ:[5,9],atan:10,attribut:21,automat:[0,16,21],avail:21,averag:9,avg_sample_size_per_centroid:9,axi:10,b08:15,base:21,basic:14,becaus:[0,15,21],becom:[18,21],befor:[3,10,11],begin:2,behavior:12,below:[3,21],besid:21,best:21,bin:21,bind:19,bit:15,block:[0,13],bodi:7,bool:[2,3,9,10,11,12],both:14,bottom:[18,21],box:9,buffer:21,build:[3,10,11,12,15,21],build_cod:12,built:16,builtin:[3,10,11],cach:12,calcul:[0,10,16],call:[3,6,10,11,14,16,18,20,21],can:[0,3,9,10,11,12,14,15,16,18,20,21],cell:1,center:9,centroid:9,chang:9,check:[3,5,15,18],choleski:
[elided: minified payload of the regenerated Sphinx searchindex.js for the Python API docs; only the page titles are human-readable: SystemDSContext, Data Generators, Federated, Matrix, Convert, Onnx Helper, Operator Gen, Render, Util, Algorithms, Operation Node, Dag, Script, Converters, Helpers, Install SystemDS, QuickStart, Built-in Algorithms, Federated Environment, SystemDS, QuickStart Onnx, Design]\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/operator/algorithm.py",
"new_path": "src/main/python/systemds/operator/algorithm.py",
"diff": "@@ -98,7 +98,7 @@ def pca(x: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\n\"\"\"\nPerforms PCA on the matrix input\n- :param x: Input dataset to perform K-Means on.\n+ :param x: Input dataset to perform Principal Componenet Analysis (PCA) on.\n:param K: The number of reduced dimensions.\n:param center: Boolean specifying if the input values should be centered.\n:param scale: Boolean specifying if the input values should be scaled.\n@@ -112,7 +112,7 @@ def pca(x: DAGNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\nif 'K' in kwargs.keys() and kwargs.get('K') < 1:\nraise ValueError(\n- \"Invalid number of clusters in K means, number must be integer above 0\")\n+ \"Invalid number of dimensions in PCA, number must be integer above 0\")\nif 'scale' in kwargs.keys():\nif kwargs.get('scale') == True:\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOCS] Fix typo in pca algortihm python |
49,720 | 02.09.2020 16:26:16 | -7,200 | f2940990d970ad3703d6ac3a45a7bb454fc9c5ba | [MINOR][DOC] Updates in built-in docs
This commit updates the doc file for new builtin "smote "
The commit also introduces,
1. A sanity check in mice.dml
2. minor fix in calculation of iterations in smote.dml
3. verbose variable in imputeByFD.dml | [
{
"change_type": "MODIFY",
"old_path": "docs/site/builtins-reference.md",
"new_path": "docs/site/builtins-reference.md",
"diff": "@@ -50,6 +50,7 @@ limitations under the License.\n* [`pnmf`-Function](#pnmf-function)\n* [`scale`-Function](#scale-function)\n* [`sigmoid`-Function](#sigmoid-function)\n+ * [`smote`-Function](#smote-function)\n* [`steplm`-Function](#steplm-function)\n* [`slicefinder`-Function](#slicefinder-function)\n* [`normalize`-Function](#normalize-function)\n@@ -535,14 +536,14 @@ using robust functional dependencies.\n### Usage\n```r\n-imputeByFD(F, sourceAttribute, targetAttribute, threshold)\n+imputeByFD(X, sourceAttribute, targetAttribute, threshold)\n```\n### Arguments\n| Name | Type | Default | Description |\n| :-------- | :------ | -------- | :---------- |\n-| F | String | -- | A data frame |\n+| X | Matrix[Double] | -- | Matrix of feature vectors (recoded matrix for non-numeric values) |\n| source | Integer | -- | Source attribute to use for imputation and error correction |\n| target | Integer | -- | Attribute to be fixed |\n| threshold | Double | -- | threshold value in interval [0, 1] for robust FDs |\n@@ -551,8 +552,14 @@ imputeByFD(F, sourceAttribute, targetAttribute, threshold)\n| Type | Description |\n| :----- | :---------- |\n-| String | Frame with possible imputations |\n+| Matrix[Double] | Matrix with possible imputations |\n+### Example\n+\n+```r\n+X = matrix(\"1 1 1 2 4 5 5 3 3 NaN 4 5 4 1\", rows=7, cols=2)\n+imputeByFD(X = X, source = 1, target = 2, threshold = 0.6, verbose = FALSE)\n+```\n## `KMeans`-Function\n@@ -777,25 +784,24 @@ mice(F, cMask, iter, complete, verbose)\n| Name | Type | Default | Description |\n| :------- | :------------- | -------- | :---------- |\n-| F | Frame[String] | required | Data Frame with one-dimensional row matrix with N columns where N>1. |\n+| X | Matrix[Double] | required | Data Matrix (Recoded Matrix for categorical features), ncol(X) > 1|\n| cMask | Matrix[Double] | required | 0/1 row vector for identifying numeric (0) and categorical features (1) with one-dimensional row matrix with column = ncol(F). |\n| iter | Integer | `3` | Number of iteration for multiple imputations. |\n-| complete | Integer | `3` | A complete dataset generated though a specific iteration. |\n| verbose | Boolean | `FALSE` | Boolean value. |\n### Returns\n| Type | Description |\n| :------------- | :---------- |\n-| Frame[String] | imputed dataset. |\n-| Frame[String] | A complete dataset generated though a specific iteration. |\n+| Matrix[Double] | imputed dataset. |\n+\n### Example\n```r\n-F = as.frame(matrix(\"4 3 2 8 7 8 5\", rows=1, cols=7))\n+F = matrix(\"4 3 NaN 8 7 8 5 NaN 6\", rows=3, cols=3)\ncMask = round(rand(rows=1,cols=ncol(F),min=0,max=1))\n-[dataset, singleSet] = mice(F, cMask, iter = 3, complete = 3, verbose = FALSE)\n+dataset = mice(F, cMask, iter = 3, verbose = FALSE)\n```\n## `multiLogReg`-Function\n@@ -936,7 +942,43 @@ sigmoid(X)\nX = rand (rows = 20, cols = 10)\nY = sigmoid(X)\n```\n+## `smote`-Function\n+\n+The `smote`-function (Synthetic Minority Oversampling Technique) implements a classical techniques for handling class imbalance.\n+The built-in takes the samples from minority class and over-sample them by generating the synthesized samples.\n+The built-in accepts two parameters s and k. The parameter s define the number of synthesized samples to be generated\n+ i.e., over-sample the minority class by s time, where s is the multiple of 100. For given 40 samples of minority class and\n+ s = 200 the smote will generate the 80 synthesized samples to over-sample the class by 200 percent. 
The parameter k is used to generate the\n+ k nearest neighbours for each minority class sample and then the neighbours are chosen randomly in synthesis process.\n+\n+### Usage\n+\n+```r\n+smote(X, s, k, verbose);\n+```\n+\n+### Arguments\n+\n+| Name | Type | Default | Description |\n+| :------ | :------------- | -------- | :---------- |\n+| X | Matrix[Double] | required | Matrix of feature vectors of minority class samples |\n+| s | Integer | 200 | Amount of SMOTE (percentage of oversampling), integral multiple of 100 |\n+| k | Integer | `1` | Number of nearest neighbour\n+| verbose | Boolean | `TRUE` | If `TRUE` print messages are activated |\n+\n+### Returns\n+\n+| Type | Description |\n+| :------------- | :---------- |\n+| Matrix[Double] | Matrix of (N/100) * X synthetic minority class samples\n+\n+\n+### Example\n+```r\n+X = rand (rows = 50, cols = 10)\n+B = smote(X = X, s=200, k=3, verbose=TRUE);\n+```\n## `steplm`-Function\nThe `steplm`-function (stepwise linear regression) implements a classical forward feature selection method.\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/imputeByFD.dml",
"new_path": "scripts/builtin/imputeByFD.dml",
"diff": "# X Double --- Matrix with possible imputations\n-m_imputeByFD = function(Matrix[Double] X, Integer sourceAttribute, Integer targetAttribute, Double threshold)\n+m_imputeByFD = function(Matrix[Double] X, Integer sourceAttribute, Integer targetAttribute, Double threshold, Boolean verbose = FALSE)\nreturn(Matrix[Double] X)\n{\n# sanity checks\n@@ -51,6 +51,9 @@ m_imputeByFD = function(Matrix[Double] X, Integer sourceAttribute, Integer targe\n# impute missing values and fix errors\nX[,targetAttribute] = imputeAndCorrect(X[,sourceAttribute], X[,targetAttribute], threshold)\n+\n+ if(verbose)\n+ print(\"output \\n\"+toString(X))\n}\nimputeAndCorrect = function(Matrix[Double] X, Matrix[Double] Y, Double threshold)\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/mice.dml",
"new_path": "scripts/builtin/mice.dml",
"diff": "# ---------------------------------------------------------------------------------------------\n# NAME TYPE DEFAULT MEANING\n# ---------------------------------------------------------------------------------------------\n-# X String --- Data Matrix (Recoded Matrix for categorical features)\n+# X Double --- Data Matrix (Recoded Matrix for categorical features)\n# cMask Double --- A 0/1 row vector for identifying numeric (0) and categorical features (1)\n# iter Integer 3 Number of iteration for multiple imputations\n# ---------------------------------------------------------------------------------------------\nm_mice= function(Matrix[Double] X, Matrix[Double] cMask, Integer iter = 3, Boolean verbose = FALSE)\nreturn(Matrix[Double] output)\n{\n+ if(ncol(X) < 2)\n+ stop(\"MICE can not be applied on single vectors.\n+ expected number of columns > 1 found: \"+ncol(X))\n+\nlastIndex = ncol(X);\nsumMax = sum(cMask);\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/smote.dml",
"new_path": "scripts/builtin/smote.dml",
"diff": "@@ -56,7 +56,7 @@ return (Matrix[Double] Y) {\n}\n# number of synthetic samples from each minority class sample\n- iter = (s/100)-1\n+ iter = (s/100)\n# matrix to store synthetic samples\nsynthetic_samples = matrix(0, 0, ncol(X))\nwhile(iter > 0)\n@@ -79,6 +79,8 @@ return (Matrix[Double] Y) {\n}\nY = synthetic_samples\n+ if(verbose)\n+ print(nrow(Y)+ \" synthesized samples generated.\")\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/imputeFD.dml",
"new_path": "src/test/scripts/functions/builtin/imputeFD.dml",
"diff": "@@ -31,7 +31,7 @@ for(i in 1: ncol(F)) {\njspecR = \"{ids:true, recode:[\"+s+\"]}\";\n[X, M] = transformencode(target=F, spec=jspecR);\n# call the method\n-Y = imputeByFD(X, $2, $3, $4);\n+Y = imputeByFD(X, $2, $3, $4, FALSE);\n# getting the actual data back\ndF = transformdecode(target=Y, spec=jspecR, meta=M);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/smote.dml",
"new_path": "src/test/scripts/functions/builtin/smote.dml",
"diff": "A = read($X);\n-B = smote(X = A, s=$S, k=$K);\n+B = smote(X = A, s=$S, k=$K, verbose=TRUE);\n# test if all point fall in same cluster (closed to each other)\n# read some new data T != A\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR][DOC] Updates in built-in docs
This commit updates the doc file for new builtin "smote "
The commit also introduces,
1. A sanity check in mice.dml
2. minor fix in calculation of iterations in smote.dml
3. verbose variable in imputeByFD.dml |
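The doc text added in this commit describes the SMOTE synthesis loop: take each minority-class row, find its k nearest neighbours, and interpolate between the row and a randomly chosen neighbour, s/100 times per row. The builtin itself is DML (see the smote.dml hunk above); the following is only a minimal, self-contained Java sketch of that logic, with hypothetical class and method names, dense `double[][]` features, Euclidean distance, and the assumption that k is smaller than the number of rows.

```java
import java.util.Arrays;
import java.util.Random;

// Minimal SMOTE sketch: for each minority row, synthesize s/100 samples by
// interpolating towards a random one of its k nearest neighbours.
public class SmoteSketch {

  public static double[][] smote(double[][] X, int s, int k, long seed) {
    int n = X.length, m = X[0].length, copies = s / 100;
    double[][] out = new double[n * copies][m];
    Random rnd = new Random(seed);
    int o = 0;
    for (int i = 0; i < n; i++) {
      int[] nn = kNearest(X, i, k);
      for (int c = 0; c < copies; c++, o++) {
        double[] nb = X[nn[rnd.nextInt(k)]];
        double gap = rnd.nextDouble(); // interpolation factor in [0,1)
        for (int j = 0; j < m; j++)
          out[o][j] = X[i][j] + gap * (nb[j] - X[i][j]);
      }
    }
    return out;
  }

  // brute-force k nearest neighbours of row i (excluding i itself)
  private static int[] kNearest(double[][] X, int i, int k) {
    int n = X.length;
    double[] d = new double[n];
    Integer[] idx = new Integer[n];
    for (int r = 0; r < n; r++) {
      idx[r] = r;
      for (int j = 0; j < X[0].length; j++) {
        double diff = X[r][j] - X[i][j];
        d[r] += diff * diff;
      }
    }
    d[i] = Double.MAX_VALUE; // never pick the sample itself
    Arrays.sort(idx, (a, b) -> Double.compare(d[a], d[b]));
    int[] nn = new int[k];
    for (int r = 0; r < k; r++)
      nn[r] = idx[r];
    return nn;
  }
}
```

For s = 200 and 40 input rows this produces 80 synthesized rows, matching the "over-sample by 200 percent" example in the doc text above.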
49,738 | 02.09.2020 20:26:04 | -7,200 | 4a604b66e6444902ef903866c6bbb9f15c3d666d | Fix parfor deep copy handling on eval, missing tests
This patch fixes the currently failing test on pipeline enumeration and
adds the two new packages 'pipelines', and 'privacy' to the github
workflows. | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/functionsTests.yml",
"new_path": ".github/workflows/functionsTests.yml",
"diff": "@@ -68,6 +68,8 @@ jobs:\nnary,\nparamserv,\nparfor,\n+ pipelines,\n+ privacy,\nquaternary,\nrecompile,\nreorg,\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/util/ProgramConverter.java",
"new_path": "src/main/java/org/apache/sysds/runtime/util/ProgramConverter.java",
"diff": "@@ -44,6 +44,7 @@ import org.apache.sysds.parser.FunctionStatement;\nimport org.apache.sysds.parser.FunctionStatementBlock;\nimport org.apache.sysds.parser.IfStatement;\nimport org.apache.sysds.parser.IfStatementBlock;\n+import org.apache.sysds.parser.ParForStatement;\nimport org.apache.sysds.parser.ParForStatementBlock;\nimport org.apache.sysds.parser.ParForStatementBlock.ResultVar;\nimport org.apache.sysds.parser.StatementBlock;\n@@ -543,7 +544,8 @@ public class ProgramConverter\nForStatementBlock orig = (ForStatementBlock) sb;\nForStatementBlock fsb = createForStatementBlockCopy(orig, true);\nForStatement origstmt = (ForStatement) orig.getStatement(0);\n- ForStatement fstmt = new ForStatement(); //only shallow\n+ ForStatement fstmt = (origstmt instanceof ParForStatement) ?\n+ new ParForStatement() : new ForStatement(); //only shallow\nfstmt.setPredicate(origstmt.getIterablePredicate());\nfsb.setStatements(CollectionUtils.asArrayList(fstmt));\nfor( StatementBlock c : origstmt.getBody() )\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2655] Fix parfor deep copy handling on eval, missing tests
This patch fixes the currently failing test on pipeline enumeration and
adds the two new packages 'pipelines', and 'privacy' to the github
workflows. |
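The one-line fix in ProgramConverter above is an instance of a general pitfall: a copy routine that hard-codes the base class silently drops subtype semantics, so a parfor deep-copied through eval ran as a plain (sequential) for loop. A generic sketch of the pattern, with hypothetical class names rather than the SystemDS parser classes:

```java
// Subtype-preserving shallow copy: copying a ParFor as a plain For loses
// the parallel-loop semantics downstream, so the copy checks the runtime type.
class ForStmt { /* sequential loop */ }
class ParForStmt extends ForStmt { /* parallel loop */ }

class CopyUtil {
  static ForStmt shallowCopy(ForStmt orig) {
    // before the fix: always `new ForStmt()`, losing the ParForStmt subtype
    return (orig instanceof ParForStmt) ? new ParForStmt() : new ForStmt();
  }
}
```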
49,738 | 02.09.2020 22:36:09 | -7,200 | a0b195eebe8d703875b9e2cb6016041ef959ed19 | Fix robustness spark transform encode (empty partitions)
This patch fixes an edge cases of spark transform encode (specifically
recode and dummy code) when spark partitions are completely empty. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/spark/MultiReturnParameterizedBuiltinSPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/spark/MultiReturnParameterizedBuiltinSPInstruction.java",
"diff": "@@ -254,9 +254,11 @@ public class MultiReturnParameterizedBuiltinSPInstruction extends ComputationSPI\nthrows Exception\n{\n//build meta data (e.g., recode maps)\n- if( _raEncoder != null )\n+ if( _raEncoder != null ) {\n+ _raEncoder.prepareBuildPartial();\nwhile( iter.hasNext() )\n_raEncoder.buildPartial(iter.next()._2());\n+ }\n//output recode maps as columnID - token pairs\nArrayList<Tuple2<Integer,Object>> ret = new ArrayList<>();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/transform/encode/EncoderRecode.java",
"new_path": "src/main/java/org/apache/sysds/runtime/transform/encode/EncoderRecode.java",
"diff": "@@ -122,13 +122,15 @@ public class EncoderRecode extends Encoder\nmap.put(key, Long.valueOf(map.size()+1));\n}\n- public void buildPartial(FrameBlock in) {\n- if( !isApplicable() )\n- return;\n-\n+ public void prepareBuildPartial() {\n//ensure allocated partial recode map\nif( _rcdMapsPart == null )\n_rcdMapsPart = new HashMap<>();\n+ }\n+\n+ public void buildPartial(FrameBlock in) {\n+ if( !isApplicable() )\n+ return;\n//construct partial recode map (tokens w/o codes)\n//iterate over columns for sequential access\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinMiceTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinMiceTest.java",
"diff": "@@ -51,42 +51,34 @@ public class BuiltinMiceTest extends AutomatedTestBase {\nrunMiceNominalTest(mask, 1, false, LopProperties.ExecType.CP);\n}\n-// @Test\n-// public void testMiceMixSpark() {\n-// double[][] mask = {{ 0.0, 0.0, 1.0, 1.0, 0.0}};\n-// runMiceNominalTest(mask, 1, LopProperties.ExecType.SPARK);\n-// }\n-\n@Test\npublic void testMiceNumberCP() {\ndouble[][] mask = {{ 0.0, 0.0, 0.0, 0.0, 0.0}};\nrunMiceNominalTest(mask, 2, false, LopProperties.ExecType.CP);\n}\n-// @Test\n-// public void testMiceNumberSpark() {\n-// double[][] mask = {{ 0.0, 0.0, 0.0, 0.0, 0.0}};\n-// runMiceNominalTest(mask, 2, LopProperties.ExecType.SPARK);\n-// }\n-\n@Test\npublic void testMiceCategoricalCP() {\ndouble[][] mask = {{ 1.0, 1.0, 1.0, 1.0, 1.0}};\nrunMiceNominalTest(mask, 3, false, LopProperties.ExecType.CP);\n}\n-// @Test\n-// public void testMiceCategoricalSpark() {\n-// double[][] mask = {{ 1.0, 1.0, 1.0, 1.0, 1.0}};\n-// runMiceNominalTest(mask, 3, LopProperties.ExecType.SPARK);\n-// }\n-\n@Test\npublic void testMiceMixLineageReuseCP() {\ndouble[][] mask = {{ 0.0, 0.0, 1.0, 1.0, 0.0}};\nrunMiceNominalTest(mask, 1, true, LopProperties.ExecType.CP);\n}\n+ //added a single, relatively-fast spark test, others seem infeasible\n+ //as forcing every operation to spark takes too long for complex,\n+ //composite builtins like mice.\n+\n+ @Test\n+ public void testMiceNumberSpark() {\n+ double[][] mask = {{ 0.0, 0.0, 0.0, 0.0, 0.0}};\n+ runMiceNominalTest(mask, 2, false, LopProperties.ExecType.SPARK);\n+ }\n+\nprivate void runMiceNominalTest(double[][] mask, int testType, boolean lineage, LopProperties.ExecType instType) {\nTypes.ExecMode platformOld = setExecMode(instType);\ntry {\n@@ -96,8 +88,8 @@ public class BuiltinMiceTest extends AutomatedTestBase {\nprogramArgs = new String[]{\"-nvargs\", \"X=\" + DATASET, \"Mask=\"+input(\"M\"),\n\"iteration=\" + iter, \"dataN=\" + output(\"N\"), \"dataC=\" + output(\"C\")};\nif (lineage) {\n- String[] lin = new String[] {\"-stats\",\"-lineage\", ReuseCacheType.REUSE_HYBRID.name().toLowerCase()};\n- programArgs = (String[]) ArrayUtils.addAll(programArgs, lin);\n+ programArgs = (String[]) ArrayUtils.addAll(programArgs, new String[] {\n+ \"-stats\",\"-lineage\", ReuseCacheType.REUSE_HYBRID.name().toLowerCase()});\n}\nwriteInputMatrixWithMTD(\"M\", mask, true);\n@@ -125,18 +117,16 @@ public class BuiltinMiceTest extends AutomatedTestBase {\n}\n}\n- private void testNumericOutput()\n- {\n+ private void testNumericOutput() {\n//compare matrices\nHashMap<MatrixValue.CellIndex, Double> dmlfileN = readDMLMatrixFromHDFS(\"N\");\nHashMap<MatrixValue.CellIndex, Double> rfileN = readRMatrixFromFS(\"N\");\n// compare numerical imputations\nTestUtils.compareMatrices(dmlfileN, rfileN, eps, \"Stat-DML\", \"Stat-R\");\n-\n}\n- private void testCategoricalOutput()\n- {\n+\n+ private void testCategoricalOutput() {\nHashMap<MatrixValue.CellIndex, Double> dmlfileC = readDMLMatrixFromHDFS(\"C\");\nHashMap<MatrixValue.CellIndex, Double> rfileC = readRMatrixFromFS(\"C\");\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2656] Fix robustness spark transform encode (empty partitions)
This patch fixes an edge cases of spark transform encode (specifically
recode and dummy code) when spark partitions are completely empty. |
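The fix above splits map allocation out of the build loop: previously the partial recode map was allocated lazily inside `buildPartial`, so a completely empty Spark partition never allocated it and the later merge failed. A generic sketch of the eager-allocation pattern, with a hypothetical helper rather than the Encoder API itself:

```java
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

// Allocate the partial map *before* the partition loop, so an empty
// partition still yields a valid (empty) map instead of a null result.
class PartialBuildSketch {
  static Map<String, Long> buildPartial(Iterator<String> partition) {
    Map<String, Long> map = new HashMap<>(); // prepareBuildPartial()
    while (partition.hasNext()) {            // may run zero times
      String token = partition.next();       // buildPartial(...)
      map.putIfAbsent(token, (long) (map.size() + 1));
    }
    return map; // never null, even for empty partitions
  }
}
```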
49,720 | 03.09.2020 21:51:40 | -7,200 | f7cd343af4fcc4b0e1d9e490c409e8cbdf0d0058 | [MINOR] Test docker file update
changes to resolve issues with R packages. | [
{
"change_type": "MODIFY",
"old_path": "docker/testsysds.Dockerfile",
"new_path": "docker/testsysds.Dockerfile",
"diff": "@@ -39,7 +39,10 @@ RUN wget http://archive.apache.org/dist/maven/maven-3/$MAVEN_VERSION/binaries/ap\n# Install Extras\nRUN apt-get update -qq && \\\napt-get upgrade -y && \\\n- apt-get install openjdk-8-jdk-headless -y\n+ apt-get install openjdk-8-jdk-headless -y && \\\n+ apt-get install libcurl4-openssl-dev -y && \\\n+ apt-get install libxml2-dev -y && \\\n+ apt-get install r-cran-xml -y\nCOPY ./src/test/scripts/installDependencies.R installDependencies.R\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Test docker file update
changes to resolve issues with R packages. |
49,738 | 03.09.2020 22:12:30 | -7,200 | 92884d6b8f11451d4ffc018720ee0a24e8296fb3 | New slice finding algorithm (dml implementation)
This patch adds the new slice finding implementation via linear algebra
(SliceLine). Even without full pruning, this implementation performs
exact slice finding for the Salaries dataset in 2.6s (incl compilation
and startup). | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/slicing/slicing.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+#-------------------------------------------------------------\n+# X Input matrix (integer encoded [1..v])\n+# e error vector (classification accuracy, l2 norm, etc)\n+# k top-K subsets / slices\n+# minSup minimum support (min number of rows per slice)\n+# w weight [0,1]: 0 only size, 1 only error\n+# ------------------------------------------------------------\n+# TK top-k slices (k x ncol(X) if successful)\n+# TKC score, size, error of slices (k x 3)\n+# ------------------------------------------------------------\n+\n+slicing = function(Matrix[Double] X, Matrix[Double] e, Integer k = 4, Integer minSup = 4, Double w = 0.5, Boolean verbose = FALSE)\n+ return(Matrix[Double] TK, Matrix[Double] TKC)\n+{\n+ m = nrow(X);\n+ n = ncol(X);\n+\n+ # prepare offset vectors and one-hot encoded X\n+ fdom = colMaxs(X);\n+ foffb = t(cumsum(t(fdom))) - fdom;\n+ foffe = t(cumsum(t(fdom)))\n+ rix = matrix(seq(1,m)%*%matrix(1,1,n), m*n, 1)\n+ cix = matrix(X + foffb, m*n, 1);\n+ X2 = table(rix, cix); #one-hot encoded\n+\n+ # initialize interesting statistics\n+ n2 = ncol(X2); # one-hot encoded features\n+ eAvg = sum(e) / m; # average error\n+ CID = seq(1,n2); # column IDs\n+ cCnts = t(colSums(X2)); # column counts\n+ err = t(t(e) %*% X2) # total error vector\n+\n+ if( verbose ) {\n+ drop = as.integer(sum(cCnts < minSup));\n+ print(\"SliceFinder: dropping \"+drop+\"/\"+n2+\" features below minSup = \"+minSup+\".\");\n+ }\n+\n+ # working set of active slices (#attr x #slices) and top k\n+ selCols = (cCnts >= minSup);\n+ attr = removeEmpty(target=CID, margin=\"rows\", select=selCols);\n+ cCnts = removeEmpty(target=cCnts, margin=\"rows\", select=selCols);\n+ err = removeEmpty(target=err, margin=\"rows\", select=selCols);\n+ S = table(seq(1,nrow(attr)), attr, nrow(attr), n2);\n+ continue = ncol(S) > 0 & sum(S) > 0;\n+ level = 1;\n+\n+ # score 1-slices and create initial top-k\n+ R = scoreSlices(cCnts, err, m, eAvg, w);\n+\n+ [TK, TKC] = maintainTopK(S, R, matrix(0, 0, n2), matrix(0, 0, 3), k, minSup);\n+\n+ if( verbose ) {\n+ [maxsc, minsc] = analyzeTopK(TKC);\n+ print(\"SliceFinder: initial top-K: count=\"+nrow(TK)+\", max=\"+maxsc+\", min=\"+minsc)\n+ }\n+\n+ # lattice enumeration w/ size/error pruning, one iteration per level\n+ while( continue ) {\n+ level = level + 1;\n+\n+ # enumerate candidate join pairs, incl size/error pruning\n+ enumC = getPairedCandidates(S, R, TK, TKC, k, level, minSup, foffb, foffe);\n+\n+ if(verbose) {\n+ print(\"SliceFinder: level \"+level+\":\")\n+ print(\" -- generated paired slice candidates: \"+nrow(S)+\" -> \"+nrow(enumC));\n+ 
}\n+\n+ # extract and evaluate candidate slices\n+ # note: this could be done as a single matrix multiply, but to avoid\n+ # large intermediates we use repeated matrix-vector multiplication\n+ R = matrix(0, nrow(enumC), 3)\n+ parfor( i in 1:nrow(enumC) )\n+ R[i,] = evalSlice(X2, e, t(enumC[i,]), level, w);\n+\n+ # maintain top-k after evaluation\n+ [TK, TKC] = maintainTopK(S, R, TK, TKC, k, minSup);\n+\n+ # prune slices after evaluation and new top-K\n+ # TODO evaluate if useful -> more pruning if at least 1 below threhsold?\n+ [S, R] = pruneSlices(enumC, R, TK, TKC, k, minSup);\n+ #S = enumC;\n+\n+ if(verbose) {\n+ [maxsc, minsc] = analyzeTopK(TKC);\n+ print(\" -- after eval and pruning: \"+nrow(S));\n+ print(\" -- top-K: count=\"+nrow(TK)+\", max=\"+maxsc+\", min=\"+minsc);\n+ }\n+\n+ # termination condition (max #feature levels)\n+ continue = ncol(S) > 0 & sum(S) > 0 & level < n;\n+ }\n+\n+ if( verbose ) {\n+ print(sum(TK));\n+ print(\"SliceFinder: terminated at level \"+level+\":\\n\"\n+ + toString(TK) + \"\\n\" + toString(TKC));\n+ }\n+}\n+\n+maintainTopK = function(Matrix[Double] S, Matrix[Double] R, Matrix[Double] TK, Matrix[Double] TKC, Integer k, Integer minSup)\n+ return(Matrix[Double] TK, Matrix[Double] TKC)\n+{\n+ # prune invalid minSup and scores\n+ I = (R[,1] > 1) & (R[,3] >= minSup);\n+\n+ if( sum(I)!=0 ) {\n+ S = removeEmpty(target=S, margin=\"rows\", select=I);\n+ R = removeEmpty(target=R, margin=\"rows\", select=I);\n+\n+ # evaluated candidated and previous top-k\n+ slices = rbind(TK, S);\n+ scores = rbind(TKC, R);\n+\n+ # extract top-k\n+ IX = order(target=scores, by=1, decreasing=TRUE, index.return=TRUE);\n+ IX = IX[1:min(k,nrow(IX)),];\n+ P = table(seq(1,nrow(IX)), IX, nrow(IX), nrow(slices));\n+ TK = P %*% slices;\n+ TKC = P %*% scores;\n+ }\n+}\n+\n+analyzeTopK = function(Matrix[Double] TKC) return(Double maxsc, Double minsc) {\n+ maxsc = ifelse(nrow(TKC)>0, as.scalar(TKC[1,1]), NaN);\n+ minsc = ifelse(nrow(TKC)>0, as.scalar(TKC[nrow(TKC),1]), NaN);\n+}\n+\n+\n+scoreSlices = function(Matrix[Double] ss, Matrix[Double] se, Integer m, Double eAvg, Double w)\n+ return(Matrix[Double] C)\n+{\n+ sc = w * (se/ss / eAvg) + (1-w) * ss/m;\n+ C = cbind(sc, se, ss);\n+}\n+\n+evalSlice = function(Matrix[Double] X, Matrix[Double] e, Matrix[Double] s, Integer l, Double w = 0.5)\n+ return(Matrix[Double] C)\n+{\n+ I = (X %*% s) == l; # slice indicator\n+ ss = sum(I); # absolute slice size (nnz)\n+ se = as.scalar(t(I) %*% e); # absolute slice error\n+\n+ # score of relative error and relative size\n+ sc = w * (se/ss / sum(e)/nrow(X)) + (1-w) * ss/nrow(X);\n+ C = t(as.matrix(list(sc, se, ss)));\n+}\n+\n+getPairedCandidates = function(Matrix[Double] S, Matrix[Double] R, Matrix[Double] TK, Matrix[Double] TKC, Integer k, Integer level, Integer minSup, Matrix[Double] foffb, Matrix[Double] foffe)\n+ return(Matrix[Double] P)\n+{\n+while(FALSE){}\n+ # join compatible slices (without self)\n+ join = S %*% t(S) == (level-2)\n+\n+ # pruning by size (at least one below threshold)\n+ vsize = outer(R[,3], t(R[,3]), \"min\") >= minSup;\n+ I = join * vsize;\n+ I = upper.tri(target=I, diag=FALSE);\n+\n+ # pair construction\n+ nr = nrow(I); nc = ncol(I);\n+ rix = matrix(I * seq(1,nr), nr*nc, 1);\n+ cix = matrix(I * t(seq(1,nc)), nr*nc, 1);\n+ rix = removeEmpty(target=rix, margin=\"rows\");\n+ cix = removeEmpty(target=cix, margin=\"rows\");\n+ P1 = table(seq(1,nrow(rix)), rix, nrow(rix), nrow(S));\n+ P2 = table(seq(1,nrow(cix)), cix, nrow(rix), nrow(S));\n+ P = ((P1 %*% S) + (P2 %*% S)) != 0;\n+\n+ # 
prune invalid self joins (>1 bit per feature)\n+ I = matrix(1, nrow(P), 1);\n+ for( j in 1:ncol(foffb) ) {\n+ beg = as.scalar(foffb[1,j])+1;\n+ end = as.scalar(foffe[1,j]);\n+ I = I & (rowSums(P[,beg:end]) <= 1);\n+ }\n+ P = removeEmpty(target=P, margin=\"rows\", select=I);\n+\n+ # deduplication\n+ # TODO additional size pruning given dedup mapping\n+ ID = matrix(1, nrow(P), 1);\n+ dom = foffe-foffb;\n+ for( j in 1:ncol(dom) ) {\n+ beg = as.scalar(foffb[1,j])+1;\n+ end = as.scalar(foffe[1,j]);\n+ I = rowIndexMax(P[,beg:end]);\n+ prod = 1;\n+ if(j<ncol(dom))\n+ prod = prod(dom[1,(j+1):ncol(dom)])\n+ ID = ID + I * prod;\n+ }\n+ Dedup = removeEmpty(target=table(ID,seq(1,nrow(P))), margin=\"rows\") != 0\n+ P = Dedup %*% P\n+}\n+\n+pruneSlices = function(Matrix[Double] S, Matrix[Double] R, Matrix[Double] TK, Matrix[Double] TKC, Integer k, Integer minSup)\n+ return(Matrix[Double] S, Matrix[Double] R)\n+{\n+ I = R[,3] >= minSup;\n+ S = removeEmpty(target=S, margin=\"rows\", select=I);\n+ R = removeEmpty(target=R, margin=\"rows\", select=I);\n+}\n+\n+Forig = read(\"./Salaries.csv\", data_type=\"frame\", format=\"csv\", header=TRUE);\n+\n+F = Forig[,1:ncol(Forig)-1];\n+y = as.matrix(Forig[,ncol(Forig)]);\n+\n+# data preparation\n+jspec= \"{ ids:true, recode:[1,2,3,6], bin:[{id:4, method:equi-width, numbins:14},{id:5, method:equi-width, numbins:12}]}\"\n+[X,M] = transformencode(target=F, spec=jspec);\n+X = X[,2:ncol(X)]\n+\n+# learn model\n+B = lm(X=X, y=y, verbose=FALSE);\n+yhat = X %*% B;\n+e = (y-yhat)^2;\n+\n+# call slice finding\n+[S,C] = slicing(X=X, e=e, k=4, w=0.5, minSup=4, verbose=TRUE);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2641] New slice finding algorithm (dml implementation)
This patch adds the new slice finding implementation via linear algebra
(SliceLine). Even without full pruning, this implementation performs
exact slice finding for the Salaries dataset in 2.6s (incl compilation
and startup). |
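The core of the SliceLine script above is the scoring formula in `scoreSlices`, sc = w * ((se/ss) / eAvg) + (1-w) * ss/m, which balances a slice's relative error against its relative size; `maintainTopK` then keeps only slices with score above 1. A toy walk-through in Java with hypothetical numbers, chosen so the slice passes that filter:

```java
// Worked example of SliceLine's slice score on 4 rows:
//   sc = w * ((se/ss) / eAvg) + (1-w) * ss/m
public class SliceScoreSketch {
  public static void main(String[] args) {
    double[] e = {0.9, 0.1, 0.8, 0.2}; // per-row squared errors
    int[] inSlice = {1, 0, 1, 0};      // rows covered by the candidate slice
    int m = e.length;
    double w = 0.5;

    double eAvg = 0, ss = 0, se = 0;
    for (int i = 0; i < m; i++) {
      eAvg += e[i] / m;                         // average error = 0.5
      if (inSlice[i] == 1) { ss++; se += e[i]; } // ss = 2, se = 1.7
    }
    double sc = w * ((se / ss) / eAvg) + (1 - w) * ss / m;
    System.out.println(sc); // 0.5*(0.85/0.5) + 0.5*(2/4) ~= 1.1 > 1
  }
}
```

Here the slice's average error (0.85) is 1.7x the overall average (0.5), so it scores above 1 and would survive the top-K filter.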
49,689 | 31.08.2020 14:12:19 | -7,200 | 3b4b01937b98f7095a9460684fafdb99a9ad5df2 | [MINOR] Update release scripts | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "dev/release/old-release-build.sh",
"diff": "+#!/usr/bin/env bash\n+#-------------------------------------------------------------\n+#\n+# Modifications Copyright 2019 Graz University of Technology\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+function exit_with_usage {\n+ cat << EOF\n+release-build - Creates build distributions from a git commit hash or from HEAD.\n+SYNOPSIS\n+usage: release-build.sh [--release-prepare | --release-publish | --release-snapshot]\n+DESCRIPTION\n+Use maven infrastructure to create a project release package and publish\n+to staging release location (ToDo:release-location)\n+and maven staging release repository.\n+--release-prepare --releaseVersion=\"0.11.0\" --developmentVersion=\"0.11.0-SNAPSHOT\" [--releaseRc=\"rc1\"] [--tag=\"v0.11.0\"] [--gitCommitHash=\"a874b73\"]\n+This form execute maven release:prepare and upload the release candidate distribution\n+to the staging release location.\n+--release-publish --gitCommitHash=\"a874b73\"\n+Publish the maven artifacts of a release to the staging maven repository.\n+--release-snapshot [--gitCommitHash=\"a874b73\"]\n+Publish the maven snapshot artifacts to snapshots maven repository\n+OPTIONS\n+--releaseVersion - Release identifier used when publishing\n+--developmentVersion - Release identifier used for next development cyce\n+--releaseRc - Release RC identifier used when publishing, default 'rc1'\n+--tag - Release Tag identifier used when taging the release, default 'v$releaseVersion'\n+--gitCommitHash - Release tag, branch name or commit to build from, default master HEAD\n+--dryRun - Dry run only, mostly used for testing.\n+A GPG passphrase is expected as an environment variable\n+GPG_PASSPHRASE - Passphrase for GPG key used to sign release\n+EXAMPLES\n+release-build.sh --release-prepare --releaseVersion=\"0.11.0\" --developmentVersion=\"0.12.0-SNAPSHOT\"\n+release-build.sh --release-prepare --releaseVersion=\"0.11.0\" --developmentVersion=\"0.12.0-SNAPSHOT\" --releaseRc=\"rc1\" --tag=\"v0.11.0-rc1\"\n+release-build.sh --release-prepare --releaseVersion=\"0.11.0\" --developmentVersion=\"0.12.0-SNAPSHOT\" --releaseRc=\"rc1\" --tag=\"v0.11.0-rc1\" --gitCommitHash=\"a874b73\" --dryRun\n+# Create 0.12 RC2 builds from branch-0.12\n+./release-build.sh --release-prepare --releaseVersion=\"0.12.0\" --developmentVersion=\"0.12.1-SNAPSHOT\" --releaseRc=\"rc2\" --tag=\"v0.12.0-rc2\" --gitCommitHash=\"branch-0.12\"\n+release-build.sh --release-publish --gitCommitHash=\"a874b73\"\n+release-build.sh --release-publish --gitTag=\"v0.11.0-rc1\"\n+release-build.sh --release-snapshot\n+release-build.sh --release-snapshot --gitCommitHash=\"a874b73\"\n+EOF\n+ exit 1\n+}\n+\n+set -e\n+\n+if [ $# -eq 0 ]; then\n+ 
exit_with_usage\n+fi\n+\n+\n+# Process each provided argument configuration\n+while [ \"${1+defined}\" ]; do\n+ IFS=\"=\" read -ra PARTS <<< \"$1\"\n+ case \"${PARTS[0]}\" in\n+ --release-prepare)\n+ GOAL=\"release-prepare\"\n+ RELEASE_PREPARE=true\n+ shift\n+ ;;\n+ --release-publish)\n+ GOAL=\"release-publish\"\n+ RELEASE_PUBLISH=true\n+ shift\n+ ;;\n+ --release-snapshot)\n+ GOAL=\"release-snapshot\"\n+ RELEASE_SNAPSHOT=true\n+ shift\n+ ;;\n+ --gitCommitHash)\n+ GIT_REF=\"${PARTS[1]}\"\n+ shift\n+ ;;\n+ --gitTag)\n+ GIT_TAG=\"${PARTS[1]}\"\n+ shift\n+ ;;\n+ --releaseVersion)\n+ RELEASE_VERSION=\"${PARTS[1]}\"\n+ shift\n+ ;;\n+ --developmentVersion)\n+ DEVELOPMENT_VERSION=\"${PARTS[1]}\"\n+ shift\n+ ;;\n+ --releaseRc)\n+ RELEASE_RC=\"${PARTS[1]}\"\n+ shift\n+ ;;\n+ --tag)\n+ RELEASE_TAG=\"${PARTS[1]}\"\n+ shift\n+ ;;\n+ --dryRun)\n+ DRY_RUN=\"-DdryRun=true\"\n+ shift\n+ ;;\n+\n+ *help* | -h)\n+ exit_with_usage\n+ exit 0\n+ ;;\n+ -*)\n+ echo \"Error: Unknown option: $1\" >&2\n+ exit 1\n+ ;;\n+ *) # No more options\n+ break\n+ ;;\n+ esac\n+done\n+\n+if [[ -z \"$GPG_PASSPHRASE\" ]]; then\n+ echo 'The environment variable GPG_PASSPHRASE is not set. Enter the passphrase to'\n+ echo 'unlock the GPG signing key that will be used to sign the release!'\n+ echo\n+ stty -echo && printf \"GPG passphrase: \" && read GPG_PASSPHRASE && printf '\\n' && stty echo\n+fi\n+\n+if [[ \"$RELEASE_PREPARE\" == \"true\" && -z \"$RELEASE_VERSION\" ]]; then\n+ echo \"ERROR: --releaseVersion must be passed as an argument to run this script\"\n+ exit_with_usage\n+fi\n+\n+if [[ \"$RELEASE_PREPARE\" == \"true\" && -z \"$DEVELOPMENT_VERSION\" ]]; then\n+ echo \"ERROR: --developmentVersion must be passed as an argument to run this script\"\n+ exit_with_usage\n+fi\n+\n+if [[ \"$RELEASE_PUBLISH\" == \"true\" ]]; then\n+ if [[ \"$GIT_REF\" && \"$GIT_TAG\" ]]; then\n+ echo \"ERROR: Only one argumented permitted when publishing : --gitCommitHash or --gitTag\"\n+ exit_with_usage\n+ fi\n+ if [[ -z \"$GIT_REF\" && -z \"$GIT_TAG\" ]]; then\n+ echo \"ERROR: --gitCommitHash OR --gitTag must be passed as an argument to run this script\"\n+ exit_with_usage\n+ fi\n+fi\n+\n+if [[ \"$RELEASE_PUBLISH\" == \"true\" && \"$DRY_RUN\" ]]; then\n+ echo \"ERROR: --dryRun not supported for --release-publish\"\n+ exit_with_usage\n+fi\n+\n+if [[ \"$RELEASE_SNAPSHOT\" == \"true\" && \"$DRY_RUN\" ]]; then\n+ echo \"ERROR: --dryRun not supported for --release-publish\"\n+ exit_with_usage\n+fi\n+\n+# Commit ref to checkout when building\n+GIT_REF=${GIT_REF:-master}\n+if [[ \"$RELEASE_PUBLISH\" == \"true\" && \"$GIT_TAG\" ]]; then\n+ GIT_REF=\"tags/$GIT_TAG\"\n+fi\n+\n+#(pwd) doesn't work for me due to having a space in the path\n+#BASE_DIR=$(pwd)\n+BASE_DIR=\"/c/virtual\\ D/SystemDS/systemds\"\n+RELEASE_WORK_DIR=$BASE_DIR/target/release2\n+\n+MVN=\"mvn\"\n+PUBLISH_PROFILES=\"-Pdistribution,rat\"\n+\n+if [ -z \"$RELEASE_RC\" ]; then\n+ RELEASE_RC=\"rc1\"\n+fi\n+\n+if [ -z \"$RELEASE_TAG\" ]; then\n+ RELEASE_TAG=\"v$RELEASE_VERSION-$RELEASE_RC\"\n+fi\n+\n+#ToDo: release staging location\n+RELEASE_STAGING_LOCATION=\"${SYSTEMDS_ROOT}/temp\"\n+\n+\n+echo \" \"\n+echo \"-------------------------------------------------------------\"\n+echo \"------- Release preparation with the following parameters ---\"\n+echo \"-------------------------------------------------------------\"\n+echo \"Executing ==> $GOAL\"\n+echo \"Git reference ==> $GIT_REF\"\n+echo \"release version ==> $RELEASE_VERSION\"\n+echo \"development version ==> $DEVELOPMENT_VERSION\"\n+echo \"rc 
==> $RELEASE_RC\"\n+echo \"tag ==> $RELEASE_TAG\"\n+if [ \"$DRY_RUN\" ]; then\n+ echo \"dry run ? ==> true\"\n+fi\n+echo \" \"\n+echo \"Deploying to :\"\n+echo $RELEASE_STAGING_LOCATION\n+echo \" \"\n+\n+function checkout_code {\n+ # Checkout code\n+ eval rm -rf $RELEASE_WORK_DIR\n+ eval mkdir -p $RELEASE_WORK_DIR\n+ eval cd $RELEASE_WORK_DIR\n+ git clone https://github.com/apache/systemds.git\n+ cd systemds\n+ git checkout $GIT_REF\n+ git_hash=`git rev-parse --short HEAD`\n+ echo \"Checked out SystemDS git hash $git_hash\"\n+\n+ git clean -d -f -x\n+ #rm .gitignore\n+ #rm -rf .git\n+\n+ eval cd \"$BASE_DIR\" #return to base dir\n+}\n+\n+if [[ \"$RELEASE_PREPARE\" == \"true\" ]]; then\n+ echo \"Preparing release $RELEASE_VERSION\"\n+ # Checkout code\n+ checkout_code\n+ eval cd $RELEASE_WORK_DIR/systemds\n+ #cd /c/virtual\\ D/SystemDS/systemds/target/release/systemds\n+\n+ # Build and prepare the release\n+ # release:prepare only works on SNAPSHOT. Below command produces\n+ # artifacts with -SNAPSHOT, and also commits the pom changes to the repo\n+ $MVN $PUBLISH_PROFILES release:clean release:prepare $DRY_RUN \\\n+ -Darguments=\"-Dgpg.passphrase=\\\"$GPG_PASSPHRASE\\\" \\\n+ -DskipTests\" \\\n+ -DreleaseVersion=\"$RELEASE_VERSION\" -DdevelopmentVersion=\"$DEVELOPMENT_VERSION\" -Dtag=\"$RELEASE_TAG\"\n+\n+ # exit at this point to run followiing steps manually.\n+ echo \"WARNING: Set followinig enviornment variables and run rest of the steps for 'Release Prepare' \"\n+ echo\n+ echo \"MVN=$MVN\"\n+ echo \"PUBLISH_PROFILES=\\\"$PUBLISH_PROFILES\\\"\"\n+ echo \"DRY_RUN=$DRY_RUN\"\n+ echo \"GPG_PASSPHRASE=$GPG_PASSPHRASE\"\n+ echo \"RELEASE_VERSION=$RELEASE_VERSION\"\n+ echo \"RELEASE_RC=$RELEASE_RC\"\n+ echo \"DEVELOPMENT_VERSION=$DEVELOPMENT_VERSION\"\n+ echo \"RELEASE_TAG=$RELEASE_TAG\"\n+ echo \"RELEASE_WORK_DIR=$RELEASE_WORK_DIR\"\n+ echo \"RELEASE_STAGING_LOCATION=$RELEASE_STAGING_LOCATION\"\n+ echo \"BASE_DIR=$BASE_DIR\"\n+\n+ # As fix has been added below to update version information exit to update pom file is not needed.\n+ # exit 5\n+\n+ # Update dev/release/target/release/systemds/pom.xml with similar to following contents which is for 0.13.0 RC1\n+ # Update <version>0.13.0</version>\n+ # Update <tag>v0.13.0-rc1</tag>\n+ #sed -i .bak \"s|<version>$DEVELOPMENT_VERSION<\\/version>|<version>$RELEASE_VERSION<\\/version>|\" $BASE_DIR/target/release/systemds/pom.xml\n+ #sed -i .bak \"s|<tag>HEAD<\\/tag>|<tag>$RELEASE_TAG<\\/tag>|\" $BASE_DIR/target/release/systemds/pom.xml\n+\n+ # Remove SNAPSHOT from the version in pom\n+ sed -i \"s/<version>$RELEASE_VERSION-SNAPSHOT<\\/version>/<version>$RELEASE_VERSION<\\/version>/\" /c/virtual\\ D/SystemDS/systemds/target/release2/systemds/pom.xml\n+ eval cd $RELEASE_WORK_DIR/systemds\n+ ## Rerunning mvn with clean and package goals, as release:prepare changes ordeer for some dependencies like unpack and shade.\n+ $MVN $PUBLISH_PROFILES clean package $DRY_RUN \\\n+ -Darguments=\"-Dgpg.passphrase=\\\"$GPG_PASSPHRASE\\\" \\\n+ -DskipTests\" \\\n+ -DreleaseVersion=\"$RELEASE_VERSION\" -DdevelopmentVersion=\"$DEVELOPMENT_VERSION\" -Dtag=\"$RELEASE_TAG\" \\\n+\n+ # Pull the latest code (with committed pom changes) and deploy to the local target directory\n+ checkout_code\n+ # Remove SNAPSHOT from the version in pom\n+ sed -i \"s/<version>$RELEASE_VERSION-SNAPSHOT<\\/version>/<version>$RELEASE_VERSION<\\/version>/\" /c/virtual\\ D/SystemDS/systemds/target/release2/systemds/pom.xml\n+ eval cd $RELEASE_WORK_DIR/systemds\n+ GPG_OPTS=\"-Dgpg.keyname=$GPG_KEYID 
-Dgpg.passphrase=$GPG_PASSPHRASE\"\n+ # Deploy to /target folder for the next job to pick the artifacts up for there\n+ CMD=\"$MVN $PUBLISH_PROFILES deploy \\\n+ -DskiptTests \\\n+ -DaltDeploymentRepository=altDepRepo::default::file:./target \\\n+ ${GPG_OPTS}\"\n+\n+ echo \"Executing: \" \"$CMD\"\n+ $CMD\n+\n+ eval cd $RELEASE_WORK_DIR\n+ # To upload the artifacts to the svn-staging (dist.apache.org), call\n+ # the second script, svn_dev_upload.sh.\n+\n+# ToDo: release staging location\n+# if [ -z \"$DRY_RUN\" ]; then\n+# RELEASE_STAGING_LOCATION=\"https://dist.apache.org/repos/dist/dev/systemds/\"\n+# svn co $RELEASE_STAGING_LOCATION svn-release-staging\n+# mkdir -p svn-release-staging/$RELEASE_VERSION-$RELEASE_RC\n+# cp $RELEASE_WORK_DIR/systemml/target/systemml-*-bin.* svn-release-staging/$RELEASE_VERSION-$RELEASE_RC/\n+# cp $RELEASE_WORK_DIR/systemml/target/systemml-*-src.* svn-release-staging/$RELEASE_VERSION-$RELEASE_RC/\n+#\n+# cd svn-release-staging/$RELEASE_VERSION-$RELEASE_RC/\n+# rm -f *.asc\n+# for i in *.zip *.tgz *.tar.gz; do gpg --output $i.asc --detach-sig --armor $i; done\n+# rm -f *.sha512\n+# for i in *.zip *.tgz *.tar.gz; do shasum -a 512 $i > $i.sha512; done\n+#\n+# cd .. #exit $RELEASE_VERSION-$RELEASE_RC/\n+#\n+# svn add $RELEASE_VERSION-$RELEASE_RC/\n+# svn ci -m\"Apache SystemML $RELEASE_VERSION-$RELEASE_RC\"\n+# fi\n+\n+\n+ eval cd \"$BASE_DIR\" #exit target\n+\n+ exit 0\n+fi\n+\n+#ToDo: fix release deployment\n+if [[ \"$RELEASE_PUBLISH\" == \"true\" ]]; then\n+ echo \"Preparing release $RELEASE_VERSION\"\n+ # Checkout code\n+ checkout_code\n+ cd $RELEASE_WORK_DIR/systemds\n+\n+ #Deploy scala 2.10\n+# mvn -DaltDeploymentRepository=apache.releases.https::default::https://repository.apache.org/service/local/staging/deploy/maven2 clean package gpg:sign install:install deploy:deploy -DskiptTests -Darguments=\"-DskipTests -Dgpg.passphrase=\\\"$GPG_PASSPHRASE\\\"\" -Dgpg.passphrase=\"$GPG_PASSPHRASE\" $PUBLISH_PROFILES\n+\n+ mvn -DaltDeploymentRepository=$SYSTEMDS_ROOT/temp clean package gpg:sign install:install deploy:deploy -DskiptTests -Darguments=\"-DskipTests -Dgpg.passphrase=\\\"$GPG_PASSPHRASE\\\"\" -Dgpg.passphrase=\"$GPG_PASSPHRASE\" $PUBLISH_PROFILES\n+\n+ cd \"$BASE_DIR\" #exit target\n+\n+ exit 0\n+fi\n+\n+#ToDo: fix snapshot deployment\n+#if [[ \"$RELEASE_SNAPSHOT\" == \"true\" ]]; then\n+# # Checkout code\n+# checkout_code\n+# cd $RELEASE_WORK_DIR/systemds\n+#\n+# CURRENT_VERSION=$($MVN help:evaluate -Dexpression=project.version \\\n+# | grep -v INFO | grep -v WARNING | grep -v Download)\n+#\n+# # Publish Bahir Snapshots to Maven snapshot repo\n+# echo \"Deploying SystemDS SNAPSHOT at '$GIT_REF' ($git_hash)\"\n+# echo \"Publish version is $CURRENT_VERSION\"\n+# if [[ ! $CURRENT_VERSION == *\"SNAPSHOT\"* ]]; then\n+# echo \"ERROR: Snapshots must have a version containing SNAPSHOT\"\n+# echo \"ERROR: You gave version '$CURRENT_VERSION'\"\n+# exit 1\n+# fi\n+#\n+# #Deploy scala 2.10\n+# $MVN -DaltDeploymentRepository=apache.snapshots.https::default::https://repository.apache.org/content/repositories/snapshots clean package gpg:sign install:install deploy:deploy -DskiptTests -Darguments=\"-DskipTests -Dgpg.passphrase=\\\"$GPG_PASSPHRASE\\\"\" -Dgpg.passphrase=\"$GPG_PASSPHRASE\" $PUBLISH_PROFILES\n+#\n+# cd \"$BASE_DIR\" #exit target\n+# exit 0\n+#fi\n+\n+\n+cd \"$BASE_DIR\" #return to base dir\n+echo \"ERROR: wrong execution goals\"\n+exit_with_usage\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "dev/release/publish.sh",
"diff": "+#!/usr/bin/env bash\n+MVN=mvn\n+PUBLISH_PROFILES=\"-Pdistribution,rat\"\n+DRY_RUN=-DdryRun=true\n+GPG_PASSPHRASE=$1\n+RELEASE_TAG=v2.0\n+RELEASE_STAGING_LOCATION=\"/c/virtual\\ D/SystemDS/systemds/temp\"\n+BASE_DIR=\"/c/virtual\\ D/SystemDS/systemds\"\n+RELEASE_WORK_DIR=\"/c/virtual\\ D/SystemDS/systemds/target/release2\"\n+RELEASE_VERSION=2.0\n+RELEASE_RC=rc1\n+GIT_REF=-master\n+export GNUPGHOME=\"../../.gnupg_copy/\" #relative path\n+\n+function checkout_code {\n+ # Checkout code\n+ eval rm -rf $RELEASE_WORK_DIR\n+ eval mkdir -p $RELEASE_WORK_DIR\n+ eval cd $RELEASE_WORK_DIR\n+ git clone https://github.com/apache/systemds.git\n+ cd systemds\n+ git checkout $GIT_REF\n+ git_hash=`git rev-parse --short HEAD`\n+ echo \"Checked out SystemDS git hash $git_hash\"\n+\n+ git clean -d -f -x\n+ #rm .gitignore\n+ #rm -rf .git\n+\n+ eval cd \"$BASE_DIR\" #return to base dir\n+}\n+\n+\n+echo \"Preparing release $RELEASE_VERSION\"\n+# Checkout code\n+#checkout_code\n+eval cd $RELEASE_WORK_DIR/systemds\n+\n+#Deploy to apache maven repo.\n+#settings.xml in maven home contains the username/passwd corresponding to ID apache.release.hattps\n+mvn -X -DaltDeploymentRepository=apache.releases.https::default::https://repository.apache.org/service/local/staging/deploy/maven2 \\\n+clean package gpg:sign install:install deploy:deploy \\\n+-DskiptTests -Darguments=\"-DskipTests -Dgpg.passphrase=\\\"$GPG_PASSPHRASE\\\"\" -Dgpg.passphrase=\"$GPG_PASSPHRASE\" $PUBLISH_PROFILES \\\n+\n+exit 0\n+\n"
},
{
"change_type": "MODIFY",
"old_path": "dev/release/simple-release-build.sh",
"new_path": "dev/release/simple-release-build.sh",
"diff": "@@ -214,9 +214,14 @@ if [ -z ${SKIP_SIGN} ]; then\nfi\n# skipped mvn clean verify release:update-versions verify install:install deploy:deploy\n+#CMD=\"$MVN $PUBLISH_PROFILES deploy \\\n+# -DskiptTests \\\n+# -DaltDeploymentRepository=altDepRepo::default::file://$RELEASE_STAGING_LOCATION \\\n+# ${GPG_OPTS}\"\n+\nCMD=\"$MVN $PUBLISH_PROFILES deploy \\\n-DskiptTests \\\n- -DaltDeploymentRepository=altDepRepo::default::file://$RELEASE_STAGING_LOCATION \\\n+ -DaltDeploymentRepository=altDepRepo::default::file:///temp \\\n${GPG_OPTS}\"\necho \"Executing: \" \"$CMD\"\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "dev/release/svn_dev_upload.sh",
"diff": "+#!/usr/bin/env bash\n+MVN=mvn\n+PUBLISH_PROFILES=\"-Pdistribution,rat\"\n+DRY_RUN=-DdryRun=true\n+GPG_PASSPHRASE=$1\n+DEVELOPMENT_VERSION=2.0-SNAPSHOT\n+RELEASE_TAG=v2.0\n+RELEASE_STAGING_LOCATION=\"/c/virtual\\ D/SystemDS/systemds/temp\"\n+BASE_DIR=\"/c/virtual\\ D/SystemDS/systemds\"\n+RELEASE_WORK_DIR=\"/c/virtual\\ D/SystemDS/systemds/target/release2\"\n+RELEASE_VERSION=2.0\n+RELEASE_RC=rc1\n+GIT_REF=-master\n+#export GNUPGHOME=\"/c/virtual\\ D/SystemDS/systemds/target/.gnupg_copy\"\n+export GNUPGHOME=\"../../../target/.gnupg_copy/\"\n+\n+RELEASE_STAGING_REMOTE=\"https://dist.apache.org/repos/dist/dev/systemds/\"\n+eval cd $RELEASE_STAGING_LOCATION;\n+rm -rf svn-release-staging\n+# Checkout the artifacts\n+svn co $RELEASE_STAGING_REMOTE svn-release-staging\n+rm -rf svn-release-staging/$RELEASE_VERSION-$RELEASE_RC\n+# Create a new folder for this release\n+mkdir -p svn-release-staging/$RELEASE_VERSION-$RELEASE_RC\n+# Copy the artifacts from target\n+eval cp $RELEASE_WORK_DIR/systemds/target/systemds-*-bin.* svn-release-staging/$RELEASE_VERSION-$RELEASE_RC/\n+eval cp $RELEASE_WORK_DIR/systemds/target/systemds-*-src.* svn-release-staging/$RELEASE_VERSION-$RELEASE_RC/\n+\n+cd svn-release-staging/$RELEASE_VERSION-$RELEASE_RC/\n+rm -f *.asc\n+for i in *.zip *.tgz; do gpg --output $i.asc --detach-sig --armor $i; done\n+rm -f *.sha512\n+for i in *.zip *.tgz; do shasum -a 512 $i > $i.sha512; done\n+\n+cd .. #exit $RELEASE_VERSION-$RELEASE_RC/\n+\n+#svn add $RELEASE_VERSION-$RELEASE_RC/\n+svn add $(svn status | awk '{$1=\"\"; print $0}')\n+#svn ci -m\"Apache systemds $RELEASE_VERSION-$RELEASE_RC\"\n+#manually commit from tortoise\n+\n+exit 0\n+\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Update release scripts |
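The svn_dev_upload.sh script above signs each artifact with `gpg --detach-sig --armor` and checksums it with `shasum -a 512`. For illustration only, a Java equivalent of the checksum step using the standard MessageDigest API (the file name is a placeholder):

```java
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.MessageDigest;

// Computes a shasum-style SHA-512 line for one release artifact.
public class Sha512Sketch {
  public static void main(String[] args) throws Exception {
    byte[] data = Files.readAllBytes(Paths.get("systemds-2.0-bin.tgz"));
    byte[] digest = MessageDigest.getInstance("SHA-512").digest(data);
    StringBuilder hex = new StringBuilder();
    for (byte b : digest)
      hex.append(String.format("%02x", b)); // lowercase hex, like shasum
    System.out.println(hex + "  systemds-2.0-bin.tgz");
  }
}
```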
49,689 | 04.09.2020 15:40:03 | -7,200 | 3013558ba2e03865a63f0fa05831fea03e4ca0bf | [MINOR] Fix bugs in the release scripts | [
{
"change_type": "MODIFY",
"old_path": "dev/release/old-release-build.sh",
"new_path": "dev/release/old-release-build.sh",
"diff": "#!/usr/bin/env bash\n#-------------------------------------------------------------\n#\n-# Modifications Copyright 2019 Graz University of Technology\n-#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n#\n#-------------------------------------------------------------\n+\nfunction exit_with_usage {\ncat << EOF\nrelease-build - Creates build distributions from a git commit hash or from HEAD.\n"
},
{
"change_type": "MODIFY",
"old_path": "dev/release/publish.sh",
"new_path": "dev/release/publish.sh",
"diff": "#!/usr/bin/env bash\n+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\nMVN=mvn\nPUBLISH_PROFILES=\"-Pdistribution,rat\"\nDRY_RUN=-DdryRun=true\n"
},
{
"change_type": "MODIFY",
"old_path": "dev/release/svn_dev_upload.sh",
"new_path": "dev/release/svn_dev_upload.sh",
"diff": "#!/usr/bin/env bash\n+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\nMVN=mvn\nPUBLISH_PROFILES=\"-Pdistribution,rat\"\nDRY_RUN=-DdryRun=true\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix bugs in the release scripts |
49,689 | 05.09.2020 22:37:22 | -7,200 | 5596fcf0d77946cf11ff34085e8879552dd852be | Non-recursive construction of HOPs from Lineage
This patch implements a non-recursive version of HOP dag construction
from lineage dag, which fixes the stack overflow while re-computing
from lineage. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRecomputeUtils.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageRecomputeUtils.java",
"diff": "@@ -25,8 +25,10 @@ import java.util.HashMap;\nimport java.util.LinkedHashMap;\nimport java.util.List;\nimport java.util.Map;\n+import java.util.Stack;\nimport java.util.stream.Collectors;\n+import org.apache.commons.lang3.mutable.MutableInt;\nimport org.apache.sysds.api.DMLScript;\nimport org.apache.sysds.common.Types.DataType;\nimport org.apache.sysds.common.Types.OpOp1;\n@@ -100,7 +102,7 @@ public class LineageRecomputeUtils {\nroot.resetVisitStatusNR();\nMap<Long, Hop> operands = new HashMap<>();\nMap<String, Hop> partDagRoots = new HashMap<>();\n- rConstructHops(root, operands, partDagRoots, prog);\n+ constructHopsNR(root, operands, partDagRoots, prog);\nHop out = HopRewriteUtils.createTransientWrite(\nvarname, operands.get(rootId));\n@@ -134,17 +136,38 @@ public class LineageRecomputeUtils {\nprog.addProgramBlock(pb);\n}\n-\n- private static void rConstructHops(LineageItem item, Map<Long, Hop> operands, Map<String, Hop> partDagRoots, Program prog)\n+ private static void constructHopsNR(LineageItem item, Map<Long, Hop> operands, Map<String, Hop> partDagRoots, Program prog)\n{\n- if (item.isVisited())\n- return;\n-\n- //recursively process children (ordering by data dependencies)\n- if (!item.isLeaf())\n- for (LineageItem c : item.getInputs())\n- rConstructHops(c, operands, partDagRoots, prog);\n+ //NOTE: This method follows the same non-recursive\n+ //skeleton as explainLineageItemNR\n+ Stack<LineageItem> stackItem = new Stack<>();\n+ Stack<MutableInt> stackPos = new Stack<>();\n+ stackItem.push(item); stackPos.push(new MutableInt(0));\n+ while (!stackItem.empty()) {\n+ LineageItem tmpItem = stackItem.peek();\n+ MutableInt tmpPos = stackPos.peek();\n+ //check ascent condition - no item processing\n+ if (tmpItem.isVisited()) {\n+ stackItem.pop(); stackPos.pop();\n+ }\n+ //check ascent condition - append item\n+ else if( tmpItem.getInputs() == null\n+ || tmpItem.getInputs().length <= tmpPos.intValue() ) {\n+ constructSingleHop(tmpItem, operands, partDagRoots, prog);\n+ stackItem.pop(); stackPos.pop();\n+ tmpItem.setVisited();\n+ }\n+ //check descent condition\n+ else if( tmpItem.getInputs() != null ) {\n+ stackItem.push(tmpItem.getInputs()[tmpPos.intValue()]);\n+ tmpPos.increment();\n+ stackPos.push(new MutableInt(0));\n+ }\n+ }\n+ }\n+ private static void constructSingleHop(LineageItem item, Map<Long, Hop> operands, Map<String, Hop> partDagRoots, Program prog)\n+ {\n//process current lineage item\n//NOTE: we generate instructions from hops (but without rewrites) to automatically\n//handle execution types, rmvar instructions, and rewiring of inputs/outputs\n@@ -406,8 +429,6 @@ public class LineageRecomputeUtils {\nbreak;\n}\n}\n-\n- item.setVisited();\n}\n// Construct and compile the function body\n@@ -428,7 +449,7 @@ public class LineageRecomputeUtils {\nfor (int i=0; i<inputs.length; i++)\noperands.put((long)i, HopRewriteUtils.createTransientRead(inputs[i], inpHops.get(i))); //order preserving\n// Construct the Hop dag.\n- rConstructHops(patchRoot, operands, null, null);\n+ constructHopsNR(patchRoot, operands, null, null);\n// TWrite the func return (pass dag root to copy datatype)\nHop out = HopRewriteUtils.createTransientWrite(outname, operands.get(patchRoot.getId()));\n// Save the Hop dag\n@@ -518,6 +539,282 @@ public class LineageRecomputeUtils {\nthrow new DMLRuntimeException(\"Unsupported opcode: \"+item.getOpcode());\n}\n+ @Deprecated\n+ @SuppressWarnings(\"unused\")\n+ private static void rConstructHops(LineageItem item, Map<Long, Hop> operands, Map<String, Hop> 
partDagRoots, Program prog)\n+ {\n+ if (item.isVisited())\n+ return;\n+\n+ //recursively process children (ordering by data dependencies)\n+ if (!item.isLeaf())\n+ for (LineageItem c : item.getInputs())\n+ rConstructHops(c, operands, partDagRoots, prog);\n+\n+ //process current lineage item\n+ //NOTE: we generate instructions from hops (but without rewrites) to automatically\n+ //handle execution types, rmvar instructions, and rewiring of inputs/outputs\n+ switch (item.getType()) {\n+ case Creation: {\n+ if (item.getData().startsWith(LPLACEHOLDER)) {\n+ long phId = Long.parseLong(item.getData().substring(3));\n+ Hop input = operands.get(phId);\n+ operands.remove(phId);\n+ // Replace the placeholders with TReads\n+ operands.put(item.getId(), input); // order preserving\n+ break;\n+ }\n+ Instruction inst = InstructionParser.parseSingleInstruction(item.getData());\n+\n+ if (inst instanceof DataGenCPInstruction) {\n+ DataGenCPInstruction rand = (DataGenCPInstruction) inst;\n+ HashMap<String, Hop> params = new HashMap<>();\n+ if( rand.getOpcode().equals(\"rand\") ) {\n+ if( rand.output.getDataType() == DataType.TENSOR)\n+ params.put(DataExpression.RAND_DIMS, new LiteralOp(rand.getDims()));\n+ else {\n+ params.put(DataExpression.RAND_ROWS, new LiteralOp(rand.getRows()));\n+ params.put(DataExpression.RAND_COLS, new LiteralOp(rand.getCols()));\n+ }\n+ params.put(DataExpression.RAND_MIN, new LiteralOp(rand.getMinValue()));\n+ params.put(DataExpression.RAND_MAX, new LiteralOp(rand.getMaxValue()));\n+ params.put(DataExpression.RAND_PDF, new LiteralOp(rand.getPdf()));\n+ params.put(DataExpression.RAND_LAMBDA, new LiteralOp(rand.getPdfParams()));\n+ params.put(DataExpression.RAND_SPARSITY, new LiteralOp(rand.getSparsity()));\n+ params.put(DataExpression.RAND_SEED, new LiteralOp(rand.getSeed()));\n+ }\n+ else if( rand.getOpcode().equals(\"seq\") ) {\n+ params.put(Statement.SEQ_FROM, new LiteralOp(rand.getFrom()));\n+ params.put(Statement.SEQ_TO, new LiteralOp(rand.getTo()));\n+ params.put(Statement.SEQ_INCR, new LiteralOp(rand.getIncr()));\n+ }\n+ Hop datagen = new DataGenOp(OpOpDG.valueOf(rand.getOpcode().toUpperCase()),\n+ new DataIdentifier(\"tmp\"), params);\n+ datagen.setBlocksize(rand.getBlocksize());\n+ operands.put(item.getId(), datagen);\n+ } else if (inst instanceof VariableCPInstruction\n+ && ((VariableCPInstruction) inst).isCreateVariable()) {\n+ String parts[] = InstructionUtils.getInstructionPartsWithValueType(inst.toString());\n+ DataType dt = DataType.valueOf(parts[4]);\n+ ValueType vt = dt == DataType.MATRIX ? 
ValueType.FP64 : ValueType.STRING;\n+ HashMap<String, Hop> params = new HashMap<>();\n+ params.put(DataExpression.IO_FILENAME, new LiteralOp(parts[2]));\n+ params.put(DataExpression.READROWPARAM, new LiteralOp(Long.parseLong(parts[6])));\n+ params.put(DataExpression.READCOLPARAM, new LiteralOp(Long.parseLong(parts[7])));\n+ params.put(DataExpression.READNNZPARAM, new LiteralOp(Long.parseLong(parts[8])));\n+ params.put(DataExpression.FORMAT_TYPE, new LiteralOp(parts[5]));\n+ DataOp pread = new DataOp(parts[1].substring(5), dt, vt, OpOpData.PERSISTENTREAD, params);\n+ pread.setFileName(parts[2]);\n+ operands.put(item.getId(), pread);\n+ }\n+ else if (inst instanceof RandSPInstruction) {\n+ RandSPInstruction rand = (RandSPInstruction) inst;\n+ HashMap<String, Hop> params = new HashMap<>();\n+ if (rand.output.getDataType() == DataType.TENSOR)\n+ params.put(DataExpression.RAND_DIMS, new LiteralOp(rand.getDims()));\n+ else {\n+ params.put(DataExpression.RAND_ROWS, new LiteralOp(rand.getRows()));\n+ params.put(DataExpression.RAND_COLS, new LiteralOp(rand.getCols()));\n+ }\n+ params.put(DataExpression.RAND_MIN, new LiteralOp(rand.getMinValue()));\n+ params.put(DataExpression.RAND_MAX, new LiteralOp(rand.getMaxValue()));\n+ params.put(DataExpression.RAND_PDF, new LiteralOp(rand.getPdf()));\n+ params.put(DataExpression.RAND_LAMBDA, new LiteralOp(rand.getPdfParams()));\n+ params.put(DataExpression.RAND_SPARSITY, new LiteralOp(rand.getSparsity()));\n+ params.put(DataExpression.RAND_SEED, new LiteralOp(rand.getSeed()));\n+ Hop datagen = new DataGenOp(OpOpDG.RAND, new DataIdentifier(\"tmp\"), params);\n+ datagen.setBlocksize(rand.getBlocksize());\n+ operands.put(item.getId(), datagen);\n+ }\n+ break;\n+ }\n+ case Dedup: {\n+ // Create function call for each dedup entry\n+ String[] parts = item.getOpcode().split(LineageDedupUtils.DEDUP_DELIM); //e.g. dedup_R_SB13_0\n+ String name = parts[2] + parts[1] + parts[3]; //loopId + outVar + pathId\n+ List<Hop> finputs = Arrays.stream(item.getInputs())\n+ .map(inp -> operands.get(inp.getId())).collect(Collectors.toList());\n+ String[] inputNames = new String[item.getInputs().length];\n+ for (int i=0; i<item.getInputs().length; i++)\n+ inputNames[i] = LPLACEHOLDER + i; //e.g. 
IN#0, IN#1\n+ Hop funcOp = new FunctionOp(FunctionType.DML, DMLProgram.DEFAULT_NAMESPACE,\n+ name, inputNames, finputs, new String[] {parts[1]}, false);\n+\n+ // Cut the Hop dag after function calls\n+ partDagRoots.put(parts[1], funcOp);\n+ // Compile the dag and save\n+ constructBasicBlock(partDagRoots, parts[1], prog);\n+\n+ // Construct a Hop dag for the function body from the dedup patch, and compile\n+ Hop output = constructHopsDedupPatch(parts, inputNames, finputs, prog);\n+ // Create a TRead on the function o/p as a leaf for the next Hop dag\n+ // Use the function body root/return hop to propagate right data type\n+ operands.put(item.getId(), HopRewriteUtils.createTransientRead(parts[1], output));\n+ break;\n+ }\n+ case Instruction: {\n+ CPType ctype = InstructionUtils.getCPTypeByOpcode(item.getOpcode());\n+ SPType stype = InstructionUtils.getSPTypeByOpcode(item.getOpcode());\n+\n+ if (ctype != null) {\n+ switch (ctype) {\n+ case AggregateUnary: {\n+ Hop input = operands.get(item.getInputs()[0].getId());\n+ Hop aggunary = InstructionUtils.isUnaryMetadata(item.getOpcode()) ?\n+ HopRewriteUtils.createUnary(input, OpOp1.valueOfByOpcode(item.getOpcode())) :\n+ HopRewriteUtils.createAggUnaryOp(input, item.getOpcode());\n+ operands.put(item.getId(), aggunary);\n+ break;\n+ }\n+ case AggregateBinary: {\n+ Hop input1 = operands.get(item.getInputs()[0].getId());\n+ Hop input2 = operands.get(item.getInputs()[1].getId());\n+ Hop aggbinary = HopRewriteUtils.createMatrixMultiply(input1, input2);\n+ operands.put(item.getId(), aggbinary);\n+ break;\n+ }\n+ case AggregateTernary: {\n+ Hop input1 = operands.get(item.getInputs()[0].getId());\n+ Hop input2 = operands.get(item.getInputs()[1].getId());\n+ Hop input3 = operands.get(item.getInputs()[2].getId());\n+ Hop aggternary = HopRewriteUtils.createSum(\n+ HopRewriteUtils.createBinary(\n+ HopRewriteUtils.createBinary(input1, input2, OpOp2.MULT),\n+ input3, OpOp2.MULT));\n+ operands.put(item.getId(), aggternary);\n+ break;\n+ }\n+ case Unary:\n+ case Builtin: {\n+ Hop input = operands.get(item.getInputs()[0].getId());\n+ Hop unary = HopRewriteUtils.createUnary(input, item.getOpcode());\n+ operands.put(item.getId(), unary);\n+ break;\n+ }\n+ case Reorg: {\n+ operands.put(item.getId(), HopRewriteUtils.createReorg(\n+ operands.get(item.getInputs()[0].getId()), item.getOpcode()));\n+ break;\n+ }\n+ case Reshape: {\n+ ArrayList<Hop> inputs = new ArrayList<>();\n+ for(int i=0; i<5; i++)\n+ inputs.add(operands.get(item.getInputs()[i].getId()));\n+ operands.put(item.getId(), HopRewriteUtils.createReorg(inputs, ReOrgOp.RESHAPE));\n+ break;\n+ }\n+ case Binary: {\n+ //handle special cases of binary operations\n+ String opcode = (\"^2\".equals(item.getOpcode())\n+ || \"*2\".equals(item.getOpcode())) ?\n+ item.getOpcode().substring(0, 1) : item.getOpcode();\n+ Hop input1 = operands.get(item.getInputs()[0].getId());\n+ Hop input2 = operands.get(item.getInputs()[1].getId());\n+ Hop binary = HopRewriteUtils.createBinary(input1, input2, opcode);\n+ operands.put(item.getId(), binary);\n+ break;\n+ }\n+ case Ternary: {\n+ operands.put(item.getId(), HopRewriteUtils.createTernary(\n+ operands.get(item.getInputs()[0].getId()),\n+ operands.get(item.getInputs()[1].getId()),\n+ operands.get(item.getInputs()[2].getId()), item.getOpcode()));\n+ break;\n+ }\n+ case Ctable: { //e.g., ctable\n+ if( item.getInputs().length==3 )\n+ operands.put(item.getId(), HopRewriteUtils.createTernary(\n+ operands.get(item.getInputs()[0].getId()),\n+ operands.get(item.getInputs()[1].getId()),\n+ 
operands.get(item.getInputs()[2].getId()), OpOp3.CTABLE));\n+ else if( item.getInputs().length==5 )\n+ operands.put(item.getId(), HopRewriteUtils.createTernary(\n+ operands.get(item.getInputs()[0].getId()),\n+ operands.get(item.getInputs()[1].getId()),\n+ operands.get(item.getInputs()[2].getId()),\n+ operands.get(item.getInputs()[3].getId()),\n+ operands.get(item.getInputs()[4].getId()), OpOp3.CTABLE));\n+ break;\n+ }\n+ case BuiltinNary: {\n+ String opcode = item.getOpcode().equals(\"n+\") ? \"plus\" : item.getOpcode();\n+ operands.put(item.getId(), HopRewriteUtils.createNary(\n+ OpOpN.valueOf(opcode.toUpperCase()), createNaryInputs(item, operands)));\n+ break;\n+ }\n+ case ParameterizedBuiltin: {\n+ operands.put(item.getId(), constructParameterizedBuiltinOp(item, operands));\n+ break;\n+ }\n+ case MatrixIndexing: {\n+ operands.put(item.getId(), constructIndexingOp(item, operands));\n+ break;\n+ }\n+ case MMTSJ: {\n+ //TODO handling of tsmm type left and right -> placement transpose\n+ Hop input = operands.get(item.getInputs()[0].getId());\n+ Hop aggunary = HopRewriteUtils.createMatrixMultiply(\n+ HopRewriteUtils.createTranspose(input), input);\n+ operands.put(item.getId(), aggunary);\n+ break;\n+ }\n+ case Variable: {\n+ if( item.getOpcode().startsWith(\"cast\") )\n+ operands.put(item.getId(), HopRewriteUtils.createUnary(\n+ operands.get(item.getInputs()[0].getId()),\n+ OpOp1.valueOfByOpcode(item.getOpcode())));\n+ else //cpvar, write\n+ operands.put(item.getId(), operands.get(item.getInputs()[0].getId()));\n+ break;\n+ }\n+ default:\n+ throw new DMLRuntimeException(\"Unsupported instruction \"\n+ + \"type: \" + ctype.name() + \" (\" + item.getOpcode() + \").\");\n+ }\n+ }\n+ else if( stype != null ) {\n+ switch(stype) {\n+ case Reblock: {\n+ Hop input = operands.get(item.getInputs()[0].getId());\n+ input.setBlocksize(ConfigurationManager.getBlocksize());\n+ input.setRequiresReblock(true);\n+ operands.put(item.getId(), input);\n+ break;\n+ }\n+ case Checkpoint: {\n+ Hop input = operands.get(item.getInputs()[0].getId());\n+ operands.put(item.getId(), input);\n+ break;\n+ }\n+ case MatrixIndexing: {\n+ operands.put(item.getId(), constructIndexingOp(item, operands));\n+ break;\n+ }\n+ case GAppend: {\n+ operands.put(item.getId(), HopRewriteUtils.createBinary(\n+ operands.get(item.getInputs()[0].getId()),\n+ operands.get(item.getInputs()[1].getId()), OpOp2.CBIND));\n+ break;\n+ }\n+ default:\n+ throw new DMLRuntimeException(\"Unsupported instruction \"\n+ + \"type: \" + stype.name() + \" (\" + item.getOpcode() + \").\");\n+ }\n+ }\n+ else\n+ throw new DMLRuntimeException(\"Unsupported instruction: \" + item.getOpcode());\n+ break;\n+ }\n+ case Literal: {\n+ CPOperand op = new CPOperand(item.getData());\n+ operands.put(item.getId(), ScalarObjectFactory\n+ .createLiteralOp(op.getValueType(), op.getName()));\n+ break;\n+ }\n+ }\n+\n+ item.setVisited();\n+ }\n// Below class represents a single loop and contains related data\n// that are needed for recomputation.\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/lineage/LineageTraceDedupTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/lineage/LineageTraceDedupTest.java",
"diff": "@@ -95,11 +95,10 @@ public class LineageTraceDedupTest extends AutomatedTestBase\ntestLineageTrace(TEST_NAME5);\n}\n- /*@Test\n+ @Test\npublic void testLineageTrace6() {\ntestLineageTrace(TEST_NAME6);\n- }*/\n- //FIXME: stack overflow only when ran the full package\n+ }\n@Test\npublic void testLineageTrace7() {\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2650] Non-recursive construction of HOPs from Lineage
This patch implements a non-recursive version of HOP dag construction
from the lineage dag, which fixes the stack overflow while recomputing
from lineage. |
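For reference, the traversal skeleton behind the new `constructHopsNR` (children fully processed before their parent, nodes shared across the dag visited once) can be sketched as follows. This is a minimal stand-in with a hypothetical `Node` type; the patch itself iterates over `LineageItem` inputs using `java.util.Stack` and `MutableInt` cursors.

```java
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.function.Consumer;

final class Node {                            // hypothetical stand-in for LineageItem
    Node[] inputs = new Node[0];
    boolean visited = false;
}

final class PostOrderTraversal {
    static void traverse(Node root, Consumer<Node> process) {
        Deque<Node> nodes = new ArrayDeque<>();
        Deque<int[]> cursor = new ArrayDeque<>(); // child position per stack frame
        nodes.push(root);
        cursor.push(new int[]{0});
        while (!nodes.isEmpty()) {
            Node n = nodes.peek();
            int[] p = cursor.peek();
            if (n.visited) {                      // reached again via a shared input
                nodes.pop(); cursor.pop();
            }
            else if (p[0] >= n.inputs.length) {   // all children done: process node
                process.accept(n);
                n.visited = true;
                nodes.pop(); cursor.pop();
            }
            else {                                // descend into the next child
                nodes.push(n.inputs[p[0]++]);
                cursor.push(new int[]{0});
            }
        }
    }
}
```

Because the descent depth is bounded by the dag height on the heap rather than the JVM call stack, the 100-iteration loop in the re-enabled dedup test no longer overflows.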
49,706 | 05.08.2020 21:55:00 | -7,200 | e6119429b724ab4ba3976544e8b76fc86f0cd9fc | Compressed Binary Cell Operations
Basic Compressed Binary Cell Operation.
Will fail in most cases because the implementation is not finished. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/hops/rewrite/ProgramRewriter.java",
"new_path": "src/main/java/org/apache/sysds/hops/rewrite/ProgramRewriter.java",
"diff": "@@ -52,7 +52,6 @@ public class ProgramRewriter\nprivate ArrayList<HopRewriteRule> _dagRuleSet = null;\nprivate ArrayList<StatementBlockRewriteRule> _sbRuleSet = null;\n-\npublic ProgramRewriter() {\n// by default which is used during initial compile\n// apply all (static and dynamic) rewrites\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/compress/AbstractCompressedMatrixBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/compress/AbstractCompressedMatrixBlock.java",
"diff": "@@ -35,6 +35,8 @@ import org.apache.sysds.runtime.instructions.cp.CM_COV_Object;\nimport org.apache.sysds.runtime.instructions.cp.ScalarObject;\nimport org.apache.sysds.runtime.instructions.spark.data.IndexedMatrixValue;\nimport org.apache.sysds.runtime.matrix.data.CTableMap;\n+import org.apache.sysds.runtime.matrix.data.LibMatrixBincell;\n+import org.apache.sysds.runtime.matrix.data.LibMatrixBincell.BinaryAccessType;\nimport org.apache.sysds.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysds.runtime.matrix.data.MatrixIndexes;\nimport org.apache.sysds.runtime.matrix.data.MatrixValue;\n@@ -60,7 +62,6 @@ public abstract class AbstractCompressedMatrixBlock extends MatrixBlock {\nprotected List<ColGroup> _colGroups;\n-\n/**\n* Constructor for building an empty Compressed Matrix block object.\n*/\n@@ -136,12 +137,59 @@ public abstract class AbstractCompressedMatrixBlock extends MatrixBlock {\n@Override\npublic MatrixBlock binaryOperations(BinaryOperator op, MatrixValue thatValue, MatrixValue result) {\n- printDecompressWarning(\"binaryOperations\", (MatrixBlock) thatValue);\n- MatrixBlock left = decompress();\n+\n+ MatrixBlock that = getUncompressed(thatValue);\n+\n+ if(!LibMatrixBincell.isValidDimensionsBinary(this, that)) {\n+ throw new RuntimeException(\"Block sizes are not matched for binary \" + \"cell operations: \" + this.rlen + \"x\"\n+ + this.clen + \" vs \" + that.getNumRows() + \"x\" + that.getNumColumns());\n+ }\n+\nMatrixBlock right = getUncompressed(thatValue);\n- return left.binaryOperations(op, right, result);\n+\n+ CompressedMatrixBlock ret = null;\n+ if(result == null || !(result instanceof CompressedMatrixBlock))\n+ ret = new CompressedMatrixBlock(getNumRows(), getNumColumns(), sparse);\n+ else {\n+ ret = (CompressedMatrixBlock) result;\n+ ret.reset(rlen, clen);\n}\n+ // MatrixBlock ret = (MatrixBlock) result;\n+ bincellOp(right, ret, op);\n+ return ret;\n+ }\n+\n+ /**\n+ * matrix-matrix binary operations, MM, MV\n+ *\n+ * @param m2 input matrix 2\n+ * @param ret result matrix\n+ * @param op binary operator\n+ */\n+ private void bincellOp(MatrixBlock m2, CompressedMatrixBlock ret, BinaryOperator op) {\n+\n+\n+ BinaryAccessType atype = LibMatrixBincell.getBinaryAccessType((MatrixBlock) this, m2);\n+ if(atype == BinaryAccessType.MATRIX_COL_VECTOR // MATRIX - VECTOR\n+ || atype == BinaryAccessType.MATRIX_ROW_VECTOR) {\n+ binaryMV(m2, ret, op, atype);\n+ }\n+ else if(atype == BinaryAccessType.OUTER_VECTOR_VECTOR) // VECTOR - VECTOR\n+ {\n+ binaryVV(m2, ret, op, atype);\n+ }\n+ else {\n+ binaryMM(m2, ret, op);\n+ }\n+ }\n+\n+ protected abstract void binaryMV(MatrixBlock m2, CompressedMatrixBlock ret, BinaryOperator op, BinaryAccessType atype );\n+\n+ protected abstract void binaryVV(MatrixBlock m2, CompressedMatrixBlock ret, BinaryOperator op, BinaryAccessType atype );\n+\n+ protected abstract void binaryMM(MatrixBlock m2, CompressedMatrixBlock ret, BinaryOperator op);\n+\n@Override\npublic MatrixBlock binaryOperationsInPlace(BinaryOperator op, MatrixValue thatValue) {\nprintDecompressWarning(\"binaryOperationsInPlace\", (MatrixBlock) thatValue);\n@@ -254,7 +302,8 @@ public abstract class AbstractCompressedMatrixBlock extends MatrixBlock {\nif(grp instanceof ColGroupValue) {\nint[] counts = ((ColGroupValue) grp).getCounts();\nreturn vals.cmOperations(op, getCountsAsBlock(counts));\n- }else{\n+ }\n+ else {\nreturn vals.cmOperations(op);\n}\n}\n@@ -497,17 +546,20 @@ public abstract class AbstractCompressedMatrixBlock extends MatrixBlock {\nreturn 
isCompressed((MatrixBlock) mVal) ? ((CompressedMatrixBlock) mVal).decompress() : (MatrixBlock) mVal;\n}\n- private void printDecompressWarning(String operation) {\n+ protected void printDecompressWarning(String operation) {\nLOG.warn(\"Operation '\" + operation + \"' not supported yet - decompressing for ULA operations.\");\n}\n- private void printDecompressWarning(String operation, MatrixBlock m2) {\n+ protected void printDecompressWarning(String operation, MatrixBlock m2) {\nif(isCompressed(m2)) {\nLOG.warn(\"Operation '\" + operation + \"' not supported yet - decompressing for ULA operations.\");\n}\n+ else {\n+ LOG.warn(\"Operation '\" + operation + \"' not supported yet - decompressing'\");\n}\n+ }\n@Override\npublic boolean isShallowSerialize() {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/compress/CompressedMatrixBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/compress/CompressedMatrixBlock.java",
"diff": "@@ -34,6 +34,8 @@ import java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Future;\nimport org.apache.commons.lang.NotImplementedException;\n+import org.apache.commons.lang3.tuple.ImmutablePair;\n+import org.apache.commons.lang3.tuple.Pair;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport org.apache.sysds.hops.OptimizerUtils;\n@@ -60,12 +62,15 @@ import org.apache.sysds.runtime.functionobjects.Builtin.BuiltinCode;\nimport org.apache.sysds.runtime.functionobjects.KahanFunction;\nimport org.apache.sysds.runtime.functionobjects.KahanPlus;\nimport org.apache.sysds.runtime.functionobjects.KahanPlusSq;\n+import org.apache.sysds.runtime.functionobjects.Mean;\nimport org.apache.sysds.runtime.functionobjects.Multiply;\nimport org.apache.sysds.runtime.functionobjects.ReduceAll;\nimport org.apache.sysds.runtime.functionobjects.ReduceCol;\n+import org.apache.sysds.runtime.functionobjects.ReduceRow;\nimport org.apache.sysds.runtime.instructions.cp.KahanObject;\nimport org.apache.sysds.runtime.matrix.data.IJV;\nimport org.apache.sysds.runtime.matrix.data.LibMatrixBincell;\n+import org.apache.sysds.runtime.matrix.data.LibMatrixBincell.BinaryAccessType;\nimport org.apache.sysds.runtime.matrix.data.LibMatrixReorg;\nimport org.apache.sysds.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysds.runtime.matrix.data.MatrixIndexes;\n@@ -73,6 +78,7 @@ import org.apache.sysds.runtime.matrix.data.MatrixValue;\nimport org.apache.sysds.runtime.matrix.operators.AggregateBinaryOperator;\nimport org.apache.sysds.runtime.matrix.operators.AggregateUnaryOperator;\nimport org.apache.sysds.runtime.matrix.operators.BinaryOperator;\n+import org.apache.sysds.runtime.matrix.operators.LeftScalarOperator;\nimport org.apache.sysds.runtime.matrix.operators.ScalarOperator;\nimport org.apache.sysds.runtime.util.CommonThreadPool;\n@@ -412,6 +418,42 @@ public class CompressedMatrixBlock extends AbstractCompressedMatrixBlock {\n}\n+ protected void binaryMV(MatrixBlock m2, CompressedMatrixBlock ret, BinaryOperator op, BinaryAccessType aType ){\n+ if(aType == BinaryAccessType.MATRIX_COL_VECTOR){\n+ throw new NotImplementedException(\"Binary Matrix Col Vector operations are not implemented CLA\");\n+ }else if(aType== BinaryAccessType.MATRIX_ROW_VECTOR){\n+ // Apply the operation to each of the column groups.\n+ // Most implementations will only modify metadata.\n+ ArrayList<ColGroup> newColGroups = new ArrayList<>();\n+\n+ for(ColGroup grp : _colGroups) {\n+ if(grp instanceof ColGroupUncompressed){\n+ LOG.error(\"NOT HANDLING UNCOMPRESSED IN BINARY MV\");\n+ }else{\n+\n+ if(grp.getNumCols() == 1){\n+ ScalarOperator sop = new LeftScalarOperator(op.fn, m2.getValue(0, grp.getColIndices()[0]),1);\n+ newColGroups.add(grp.scalarOperation(sop));\n+ }else{\n+ throw new NotImplementedException(\"Cocoded columns (nr cols:\" + grp.getNumCols() + \") groupType: not implemented for Binary Matrix Row Vector operations\");\n+ }\n+ }\n+ // newColGroups.add(grp.binaryMVR(m2, op));\n+ }\n+ ret._colGroups = newColGroups;\n+ // ret.setNonZeros(rlen * clen);\n+ // throw new NotImplementedException(\"Binary Matrix Row Vector operations are not implemented CLA\");\n+ }\n+ }\n+\n+ protected void binaryVV(MatrixBlock m2, CompressedMatrixBlock ret, BinaryOperator op, BinaryAccessType aType ){\n+ throw new NotImplementedException(\"Binary Vector Vector operations are not implemented\");\n+ }\n+\n+ protected void binaryMM(MatrixBlock m2, CompressedMatrixBlock ret, BinaryOperator op){\n+ throw 
new NotImplementedException(\"Binary Matrix Matrix operations are not implemented\");\n+ }\n+\n@Override\npublic MatrixBlock append(MatrixBlock that, MatrixBlock ret) {\n@@ -621,9 +663,6 @@ public class CompressedMatrixBlock extends AbstractCompressedMatrixBlock {\n}\n}\n- if(LOG.isDebugEnabled())\n- LOG.debug(\"Compressed MM in \" + time.stop());\n-\nreturn ret;\n}\n@@ -759,6 +798,21 @@ public class CompressedMatrixBlock extends AbstractCompressedMatrixBlock {\nret.quickSetValue(i, 0, builtin.execute(ret.quickGetValue(i, 0), 0));\n}\n+ // special handling of mean\n+ if(op.aggOp.increOp.fn instanceof Mean) {\n+ if(op.indexFn instanceof ReduceAll)\n+ ret.quickSetValue(0, 0, ret.quickGetValue(0, 0) / (getNumColumns() * getNumRows()));\n+ else if(op.indexFn instanceof ReduceCol) {\n+ for(int i = 0; i < getNumRows(); i++) {\n+ ret.quickSetValue(i, 0, ret.quickGetValue(i, 0) / getNumColumns());\n+ }\n+ }\n+ else if(op.indexFn instanceof ReduceRow)\n+ for(int i = 0; i < getNumColumns(); i++) {\n+ ret.quickSetValue(0, i, ret.quickGetValue(0, i) / getNumRows());\n+ }\n+ }\n+\n// drop correction if necessary\nif(op.aggOp.existsCorrection() && inCP)\nret.dropLastRowsOrColumns(op.aggOp.correction);\n@@ -946,7 +1000,7 @@ public class CompressedMatrixBlock extends AbstractCompressedMatrixBlock {\n}\nprivate static void rightMultByVector(List<ColGroup> groups, MatrixBlock vect, MatrixBlock ret, int rl, int ru) {\n- ColGroupValue.setupThreadLocalMemory(getMaxNumValues(groups));\n+ ColGroupValue.setupThreadLocalMemory(getMaxNumValues(groups).getLeft());\n// boolean cacheDDC1 = ru - rl > CompressionSettings.BITMAP_BLOCK_SZ * 2;\n@@ -992,6 +1046,7 @@ public class CompressedMatrixBlock extends AbstractCompressedMatrixBlock {\nprivate static void leftMultByVectorTranspose(List<ColGroup> colGroups, MatrixBlock vector, MatrixBlock result,\nboolean doTranspose, boolean allocTmp) {\n// transpose vector if required\n+ LOG.debug(\"Left Mult vector Transpose \" + vector.getClass());\nMatrixBlock rowVector = vector;\nif(doTranspose) {\nrowVector = new MatrixBlock(1, vector.getNumRows(), false);\n@@ -1003,14 +1058,22 @@ public class CompressedMatrixBlock extends AbstractCompressedMatrixBlock {\nresult.allocateDenseBlock();\n// setup memory pool for reuse\n- if(allocTmp)\n- ColGroupValue.setupThreadLocalMemory(getMaxNumValues(colGroups));\n-\n- // delegate matrix-vector operation to each column group\n+ if(allocTmp){\n+ Pair<Integer, List<Integer>> v = getMaxNumValues(colGroups);\n+ ColGroupValue.setupThreadLocalMemory(v.getLeft());\n+ for(int i = 0; i< colGroups.size(); i++){\n+ colGroups.get(i).leftMultByRowVector(rowVector, result, v.getRight().get(i));\n+ }\n+ }\n+ else\n+ {\nfor(ColGroup grp : colGroups) {\n- grp.leftMultByRowVector(rowVector, result);\n+ grp.leftMultByRowVector(rowVector, result, -1);\n+ }\n}\n+ // delegate matrix-vector operation to each column group\n+\n// post-processing\nif(allocTmp)\nColGroupValue.cleanupThreadLocalMemory();\n@@ -1092,7 +1155,7 @@ public class CompressedMatrixBlock extends AbstractCompressedMatrixBlock {\ntmpret.allocateDenseBlock();\n// setup memory pool for reuse\n- ColGroupValue.setupThreadLocalMemory(getMaxNumValues(groups));\n+ ColGroupValue.setupThreadLocalMemory(getMaxNumValues(groups).getLeft());\n// approach: for each colgroup, extract uncompressed columns one at-a-time\n// vector-matrix multiplies against remaining col groups\n@@ -1154,12 +1217,19 @@ public class CompressedMatrixBlock extends AbstractCompressedMatrixBlock {\nreturn grpParts;\n}\n- private static 
int getMaxNumValues(List<ColGroup> groups) {\n+ private static Pair<Integer, List<Integer>> getMaxNumValues(List<ColGroup> groups) {\nint numVals = 1;\n+ List<Integer> numValues = new ArrayList<>(groups.size());\n+ int nr;\nfor(ColGroup grp : groups)\n- if(grp instanceof ColGroupValue)\n- numVals = Math.max(numVals, ((ColGroupValue) grp).getNumValues());\n- return numVals;\n+ if(grp instanceof ColGroupValue){\n+ nr = ((ColGroupValue) grp).getNumValues();\n+ numValues.add(nr);\n+ numVals = Math.max(numVals, nr);\n+ } else{\n+ numValues.add(-1);\n+ }\n+ return new ImmutablePair<>(numVals, numValues);\n}\npublic boolean hasUncompressedColGroup() {\n@@ -1189,10 +1259,11 @@ public class CompressedMatrixBlock extends AbstractCompressedMatrixBlock {\npublic Object call() {\n// setup memory pool for reuse\ntry {\n- ColGroupValue.setupThreadLocalMemory(getMaxNumValues(_groups));\n- // delegate matrix-vector operation to each column group\n- for(ColGroup grp : _groups)\n- grp.leftMultByRowVector(_vect, _ret);\n+ Pair<Integer, List<Integer>> v = getMaxNumValues(_groups);\n+ ColGroupValue.setupThreadLocalMemory(v.getLeft());\n+ for(int i = 0; i< _groups.size(); i++){\n+ _groups.get(i).leftMultByRowVector(_vect, _ret, v.getRight().get(i));\n+ }\nColGroupValue.cleanupThreadLocalMemory();\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/compress/CompressionSettingsBuilder.java",
"new_path": "src/main/java/org/apache/sysds/runtime/compress/CompressionSettingsBuilder.java",
"diff": "@@ -33,7 +33,7 @@ public class CompressionSettingsBuilder {\nprivate double samplingRatio = 1.0;\nprivate boolean allowSharedDictionary = false;\nprivate boolean transposeInput = true;\n- private boolean skipList = true;\n+ private boolean skipList = false;\nprivate int seed = -1;\nprivate boolean investigateEstimate = false;\nprivate boolean lossy = false;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroup.java",
"new_path": "src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroup.java",
"diff": "@@ -266,8 +266,9 @@ public abstract class ColGroup implements Serializable {\n*\n* @param vector row vector\n* @param result matrix block result\n+ * @param numVals The Number of values contained in the Column.\n*/\n- public abstract void leftMultByRowVector(MatrixBlock vector, MatrixBlock result);\n+ public abstract void leftMultByRowVector(MatrixBlock vector, MatrixBlock result, int numVals);\n/**\n* Perform the specified scalar operation directly on the compressed column group, without decompressing individual\n@@ -278,6 +279,8 @@ public abstract class ColGroup implements Serializable {\n*/\npublic abstract ColGroup scalarOperation(ScalarOperator op);\n+ // public abstract ColGroup binaryMVR(MatrixBlock m2, BinaryOperator op);\n+\n/**\n* Unary Aggregate operator, since aggregate operators require new object output, the output becomes an uncompressed\n* matrix.\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupDDC.java",
"new_path": "src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupDDC.java",
"diff": "@@ -173,9 +173,9 @@ public abstract class ColGroupDDC extends ColGroupValue {\n}\n}\n- protected final void postScaling(double[] vals, double[] c) {\n+ protected final void postScaling(double[] vals, double[] c, int numVals) {\nfinal int ncol = getNumCols();\n- final int numVals = getNumValues();\n+ // final int numVals = getNumValues();\nif(_dict instanceof QDictionary) {\nQDictionary d = (QDictionary) _dict;\n@@ -248,10 +248,10 @@ public abstract class ColGroupDDC extends ColGroupValue {\n}\n@Override\n- public void leftMultByRowVector(MatrixBlock vector, MatrixBlock result) {\n+ public void leftMultByRowVector(MatrixBlock vector, MatrixBlock result, int numVals) {\ndouble[] a = ColGroupConverter.getDenseVector(vector);\ndouble[] c = result.getDenseBlockValues();\n- final int numVals = getNumValues();\n+ numVals = (numVals == -1) ? getNumValues(): numVals;\nif(8 * numVals < _numRows) {\n// iterative over codes and pre-aggregate inputs per code (guaranteed <=255)\n@@ -263,7 +263,7 @@ public abstract class ColGroupDDC extends ColGroupValue {\nvals[index] += a[i];\n}\n}\n- postScaling(vals, c);\n+ postScaling(vals, c, numVals);\n}\nelse {\n// iterate over codes, compute all, and add to the result\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupOLE.java",
"new_path": "src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupOLE.java",
"diff": "@@ -379,12 +379,12 @@ public class ColGroupOLE extends ColGroupOffset {\n}\n@Override\n- public void leftMultByRowVector(MatrixBlock vector, MatrixBlock result) {\n+ public void leftMultByRowVector(MatrixBlock vector, MatrixBlock result, int numVals) {\ndouble[] a = ColGroupConverter.getDenseVector(vector);\ndouble[] c = result.getDenseBlockValues();\nfinal int blksz = CompressionSettings.BITMAP_BLOCK_SZ;\nfinal int numCols = getNumCols();\n- final int numVals = getNumValues();\n+ // final int numVals = getNumValues();\nfinal double[] values = getValues();\nif(numVals >= 1 && _numRows > blksz) {\n@@ -633,7 +633,7 @@ public class ColGroupOLE extends ColGroupOffset {\nint[] ret = allocIVector(numVals, rl == 0);\nfinal int blksz = CompressionSettings.BITMAP_BLOCK_SZ;\n- if(rl > 0) { // rl aligned with blksz\n+ if(rl > 0 && _skipList != null) { // rl aligned with blksz\nint rskip = (_numRows / 2 / blksz) * blksz;\nfor(int k = 0; k < numVals; k++) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupRLE.java",
"new_path": "src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupRLE.java",
"diff": "@@ -358,11 +358,11 @@ public class ColGroupRLE extends ColGroupOffset {\n}\n@Override\n- public void leftMultByRowVector(MatrixBlock vector, MatrixBlock result) {\n+ public void leftMultByRowVector(MatrixBlock vector, MatrixBlock result, int numVals) {\ndouble[] a = ColGroupConverter.getDenseVector(vector);\ndouble[] c = result.getDenseBlockValues();\nfinal int numCols = getNumCols();\n- final int numVals = getNumValues();\n+ // final int numVals = getNumValues();\nfinal double[] values = getValues();\nif(numVals >= 1 && _numRows > CompressionSettings.BITMAP_BLOCK_SZ) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupUncompressed.java",
"new_path": "src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupUncompressed.java",
"diff": "@@ -274,7 +274,7 @@ public class ColGroupUncompressed extends ColGroup {\n}\n@Override\n- public void leftMultByRowVector(MatrixBlock vector, MatrixBlock result) {\n+ public void leftMultByRowVector(MatrixBlock vector, MatrixBlock result, int numVals) {\nMatrixBlock pret = new MatrixBlock(1, _colIndexes.length, false);\nLibMatrixMult.matrixMult(vector, _data, pret);\n@@ -292,18 +292,18 @@ public class ColGroupUncompressed extends ColGroup {\n// throw new NotImplementedException();\n// }\n- public void leftMultByRowVector(MatrixBlock vector, MatrixBlock result, int k) {\n- MatrixBlock pret = new MatrixBlock(1, _colIndexes.length, false);\n- LibMatrixMult.matrixMult(vector, _data, pret, k);\n+ // public void leftMultByRowVector(MatrixBlock vector, MatrixBlock result) {\n+ // MatrixBlock pret = new MatrixBlock(1, _colIndexes.length, false);\n+ // LibMatrixMult.matrixMult(vector, _data, pret, k);\n- // copying partialResult to the proper indices of the result\n- if(!pret.isEmptyBlock(false)) {\n- double[] rsltArr = result.getDenseBlockValues();\n- for(int colIx = 0; colIx < _colIndexes.length; colIx++)\n- rsltArr[_colIndexes[colIx]] = pret.quickGetValue(0, colIx);\n- result.recomputeNonZeros();\n- }\n- }\n+ // // copying partialResult to the proper indices of the result\n+ // if(!pret.isEmptyBlock(false)) {\n+ // double[] rsltArr = result.getDenseBlockValues();\n+ // for(int colIx = 0; colIx < _colIndexes.length; colIx++)\n+ // rsltArr[_colIndexes[colIx]] = pret.quickGetValue(0, colIx);\n+ // result.recomputeNonZeros();\n+ // }\n+ // }\n@Override\npublic ColGroup scalarOperation(ScalarOperator op) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/parfor/opt/CostEstimatorHops.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/parfor/opt/CostEstimatorHops.java",
"diff": "@@ -31,6 +31,7 @@ import org.apache.sysds.runtime.controlprogram.parfor.opt.Optimizer.CostModelTyp\npublic class CostEstimatorHops extends CostEstimator\n{\n+\npublic static final double DEFAULT_MEM_SP = 20*1024*1024;\nprivate OptTreePlanMappingAbstract _map = null;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2615] Compressed Binary Cell Operations
Basic Compressed Binary Cell Operation.
Will fail in most cases because the implementation is not finished. |
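The patch routes binary cell operations by the shapes of the two operands, mirroring `LibMatrixBincell.BinaryAccessType`. A shapes-only sketch of that dispatch (illustrative code, not the SystemDS implementation):

```java
enum AccessType { MATRIX_MATRIX, MATRIX_COL_VECTOR, MATRIX_ROW_VECTOR, OUTER_VECTOR_VECTOR }

final class BincellDispatchSketch {
    // classify a binary op by operand dimensions (r1 x c1) op (r2 x c2)
    static AccessType accessType(int r1, int c1, int r2, int c2) {
        if (c1 > 1 && r2 == r1 && c2 == 1)  return AccessType.MATRIX_COL_VECTOR;
        if (r1 > 1 && r2 == 1 && c2 == c1)  return AccessType.MATRIX_ROW_VECTOR;
        if (c1 == 1 && r2 == 1 && c2 > 1)   return AccessType.OUTER_VECTOR_VECTOR;
        return AccessType.MATRIX_MATRIX;    // incl. the general same-shape case
    }
}
```

For `MATRIX_ROW_VECTOR` over a single-column group, the new `binaryMV` rewrites the binary operator as a `LeftScalarOperator` seeded with the vector entry of that column, so only the group's dictionary values change and the data stays compressed; co-coded groups and the remaining access types still throw `NotImplementedException`, which is why the commit message expects most cases to fail.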
49,720 | 06.09.2020 17:29:51 | -7,200 | f4e2412bf15abdda41bf0d0099cc1d0a2c40c1f1 | New builtin for forward and backward NA filling
Closes | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/na_locf.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# Builtin function for imputing missing values using forward fill and backward fill techniques\n+\n+# INPUT PARAMETERS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# X Double --- Matrix X\n+# option String \"locf\" String \"locf\" (last observation moved forward) to do forward fill\n+# String \"nocb\" (next observation carried backward) to do backward fill\n+# verbose Boolean FALSE to print output on screen\n+# ---------------------------------------------------------------------------------------------\n+\n+#Output(s)\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# output Double --- Matrix with no missing values\n+\n+m_na_locf = function(Matrix[Double] X, String option = \"locf\", Boolean verbose = FALSE)\n+ return(Matrix[Double] output)\n+{\n+ output = X\n+ if(sum(is.nan(X)) > 0) {\n+ if(option == \"locf\")\n+ output = locf(X)\n+ else\n+ output = rev(locf(rev(X)))\n+ }\n+\n+ if(verbose)\n+ print(toString(output))\n+}\n+\n+locf = function(Matrix[Double] X)\n+ return(Matrix[Double] outputLocf)\n+{\n+ # store mask of missing values\n+ mask = is.nan(X)\n+\n+ # replace NaN with a number i.e., zeros\n+ X = replace(target=X, pattern = NaN, replacement = 0)\n+\n+ # use the cumsumprod built-in to do fill forward\n+ output = matrix(0, nrow(X), ncol(X))\n+ parfor(i in 1:ncol(X))\n+ output[ ,i] = cumsumprod(cbind(X[,i],mask[,i]))\n+\n+ # if there are leading NAs\n+ leading_NA = (output == 0) & (mask == 1)\n+ outputR = matrix(0, nrow(X), ncol(X))\n+\n+ if(sum(leading_NA) > 0) {\n+ # doing fill forward in reverse\n+ parfor(i in 1:ncol(X))\n+ outputR[ ,i] = rev(cumsumprod(rev(cbind(X[,i],leading_NA[,i]))))\n+ }\n+\n+ outputLocf = (outputR * leading_NA) + output\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"new_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"diff": "@@ -140,6 +140,7 @@ public enum Builtins {\nMSVMPREDICT(\"msvmPredict\", true),\nMULTILOGREG(\"multiLogReg\", true),\nMULTILOGREGPREDICT(\"multiLogRegPredict\", true),\n+ NA_LOCF(\"na_locf\", true),\nNCOL(\"ncol\", false),\nNORMALIZE(\"normalize\", true),\nNROW(\"nrow\", false),\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/compress/BitmapEncoder.java",
"new_path": "src/main/java/org/apache/sysds/runtime/compress/BitmapEncoder.java",
"diff": "@@ -289,7 +289,7 @@ public class BitmapEncoder {\nlengths.put(scaledValues[idx], lengths.get(scaledValues[idx]) + fullSizeOffsetsLists[idx].size());\n}\nelse {\n- Queue<IntArrayList> offsets = new LinkedList<IntArrayList>();\n+ Queue<IntArrayList> offsets = new LinkedList<>();\noffsets.add(fullSizeOffsetsLists[idx]);\nvalues.put(scaledValues[idx], offsets);\nlengths.put(scaledValues[idx], fullSizeOffsetsLists[idx].size());\n@@ -347,7 +347,7 @@ public class BitmapEncoder {\nlengths.put(array, lengths.get(array) + fullSizeOffsetsLists[idx / numColumns].size());\n}\nelse {\n- Queue<IntArrayList> offsets = new LinkedList<IntArrayList>();\n+ Queue<IntArrayList> offsets = new LinkedList<>();\noffsets.add(fullSizeOffsetsLists[idx / numColumns]);\nvalues.put(array, offsets);\nlengths.put(array, fullSizeOffsetsLists[idx / numColumns].size());\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/compress/CompressedMatrixBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/compress/CompressedMatrixBlock.java",
"diff": "@@ -1638,7 +1638,7 @@ public class CompressedMatrixBlock extends AbstractCompressedMatrixBlock {\n@Override\npublic List<ColGroup> call() {\n- List<ColGroup> res = new ArrayList<ColGroup>();\n+ List<ColGroup> res = new ArrayList<>();\nfor(ColGroup x : _colGroups) {\nres.add(x.scalarOperation(_sop));\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinNaLocfTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.builtin;\n+\n+import org.apache.commons.lang.ArrayUtils;\n+import org.apache.sysds.common.Types;\n+import org.apache.sysds.lops.LopProperties;\n+import org.apache.sysds.runtime.lineage.LineageCacheConfig.ReuseCacheType;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Test;\n+\n+import java.util.HashMap;\n+\n+public class BuiltinNaLocfTest extends AutomatedTestBase {\n+ private final static String TEST_NAME = \"na_locfTest\";\n+ private final static String TEST_DIR = \"functions/builtin/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + BuiltinNaLocfTest.class.getSimpleName() + \"/\";\n+\n+ private final static double eps = 1e-10;\n+ private final static int rows = 25;\n+ private final static int cols = 25;\n+\n+ @Override\n+ public void setUp() {\n+ addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] {\"O\"}));\n+ }\n+\n+ @Test\n+ public void tesLocfNoLineageCP() {\n+ runLocfTest(false, \"locf\", LopProperties.ExecType.CP);\n+ }\n+\n+ @Test\n+ public void tesLocfLineageCP() {\n+ runLocfTest(true, \"locf\", LopProperties.ExecType.CP);\n+ }\n+\n+ @Test\n+ public void tesLocfNoLineageSPARK() {\n+ runLocfTest(false,\"locf\", LopProperties.ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void tesLocfLineageSPARK() {\n+ runLocfTest(true,\"locf\", LopProperties.ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void tesnocbNoLineageCP() {\n+ runLocfTest(false, \"nocb\", LopProperties.ExecType.CP);\n+ }\n+\n+ @Test\n+ public void tesnocbLineageCP() {\n+ runLocfTest(true, \"nocb\", LopProperties.ExecType.CP);\n+ }\n+\n+ @Test\n+ public void tesnocbNoLineageSPARK() {\n+ runLocfTest(false,\"nocb\", LopProperties.ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void tesnocbLineageSPARK() {\n+ runLocfTest(true,\"nocb\", LopProperties.ExecType.SPARK);\n+ }\n+\n+ private void runLocfTest(boolean lineage, String option, LopProperties.ExecType instType) {\n+ Types.ExecMode platformOld = setExecMode(instType);\n+ try {\n+ setOutputBuffering(false);\n+ loadTestConfiguration(getTestConfiguration(TEST_NAME));\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[] {\"-nvargs\", \"X=\" + input(\"A\"), \"option=\"+option, \"O=\" + output(\"O\")};\n+ if(lineage) {\n+ String[] lin = new String[] {\"-stats\", \"-lineage\", ReuseCacheType.REUSE_HYBRID.name().toLowerCase()};\n+ programArgs = (String[]) ArrayUtils.addAll(programArgs, lin);\n+ }\n+\n+ fullRScriptName = 
HOME + TEST_NAME + \".R\";\n+ rCmd = getRCmd(inputDir(), option, expectedDir());\n+\n+ //generate actual dataset\n+ double[][] A = getRandomMatrix(rows, cols, -10, 10, 0.6, 7);\n+ writeInputMatrixWithMTD(\"A\", A, true);\n+\n+ runTest(true, false, null, -1);\n+ runRScript(true);\n+ //compare matrices\n+ HashMap<MatrixValue.CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(\"O\");\n+ HashMap<MatrixValue.CellIndex, Double> rfile = readRMatrixFromFS(\"O\");\n+ TestUtils.compareMatrices(dmlfile, rfile, eps, \"Stat-DML\", \"Stat-R\");\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/na_locfTest.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args <- commandArgs(TRUE)\n+\n+library(\"Matrix\")\n+library(\"imputeTS\")\n+\n+A = as.matrix(readMM(paste(args[1], \"A.mtx\", sep=\"\")))\n+\n+A[A==0] = NA\n+\n+B = na_locf(A, option = args[2])\n+\n+\n+writeMM(as(B, \"CsparseMatrix\"), paste(args[3], \"O\", sep=\"\"));\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/na_locfTest.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = read($X);\n+# replace zeros with NaN\n+dataWithNa = replace(target=A, pattern = 0, replacement = NaN)\n+B = na_locf(dataWithNa, $option, FALSE)\n+write(B, $O);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2635] New builtin for forward and backward NA filling
Closes #1036. |
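The heart of the scripted `locf()` is the recurrence that `cumsumprod` evaluates for a two-column input (v, m): out[i] = v[i] + m[i] * out[i-1]. With missing cells zeroed in v and flagged with m[i] = 1, each row either restarts at the observed value or carries the previous output forward. A scalar Java rendering of that recurrence (illustrative only; NaN marks a missing cell):

```java
final class LocfSketch {
    // forward fill of one column via the cumsumprod recurrence
    static double[] fillForward(double[] x) {
        double[] out = new double[x.length];
        for (int i = 0; i < x.length; i++) {
            double v = Double.isNaN(x[i]) ? 0d : x[i];  // replace(..., NaN, 0)
            double m = Double.isNaN(x[i]) ? 1d : 0d;    // missing-value mask
            out[i] = v + m * (i > 0 ? out[i - 1] : 0d); // out = cumsumprod(cbind(v, m))
        }
        return out; // leading NAs surface as 0 here
    }

    public static void main(String[] args) {
        double[] x = { Double.NaN, 3, Double.NaN, Double.NaN, 7 };
        // prints [0.0, 3.0, 3.0, 3.0, 7.0]; the leading 0 is exactly what the
        // script's ((output == 0) & (mask == 1)) check detects and patches
        System.out.println(java.util.Arrays.toString(fillForward(x)));
    }
}
```

Leading missing values come out as 0 and are fixed by a second, reversed pass over the flagged cells; the same reversal trick implements `nocb` as `rev(locf(rev(X)))`.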
49,689 | 07.09.2020 16:48:33 | -7,200 | 8a85d529d3c6670dc04915e0e672aae14c343307 | Fix capturing inputs to a loop body | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/parser/ForStatementBlock.java",
"new_path": "src/main/java/org/apache/sysds/parser/ForStatementBlock.java",
"diff": "@@ -278,6 +278,17 @@ public class ForStatementBlock extends StatementBlock\npublic Lop getToLops() { return _toLops; }\npublic Lop getIncrementLops() { return _incrementLops; }\n+ public ArrayList<String> getInputstoSB() {\n+ // By calling getInputstoSB on all the child statement blocks,\n+ // we remove the variables only read in the for predicate but\n+ // never used in the body from the input list.\n+ ArrayList<String> inputs = new ArrayList<>();\n+ ForStatement fstmt = (ForStatement)_statements.get(0);\n+ for (StatementBlock sb : fstmt.getBody())\n+ inputs.addAll(sb.getInputstoSB());\n+ return inputs;\n+ }\n+\n@Override\npublic VariableSet analyze(VariableSet loPassed) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/parser/WhileStatementBlock.java",
"new_path": "src/main/java/org/apache/sysds/parser/WhileStatementBlock.java",
"diff": "@@ -255,6 +255,17 @@ public class WhileStatementBlock extends StatementBlock\n_predicateLops = predicateLops;\n}\n+ public ArrayList<String> getInputstoSB() {\n+ // By calling getInputstoSB on all the child statement blocks,\n+ // we remove the variables only read in the while predicate but\n+ // never used in the body from the input list.\n+ ArrayList<String> inputs = new ArrayList<>();\n+ WhileStatement fstmt = (WhileStatement)_statements.get(0);\n+ for (StatementBlock sb : fstmt.getBody())\n+ inputs.addAll(sb.getInputstoSB());\n+ return inputs;\n+ }\n+\n@Override\npublic VariableSet analyze(VariableSet loPassed) {\nVariableSet predVars = new VariableSet();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/Lineage.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/Lineage.java",
"diff": "@@ -125,17 +125,6 @@ public class Lineage {\n_initDedupBlock = ldb;\n}\n- public void computeDedupBlock(ProgramBlock pb, ExecutionContext ec) {\n- if( !(pb instanceof ForProgramBlock || pb instanceof WhileProgramBlock) )\n- throw new DMLRuntimeException(\"Invalid deduplication block: \"+ pb.getClass().getSimpleName());\n- if (!_dedupBlocks.containsKey(pb)) {\n- boolean valid = LineageDedupUtils.isValidDedupBlock(pb, false);\n- _dedupBlocks.put(pb, valid?\n- LineageDedupUtils.computeDedupBlock(pb, ec) : null);\n- }\n- _activeDedupBlock = _dedupBlocks.get(pb); //null if invalid\n- }\n-\npublic void initializeDedupBlock(ProgramBlock pb, ExecutionContext ec) {\nif( !(pb instanceof ForProgramBlock || pb instanceof WhileProgramBlock) )\nthrow new DMLRuntimeException(\"Invalid deduplication block: \"+ pb.getClass().getSimpleName());\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageMap.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageMap.java",
"diff": "@@ -75,8 +75,8 @@ public class LineageMap {\npublic void processDedupItem(LineageMap lm, Long path, LineageItem[] liinputs, String name) {\nString delim = LineageDedupUtils.DEDUP_DELIM;\nfor (Map.Entry<String, LineageItem> entry : lm._traces.entrySet()) {\n- // Encode everything in the opcode needed by the deserialization logic\n- // to map this lineage item to the right patch.\n+ // Encode everything needed by the recomputation logic in the\n+ // opcode to map this lineage item to the right patch.\nString opcode = LineageItem.dedupItemOpcode + delim + entry.getKey()\n+ delim + name + delim + path.toString();\nLineageItem li = new LineageItem(opcode, liinputs);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/lineage/LineageTraceDedupTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/lineage/LineageTraceDedupTest.java",
"diff": "@@ -51,6 +51,7 @@ public class LineageTraceDedupTest extends AutomatedTestBase\nprotected static final String TEST_NAME7 = \"LineageTraceDedup7\"; //nested if-else branches\nprotected static final String TEST_NAME8 = \"LineageTraceDedup8\"; //while loop\nprotected static final String TEST_NAME9 = \"LineageTraceDedup9\"; //while loop w/ if\n+ protected static final String TEST_NAME11 = \"LineageTraceDedup11\"; //mini-batch\nprotected String TEST_CLASS_DIR = TEST_DIR + LineageTraceDedupTest.class.getSimpleName() + \"/\";\n@@ -61,7 +62,7 @@ public class LineageTraceDedupTest extends AutomatedTestBase\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- for(int i=1; i<11; i++)\n+ for(int i=1; i<=11; i++)\naddTestConfiguration(TEST_NAME+i, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME+i));\n}\n@@ -115,6 +116,11 @@ public class LineageTraceDedupTest extends AutomatedTestBase\ntestLineageTrace(TEST_NAME9);\n}\n+ @Test\n+ public void testLineageTrace11() {\n+ testLineageTrace(TEST_NAME11);\n+ }\n+\npublic void testLineageTrace(String testname) {\nboolean old_simplification = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\nboolean old_sum_product = OptimizerUtils.ALLOW_SUM_PRODUCT_REWRITES;\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/lineage/LineageTraceDedup11.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+M = 8;\n+lim = 100;\n+X = rand(rows=M, cols=784, seed=42)\n+\n+for(i in 1:lim) {\n+ X = ((X + X) * i - X) / (i+1)\n+ X = ((X + X) * i - X) / (i+1)\n+ X = ((X + X) * i - X) / (i+1)\n+ X = ((X + X) * i - X) / (i+1)\n+ X = ((X + X) * i - X) / (i+1)\n+ X = ((X + X) * i - X) / (i+1)\n+ X = ((X + X) * i - X) / (i+1)\n+ X = ((X + X) * i - X) / (i+1)\n+ X = ((X + X) * i - X) / (i+1)\n+ X = ((X + X) * i - X) / (i+1)\n+}\n+write(X, $1, format=\"text\");\n+\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2650] Fix capturing inputs to a loop body |
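The intent of the two new `getInputstoSB` overrides: a variable read only in the loop predicate, e.g. `lim` in `for(i in 1:lim)` from the added test, is not an input to the loop body and therefore must not become an `IN#k` placeholder of the dedup patch. A condensed sketch of the union-over-body-blocks computation (hypothetical types, not the parser classes themselves):

```java
import java.util.ArrayList;
import java.util.List;

final class StatementBlockSketch {
    List<StatementBlockSketch> body = new ArrayList<>(); // child blocks of a loop
    List<String> variablesRead = new ArrayList<>();      // reads of this block

    // inputs of a loop block = union of its body blocks' inputs only;
    // predicate-only reads (like the loop bound 'lim') drop out
    List<String> inputsToSB() {
        if (body.isEmpty())
            return variablesRead;                        // leaf: a basic block
        List<String> inputs = new ArrayList<>();
        for (StatementBlockSketch sb : body)
            inputs.addAll(sb.inputsToSB());
        return inputs;
    }
}
```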
49,738 | 10.09.2020 13:40:05 | -7,200 | 6308847f9a6dd3c74edd5a3ef9b62fb2ad2d4511 | [MINOR] Fix compressed OLE matrix-vector operations
This patch partially fixes an issue with cache-conscious OLE
matrix-vector operations. However, it only fixes the issue for
single-threaded operations, while multi-threaded ops still run into
index out-of-bounds issues on covtype. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/compress/CompressedMatrixBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/compress/CompressedMatrixBlock.java",
"diff": "@@ -982,9 +982,12 @@ public class CompressedMatrixBlock extends AbstractCompressedMatrixBlock {\n// uc.rightMultByVector(vector, result, k);\n// compute remaining compressed column groups in parallel\n+ // note: OLE needs alignment to segment size, otherwise wrong entry\nExecutorService pool = CommonThreadPool.get(k);\nint rlen = getNumRows();\n- int blklen = getAlignedBlockSize((int) (Math.ceil((double) rlen / k)));\n+ int seqsz = CompressionSettings.BITMAP_BLOCK_SZ;\n+ int blklen = (int)(Math.ceil((double)rlen/k));\n+ blklen += (blklen%seqsz != 0)?seqsz-blklen%seqsz:0;\nArrayList<RightMatrixMultTask> tasks = new ArrayList<>();\nfor(int i = 0; i < k & i * blklen < getNumRows(); i++)\ntasks.add(\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupOLE.java",
"new_path": "src/main/java/org/apache/sysds/runtime/compress/colgroup/ColGroupOLE.java",
"diff": "@@ -333,11 +333,11 @@ public class ColGroupOLE extends ColGroupOffset {\ndouble val = aval[k];\nint bix = apos[k];\n- int len = _data[boff + bix];\n- int pos = boff + bix + 1;\n- // LOG.error(\"Len: \"+pos + \" pos: \"+bi + \" ii \" + len);\nfor(int ii = bi; ii < bimax && bix < blen; ii += blksz) {\n// prepare length, start, and end pos\n+ int len = _data[boff + bix];\n+ int pos = boff + bix + 1;\n+\n// compute partial results\nLinearAlgebraUtils.vectAdd(val, c, _data, pos, ii, Math.min(len,ru));\nbix += len + 1;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix compressed OLE matrix-vector operations
This patch partially fixes an issue with cache-conscious OLE
matrix-vector operations. However, it only fixes the issue for
single-threaded operations, while multi-threaded ops still run into
index out-of-bounds issues on covtype. |
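The multi-threaded part of the fix pads each thread's block length up to the next multiple of the OLE segment size, so no per-thread row range starts mid-segment. The alignment arithmetic in isolation (`seqsz` is passed as a parameter here; the concrete `CompressionSettings.BITMAP_BLOCK_SZ` value is deliberately not assumed):

```java
final class OleBlockAlignSketch {
    // round blklen up to a multiple of seqsz; a no-op if already aligned
    static int alignedBlockLen(int rlen, int k, int seqsz) {
        int blklen = (int) Math.ceil((double) rlen / k);
        blklen += (blklen % seqsz != 0) ? seqsz - blklen % seqsz : 0;
        return blklen;
    }

    public static void main(String[] args) {
        // e.g. rlen=1000 rows, k=8 threads, seqsz=128:
        // ceil(1000/8) = 125 is padded to 128, so the per-thread row
        // ranges [0,128), [128,256), ... all begin on segment boundaries
        System.out.println(alignedBlockLen(1000, 8, 128)); // 128
    }
}
```

The single-threaded half of the patch moves the segment length/position reads inside the segment loop, so each OLE segment is decoded with its own header instead of the first one's.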
49,700 | 13.09.2020 14:39:09 | -7,200 | 2e6883a6e7588b31054c25c61177fed3b8ebf0de | Simplified privacy handling of federated UDF inputs
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/federated/FederatedWorkerHandler.java",
"diff": "@@ -302,6 +302,7 @@ public class FederatedWorkerHandler extends ChannelInboundHandlerAdapter {\nFederatedUDF udf = (FederatedUDF) request.getParam(0);\nData[] inputs = Arrays.stream(udf.getInputIDs())\n.mapToObj(id -> ec.getVariable(String.valueOf(id)))\n+ .map(PrivacyMonitor::handlePrivacy)\n.toArray(Data[]::new);\n//execute user-defined function\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/MultiReturnParameterizedBuiltinFEDInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/MultiReturnParameterizedBuiltinFEDInstruction.java",
"diff": "@@ -190,7 +190,7 @@ public class MultiReturnParameterizedBuiltinFEDInstruction extends ComputationFE\n@Override\npublic FederatedResponse execute(ExecutionContext ec, Data... data) {\n- FrameObject fo = (FrameObject) PrivacyMonitor.handlePrivacy(data[0]);\n+ FrameObject fo = (FrameObject) data[0];\nFrameBlock fb = fo.acquireRead();\nString[] colNames = fb.getColumnNames();\n@@ -220,8 +220,7 @@ public class MultiReturnParameterizedBuiltinFEDInstruction extends ComputationFE\n@Override\npublic FederatedResponse execute(ExecutionContext ec, Data... data) {\n- FrameObject fo = (FrameObject) PrivacyMonitor.handlePrivacy(data[0]);\n- FrameBlock fb = fo.acquireReadAndRelease();\n+ FrameBlock fb = ((FrameObject)data[0]).acquireReadAndRelease();\n// apply transformation\nMatrixBlock mbout = _encoder.apply(fb,\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/ParameterizedBuiltinFEDInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/ParameterizedBuiltinFEDInstruction.java",
"diff": "@@ -299,7 +299,7 @@ public class ParameterizedBuiltinFEDInstruction extends ComputationFEDInstructio\n}\npublic FederatedResponse execute(ExecutionContext ec, Data... data) {\n- MatrixObject mo = (MatrixObject) PrivacyMonitor.handlePrivacy(data[0]);\n+ MatrixObject mo = (MatrixObject) data[0];\nMatrixBlock mb = mo.acquireRead();\nString[] colNames = _meta.getColumnNames();\n@@ -331,8 +331,7 @@ public class ParameterizedBuiltinFEDInstruction extends ComputationFEDInstructio\n@Override\npublic FederatedResponse execute(ExecutionContext ec, Data... data) {\n- FrameObject fo = (FrameObject) PrivacyMonitor.handlePrivacy(data[0]);\n- FrameBlock fb = fo.acquireReadAndRelease();\n+ FrameBlock fb = ((FrameObject)data[0]).acquireReadAndRelease();\n// return column names\nreturn new FederatedResponse(ResponseType.SUCCESS, new Object[] {fb.getColumnNames()});\n}\n@@ -350,8 +349,7 @@ public class ParameterizedBuiltinFEDInstruction extends ComputationFEDInstructio\n@Override\npublic FederatedResponse execute(ExecutionContext ec, Data... data) {\n- FrameObject fo = (FrameObject) PrivacyMonitor.handlePrivacy(data[0]);\n- FrameBlock fb = fo.acquireReadAndRelease();\n+ FrameBlock fb = ((FrameObject)data[0]).acquireReadAndRelease();\n_encoder.build(fb);\nreturn new FederatedResponse(ResponseType.SUCCESS, new Object[] {_encoder});\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2618] Simplified privacy handling of federated UDF inputs
Closes #1053. |
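The pattern worth calling out in the FederatedWorkerHandler hunk above: privacy handling now happens once, when UDF inputs are resolved, rather than in every FederatedUDF implementation. A minimal excerpt of that pipeline:

    // All UDF inputs pass through a single privacy filter when resolved,
    // so the individual execute() implementations can drop their own calls.
    Data[] inputs = Arrays.stream(udf.getInputIDs())
        .mapToObj(id -> ec.getVariable(String.valueOf(id)))
        .map(PrivacyMonitor::handlePrivacy) // centralized choke point
        .toArray(Data[]::new);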
49,720 | 14.09.2020 17:16:47 | -7,200 | 9bf5194a6cf186bc35304bb03780c7a439922b5b | [MINOR][DOC] Formatting Fix in documentation | [
{
"change_type": "MODIFY",
"old_path": "docs/site/dml-language-reference.md",
"new_path": "docs/site/dml-language-reference.md",
"diff": "@@ -2033,6 +2033,7 @@ The built-in function <code>map()</code> provides support for the lambda express\nFunction | Description | Parameters | Example\n-------- | ----------- | ---------- | -------\nmap() | It will execute the given lambda expression on a frame.| Input: (X <frame>, y <String>) <br/>Output: <frame>. <br/> X is a frame and y is a String containing the lambda expression to be executed on frame X. | X = read(\"file1\", data_type=\"frame\", rows=2, cols=3, format=\"binary\") <br/> y = \"lambda expression\" <br/> Z = map(X, y) <br/> # Dimensions of Z = Dimensions of X; <br/> example: Z = map(X, \"x -> x.charAt(2)\")\n+\nExample let X =\n##### FRAME: nrow = 10, ncol = 1 <br/>\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR][DOC] Formatting Fix in documentation |
49,720 | 14.09.2020 18:50:49 | -7,200 | ea37c48ec001a65e83d209b3fd05ed3d257f655d | [MINOR][DOC] Formatting fix | [
{
"change_type": "MODIFY",
"old_path": "docs/site/dml-language-reference.md",
"new_path": "docs/site/dml-language-reference.md",
"diff": "@@ -2032,11 +2032,11 @@ The built-in function <code>map()</code> provides support for the lambda express\nFunction | Description | Parameters | Example\n-------- | ----------- | ---------- | -------\n-map() | It will execute the given lambda expression on a frame.| Input: (X <frame>, y <String>) <br/>Output: <frame>. <br/> X is a frame and y is a String containing the lambda expression to be executed on frame X. | X = read(\"file1\", data_type=\"frame\", rows=2, cols=3, format=\"binary\") <br/> y = \"lambda expression\" <br/> Z = map(X, y) <br/> # Dimensions of Z = Dimensions of X; <br/> example: Z = map(X, \"x -> x.charAt(2)\")\n+map() | It will execute the given lambda expression on a frame.| Input: (X <frame>, y <String>) <br/>Output: <frame>. <br/> X is a frame and <br/>y is a String containing the lambda expression to be executed on frame X. | X = read(\"file1\", data_type=\"frame\", rows=2, cols=3, format=\"binary\") <br/> y = \"lambda expression\" <br/> Z = map(X, y) <br/> # Dimensions of Z = Dimensions of X; <br/> example: <br/> <code> Z = map(X, \"x -> x.charAt(2)\") </code>\nExample let X =\n- ##### FRAME: nrow = 10, ncol = 1 <br/>\n+ # FRAME: nrow = 10, ncol = 1\n# C1\n# STRING\nwest\n@@ -2050,9 +2050,10 @@ Example let X =\nwest\neast\n-Z = map(X, \"x -> x.toUpperCase()\") <br/>\n-print(toString(Z))\n- ##### FRAME: nrow = 10, ncol = 1 <br/>\n+<code> Z = map(X, \"x -> x.toUpperCase()\") <br/>\n+print(toString(Z)) </code>\n+\n+ # FRAME: nrow = 10, ncol = 1\n# C1\n# STRING\nWEST\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR][DOC] Formatting fix |
49,693 | 14.09.2020 21:49:05 | -7,200 | d181ea511a5e1735c844792c116ee9ecf19b2ff0 | [MINOR] Fix parsing gpu binary aggregate instruction
An additional integer for the thread count was introduced in the string representation of bin_agg instructions, which made the GPU version fail. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/gpu/AggregateBinaryGPUInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/gpu/AggregateBinaryGPUInstruction.java",
"diff": "@@ -57,12 +57,12 @@ public class AggregateBinaryGPUInstruction extends GPUInstruction {\nString opcode = parts[0];\nif ( !opcode.equalsIgnoreCase(\"ba+*\"))\nthrow new DMLRuntimeException(\"AggregateBinaryInstruction.parseInstruction():: Unknown opcode \" + opcode);\n- InstructionUtils.checkNumFields( parts, 5 );\n+ InstructionUtils.checkNumFields( parts, 6 );\nCPOperand in1 = new CPOperand(parts[1]);\nCPOperand in2 = new CPOperand(parts[2]);\nCPOperand out = new CPOperand(parts[3]);\n- boolean isLeftTransposed = Boolean.parseBoolean(parts[4]);\n- boolean isRightTransposed = Boolean.parseBoolean(parts[5]);\n+ boolean isLeftTransposed = Boolean.parseBoolean(parts[5]);\n+ boolean isRightTransposed = Boolean.parseBoolean(parts[6]);\nAggregateBinaryOperator aggbin = InstructionUtils.getMatMultOperator(1);\nreturn new AggregateBinaryGPUInstruction(aggbin, in1, in2, out, opcode, str, isLeftTransposed, isRightTransposed);\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix parsing gpu binary aggregate instruction
An additional integer for the thread count was introduced in the string representation of bin_agg instructions, which made the GPU version fail. |
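A hedged sketch of the field layout behind the fix; DELIM is a placeholder for the actual operand delimiter, and the one-slot index shift is the whole point:

    String[] parts = instrStr.split(DELIM); // DELIM: assumed stand-in for the real delimiter
    String opcode = parts[0];               // "ba+*"
    CPOperand in1 = new CPOperand(parts[1]);
    CPOperand in2 = new CPOperand(parts[2]);
    CPOperand out = new CPOperand(parts[3]);
    // parts[4] now carries the newly introduced thread count, so the
    // transpose flags moved from fields 4/5 to fields 5/6:
    boolean isLeftTransposed  = Boolean.parseBoolean(parts[5]);
    boolean isRightTransposed = Boolean.parseBoolean(parts[6]);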
49,738 | 15.09.2020 21:13:13 | -7,200 | 95128102e5269aaa5754283b9d2760b78a8e746c | Improved slicefinder builtin (hybrid tp, pruning)
New hybrid task-parallel execution w/ exposed block size (2x
performance improvement for many slices)
New pruning before pair enumeration for zero error
Slice extraction via separate permutation matrix multiplies | [
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/slicefinder.dml",
"new_path": "scripts/builtin/slicefinder.dml",
"diff": "# maxL maximum level L (conjunctions of L predicates), 0 unlimited\n# minSup minimum support (min number of rows per slice)\n# alpha weight [0,1]: 0 only size, 1 only error\n-# dpEval flag for data-parallel slice evaluation,\n-# otherwise task-parallel\n+# tpEval flag for task-parallel slice evaluation,\n+# otherwise data-parallel\n+# tpBlksz block size for task-parallel execution (num slices)\n# verbose flag for verbose debug output\n# ------------------------------------------------------------\n# TK top-k slices (k x ncol(X) if successful)\nm_slicefinder = function(Matrix[Double] X, Matrix[Double] e,\nInteger k = 4, Integer maxL = 0, Integer minSup = 32, Double alpha = 0.5,\n- Boolean dpEval = FALSE, Boolean verbose = FALSE)\n+ Boolean tpEval = TRUE, Integer tpBlksz = 16, Boolean verbose = FALSE)\nreturn(Matrix[Double] TK, Matrix[Double] TKC)\n{\nm = nrow(X);\n@@ -80,13 +81,17 @@ m_slicefinder = function(Matrix[Double] X, Matrix[Double] e,\n}\n# extract and evaluate candidate slices\n- if( dpEval ) { #data-parallel\n- R = evalSlice(X2, e, eAvg, t(S), level, alpha);\n- }\n- else { # task-parallel\n+ if( tpEval ) { # task-parallel\n+ # hybrid task-parallel w/ 1 matrix-matrix for blocks of 16 matrix-vector\nR = matrix(0, nrow(S), 4)\n- parfor( i in 1:nrow(S) )\n- R[i,] = evalSlice(X2, e, eAvg, t(S[i,]), level, alpha);\n+ parfor( i in 1:ceil(nrow(S)/tpBlksz), check=0 ) {\n+ beg = (i-1)*tpBlksz + 1;\n+ end = min(i*tpBlksz, nrow(R));\n+ R[beg:end,] = evalSlice(X2, e, eAvg, t(S[beg:end,]), level, alpha);\n+ }\n+ }\n+ else { # data-parallel\n+ R = evalSlice(X2, e, eAvg, t(S), level, alpha);\n}\n# maintain top-k after evaluation\n@@ -199,7 +204,7 @@ getPairedCandidates = function(Matrix[Double] S, Matrix[Double] R,\n{\n# prune invalid slices (possible without affecting overall\n# pruning effectiveness due to handling of missing parents)\n- pI = (R[,4] >= minSup);\n+ pI = (R[,4] >= minSup & R[,2] > 0);\nS = removeEmpty(target=S, margin=\"rows\", select=pI)\nR = removeEmpty(target=R, margin=\"rows\", select=pI)\n@@ -219,7 +224,8 @@ getPairedCandidates = function(Matrix[Double] S, Matrix[Double] R,\nP1 = table(seq(1,nrow(rix)), rix, nrow(rix), nrow(S));\nP2 = table(seq(1,nrow(cix)), cix, nrow(rix), nrow(S));\nP12 = P1 + P2; # combined slice\n- P = (P12 %*% S) != 0;\n+ P = (P1 %*% S + P2 %*% S) != 0;\n+\nse = min(P1 %*% R[,2], P2 %*% R[,2])\nsm = min(P1 %*% R[,3], P2 %*% R[,3])\nss = min(P1 %*% R[,4], P2 %*% R[,4])\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinSliceFinderTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinSliceFinderTest.java",
"diff": "@@ -104,7 +104,7 @@ public class BuiltinSliceFinderTest extends AutomatedTestBase {\n//setOutputBuffering(false);\nfullDMLScriptName = HOME + dml_test_name + \".dml\";\nprogramArgs = new String[]{\"-args\", data,\n- String.valueOf(K),String.valueOf(dp).toUpperCase(),\n+ String.valueOf(K),String.valueOf(!dp).toUpperCase(),\nString.valueOf(VERBOSE).toUpperCase(), output(\"R\")};\nrunTest(true, false, null, -1);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/slicefinder.dml",
"new_path": "src/test/scripts/functions/builtin/slicefinder.dml",
"diff": "@@ -37,6 +37,6 @@ yhat = X %*% B;\ne = (y-yhat)^2;\n# call slice finding\n-[TK,TKC] = slicefinder(X=X, e=e, k=$2, alpha=0.95, minSup=4, dpEval=$3, verbose=$4);\n+[TK,TKC] = slicefinder(X=X, e=e, k=$2, alpha=0.95, minSup=4, tpEval=$3, verbose=$4);\nwrite(TKC, $5)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2641] Improved slicefinder builtin (hybrid tp, pruning)
- New hybrid task-parallel execution w/ exposed block size (2x
performance improvement for many slices)
- New pruning before pair enumeration for zero error
- Slice extraction via separate permutation matrix multiplies |
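The blocking arithmetic behind the hybrid execution, as a small Java sketch with made-up sizes (the DML parfor above computes the same beg/end bounds in 1-based indexing):

    int numSlices = 1000, tpBlksz = 16; // example values; tpBlksz is the exposed knob
    int numTasks = (int) Math.ceil((double) numSlices / tpBlksz);
    for (int i = 1; i <= numTasks; i++) {  // one parallel task per block (parfor in DML)
        int beg = (i - 1) * tpBlksz + 1;   // 1-based block start
        int end = Math.min(i * tpBlksz, numSlices);
        // a single matrix-matrix evalSlice call covers slices [beg, end],
        // instead of one matrix-vector call per slice
    }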
49,738 | 15.09.2020 23:56:05 | -7,200 | fac221d67c4be8f22e4cda022d3e76cba39f37da | Fix correctness ctable operator rexpand-rows
This patch fixes a correctness issue of the special rexpand-rows
operator as applied for the following slice finder expression
map = table(ID, seq(1,nrow(P)), max(ID), nrow(P)).
The cache-conscious implementation had an offset issue in case of
ncol>blocksize (1024). The fix is a one-line modification though. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/matrix/data/LibMatrixReorg.java",
"new_path": "src/main/java/org/apache/sysds/runtime/matrix/data/LibMatrixReorg.java",
"diff": "@@ -1996,8 +1996,9 @@ public class LibMatrixReorg\nthrow new DMLRuntimeException(\"Invalid input value <= 0 for ignore=false: \"+val);\n//set expanded value if matching\n+ //note: tmpi populated with i+j indexes, then sorted\nif( val == Math.floor(val) && val >= 1 && val <= max )\n- ret.appendValue((int)(val-1), i+tmpi[j], 1);\n+ ret.appendValue((int)(val-1), tmpi[j], 1);\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2665] Fix correctness ctable operator rexpand-rows
This patch fixes a correctness issue of the special rexpand-rows
operator as applied for the following slice finder expression
map = table(ID, seq(1,nrow(P)), max(ID), nrow(P)).
The cache-conscious implementation had an offset issue in case of
ncol>blocksize (1024). The fix is a one-line modification though. |
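A tiny sketch of the offset bug with hypothetical values: per the note added in the diff, tmpi is populated with absolute i+j indexes before sorting, so re-adding the block offset i shifted every output position by one block once ncol exceeded 1024:

    int i = 1024;                      // offset of the second 1024-column block
    int[] tmpi = {1024, 1030, 2047};   // already absolute (i+j) positions, sorted
    for (int j = 0; j < tmpi.length; j++) {
        int buggy = i + tmpi[j];       // 2048/2054/3071: one block too far
        int fixed = tmpi[j];           // correct absolute position
        System.out.println(buggy + " -> " + fixed);
    }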
49,738 | 17.09.2020 21:00:26 | -7,200 | 651d59f02a578f521408f0c40d99a1b66e004f56 | Additional debug output for the slice finding algorithm
This patch collects the number of enumerated slices and top-k
characteristics per level into a debug matrix and returns it to the
user. Furthermore, this also includes a minor additional pruning
extension (by min error). | [
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/slicefinder.dml",
"new_path": "scripts/builtin/slicefinder.dml",
"diff": "# ------------------------------------------------------------\n# TK top-k slices (k x ncol(X) if successful)\n# TKC score, size, error of slices (k x 3)\n+# D debug matrix, populated with enumeration stats if verbose\n# ------------------------------------------------------------\nm_slicefinder = function(Matrix[Double] X, Matrix[Double] e,\nInteger k = 4, Integer maxL = 0, Integer minSup = 32, Double alpha = 0.5,\nBoolean tpEval = TRUE, Integer tpBlksz = 16, Boolean verbose = FALSE)\n- return(Matrix[Double] TK, Matrix[Double] TKC)\n+ return(Matrix[Double] TK, Matrix[Double] TKC, Matrix[Double] D)\n{\n+ # init debug matrix: levelID, enumerated S, valid S, TKmax, TKmin\n+ D = matrix(0, 0, 5);\n+\nm = nrow(X);\nn = ncol(X);\n@@ -62,6 +66,7 @@ m_slicefinder = function(Matrix[Double] X, Matrix[Double] e,\nif( verbose ) {\n[maxsc, minsc] = analyzeTopK(TKC);\nprint(\"SliceFinder: initial top-K: count=\"+nrow(TK)+\", max=\"+maxsc+\", min=\"+minsc)\n+ D = rbind(D, t(as.matrix(list(1, n2, nrow(S), maxsc, minsc))));\n}\n# lattice enumeration w/ size/error pruning, one iteration per level\n@@ -99,9 +104,10 @@ m_slicefinder = function(Matrix[Double] X, Matrix[Double] e,\nif(verbose) {\n[maxsc, minsc] = analyzeTopK(TKC);\n- valid = as.integer(sum(R[,4]>=minSup));\n+ valid = as.integer(sum(R[,2]>0 & R[,4]>=minSup));\nprint(\" -- valid slices after eval: \"+valid+\"/\"+nrow(S));\nprint(\" -- top-K: count=\"+nrow(TK)+\", max=\"+maxsc+\", min=\"+minsc);\n+ D = rbind(D, t(as.matrix(list(level, nrow(S), valid, maxsc, minsc))));\n}\n}\n@@ -123,12 +129,12 @@ createAndScoreBasicSlices = function(Matrix[Double] X2, Matrix[Double] e,\nmerr = t(colMaxs(X2 * e)); # maximum error vector\nif( verbose ) {\n- drop = as.integer(sum(cCnts < minSup));\n+ drop = as.integer(sum(cCnts < minSup | err == 0));\nprint(\"SliceFinder: dropping \"+drop+\"/\"+n2+\" features below minSup = \"+minSup+\".\");\n}\n# working set of active slices (#attr x #slices) and top k\n- selCols = (cCnts >= minSup);\n+ selCols = (cCnts >= minSup & err > 0);\nattr = removeEmpty(target=seq(1,n2), margin=\"rows\", select=selCols);\nss = removeEmpty(target=cCnts, margin=\"rows\", select=selCols);\nse = removeEmpty(target=err, margin=\"rows\", select=selCols);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/MultiReturnParameterizedBuiltinFEDInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/MultiReturnParameterizedBuiltinFEDInstruction.java",
"diff": "@@ -41,7 +41,6 @@ import org.apache.sysds.runtime.instructions.cp.Data;\nimport org.apache.sysds.runtime.matrix.data.FrameBlock;\nimport org.apache.sysds.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysds.runtime.matrix.operators.Operator;\n-import org.apache.sysds.runtime.privacy.PrivacyMonitor;\nimport org.apache.sysds.runtime.transform.encode.Encoder;\nimport org.apache.sysds.runtime.transform.encode.EncoderBin;\nimport org.apache.sysds.runtime.transform.encode.EncoderComposite;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/ParameterizedBuiltinFEDInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/ParameterizedBuiltinFEDInstruction.java",
"diff": "@@ -51,7 +51,6 @@ import org.apache.sysds.runtime.matrix.operators.Operator;\nimport org.apache.sysds.runtime.matrix.operators.SimpleOperator;\nimport org.apache.sysds.runtime.meta.MatrixCharacteristics;\nimport org.apache.sysds.runtime.meta.MetaDataFormat;\n-import org.apache.sysds.runtime.privacy.PrivacyMonitor;\nimport org.apache.sysds.runtime.transform.decode.Decoder;\nimport org.apache.sysds.runtime.transform.decode.DecoderFactory;\nimport org.apache.sysds.runtime.transform.encode.Encoder;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2461] Additional debug output slice finding algorithm
This patch collects the number of enumerated slices and top-k
characteristics per level into a debug matrix and returns it to the
user. Furthermore, this also includes a minor additional pruning
extension (by min error). |
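A sketch of the extended basic-slice pruning condition (selCols = cCnts >= minSup & err > 0 in the script), with example values; zero-error slices can never improve the top-k, so they are dropped early:

    int minSup = 32;
    int[] cCnts  = {40, 10, 55, 64};     // example per-feature support counts
    double[] err = {3.5, 9.0, 0.0, 1.2}; // example per-feature total errors
    for (int j = 0; j < cCnts.length; j++) {
        boolean keep = cCnts[j] >= minSup && err[j] > 0; // support AND min error
        System.out.println("feature " + j + (keep ? ": kept" : ": pruned"));
    }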
49,704 | 18.09.2020 21:10:33 | -7,200 | 98b21a4923793e7458dfe13c2bc0a10d15f9fe72 | Fix relinking in mmchain optimization rewrite
This patch fixes the relinking logic in the matrix multiplication chain
optimization rewrite in order to support ragged input chains such as
((((a, b), c), (D, E), f), e).
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/hops/rewrite/RewriteMatrixMultChainOptimization.java",
"new_path": "src/main/java/org/apache/sysds/hops/rewrite/RewriteMatrixMultChainOptimization.java",
"diff": "@@ -22,6 +22,7 @@ package org.apache.sysds.hops.rewrite;\nimport java.util.ArrayList;\nimport java.util.Arrays;\n+import org.apache.commons.lang3.mutable.MutableInt;\nimport org.apache.sysds.hops.AggBinaryOp;\nimport org.apache.sysds.hops.Hop;\nimport org.apache.sysds.hops.HopsException;\n@@ -197,7 +198,7 @@ public class RewriteMatrixMultChainOptimization extends HopRewriteRule\n// Step 5: Relink the hops using the optimal ordering (split[][]) found from DP.\nLOG.trace(\"Optimal MM Chain: \");\n- mmChainRelinkHops(mmOperators.get(0), 0, size - 1, mmChain, mmOperators, 1, split, 1);\n+ mmChainRelinkHops(mmOperators.get(0), 0, size - 1, mmChain, mmOperators, new MutableInt(1), split, 1);\n}\n}\n@@ -263,9 +264,13 @@ public class RewriteMatrixMultChainOptimization extends HopRewriteRule\n* @param split optimal order\n* @param level log level\n*/\n- protected final void mmChainRelinkHops(Hop h, int i, int j, ArrayList<Hop> mmChain, ArrayList<Hop> mmOperators,\n- int opIndex, int[][] split, int level)\n+ protected final void mmChainRelinkHops(Hop h, int i, int j, ArrayList<Hop> mmChain,\n+ ArrayList<Hop> mmOperators, MutableInt opIndex, int[][] split, int level)\n{\n+ //NOTE: the opIndex is a MutableInt in order to get the correct positions\n+ //in ragged chains like ((((a, b), c), (D, E), f), e) that might be given\n+ //like that by the original scripts variable assignments\n+\n//single matrix - end of recursion\nif( i == j ) {\nlogTraceHop(h, level);\n@@ -283,9 +288,9 @@ public class RewriteMatrixMultChainOptimization extends HopRewriteRule\nmmChain.get(i).getParent().add(h);\n}\nelse {\n- h.getInput().add(mmOperators.get(opIndex));\n- mmOperators.get(opIndex).getParent().add(h);\n- opIndex = opIndex + 1;\n+ int ix = opIndex.getAndIncrement();\n+ h.getInput().add(mmOperators.get(ix));\n+ mmOperators.get(ix).getParent().add(h);\n}\n// Set Input2 for current Hop h\n@@ -294,9 +299,9 @@ public class RewriteMatrixMultChainOptimization extends HopRewriteRule\nmmChain.get(j).getParent().add(h);\n}\nelse {\n- h.getInput().add(mmOperators.get(opIndex));\n- mmOperators.get(opIndex).getParent().add(h);\n- opIndex = opIndex + 1;\n+ int ix = opIndex.getAndIncrement();\n+ h.getInput().add(mmOperators.get(ix));\n+ mmOperators.get(ix).getParent().add(h);\n}\n// Find children for both the inputs\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/hops/rewrite/RewriteMatrixMultChainOptimizationSparse.java",
"new_path": "src/main/java/org/apache/sysds/hops/rewrite/RewriteMatrixMultChainOptimizationSparse.java",
"diff": "@@ -22,6 +22,7 @@ package org.apache.sysds.hops.rewrite;\nimport java.util.ArrayList;\nimport java.util.Arrays;\n+import org.apache.commons.lang3.mutable.MutableInt;\nimport org.apache.sysds.common.Types.OpOpData;\nimport org.apache.sysds.hops.Hop;\nimport org.apache.sysds.hops.HopsException;\n@@ -66,7 +67,7 @@ public class RewriteMatrixMultChainOptimizationSparse extends RewriteMatrixMultC\n// Step 5: Relink the hops using the optimal ordering (split[][]) found from DP.\nLOG.trace(\"Optimal MM Chain: \");\n- mmChainRelinkHops(mmOperators.get(0), 0, size - 1, mmChain, mmOperators, 1, split, 1);\n+ mmChainRelinkHops(mmOperators.get(0), 0, size - 1, mmChain, mmOperators, new MutableInt(1), split, 1);\n}\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/rewrite/RewriteMatrixMultChainOptTest2.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.rewrite;\n+\n+import java.util.HashMap;\n+\n+import org.junit.Test;\n+import org.apache.sysds.common.Types.ExecMode;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue.CellIndex;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+\n+public class RewriteMatrixMultChainOptTest2 extends AutomatedTestBase\n+{\n+ private static final String TEST_NAME1 = \"RewriteMMChainTest1\";\n+ private static final String TEST_DIR = \"functions/rewrite/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + RewriteMatrixMultChainOptTest2.class.getSimpleName() + \"/\";\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] {\"R\"}));\n+ }\n+\n+ @Test\n+ public void testMMChain1Singlenode() {\n+ testMMChain(TEST_NAME1, ExecMode.SINGLE_NODE);\n+ }\n+\n+ @Test\n+ public void testMMChain1Hybrid() {\n+ testMMChain(TEST_NAME1, ExecMode.HYBRID);\n+ }\n+\n+ @Test\n+ public void testMMChain1Spark() {\n+ testMMChain(TEST_NAME1, ExecMode.HYBRID);\n+ }\n+\n+ private void testMMChain(String testname, ExecMode et)\n+ {\n+ ExecMode etOld = setExecMode(et);\n+\n+ try\n+ {\n+ TestConfiguration config = getTestConfiguration(testname);\n+ loadTestConfiguration(config);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + testname + \".dml\";\n+ programArgs = new String[]{ \"-args\", output(\"R\") };\n+ fullRScriptName = HOME + testname + \".R\";\n+ rCmd = getRCmd(expectedDir());\n+\n+ //execute tests\n+ runTest(true, false, null, -1);\n+ runRScript(true);\n+\n+ //compare matrices\n+ HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(\"R\");\n+ HashMap<CellIndex, Double> rfile = readRMatrixFromFS(\"R\");\n+ TestUtils.compareMatrices(dmlfile, rfile, 1e-8, \"Stat-DML\", \"Stat-R\");\n+ }\n+ finally {\n+ resetExecMode(etOld);\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/rewrite/RewriteMMChainTest1.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+args <- commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+h = matrix(1.0, 3, 3)\n+a = matrix(2.0, 3, 3)\n+s = matrix(3.0, 3, 1)\n+s2 = s %*% t(s)\n+h2 = h %*% h %*% s2 %*% a %*% a\n+\n+writeMM(as(h2, \"CsparseMatrix\"), paste(args[1], \"R\", sep=\"\"));\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/rewrite/RewriteMMChainTest1.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+h = matrix(1.0, 3, 3)\n+a = matrix(2.0, 3, 3)\n+s = matrix(3.0, 3, 1)\n+s2 = s %*% t(s)\n+h2 = h %*% h %*% s2 %*% a %*% a\n+\n+write(h2, $1);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2666] Fix relinking in mmchain optimization rewrite
This patch fixes the relinking logic in the matrix multiplication chain
optimization rewrite in order to support ragged input chains such as
((((a, b), c), (D, E), f), e).
Closes #1058. |
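Why a MutableInt is needed, as a reduced Java skeleton: with a pass-by-value int, operator positions consumed inside the left subtree's recursion are invisible when the right subtree is relinked, so ragged chains like ((((a, b), c), (D, E), f), e) reuse operator positions.

    import org.apache.commons.lang3.mutable.MutableInt;

    // Reduced relink skeleton: each internal node of the optimal split tree
    // consumes the next operator exactly once, shared across all recursion.
    static void relink(int i, int j, int[][] split, MutableInt opIndex) {
        if (i == j) return;                  // leaf: a single matrix
        int ix = opIndex.getAndIncrement();  // globally consistent position
        relink(i, split[i][j], split, opIndex);     // left part of the chain
        relink(split[i][j] + 1, j, split, opIndex); // right part of the chain
    }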
49,689 | 19.09.2020 18:13:45 | -7,200 | 97a0df3c7837bc35206baf5b0590c99361460ec5 | Automate upload of distribution archives to PyPI
This patch adds a script to generate and upload distribution archives
to the PyPI repo. A successful test run was done by uploading to the TestPyPI repo. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "dev/release/pypi_upload.sh",
"diff": "+#!/usr/bin/env bash\n+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+#BASE_DIR=$(pwd)\n+#BASE_DIR=\"/c/virtual\\ D/SystemDS/systemds\"\n+BASE_DIR=\"../..\" #points to systemds directory\n+RELEASE_WORK_DIR=$BASE_DIR/target/release2\n+RELEASE_VERSION=2.0.0\n+eval cd $RELEASE_WORK_DIR/systemds/src/main/python\n+\n+# Steps:\n+# 1. update systemds/project_info.py with the new version\n+sed -i \"s/$RELEASE_VERSION-SNAPSHOT/$RELEASE_VERSION/\" systemds/project_info.py\n+\n+# 2. generate distribution archives\n+python3 create_python_dist.py\n+\n+# 3. upload the distribution archives to testpypi/pypi\n+# - For testing follow https://packaging.python.org/tutorials/packaging-projects/\n+# - Note: for testing use command prompt in windows and use Edit->paste to paste\n+# the API token (known issues)\n+\n+#python -m twine upload --repository testpypi dist/* #Test\n+#python twine upload dist/* #Real\n+\n+exit\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/project_info.py",
"new_path": "src/main/python/systemds/project_info.py",
"diff": "# via string substitutions using the maven-resources-plugin\n__project_group_id__ = 'org.apache.systemds'\n__project_artifact_id__ = 'systemds'\n-__project_version__ = '0.2.0-SNAPSHOT'\n+__project_version__ = '2.0.0-SNAPSHOT'\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2667] Automate upload of distribution archives to PyPI
This patch adds a script to generate and upload distribution archives
to the PyPI repo. A successful test run was done by uploading to the TestPyPI repo. |
49,738 | 19.09.2020 21:46:51 | -7,200 | 132f1e72896cadb25660bf5321935915ca66dcd9 | Additional R baseline for slice finding tests | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinSliceFinderTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinSliceFinderTest.java",
"diff": "@@ -22,13 +22,18 @@ package org.apache.sysds.test.functions.builtin;\nimport org.junit.Assert;\nimport org.junit.Test;\n+\n+import java.util.HashMap;\n+\nimport org.apache.sysds.common.Types.ExecMode;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue.CellIndex;\nimport org.apache.sysds.test.AutomatedTestBase;\nimport org.apache.sysds.test.TestConfiguration;\nimport org.apache.sysds.test.TestUtils;\n-public class BuiltinSliceFinderTest extends AutomatedTestBase {\n-\n+public class BuiltinSliceFinderTest extends AutomatedTestBase\n+{\n+ private static final String PREP_NAME = \"slicefinderPrep\";\nprivate static final String TEST_NAME = \"slicefinder\";\nprivate static final String TEST_DIR = \"functions/builtin/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + BuiltinSliceFinderTest.class.getSimpleName() + \"/\";\n@@ -94,21 +99,43 @@ public class BuiltinSliceFinderTest extends AutomatedTestBase {\nprivate void runSliceFinderTest(int K, boolean dp, ExecMode mode) {\nExecMode platformOld = setExecMode(ExecMode.HYBRID);\n- String dml_test_name = TEST_NAME;\nloadTestConfiguration(getTestConfiguration(TEST_NAME));\nString HOME = SCRIPT_DIR + TEST_DIR;\nString data = HOME + \"/data/Salaries.csv\";\ntry {\nloadTestConfiguration(getTestConfiguration(TEST_NAME));\n- //\n- fullDMLScriptName = HOME + dml_test_name + \".dml\";\n- programArgs = new String[]{\"-args\", data,\n+\n+ //run data preparation\n+ fullDMLScriptName = HOME + PREP_NAME + \".dml\";\n+ programArgs = new String[]{\"-args\", data, output(\"X\"), output(\"e\")};\n+ runTest(true, false, null, -1);\n+\n+ //read output and store for dml and R\n+ double[][] X = TestUtils.convertHashMapToDoubleArray(readDMLMatrixFromHDFS(\"X\"));\n+ double[][] e = TestUtils.convertHashMapToDoubleArray(readDMLMatrixFromHDFS(\"e\"));\n+ writeInputMatrixWithMTD(\"X\", X, true);\n+ writeInputMatrixWithMTD(\"e\", e, true);\n+\n+ //execute main test\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[]{\"-args\", input(\"X\"), input(\"e\"),\nString.valueOf(K),String.valueOf(!dp).toUpperCase(),\nString.valueOf(VERBOSE).toUpperCase(), output(\"R\")};\n+ fullRScriptName = HOME + TEST_NAME + \".R\";\n+ rCmd = \"Rscript\" + \" \" + fullRScriptName + \" \" + inputDir() + \" \" + String.valueOf(K)\n+ + \" \" + String.valueOf(!dp).toUpperCase() + \" \" + expectedDir();\n+\nrunTest(true, false, null, -1);\n+ runRScript(true);\n+\n+ //compare dml and R\n+ HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(\"R\");\n+ HashMap<CellIndex, Double> rfile = readRMatrixFromFS(\"R\");\n+ TestUtils.compareMatrices(dmlfile, rfile, 1e-2, \"Stat-DML\", \"Stat-R\");\n- double[][] ret = TestUtils.convertHashMapToDoubleArray(readDMLMatrixFromHDFS(\"R\"));\n+ //compare expected results\n+ double[][] ret = TestUtils.convertHashMapToDoubleArray(dmlfile);\nfor(int i=0; i<K; i++)\nTestUtils.compareMatrices(EXPECTED_TOPK[i], ret[i], 1e-2);\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/slicefinder.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+library(\"matrixStats\")\n+library(\"doMC\")\n+\n+registerDoMC(NULL) # physical cores\n+\n+################################################################################\n+\n+slicefinder = function(X, e,\n+ k = 4, maxL = 0, minSup = 32, alpha = 0.5,\n+ tpEval = TRUE, tpBlksz = 16, verbose = FALSE)\n+{\n+ # init debug matrix: levelID, enumerated S, valid S, TKmax, TKmin\n+ D = matrix(0, 0, 5);\n+ m = nrow(X);\n+ n = ncol(X);\n+\n+ # prepare offset vectors and one-hot encoded X\n+ fdom = colMaxs(X);\n+ foffb = t(cumsum(t(fdom))) - fdom;\n+ foffe = t(cumsum(t(fdom)))\n+ rix = matrix(seq(1,m)%*%matrix(1,1,n), m*n, 1)\n+ cix = matrix(X + (matrix(1,nrow(X),1) %*% foffb), m*n, 1);\n+ X2 = table(rix, cix); #one-hot encoded\n+\n+ # initialize statistics and basic slices\n+ n2 = ncol(X2); # one-hot encoded features\n+ eAvg = sum(e) / m; # average error\n+ TMP1 = createAndScoreBasicSlices(X2, e, eAvg, minSup, alpha, verbose);\n+ S = TMP1[[\"S\"]];\n+ R = TMP1[[\"R\"]];\n+\n+ # initialize top-k\n+ TMP2 = maintainTopK(S, R, matrix(0,0,n2), matrix(0,0,4), k, minSup);\n+ TK = TMP2[[\"TK\"]]; TKC = TMP2[[\"TKC\"]];\n+\n+ if( verbose ) {\n+ TMP3 = analyzeTopK(TKC);\n+ maxsc = TMP3[[\"maxsc\"]]; minsc = TMP3[[\"minsc\"]];\n+ print(paste(\"SliceFinder: initial top-K: count=\",nrow(TK),\", max=\",maxsc,\", min=\",minsc,sep=\"\"))\n+ D = rbind(D, t(as.matrix(list(1, n2, nrow(S), maxsc, minsc))));\n+ }\n+\n+ # lattice enumeration w/ size/error pruning, one iteration per level\n+ # termination condition (max #feature levels)\n+ maxL = ifelse(maxL<=0, n, maxL)\n+ level = 1;\n+ while( nrow(S) > 0 & sum(S) > 0 & level < n & level < maxL ) {\n+ level = level + 1;\n+\n+ # enumerate candidate join pairs, incl size/error pruning\n+ nrS = nrow(S);\n+ S = getPairedCandidates(S, R, TK, TKC, k, level, eAvg, minSup, alpha, n2, foffb, foffe);\n+\n+ if(verbose) {\n+ print(paste(\"SliceFinder: level \",level,\":\",sep=\"\"))\n+ print(paste(\" -- generated paired slice candidates: \",nrS,\" -> \",nrow(S),sep=\"\"));\n+ }\n+\n+ # extract and evaluate candidate slices\n+ if( tpEval ) { # task-parallel\n+ R = foreach( i=1:nrow(S), .combine=rbind) %dopar% {\n+ return (evalSlice(X2, e, eAvg, as.matrix(S[i,]), level, alpha))\n+ }\n+ }\n+ else { # data-parallel\n+ R = evalSlice(X2, e, eAvg, t(S), level, alpha);\n+ }\n+\n+ # maintain top-k after evaluation\n+ TMP2 = maintainTopK(S, R, TK, TKC, k, minSup);\n+ TK = TMP2[[\"TK\"]]; TKC = TMP2[[\"TKC\"]];\n+\n+ if(verbose) {\n+ TMP3 = analyzeTopK(TKC);\n+ maxsc = 
TMP3[[\"maxsc\"]]; minsc = TMP3[[\"minsc\"]];\n+ valid = as.integer(sum(R[,2]>0 & R[,4]>=minSup));\n+ print(paste(\" -- valid slices after eval: \",valid,\"/\",nrow(S),sep=\"\"));\n+ print(paste(\" -- top-K: count=\",nrow(TK),\", max=\",maxsc,\", min=\",minsc,sep=\"\"));\n+ D = rbind(D, t(as.matrix(list(level, nrow(S), valid, maxsc, minsc))));\n+ }\n+ }\n+\n+ TK = decodeTopK(TK, foffb, foffe);\n+\n+ if( verbose ) {\n+ print(paste(\"SliceFinder: terminated at level \",level,\":\"));\n+ print(TK); print(TKC);\n+ }\n+\n+ return (list(\"TK\"=TK, \"TKC\"=TKC, \"D\"=D))\n+}\n+\n+rexpand = function(v, n2=max(v)) {\n+ R = matrix(0, nrow(v), n2)\n+ for( i in 1:nrow(v) ) {\n+ R[i,] = tabulate(v[i,], nbins=n2);\n+ }\n+ return (R)\n+}\n+\n+createAndScoreBasicSlices = function(X2, e, eAvg, minSup, alpha, verbose) {\n+ n2 = ncol(X2);\n+ cCnts = colSums(X2); # column counts\n+ err = t(t(e) %*% X2); # total error vector\n+ merr = t(colMaxs(X2 * (e %*% matrix(1,1,ncol(X2))))); # maximum error vector\n+\n+ if( verbose ) {\n+ drop = as.integer(sum(cCnts < minSup | err == 0));\n+ print(paste(\"SliceFinder: dropping \",drop,\"/\",n2,\" features below minSup = \",minSup,\".\", sep=\"\"));\n+ }\n+\n+ # working set of active slices (#attr x #slices) and top k\n+ selCols = (cCnts >= minSup & err > 0);\n+ attr = as.matrix(seq(1,n2)[selCols])\n+ ss = as.matrix(cCnts[selCols])\n+ se = as.matrix(err[selCols])\n+ sm = as.matrix(merr[selCols])\n+ S = rexpand(attr, n2);\n+\n+ # score 1-slices and create initial top-k\n+ sc = score(ss, se, eAvg, alpha, nrow(X2));\n+ R = as.matrix(cbind(sc, se, sm, ss));\n+\n+ return (list(\"S\"=S,\"R\"=R))\n+}\n+\n+score = function(ss, se, eAvg, alpha, n) {\n+ sc = alpha * ((se/ss) / eAvg - 1) - (1-alpha) * (n/ss - 1);\n+ sc = replace(sc, NaN, -Inf);\n+ return (sc)\n+}\n+\n+scoreUB = function(ss, se, sm, eAvg, minSup, alpha, n) {\n+ # Since sc is either monotonically increasing or decreasing, we\n+ # probe interesting points of sc in the interval [minSup, ss],\n+ # and compute the maximum to serve as the upper bound\n+ s = cbind(matrix(minSup,nrow(ss),1), pmax(se/sm,minSup), ss)\n+ ex = matrix(1,1,3)\n+ sc = rowMaxs(alpha * ((pmin(s*(sm%*%ex),se%*%ex)/s) / eAvg - 1) - (1-alpha) * (1/s*n - 1));\n+ sc = replace(sc, NaN, -Inf);\n+ return (sc)\n+}\n+\n+\n+maintainTopK = function(S, R, TK, TKC, k, minSup) {\n+ # prune invalid minSup and scores\n+ I = as.matrix((R[,1] > 0) & (R[,4] >= minSup));\n+\n+ if( sum(I)!=0 ) {\n+ S = as.matrix(S[I,])\n+ R = as.matrix(R[I,])\n+ if( ncol(S) != ncol(TK) & ncol(S)==1 ) {\n+ S = t(S); R = t(R);\n+ }\n+\n+ # evaluated candidated and previous top-k\n+ slices = as.matrix(rbind(TK, S));\n+ scores = as.matrix(rbind(TKC, R));\n+\n+ # extract top-k\n+ IX = as.matrix(order(scores[,1], decreasing=TRUE));\n+ IX = as.matrix(IX[1:min(k,nrow(IX)),]);\n+ TK = as.matrix(slices[IX,]);\n+ TKC = as.matrix(scores[IX,]);\n+ }\n+ return (list(\"TK\"=TK, \"TKC\"=TKC))\n+}\n+\n+analyzeTopK = function(TKC) {\n+ maxsc = -Inf;\n+ minsc = -Inf;\n+ if( nrow(TKC)>0 ) {\n+ maxsc = TKC[1,1];\n+ minsc = TKC[nrow(TKC),1];\n+ }\n+ return (list(\"maxsc\"=maxsc, \"minsc\"=minsc))\n+}\n+\n+getPairedCandidates = function(S, R, TK, TKC, k,\n+ level, eAvg, minSup, alpha, n2, foffb, foffe)\n+{\n+ # prune invalid slices (possible without affecting overall\n+ # pruning effectiveness due to handling of missing parents)\n+ pI = (R[,4] >= minSup & R[,2] > 0);\n+ S = S[pI,]; R = R[pI,];\n+\n+ # join compatible slices (without self)\n+ join = S %*% t(S) == (level-2)\n+ I = upper.tri(join, diag=FALSE) * 
join;\n+\n+ # pair construction\n+ nr = nrow(I); nc = ncol(I);\n+ rix = matrix(I * (seq(1,nr)%*%matrix(1,1,ncol(I))), nr*nc, 1);\n+ cix = matrix(I * (matrix(1,nrow(I),1) %*% t(seq(1,nc))), nr*nc, 1);\n+ rix = as.matrix(rix[rix!=0,])\n+ cix = as.matrix(cix[cix!=0,])\n+\n+ P = matrix(0, 0, ncol(S))\n+ if( sum(rix)!=0 ) {\n+ P1 = rexpand(rix, nrow(S));\n+ P2 = rexpand(cix, nrow(S));\n+ P12 = P1 + P2; # combined slice\n+ P = ((P1 %*% S + P2 %*% S) != 0) * 1;\n+ se = pmin(P1 %*% R[,2], P2 %*% R[,2])\n+ sm = pmin(P1 %*% R[,3], P2 %*% R[,3])\n+ ss = pmin(P1 %*% R[,4], P2 %*% R[,4])\n+\n+ # prune invalid self joins (>1 bit per feature)\n+ I = matrix(1, nrow(P), 1);\n+ for( j in 1:ncol(foffb) ) {\n+ beg = foffb[1,j]+1;\n+ end = foffe[1,j];\n+ I = I & (rowSums(P[,beg:end]) <= 1);\n+ }\n+ P12 = P12[I,]\n+ P = P[I,]\n+ ss = as.matrix(ss[I])\n+ se = as.matrix(se[I])\n+ sm = as.matrix(sm[I])\n+\n+ # prepare IDs for deduplication and pruning\n+ ID = matrix(0, nrow(P), 1);\n+ dom = foffe-foffb+1;\n+ for( j in 1:ncol(dom) ) {\n+ beg = foffb[1,j]+1;\n+ end = foffe[1,j];\n+ I = max.col(P[,beg:end],ties.method=\"last\") * rowSums(P[,beg:end]);\n+ prod = 1;\n+ if(j<ncol(dom))\n+ prod = prod(dom[1,(j+1):ncol(dom)])\n+ ID = ID + I * prod;\n+ }\n+\n+ # size pruning, with rowMin-rowMax transform\n+ # to avoid densification (ignored zeros)\n+ map = table(ID, seq(1,nrow(P)))\n+ ex = matrix(1,nrow(map),1)\n+ ubSizes = 1/rowMaxs(map * (1/ex%*%t(ss)));\n+ ubSizes = as.matrix(replace(ubSizes, Inf, 0));\n+ fSizes = (ubSizes >= minSup)\n+\n+ # error pruning\n+ ubError = 1/rowMaxs(map * (1/ex%*%t(se)));\n+ ubError = as.matrix(replace(ubError, Inf, 0));\n+ ubMError = 1/rowMaxs(map * (1/ex%*%t(sm)));\n+ ubMError = as.matrix(replace(ubMError, Inf, 0));\n+ ubScores = scoreUB(ubSizes, ubError, ubMError, eAvg, minSup, alpha, n2);\n+ TMP3 = analyzeTopK(TKC);\n+ minsc = TMP3[[\"minsc\"]]\n+ fScores = (ubScores > minsc & ubScores > 0)\n+\n+ # missing parents pruning\n+ numParents = rowSums((map %*% P12) != 0)\n+ fParents = (numParents == level);\n+\n+ # apply all pruning\n+ map = map * ((fSizes & fScores & fParents) %*% matrix(1,1,ncol(map)));\n+\n+ # deduplication of join outputs\n+ Dedup = as.matrix(map[rowMaxs(map)!=0,] != 0)\n+ P = (Dedup %*% P) != 0\n+ }\n+ return (P)\n+}\n+\n+evalSlice = function(X, e, eAvg, tS, l, alpha) {\n+ I = (X %*% tS) == l; # slice indicator\n+ ss = as.matrix(colSums(I)); # absolute slice size (nnz)\n+ se = as.matrix(t(t(e) %*% I)); # absolute slice error\n+ sm = as.matrix(colMaxs(I * e%*%matrix(1,1,ncol(I)))); # maximum tuple error in slice\n+\n+ # score of relative error and relative size\n+ sc = score(ss, se, eAvg, alpha, nrow(X));\n+ R = as.matrix(cbind(sc, se, sm, ss));\n+ return (R)\n+}\n+\n+decodeTopK = function(TK, foffb, foffe) {\n+ R = matrix(1, nrow(TK), ncol(foffb));\n+ if( nrow(TK) > 0 ) {\n+ for( j in 1:ncol(foffb) ) {\n+ beg = foffb[1,j]+1;\n+ end = foffe[1,j];\n+ I = rowSums(TK[,beg:end]) * max.col(TK[,beg:end],ties.method=\"last\");\n+ R[, j] = I;\n+ }\n+ }\n+ return (R)\n+}\n+\n+################################################################################\n+X = as.matrix(readMM(paste(args[1], \"X.mtx\", sep=\"\")))\n+e = as.matrix(readMM(paste(args[1], \"e.mtx\", sep=\"\")))\n+k = as.integer(args[2]);\n+tpEval = as.logical(args[3])\n+\n+TMP = slicefinder(X=X, e=e, k=k, alpha=0.95, minSup=4, tpEval=tpEval, verbose=TRUE);\n+R = TMP[[\"TKC\"]]\n+\n+writeMM(as(R, \"CsparseMatrix\"), paste(args[4], \"R\", sep=\"\"));\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/slicefinder.dml",
"new_path": "src/test/scripts/functions/builtin/slicefinder.dml",
"diff": "#\n#-------------------------------------------------------------\n-FXY = read($1, data_type=\"frame\", format=\"csv\", header=TRUE);\n-\n-F = FXY[,1:ncol(FXY)-1];\n-y = as.matrix(FXY[,ncol(FXY)]);\n-\n-# data preparation\n-jspec= \"{ ids:true, recode:[1,2,3,6],bin:[\"\n- +\"{id:4, method:equi-width, numbins:14},\"\n- +\"{id:5, method:equi-width, numbins:12}]}\"\n-[X,M] = transformencode(target=F, spec=jspec);\n-X = X[,2:ncol(X)]\n-\n-# learn model\n-B = lm(X=X, y=y, verbose=FALSE);\n-yhat = X %*% B;\n-e = (y-yhat)^2;\n+X = read($1);\n+e = read($2);\n# call slice finding\n-[TK,TKC] = slicefinder(X=X, e=e, k=$2, alpha=0.95, minSup=4, tpEval=$3, verbose=$4);\n+[TS,TR] = slicefinder(X=X, e=e, k=$3, alpha=0.95, minSup=4, tpEval=$4, verbose=$5);\n-write(TKC, $5)\n+write(TR, $6)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/slicefinderPrep.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+FXY = read($1, data_type=\"frame\", format=\"csv\", header=TRUE);\n+\n+F = FXY[,1:ncol(FXY)-1];\n+y = as.matrix(FXY[,ncol(FXY)]);\n+\n+# data preparation\n+jspec= \"{ ids:true, recode:[1,2,3,6],bin:[\"\n+ +\"{id:4, method:equi-width, numbins:14},\"\n+ +\"{id:5, method:equi-width, numbins:12}]}\"\n+[X,M] = transformencode(target=F, spec=jspec);\n+X = X[,2:ncol(X)]\n+\n+# learn model\n+B = lm(X=X, y=y, verbose=FALSE);\n+yhat = X %*% B;\n+e = (y-yhat)^2;\n+\n+write(X, $2)\n+write(e, $3)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2641] Additional R baseline for slice finding tests |
49,706 | 21.09.2020 11:17:30 | -7,200 | e2c59779dacc6d28043a5de4a38bc620c450fb9e | [MINOR] Separate compile and test steps in function tests | [
{
"change_type": "MODIFY",
"old_path": "docker/entrypoint.sh",
"new_path": "docker/entrypoint.sh",
"diff": "cd /github/workspace\nlog=\"/tmp/sysdstest.log\"\n-\n+mvn -ntp compile test-compile\nmvn -ntp test -D maven.test.skip=false -D automatedtestbase.outputbuffering=true -D test=$1 | grep -v \"already exists in destination.\" | tee $log\ngrep_args=\"SUCCESS\"\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Separate compile and test steps in function tests |
49,700 | 21.09.2020 10:15:24 | -7,200 | 4a57b232bb57d18d429422d0f08e79ef63ed6be5 | [MINOR] Change Process to Thread FederatedWorkerHandlerTest
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/federated/primitives/FederatedRCBindTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/federated/primitives/FederatedRCBindTest.java",
"diff": "@@ -28,7 +28,6 @@ import org.apache.sysds.runtime.meta.MatrixCharacteristics;\nimport org.apache.sysds.test.AutomatedTestBase;\nimport org.apache.sysds.test.TestConfiguration;\nimport org.apache.sysds.test.TestUtils;\n-\nimport java.util.Arrays;\nimport java.util.Collection;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/privacy/FederatedWorkerHandlerTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/privacy/FederatedWorkerHandlerTest.java",
"diff": "@@ -233,7 +233,7 @@ public class FederatedWorkerHandlerTest extends AutomatedTestBase {\nwriteInputMatrixWithMTD(\"A\", A, false, new MatrixCharacteristics(rows, cols, blocksize, rows * cols), new PrivacyConstraint(privacyLevel));\nint port = getRandomAvailablePort();\n- Process t = startLocalFedWorker(port);\n+ Thread t = startLocalFedWorkerThread(port);\n// we need the reference file to not be written to hdfs, so we get the correct format\nrtplatform = Types.ExecMode.SINGLE_NODE;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Change Process to Thread FederatedWorkerHandlerTest
Closes #1059 |
49,706 | 21.09.2020 12:56:31 | -7,200 | 0484ee7cc38cb350d4087d518ec23c360ec6e6da | [MINOR] Docker run fix and compile-output reduction in tests | [
{
"change_type": "MODIFY",
"old_path": "docker/entrypoint.sh",
"new_path": "docker/entrypoint.sh",
"diff": "cd /github/workspace\nlog=\"/tmp/sysdstest.log\"\n-mvn -ntp compile test-compile\n-mvn -ntp test -D maven.test.skip=false -D automatedtestbase.outputbuffering=true -D test=$1 | grep -v \"already exists in destination.\" | tee $log\n+mvn -ntp test-compile 2>&1 | grep -E \"BUILD|Total time:|---|Building SystemDS\"\n+mvn -ntp test -D maven.test.skip=false -D automatedtestbase.outputbuffering=true -D test=$1 2>&1 | grep -v \"already exists in destination.\" | tee $log\ngrep_args=\"SUCCESS\"\ngrepvals=\"$( tail -n 100 $log | grep $grep_args)\"\n"
},
{
"change_type": "MODIFY",
"old_path": "docker/runDocker.sh",
"new_path": "docker/runDocker.sh",
"diff": "# Execute the docker container\ndocker run \\\n-v $(pwd)/docker/mountFolder:/input \\\n- --rm sebaba/sysds:0.2\n+ --rm systemds/sysds:latest\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Docker run fix and compile-output reduction in tests |
49,689 | 21.09.2020 19:58:07 | -7,200 | caa28818b425b2489d1d4e1abdfdeb05eceb89fd | Minor fixes in the release scripts | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "dev/release/deploy.sh",
"diff": "+#!/usr/bin/env bash\n+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+MVN=mvn\n+PUBLISH_PROFILES=\"-Pdistribution,rat\"\n+DRY_RUN=-DdryRun=true\n+GPG_KEYID=$1\n+GPG_PASSPHRASE=$2\n+DEVELOPMENT_VERSION=2.1.0-SNAPSHOT\n+RELEASE_STAGING_LOCATION=\"/c/virtual\\ D/SystemDS/systemds/temp\"\n+BASE_DIR=\"/c/virtual\\ D/SystemDS/systemds\"\n+#BASE_DIR=\"../..\" #points to systemds directory\n+RELEASE_WORK_DIR=$BASE_DIR/target/release2\n+RELEASE_VERSION=2.0.0\n+RELEASE_RC=rc1\n+GIT_REF=-master\n+#export GNUPGHOME=\"/c/virtual\\ D/SystemDS/systemds/target/.gnupg_copy\"\n+export GNUPGHOME=\"../../../target/.gnupg_copy/\"\n+\n+function checkout_code {\n+ # Checkout code\n+ eval rm -rf $RELEASE_WORK_DIR\n+ eval mkdir -p $RELEASE_WORK_DIR\n+ eval cd $RELEASE_WORK_DIR\n+ git clone https://github.com/apache/systemds.git\n+ cd systemds\n+ git checkout $GIT_REF\n+ git_hash=`git rev-parse --short HEAD`\n+ echo \"Checked out SystemDS git hash $git_hash\"\n+\n+ git clean -d -f -x\n+ #rm .gitignore\n+ #rm -rf .git\n+\n+ eval cd \"$BASE_DIR\" #return to base dir\n+}\n+\n+# Pull the latest code (with committed pom changes) and deploy to the local target directory\n+checkout_code\n+# Remove SNAPSHOT from the version in pom\n+eval cd $RELEASE_WORK_DIR/systemds\n+sed -i \"s/<version>$RELEASE_VERSION-SNAPSHOT<\\/version>/<version>$RELEASE_VERSION<\\/version>/\" pom.xml\n+GPG_OPTS=\"-Dgpg.keyname=$GPG_KEYID -Dgpg.passphrase=$GPG_PASSPHRASE\"\n+# Deploy to /target folder for the next job to pick the artifacts up for there\n+CMD=\"$MVN $PUBLISH_PROFILES deploy \\\n+-DskiptTests \\\n+-DaltDeploymentRepository=altDepRepo::default::file:./target \\\n+${GPG_OPTS}\"\n+\n+echo \"Executing: \" \"$CMD\"\n+$CMD\n+\n+eval cd $RELEASE_WORK_DIR\n+\n+\n+exit 0\n+\n"
},
{
"change_type": "MODIFY",
"old_path": "dev/release/old-release-build.sh",
"new_path": "dev/release/old-release-build.sh",
"diff": "@@ -269,7 +269,6 @@ if [[ \"$RELEASE_PREPARE\" == \"true\" ]]; then\n#sed -i .bak \"s|<tag>HEAD<\\/tag>|<tag>$RELEASE_TAG<\\/tag>|\" $BASE_DIR/target/release/systemds/pom.xml\n# Remove SNAPSHOT from the version in pom\n- sed -i \"s/<version>$RELEASE_VERSION-SNAPSHOT<\\/version>/<version>$RELEASE_VERSION<\\/version>/\" /c/virtual\\ D/SystemDS/systemds/target/release2/systemds/pom.xml\neval cd $RELEASE_WORK_DIR/systemds\n## Rerunning mvn with clean and package goals, as release:prepare changes ordeer for some dependencies like unpack and shade.\n$MVN $PUBLISH_PROFILES clean package $DRY_RUN \\\n@@ -277,11 +276,13 @@ if [[ \"$RELEASE_PREPARE\" == \"true\" ]]; then\n-DskipTests\" \\\n-DreleaseVersion=\"$RELEASE_VERSION\" -DdevelopmentVersion=\"$DEVELOPMENT_VERSION\" -Dtag=\"$RELEASE_TAG\" \\\n+ exit\n+\n# Pull the latest code (with committed pom changes) and deploy to the local target directory\ncheckout_code\n# Remove SNAPSHOT from the version in pom\n- sed -i \"s/<version>$RELEASE_VERSION-SNAPSHOT<\\/version>/<version>$RELEASE_VERSION<\\/version>/\" /c/virtual\\ D/SystemDS/systemds/target/release2/systemds/pom.xml\neval cd $RELEASE_WORK_DIR/systemds\n+ sed -i \"s/<version>$RELEASE_VERSION-SNAPSHOT<\\/version>/<version>$RELEASE_VERSION<\\/version>/\" pom.xml\nGPG_OPTS=\"-Dgpg.keyname=$GPG_KEYID -Dgpg.passphrase=$GPG_PASSPHRASE\"\n# Deploy to /target folder for the next job to pick the artifacts up for there\nCMD=\"$MVN $PUBLISH_PROFILES deploy \\\n"
},
{
"change_type": "MODIFY",
"old_path": "dev/release/svn_dev_upload.sh",
"new_path": "dev/release/svn_dev_upload.sh",
"diff": "@@ -24,12 +24,12 @@ MVN=mvn\nPUBLISH_PROFILES=\"-Pdistribution,rat\"\nDRY_RUN=-DdryRun=true\nGPG_PASSPHRASE=$1\n-DEVELOPMENT_VERSION=2.0-SNAPSHOT\n+DEVELOPMENT_VERSION=2.1.0-SNAPSHOT\nRELEASE_TAG=v2.0\nRELEASE_STAGING_LOCATION=\"/c/virtual\\ D/SystemDS/systemds/temp\"\nBASE_DIR=\"/c/virtual\\ D/SystemDS/systemds\"\nRELEASE_WORK_DIR=\"/c/virtual\\ D/SystemDS/systemds/target/release2\"\n-RELEASE_VERSION=2.0\n+RELEASE_VERSION=2.0.0\nRELEASE_RC=rc1\nGIT_REF=-master\n#export GNUPGHOME=\"/c/virtual\\ D/SystemDS/systemds/target/.gnupg_copy\"\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2667] Minor fixes in the release scripts |
49,706 | 22.09.2020 18:24:08 | -7,200 | 89dd8feb292be9bce806c5f8c595be5799b331c2 | [MINOR] Fix ComponentTests
While debugging failing tests, the component tests had been changed
such that they no longer returned exit codes based on parsing the
output or on test failures. This commit fixes that by changing the
component tests to use the same script as the function and application tests. | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/componentTests.yml",
"new_path": ".github/workflows/componentTests.yml",
"diff": "@@ -49,9 +49,9 @@ jobs:\n${{ runner.os }}-maven-test-\n- name: Component Tests\n- run: mvn -ntp test -D maven.test.skip=false -D automatedtestbase.outputbuffering=true -D test=org.apache.sysds.test.component.** | grep -v \"already exists in destination.\"\n+ run: ./docker/entrypoint.sh org.apache.sysds.test.component.**\n- name: User Tests\n- run: mvn -ntp test -D maven.test.skip=false -D automatedtestbase.outputbuffering=true -D test=org.apache.sysds.test.usertest.** | grep -v \"already exists in destination.\"\n+ run: ./docker/entrypoint.sh org.apache.sysds.test.usertest.**\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix ComponentTests
While debugging failing tests, the component tests had been changed
such that they no longer returned exit codes based on parsing the
output or on test failures. This commit fixes that by changing the
component tests to use the same script as the function and application tests. |
49,720 | 23.09.2020 23:42:35 | -7,200 | f4081004db1a22c2d06ee3d62ef76dc4a387c986 | [MINOR] Fix in dropInvalidType
Logical error fix: break is replaced with continue. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/matrix/data/FrameBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/matrix/data/FrameBlock.java",
"diff": "@@ -1996,7 +1996,7 @@ public class FrameBlock implements CacheBlock, Externalizable {\ntype = \"INT\";\n} else if (schemaCol.contains(\"STRING\")){\n// In case of String columns, don't do any verification or replacements.\n- break;\n+ continue;\n} else{\ntype = schemaCol;\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix in dropInvalidType
Logical error fix: break is replaced with continue. |
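The semantics of the one-word fix, sketched in the context of the FrameBlock.dropInvalidType loop (surrounding checks elided):

    for (int j = 0; j < schema.length; j++) {
        String schemaCol = schema[j];
        if (schemaCol.contains("STRING"))
            continue; // skip only this column; `break` aborted ALL remaining columns
        // ... type verification and replacements for non-string columns ...
    }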
49,693 | 25.09.2020 10:44:17 | -7,200 | fb6d63667db7f3062f09dc13e63432376314250e | Prepare native instructions for release 2.0
* pulled in a few CMake tricks from gpu codegen dev branch to unify lib naming
* verify MKL and OpenBLAS working
* cleanup native lib loader class (NativeHelper)
* build script for Windows
Closes #1063 | [
{
"change_type": "MODIFY",
"old_path": "src/main/cpp/CMakeLists.txt",
"new_path": "src/main/cpp/CMakeLists.txt",
"diff": "cmake_minimum_required(VERSION 3.8)\ncmake_policy(SET CMP0074 NEW) # make use of <package>_ROOT variable\n-project (systemds)\n+project (systemds LANGUAGES CXX)\n# All custom find modules\nset(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} \"${CMAKE_SOURCE_DIR}/cmake/\")\n@@ -41,8 +41,19 @@ set_target_properties(systemds PROPERTIES MACOSX_RPATH 1)\nset(MATH_LIBRARIES \"\")\n# sets the installation path to src/main/cpp/lib\n-set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR})\n-install(TARGETS systemds LIBRARY DESTINATION lib)\n+if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)\n+ set(CMAKE_INSTALL_PREFIX \"${CMAKE_SOURCE_DIR}\" CACHE PATH \"sets the installation path to src/main/cpp/lib\" FORCE)\n+endif()\n+\n+# sets the installation path to src/main/cpp/lib\n+# install(TARGETS systemds LIBRARY DESTINATION lib)\n+install(TARGETS systemds RUNTIME DESTINATION lib)\n+\n+# unify library filenames to libsystemds_<...>\n+if (WIN32)\n+ set(CMAKE_IMPORT_LIBRARY_PREFIX lib CACHE INTERNAL \"\")\n+ set(CMAKE_SHARED_LIBRARY_PREFIX lib CACHE INTERNAL \"\")\n+endif()\nset(CMAKE_BUILD_TYPE Release)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/cpp/build.bat",
"diff": "+@ECHO OFF\n+::-------------------------------------------------------------\n+::\n+:: Licensed to the Apache Software Foundation (ASF) under one\n+:: or more contributor license agreements. See the NOTICE file\n+:: distributed with this work for additional information\n+:: regarding copyright ownership. The ASF licenses this file\n+:: to you under the Apache License, Version 2.0 (the\n+:: \"License\"); you may not use this file except in compliance\n+:: with the License. You may obtain a copy of the License at\n+::\n+:: http://www.apache.org/licenses/LICENSE-2.0\n+::\n+:: Unless required by applicable law or agreed to in writing,\n+:: software distributed under the License is distributed on an\n+:: \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+:: KIND, either express or implied. See the License for the\n+:: specific language governing permissions and limitations\n+:: under the License.\n+::\n+::-------------------------------------------------------------\n+\n+:: This shell script compiles the required shared libraries for Windows on x86-64\n+:: Make sure to have run \"\\\"%INTEL_ROOT%\\\"\\compilervars.bat intel64 vs2019\" and set OpenBLAS_HOME to where\n+:: libopenblas.lib is located\n+\n+:: configure and compile INTEL MKL\n+cmake -S . -B INTEL -DUSE_INTEL_MKL=ON -DCMAKE_BUILD_TYPE=Release\n+cmake --build INTEL --target install --config Release\n+rmdir /Q /S INTEL\n+\n+:: configure and compile OPENBLAS\n+cmake . -B OPENBLAS -DUSE_OPEN_BLAS=ON -DCMAKE_BUILD_TYPE=Release\n+cmake --build OPENBLAS --target install --config Release\n+rmdir /Q /S OPENBLAS\n+\n+echo.\n+echo \"Make sure to re-run mvn package to make use of the newly compiled libraries\"\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/cpp/cmake/FindOpenBLAS.cmake",
"new_path": "src/main/cpp/cmake/FindOpenBLAS.cmake",
"diff": "@@ -46,7 +46,7 @@ SET(Open_BLAS_LIB_SEARCH_PATHS\n)\nFIND_PATH(OpenBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Open_BLAS_INCLUDE_SEARCH_PATHS})\n-FIND_LIBRARY(OpenBLAS_LIB NAMES openblas PATHS ${Open_BLAS_LIB_SEARCH_PATHS})\n+FIND_LIBRARY(OpenBLAS_LIB NAMES openblas libopenblas PATHS ${Open_BLAS_LIB_SEARCH_PATHS})\nSET(OpenBLAS_FOUND ON)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/cpp/lib/libsystemds_mkl-Linux-x86_64.so",
"new_path": "src/main/cpp/lib/libsystemds_mkl-Linux-x86_64.so",
"diff": "Binary files a/src/main/cpp/lib/libsystemds_mkl-Linux-x86_64.so and b/src/main/cpp/lib/libsystemds_mkl-Linux-x86_64.so differ\n"
},
{
"change_type": "ADD",
"old_path": "src/main/cpp/lib/libsystemds_mkl-Windows-AMD64.dll",
"new_path": "src/main/cpp/lib/libsystemds_mkl-Windows-AMD64.dll",
"diff": "Binary files /dev/null and b/src/main/cpp/lib/libsystemds_mkl-Windows-AMD64.dll differ\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/cpp/lib/libsystemds_openblas-Linux-x86_64.so",
"new_path": "src/main/cpp/lib/libsystemds_openblas-Linux-x86_64.so",
"diff": "Binary files a/src/main/cpp/lib/libsystemds_openblas-Linux-x86_64.so and b/src/main/cpp/lib/libsystemds_openblas-Linux-x86_64.so differ\n"
},
{
"change_type": "ADD",
"old_path": "src/main/cpp/lib/libsystemds_openblas-Windows-AMD64.dll",
"new_path": "src/main/cpp/lib/libsystemds_openblas-Windows-AMD64.dll",
"diff": "Binary files /dev/null and b/src/main/cpp/lib/libsystemds_openblas-Windows-AMD64.dll differ\n"
},
{
"change_type": "DELETE",
"old_path": "src/main/cpp/lib/systemds_mkl-Windows-AMD64.dll",
"new_path": "src/main/cpp/lib/systemds_mkl-Windows-AMD64.dll",
"diff": "Binary files a/src/main/cpp/lib/systemds_mkl-Windows-AMD64.dll and /dev/null differ\n"
},
{
"change_type": "DELETE",
"old_path": "src/main/cpp/lib/systemds_openblas-Windows-AMD64.dll",
"new_path": "src/main/cpp/lib/systemds_openblas-Windows-AMD64.dll",
"diff": "Binary files a/src/main/cpp/lib/systemds_openblas-Windows-AMD64.dll and /dev/null differ\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/utils/NativeHelper.java",
"new_path": "src/main/java/org/apache/sysds/utils/NativeHelper.java",
"diff": "@@ -147,6 +147,19 @@ public class NativeHelper {\nreturn false;\n}\n+ /**\n+ * Note: we only support Windows and Linux at the moment.\n+ *\n+ * @return true if operating system is supported\n+ */\n+ private static boolean isSupportedOS() {\n+ if(SystemUtils.IS_OS_LINUX || SystemUtils.IS_OS_WINDOWS) {\n+ return true;\n+ }\n+ LOG.info(\"Unsupported architecture for native BLAS:\" + SystemUtils.OS_ARCH);\n+ return false;\n+ }\n+\n/**\n* Check if native BLAS libraries have been successfully loaded\n* @return true if CURRENT_NATIVE_BLAS_STATE is SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_IN_USE or\n@@ -177,7 +190,8 @@ public class NativeHelper {\n// attemptedLoading variable ensures that we don't try to load SystemDS and other dependencies\n// again and again especially in the parfor (hence the double-checking with synchronized).\n- if(shouldReload(customLibPath) && isSupportedBLAS(userSpecifiedBLAS) && isSupportedArchitecture()) {\n+ if(shouldReload(customLibPath) && isSupportedBLAS(userSpecifiedBLAS) && isSupportedArchitecture()\n+ && isSupportedOS()) {\nlong start = System.nanoTime();\nsynchronized(NativeHelper.class) {\nif(shouldReload(customLibPath)) {\n@@ -188,23 +202,16 @@ public class NativeHelper {\nblas = new String[] { \"mkl\", \"openblas\" };\n}\n- if(SystemUtils.IS_OS_WINDOWS) {\n- if (checkAndLoadBLAS(customLibPath, blas) &&\n- (loadLibraryHelper(\"systemds_\" + blasType + \"-Windows-AMD64\") ||\n- loadBLAS(customLibPath, \"systemds_\" + blasType + \"-Windows-AMD64\", null))\n- )\n+ if(checkAndLoadBLAS(customLibPath, blas)) {\n+ String platform_suffix = (SystemUtils.IS_OS_WINDOWS ? \"-Windows-AMD64.dll\" : \"-Linux-x86_64.so\");\n+ String library_name = \"libsystemds_\" + blasType + platform_suffix;\n+ if(loadLibraryHelperFromResource(library_name) ||\n+ loadBLAS(customLibPath, library_name,\"Loading native helper with customLibPath.\"))\n{\nLOG.info(\"Using native blas: \" + blasType + getNativeBLASPath());\nCURRENT_NATIVE_BLAS_STATE = NativeBlasState.SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_IN_USE;\n}\n}\n- else {\n- if (checkAndLoadBLAS(customLibPath, blas) &&\n- loadLibraryHelper(\"libsystemds_\" + blasType + \"-Linux-x86_64.so\")) {\n- LOG.info(\"Using native blas: \" + blasType + getNativeBLASPath());\n- CURRENT_NATIVE_BLAS_STATE = NativeBlasState.SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_IN_USE;\n- }\n- }\n}\n}\ndouble timeToLoadInMilliseconds = (System.nanoTime()-start)*1e-6;\n@@ -212,8 +219,8 @@ public class NativeHelper {\nLOG.warn(\"Time to load native blas: \" + timeToLoadInMilliseconds + \" milliseconds.\");\n}\nelse if(LOG.isDebugEnabled() && !isSupportedBLAS(userSpecifiedBLAS)) {\n- LOG.debug(\"Using internal Java BLAS as native BLAS support the configuration 'sysds.native.blas'=\" +\n- userSpecifiedBLAS + \".\");\n+ LOG.debug(\"Using internal Java BLAS as native BLAS support instead of the configuration \" +\n+ \"'sysds.native.blas'=\" + userSpecifiedBLAS + \".\");\n}\n}\n@@ -223,15 +230,19 @@ public class NativeHelper {\nboolean isLoaded = false;\nfor (String blas : listBLAS) {\n- if (blas.equalsIgnoreCase(\"mkl\")) {\n- isLoaded = loadBLAS(customLibPath, \"mkl_rt\", null);\n- } else if (blas.equalsIgnoreCase(\"openblas\")) {\n- // no need for gomp on windows\n- if (SystemUtils.IS_OS_WINDOWS || loadBLAS(customLibPath, \"gomp\",\n- \"gomp required for loading OpenBLAS-enabled SystemDS library\")) {\n- isLoaded = loadBLAS(customLibPath, \"openblas\", null);\n- }\n+ if (blas.equalsIgnoreCase(\"mkl\"))\n+ isLoaded = loadBLAS(customLibPath, \"mkl_rt\", \"\");\n+ else if 
(blas.equalsIgnoreCase(\"openblas\")) {\n+ // OpenBLAS 0.3.10 binary distribution [1] for windows comes with a libopenblas.dll, so let's try this\n+ // first. Make sure the directory of that dll is on your PATH env var or pointed to by customLibPath.\n+ // [1] https://github.com/xianyi/OpenBLAS/releases\n+ isLoaded = loadBLAS(customLibPath, \"libopenblas\", \"\");\n+ if(!isLoaded)\n+ isLoaded = loadBLAS(customLibPath, \"openblas\", \"\");\n}\n+ else\n+ LOG.warn(\"Not trying to load unknown blas type \" + blas);\n+\nif (isLoaded) {\nblasType = blas;\nbreak;\n@@ -288,13 +299,13 @@ public class NativeHelper {\nString libPath = customLibPath + File.separator + System.mapLibraryName(blas);\ntry {\nSystem.load(libPath);\n- // Print to stdout as this feature is intended for cloud environment\n- System.out.println(\"Loaded the library:\" + libPath);\n+ LOG.info(\"Loaded the library:\" + libPath);\nreturn true;\n}\n- catch (UnsatisfiedLinkError e1) {\n- // Print to stdout as this feature is intended for cloud environment\n- System.out.println(\"Unable to load \" + libPath + \":\" + e1.getMessage());\n+ catch (UnsatisfiedLinkError e) {\n+ LOG.warn(\"Unable to load \" + blas + \" from \" + libPath +\n+ \". Trying once more with System.loadLibrary(\" + blas +\n+ \") \\n Message from exception was: \" + e.getMessage());\n}\n}\n@@ -304,22 +315,25 @@ public class NativeHelper {\nreturn true;\n}\ncatch (UnsatisfiedLinkError e) {\n- System.out.println(System.getProperty(\"java.library.path\"));\n- if(optionalMsg != null)\n- LOG.debug(\"Unable to load \" + blas + \"(\" + optionalMsg + \"):\" + e.getMessage());\n- else\n- LOG.debug(\"Unable to load \" + blas + \":\" + e.getMessage());\n+ LOG.debug(\"java.library.path: \" + System.getProperty(\"java.library.path\"));\n+ LOG.debug(\"Unable to load \" + blas + (optionalMsg == null ? \"\" : (\" (\" + optionalMsg + \")\")) +\n+ \" \\n Message from exception was: \" + e.getMessage());\nreturn false;\n}\n}\n-\n- private static boolean loadLibraryHelper(String path) {\n+ /**\n+ * Attempts to load the JNI shared library from the sysds jar\n+ *\n+ * @param libFileName library file name)\n+ * @return true if successfully loaded BLAS\n+ */\n+ private static boolean loadLibraryHelperFromResource(String libFileName) {\nOutputStream out = null;\n- try(InputStream in = NativeHelper.class.getResourceAsStream(\"/lib/\"+path)) {\n+ try(InputStream in = NativeHelper.class.getResourceAsStream(\"/lib/\"+ libFileName)) {\n// This logic is added because Java does not allow to load library from a resource file.\nif(in != null) {\n- File temp = File.createTempFile(path, \"\");\n+ File temp = File.createTempFile(libFileName, \"\");\ntemp.deleteOnExit();\nout = FileUtils.openOutputStream(temp);\nIOUtils.copy(in, out);\n@@ -327,10 +341,10 @@ public class NativeHelper {\nreturn true;\n}\nelse\n- LOG.warn(\"No lib available in the jar:\" + path);\n+ LOG.warn(\"No lib available in the jar:\" + libFileName);\n}\ncatch(IOException e) {\n- LOG.warn(\"Unable to load library \" + path + \" from resource:\" + e.getMessage());\n+ LOG.warn(\"Unable to load library \" + libFileName + \" from resource:\" + e.getMessage());\n}\nfinally {\nIOUtilFunctions.closeSilently(out);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2663, 2664] Prepare native instructions for release 2.0
* pulled in a few CMake tricks from gpu codegen dev branch to unify lib naming
* verify MKL and OpenBLAS working
* cleanup native lib loader class (NativeHelper)
* build script for Windows
Closes #1063 |
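The reworked `loadLibraryHelperFromResource` in this commit follows a standard JVM pattern: `System.load()` only accepts a file path, so a shared library packaged inside the jar must first be copied out to a temporary file. A minimal sketch of that pattern is below; class and method names are illustrative, and it substitutes `java.nio.file.Files.copy` for the commons-io `IOUtils.copy` used in the actual code to stay dependency-free.

```java
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;

public class ResourceLibLoader {
    // Copies /lib/<libFileName> out of the jar into a temp file and loads it;
    // returns false if the resource is missing or linking fails.
    static boolean loadFromResource(String libFileName) {
        try (InputStream in = ResourceLibLoader.class.getResourceAsStream("/lib/" + libFileName)) {
            if (in == null)
                return false; // library not packaged in this jar
            File temp = File.createTempFile(libFileName, "");
            temp.deleteOnExit();
            Files.copy(in, temp.toPath(), StandardCopyOption.REPLACE_EXISTING);
            System.load(temp.getAbsolutePath()); // System.load requires an absolute path
            return true;
        }
        catch (IOException | UnsatisfiedLinkError e) {
            return false;
        }
    }

    public static void main(String[] args) {
        System.out.println(loadFromResource("libsystemds_openblas-Linux-x86_64.so"));
    }
}
```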
49,693 | 25.09.2020 15:30:49 | -7,200 | 0b66c780e81182590cb9801c4b9eded62651ec34 | [MINOR] Add setOutputBuffering(true) to make PrintTest and StopTest2 pass | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"new_path": "src/test/java/org/apache/sysds/test/AutomatedTestBase.java",
"diff": "@@ -261,6 +261,10 @@ public abstract class AutomatedTestBase {\nTestUtils.clearAssertionInformation();\n}\n+ protected void setOutputBuffering(boolean value) {\n+ outputBuffering = value;\n+ }\n+\n/**\n* <p>\n* Returns a test configuration from the list of available configurations. If no configuration is added for the\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/unary/scalar/PrintTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/unary/scalar/PrintTest.java",
"diff": "@@ -38,7 +38,7 @@ public class PrintTest extends AutomatedTestBase\n@Override\npublic void setUp() {\n-\n+ setOutputBuffering(true);\naddTestConfiguration(\"PrintTest\", new TestConfiguration(TEST_CLASS_DIR, \"PrintTest\", new String[] {}));\naddTestConfiguration(\"PrintTest2\", new TestConfiguration(TEST_CLASS_DIR, \"PrintTest2\", new String[] {}));\naddTestConfiguration(\"PrintTest3\", new TestConfiguration(TEST_CLASS_DIR, \"PrintTest3\", new String[] {}));\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/unary/scalar/StopTest2.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/unary/scalar/StopTest2.java",
"diff": "@@ -41,6 +41,7 @@ public class StopTest2 extends AutomatedTestBase\n@Override\npublic void setUp() {\n+ setOutputBuffering(true);\navailableTestConfigurations.put(TEST_STOP, new TestConfiguration(TEST_CLASS_DIR, TEST_STOP, new String[] {}));\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Add setOutputBuffering(true) to make PrintTest and StopTest2 pass |
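The fix here is an encapsulation choice rather than new behavior: instead of exposing the buffering field, the test base class gains a protected setter that individual test classes call from `setUp()`. A reduced Java sketch of the pattern (illustrative class names, not the real AutomatedTestBase):

```java
public class OutputBufferingSketch {
    static class TestBase {
        private boolean outputBuffering = false; // field stays private to the base class
        protected void setOutputBuffering(boolean value) { outputBuffering = value; }
        void run() {
            System.out.println(outputBuffering
                ? "stdout captured for assertions"
                : "stdout printed directly");
        }
    }

    static class PrintLikeTest extends TestBase {
        void setUp() { setOutputBuffering(true); } // mirrors PrintTest.setUp()
    }

    public static void main(String[] args) {
        PrintLikeTest t = new PrintLikeTest();
        t.setUp();
        t.run(); // -> "stdout captured for assertions"
    }
}
```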
49,693 | 25.09.2020 15:44:15 | -7,200 | edc9cf15e7072b10c96f6ae10cab0d647b619f85 | [MINOR] Add proper success condition for running FullSignTest and FullMinus1MultTest on GPU | [
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/binary/matrix_full_cellwise/FullMinus1MultTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/binary/matrix_full_cellwise/FullMinus1MultTest.java",
"diff": "@@ -129,7 +129,8 @@ public class FullMinus1MultTest extends AutomatedTestBase\n//check generated opcode\nif( rewrites ){\nif( instType == ExecType.CP )\n- Assert.assertTrue(\"Missing opcode: 1-*\", Statistics.getCPHeavyHitterOpCodes().contains(\"1-*\"));\n+ Assert.assertTrue(\"Missing opcode: 1-*\", Statistics.getCPHeavyHitterOpCodes().contains(\"1-*\") ||\n+ Statistics.getCPHeavyHitterOpCodes().contains(\"gpu_1-*\"));\nelse if( instType == ExecType.SPARK )\nAssert.assertTrue(\"Missing opcode: sp_1-* | sp_map1-*\",\nStatistics.getCPHeavyHitterOpCodes().contains(\"sp_1-*\") ||\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/unary/matrix/FullSignTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/unary/matrix/FullSignTest.java",
"diff": "@@ -165,8 +165,10 @@ public class FullSignTest extends AutomatedTestBase\nTestUtils.compareMatrices(dmlfile, rfile, 0, \"Stat-DML\", \"Stat-R\");\n//check generated opcode\n- if( instType == ExecType.CP )\n- Assert.assertTrue(\"Missing opcode: sign\", Statistics.getCPHeavyHitterOpCodes().contains(\"sign\"));\n+ if( instType == ExecType.CP ) {\n+ Assert.assertTrue(\"Missing opcode: sign\", Statistics.getCPHeavyHitterOpCodes().contains(\"sign\") ||\n+ Statistics.getCPHeavyHitterOpCodes().contains(\"gpu_sign\") );\n+ }\nelse if ( instType == ExecType.SPARK )\nAssert.assertTrue(\"Missing opcode: \"+Instruction.SP_INST_PREFIX+\"sign\", Statistics.getCPHeavyHitterOpCodes().contains(Instruction.SP_INST_PREFIX+\"sign\"));\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Add proper success condition for running FullSignTest and FullMinus1MultTest on GPU |
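The relaxed assertions boil down to one predicate: a heavy-hitter opcode may appear with or without the `gpu_` prefix depending on where the instruction ran, while Spark instructions carry their own `sp_` prefix and are checked separately. A hedged helper sketch (hypothetical method, not SystemDS API):

```java
import java.util.Set;

public class OpcodeCheck {
    // Accepts either the CP spelling ("sign") or the GPU spelling ("gpu_sign").
    static boolean containsOpcode(Set<String> heavyHitters, String opcode) {
        return heavyHitters.contains(opcode) || heavyHitters.contains("gpu_" + opcode);
    }

    public static void main(String[] args) {
        System.out.println(containsOpcode(Set.of("gpu_sign", "rand"), "sign")); // true
        System.out.println(containsOpcode(Set.of("sp_sign"), "sign"));          // false: Spark prefix differs
    }
}
```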
49,693 | 25.09.2020 16:00:45 | -7,200 | 4da7c6c7379bfbfd72f63de1cb9cf1715a5ca7a2 | [SYSTEMDS-2663, 2671] Resolve problem with failing cumulative aggregate tests (indexing issue) | [
{
"change_type": "MODIFY",
"old_path": "src/main/cpp/kernels/SystemDS.ptx",
"new_path": "src/main/cpp/kernels/SystemDS.ptx",
"diff": "@@ -112,7 +112,7 @@ BB1_2:\n.param .u32 cumulative_sum_up_sweep_d_param_4\n)\n{\n- .reg .pred %p<5>;\n+ .reg .pred %p<4>;\n.reg .b32 %r<20>;\n.reg .f64 %fd<8>;\n.reg .b64 %rd<11>;\n@@ -130,7 +130,7 @@ BB1_2:\nmad.lo.s32 %r1, %r10, %r11, %r12;\nadd.s32 %r13, %r8, -1;\nsetp.gt.u32 %p1, %r1, %r13;\n- @%p1 bra BB2_5;\n+ @%p1 bra BB2_4;\nmov.u32 %r14, %ctaid.y;\nmul.lo.s32 %r2, %r14, %r8;\n@@ -155,16 +155,13 @@ BB2_2:\n@%p3 bra BB2_2;\nBB2_3:\n- setp.ge.u32 %p4, %r9, %r7;\n- @%p4 bra BB2_5;\n-\nadd.s32 %r18, %r1, %r2;\ncvta.to.global.u64 %rd8, %rd2;\nmul.wide.u32 %rd9, %r18, 8;\nadd.s64 %rd10, %rd8, %rd9;\nst.global.f64 [%rd10], %fd7;\n-BB2_5:\n+BB2_4:\nret;\n}\n@@ -177,7 +174,7 @@ BB2_5:\n.param .u32 cumulative_sum_up_sweep_f_param_4\n)\n{\n- .reg .pred %p<5>;\n+ .reg .pred %p<4>;\n.reg .f32 %f<8>;\n.reg .b32 %r<20>;\n.reg .b64 %rd<11>;\n@@ -195,7 +192,7 @@ BB2_5:\nmad.lo.s32 %r1, %r10, %r11, %r12;\nadd.s32 %r13, %r8, -1;\nsetp.gt.u32 %p1, %r1, %r13;\n- @%p1 bra BB3_5;\n+ @%p1 bra BB3_4;\nmov.u32 %r14, %ctaid.y;\nmul.lo.s32 %r2, %r14, %r8;\n@@ -220,16 +217,13 @@ BB3_2:\n@%p3 bra BB3_2;\nBB3_3:\n- setp.ge.u32 %p4, %r9, %r7;\n- @%p4 bra BB3_5;\n-\nadd.s32 %r18, %r1, %r2;\ncvta.to.global.u64 %rd8, %rd2;\nmul.wide.u32 %rd9, %r18, 4;\nadd.s64 %rd10, %rd8, %rd9;\nst.global.f32 [%rd10], %f7;\n-BB3_5:\n+BB3_4:\nret;\n}\n@@ -243,8 +237,8 @@ BB3_5:\n.param .u32 cumulative_sum_down_sweep_d_param_5\n)\n{\n- .reg .pred %p<7>;\n- .reg .b32 %r<22>;\n+ .reg .pred %p<5>;\n+ .reg .b32 %r<21>;\n.reg .f64 %fd<11>;\n.reg .b64 %rd<15>;\n@@ -265,47 +259,44 @@ BB3_5:\nsetp.gt.u32 %p1, %r1, %r13;\n@%p1 bra BB4_5;\n- mov.u32 %r14, %ctaid.y;\n- mul.lo.s32 %r2, %r14, %r8;\n- mov.u32 %r15, %nctaid.y;\n- setp.lt.u32 %p2, %r15, 2;\n- setp.eq.s32 %p3, %r14, 0;\n- or.pred %p4, %p2, %p3;\n+ mov.u32 %r2, %ctaid.y;\n+ setp.eq.s32 %p2, %r2, 0;\nmov.f64 %fd9, 0d0000000000000000;\n- @%p4 bra BB4_3;\n+ @%p2 bra BB4_3;\n- add.s32 %r16, %r1, %r2;\n- add.s32 %r17, %r16, -1;\n+ add.s32 %r14, %r2, -1;\n+ mad.lo.s32 %r15, %r14, %r8, %r1;\ncvta.to.global.u64 %rd6, %rd3;\n- mul.wide.s32 %rd7, %r17, 8;\n+ mul.wide.u32 %rd7, %r15, 8;\nadd.s64 %rd8, %rd6, %rd7;\nld.global.f64 %fd9, [%rd8];\nBB4_3:\n- mad.lo.s32 %r18, %r2, %r9, %r1;\n- mul.wide.u32 %rd9, %r18, 8;\n+ mul.lo.s32 %r16, %r9, %r8;\n+ mad.lo.s32 %r17, %r16, %r2, %r1;\n+ mul.wide.u32 %rd9, %r17, 8;\nadd.s64 %rd10, %rd2, %rd9;\nld.global.f64 %fd7, [%rd10];\nadd.f64 %fd10, %fd9, %fd7;\nadd.s64 %rd11, %rd1, %rd9;\nst.global.f64 [%rd11], %fd10;\n- mad.lo.s32 %r19, %r9, %r8, %r18;\n- mul.lo.s32 %r20, %r8, %r7;\n- min.u32 %r3, %r19, %r20;\n- add.s32 %r21, %r18, %r8;\n- setp.ge.u32 %p5, %r21, %r3;\n- @%p5 bra BB4_5;\n+ mul.lo.s32 %r18, %r8, %r7;\n+ add.s32 %r19, %r17, %r16;\n+ min.u32 %r3, %r19, %r18;\n+ add.s32 %r20, %r17, %r8;\n+ setp.ge.u32 %p3, %r20, %r3;\n+ @%p3 bra BB4_5;\nBB4_4:\n- mul.wide.s32 %rd12, %r21, 8;\n+ mul.wide.s32 %rd12, %r20, 8;\nadd.s64 %rd13, %rd2, %rd12;\nld.global.f64 %fd8, [%rd13];\nadd.f64 %fd10, %fd10, %fd8;\nadd.s64 %rd14, %rd1, %rd12;\nst.global.f64 [%rd14], %fd10;\n- add.s32 %r21, %r21, %r8;\n- setp.lt.u32 %p6, %r21, %r3;\n- @%p6 bra BB4_4;\n+ add.s32 %r20, %r20, %r8;\n+ setp.lt.u32 %p4, %r20, %r3;\n+ @%p4 bra BB4_4;\nBB4_5:\nret;\n@@ -321,9 +312,9 @@ BB4_5:\n.param .u32 cumulative_sum_down_sweep_f_param_5\n)\n{\n- .reg .pred %p<7>;\n+ .reg .pred %p<5>;\n.reg .f32 %f<11>;\n- .reg .b32 %r<22>;\n+ .reg .b32 %r<21>;\n.reg .b64 %rd<15>;\n@@ -343,47 +334,44 @@ BB4_5:\nsetp.gt.u32 %p1, %r1, %r13;\n@%p1 bra BB5_5;\n- mov.u32 %r14, %ctaid.y;\n- mul.lo.s32 %r2, 
%r14, %r8;\n- mov.u32 %r15, %nctaid.y;\n- setp.lt.u32 %p2, %r15, 2;\n- setp.eq.s32 %p3, %r14, 0;\n- or.pred %p4, %p2, %p3;\n+ mov.u32 %r2, %ctaid.y;\n+ setp.eq.s32 %p2, %r2, 0;\nmov.f32 %f9, 0f00000000;\n- @%p4 bra BB5_3;\n+ @%p2 bra BB5_3;\n- add.s32 %r16, %r1, %r2;\n- add.s32 %r17, %r16, -1;\n+ add.s32 %r14, %r2, -1;\n+ mad.lo.s32 %r15, %r14, %r8, %r1;\ncvta.to.global.u64 %rd6, %rd3;\n- mul.wide.s32 %rd7, %r17, 4;\n+ mul.wide.u32 %rd7, %r15, 4;\nadd.s64 %rd8, %rd6, %rd7;\nld.global.f32 %f9, [%rd8];\nBB5_3:\n- mad.lo.s32 %r18, %r2, %r9, %r1;\n- mul.wide.u32 %rd9, %r18, 4;\n+ mul.lo.s32 %r16, %r9, %r8;\n+ mad.lo.s32 %r17, %r16, %r2, %r1;\n+ mul.wide.u32 %rd9, %r17, 4;\nadd.s64 %rd10, %rd2, %rd9;\nld.global.f32 %f7, [%rd10];\nadd.f32 %f10, %f9, %f7;\nadd.s64 %rd11, %rd1, %rd9;\nst.global.f32 [%rd11], %f10;\n- mad.lo.s32 %r19, %r9, %r8, %r18;\n- mul.lo.s32 %r20, %r8, %r7;\n- min.u32 %r3, %r19, %r20;\n- add.s32 %r21, %r18, %r8;\n- setp.ge.u32 %p5, %r21, %r3;\n- @%p5 bra BB5_5;\n+ mul.lo.s32 %r18, %r8, %r7;\n+ add.s32 %r19, %r17, %r16;\n+ min.u32 %r3, %r19, %r18;\n+ add.s32 %r20, %r17, %r8;\n+ setp.ge.u32 %p3, %r20, %r3;\n+ @%p3 bra BB5_5;\nBB5_4:\n- mul.wide.s32 %rd12, %r21, 4;\n+ mul.wide.s32 %rd12, %r20, 4;\nadd.s64 %rd13, %rd2, %rd12;\nld.global.f32 %f8, [%rd13];\nadd.f32 %f10, %f10, %f8;\nadd.s64 %rd14, %rd1, %rd12;\nst.global.f32 [%rd14], %f10;\n- add.s32 %r21, %r21, %r8;\n- setp.lt.u32 %p6, %r21, %r3;\n- @%p6 bra BB5_4;\n+ add.s32 %r20, %r20, %r8;\n+ setp.lt.u32 %p4, %r20, %r3;\n+ @%p4 bra BB5_4;\nBB5_5:\nret;\n@@ -398,7 +386,7 @@ BB5_5:\n.param .u32 cumulative_prod_up_sweep_d_param_4\n)\n{\n- .reg .pred %p<5>;\n+ .reg .pred %p<4>;\n.reg .b32 %r<20>;\n.reg .f64 %fd<8>;\n.reg .b64 %rd<11>;\n@@ -416,7 +404,7 @@ BB5_5:\nmad.lo.s32 %r1, %r10, %r11, %r12;\nadd.s32 %r13, %r8, -1;\nsetp.gt.u32 %p1, %r1, %r13;\n- @%p1 bra BB6_5;\n+ @%p1 bra BB6_4;\nmov.u32 %r14, %ctaid.y;\nmul.lo.s32 %r2, %r14, %r8;\n@@ -441,16 +429,13 @@ BB6_2:\n@%p3 bra BB6_2;\nBB6_3:\n- setp.ge.u32 %p4, %r9, %r7;\n- @%p4 bra BB6_5;\n-\nadd.s32 %r18, %r1, %r2;\ncvta.to.global.u64 %rd8, %rd2;\nmul.wide.u32 %rd9, %r18, 8;\nadd.s64 %rd10, %rd8, %rd9;\nst.global.f64 [%rd10], %fd7;\n-BB6_5:\n+BB6_4:\nret;\n}\n@@ -463,7 +448,7 @@ BB6_5:\n.param .u32 cumulative_prod_up_sweep_f_param_4\n)\n{\n- .reg .pred %p<5>;\n+ .reg .pred %p<4>;\n.reg .b32 %r<20>;\n.reg .f64 %fd<8>;\n.reg .b64 %rd<11>;\n@@ -481,7 +466,7 @@ BB6_5:\nmad.lo.s32 %r1, %r10, %r11, %r12;\nadd.s32 %r13, %r8, -1;\nsetp.gt.u32 %p1, %r1, %r13;\n- @%p1 bra BB7_5;\n+ @%p1 bra BB7_4;\nmov.u32 %r14, %ctaid.y;\nmul.lo.s32 %r2, %r14, %r8;\n@@ -506,16 +491,13 @@ BB7_2:\n@%p3 bra BB7_2;\nBB7_3:\n- setp.ge.u32 %p4, %r9, %r7;\n- @%p4 bra BB7_5;\n-\nadd.s32 %r18, %r1, %r2;\ncvta.to.global.u64 %rd8, %rd2;\nmul.wide.u32 %rd9, %r18, 8;\nadd.s64 %rd10, %rd8, %rd9;\nst.global.f64 [%rd10], %fd7;\n-BB7_5:\n+BB7_4:\nret;\n}\n@@ -529,8 +511,8 @@ BB7_5:\n.param .u32 cumulative_prod_down_sweep_d_param_5\n)\n{\n- .reg .pred %p<7>;\n- .reg .b32 %r<22>;\n+ .reg .pred %p<5>;\n+ .reg .b32 %r<21>;\n.reg .f64 %fd<11>;\n.reg .b64 %rd<15>;\n@@ -551,47 +533,44 @@ BB7_5:\nsetp.gt.u32 %p1, %r1, %r13;\n@%p1 bra BB8_5;\n- mov.u32 %r14, %ctaid.y;\n- mul.lo.s32 %r2, %r14, %r8;\n- mov.u32 %r15, %nctaid.y;\n- setp.lt.u32 %p2, %r15, 2;\n- setp.eq.s32 %p3, %r14, 0;\n- or.pred %p4, %p2, %p3;\n+ mov.u32 %r2, %ctaid.y;\n+ setp.eq.s32 %p2, %r2, 0;\nmov.f64 %fd9, 0d3FF0000000000000;\n- @%p4 bra BB8_3;\n+ @%p2 bra BB8_3;\n- add.s32 %r16, %r1, %r2;\n- add.s32 %r17, %r16, -1;\n+ add.s32 %r14, %r2, -1;\n+ mad.lo.s32 %r15, 
%r14, %r8, %r1;\ncvta.to.global.u64 %rd6, %rd3;\n- mul.wide.s32 %rd7, %r17, 8;\n+ mul.wide.u32 %rd7, %r15, 8;\nadd.s64 %rd8, %rd6, %rd7;\nld.global.f64 %fd9, [%rd8];\nBB8_3:\n- mad.lo.s32 %r18, %r2, %r9, %r1;\n- mul.wide.u32 %rd9, %r18, 8;\n+ mul.lo.s32 %r16, %r9, %r8;\n+ mad.lo.s32 %r17, %r16, %r2, %r1;\n+ mul.wide.u32 %rd9, %r17, 8;\nadd.s64 %rd10, %rd2, %rd9;\nld.global.f64 %fd7, [%rd10];\nmul.f64 %fd10, %fd9, %fd7;\nadd.s64 %rd11, %rd1, %rd9;\nst.global.f64 [%rd11], %fd10;\n- mad.lo.s32 %r19, %r9, %r8, %r18;\n- mul.lo.s32 %r20, %r8, %r7;\n- min.u32 %r3, %r19, %r20;\n- add.s32 %r21, %r18, %r8;\n- setp.ge.u32 %p5, %r21, %r3;\n- @%p5 bra BB8_5;\n+ mul.lo.s32 %r18, %r8, %r7;\n+ add.s32 %r19, %r17, %r16;\n+ min.u32 %r3, %r19, %r18;\n+ add.s32 %r20, %r17, %r8;\n+ setp.ge.u32 %p3, %r20, %r3;\n+ @%p3 bra BB8_5;\nBB8_4:\n- mul.wide.s32 %rd12, %r21, 8;\n+ mul.wide.s32 %rd12, %r20, 8;\nadd.s64 %rd13, %rd2, %rd12;\nld.global.f64 %fd8, [%rd13];\nmul.f64 %fd10, %fd10, %fd8;\nadd.s64 %rd14, %rd1, %rd12;\nst.global.f64 [%rd14], %fd10;\n- add.s32 %r21, %r21, %r8;\n- setp.lt.u32 %p6, %r21, %r3;\n- @%p6 bra BB8_4;\n+ add.s32 %r20, %r20, %r8;\n+ setp.lt.u32 %p4, %r20, %r3;\n+ @%p4 bra BB8_4;\nBB8_5:\nret;\n@@ -607,9 +586,9 @@ BB8_5:\n.param .u32 cumulative_prod_down_sweep_f_param_5\n)\n{\n- .reg .pred %p<7>;\n+ .reg .pred %p<5>;\n.reg .f32 %f<11>;\n- .reg .b32 %r<22>;\n+ .reg .b32 %r<21>;\n.reg .b64 %rd<15>;\n@@ -629,47 +608,44 @@ BB8_5:\nsetp.gt.u32 %p1, %r1, %r13;\n@%p1 bra BB9_5;\n- mov.u32 %r14, %ctaid.y;\n- mul.lo.s32 %r2, %r14, %r8;\n- mov.u32 %r15, %nctaid.y;\n- setp.lt.u32 %p2, %r15, 2;\n- setp.eq.s32 %p3, %r14, 0;\n- or.pred %p4, %p2, %p3;\n+ mov.u32 %r2, %ctaid.y;\n+ setp.eq.s32 %p2, %r2, 0;\nmov.f32 %f9, 0f3F800000;\n- @%p4 bra BB9_3;\n+ @%p2 bra BB9_3;\n- add.s32 %r16, %r1, %r2;\n- add.s32 %r17, %r16, -1;\n+ add.s32 %r14, %r2, -1;\n+ mad.lo.s32 %r15, %r14, %r8, %r1;\ncvta.to.global.u64 %rd6, %rd3;\n- mul.wide.s32 %rd7, %r17, 4;\n+ mul.wide.u32 %rd7, %r15, 4;\nadd.s64 %rd8, %rd6, %rd7;\nld.global.f32 %f9, [%rd8];\nBB9_3:\n- mad.lo.s32 %r18, %r2, %r9, %r1;\n- mul.wide.u32 %rd9, %r18, 4;\n+ mul.lo.s32 %r16, %r9, %r8;\n+ mad.lo.s32 %r17, %r16, %r2, %r1;\n+ mul.wide.u32 %rd9, %r17, 4;\nadd.s64 %rd10, %rd2, %rd9;\nld.global.f32 %f7, [%rd10];\nmul.f32 %f10, %f9, %f7;\nadd.s64 %rd11, %rd1, %rd9;\nst.global.f32 [%rd11], %f10;\n- mad.lo.s32 %r19, %r9, %r8, %r18;\n- mul.lo.s32 %r20, %r8, %r7;\n- min.u32 %r3, %r19, %r20;\n- add.s32 %r21, %r18, %r8;\n- setp.ge.u32 %p5, %r21, %r3;\n- @%p5 bra BB9_5;\n+ mul.lo.s32 %r18, %r8, %r7;\n+ add.s32 %r19, %r17, %r16;\n+ min.u32 %r3, %r19, %r18;\n+ add.s32 %r20, %r17, %r8;\n+ setp.ge.u32 %p3, %r20, %r3;\n+ @%p3 bra BB9_5;\nBB9_4:\n- mul.wide.s32 %rd12, %r21, 4;\n+ mul.wide.s32 %rd12, %r20, 4;\nadd.s64 %rd13, %rd2, %rd12;\nld.global.f32 %f8, [%rd13];\nmul.f32 %f10, %f10, %f8;\nadd.s64 %rd14, %rd1, %rd12;\nst.global.f32 [%rd14], %f10;\n- add.s32 %r21, %r21, %r8;\n- setp.lt.u32 %p6, %r21, %r3;\n- @%p6 bra BB9_4;\n+ add.s32 %r20, %r20, %r8;\n+ setp.lt.u32 %p4, %r20, %r3;\n+ @%p4 bra BB9_4;\nBB9_5:\nret;\n@@ -684,7 +660,7 @@ BB9_5:\n.param .u32 cumulative_min_up_sweep_d_param_4\n)\n{\n- .reg .pred %p<5>;\n+ .reg .pred %p<4>;\n.reg .b32 %r<20>;\n.reg .f64 %fd<8>;\n.reg .b64 %rd<11>;\n@@ -702,7 +678,7 @@ BB9_5:\nmad.lo.s32 %r1, %r10, %r11, %r12;\nadd.s32 %r13, %r8, -1;\nsetp.gt.u32 %p1, %r1, %r13;\n- @%p1 bra BB10_5;\n+ @%p1 bra BB10_4;\nmov.u32 %r14, %ctaid.y;\nmul.lo.s32 %r2, %r14, %r8;\n@@ -727,16 +703,13 @@ BB10_2:\n@%p3 bra BB10_2;\nBB10_3:\n- setp.ge.u32 %p4, %r9, 
%r7;\n- @%p4 bra BB10_5;\n-\nadd.s32 %r18, %r1, %r2;\ncvta.to.global.u64 %rd8, %rd2;\nmul.wide.u32 %rd9, %r18, 8;\nadd.s64 %rd10, %rd8, %rd9;\nst.global.f64 [%rd10], %fd7;\n-BB10_5:\n+BB10_4:\nret;\n}\n@@ -749,7 +722,7 @@ BB10_5:\n.param .u32 cumulative_min_up_sweep_f_param_4\n)\n{\n- .reg .pred %p<5>;\n+ .reg .pred %p<4>;\n.reg .f32 %f<8>;\n.reg .b32 %r<20>;\n.reg .b64 %rd<11>;\n@@ -767,7 +740,7 @@ BB10_5:\nmad.lo.s32 %r1, %r10, %r11, %r12;\nadd.s32 %r13, %r8, -1;\nsetp.gt.u32 %p1, %r1, %r13;\n- @%p1 bra BB11_5;\n+ @%p1 bra BB11_4;\nmov.u32 %r14, %ctaid.y;\nmul.lo.s32 %r2, %r14, %r8;\n@@ -792,16 +765,13 @@ BB11_2:\n@%p3 bra BB11_2;\nBB11_3:\n- setp.ge.u32 %p4, %r9, %r7;\n- @%p4 bra BB11_5;\n-\nadd.s32 %r18, %r1, %r2;\ncvta.to.global.u64 %rd8, %rd2;\nmul.wide.u32 %rd9, %r18, 4;\nadd.s64 %rd10, %rd8, %rd9;\nst.global.f32 [%rd10], %f7;\n-BB11_5:\n+BB11_4:\nret;\n}\n@@ -815,8 +785,8 @@ BB11_5:\n.param .u32 cumulative_min_down_sweep_d_param_5\n)\n{\n- .reg .pred %p<7>;\n- .reg .b32 %r<22>;\n+ .reg .pred %p<5>;\n+ .reg .b32 %r<21>;\n.reg .f64 %fd<11>;\n.reg .b64 %rd<15>;\n@@ -837,47 +807,44 @@ BB11_5:\nsetp.gt.u32 %p1, %r1, %r13;\n@%p1 bra BB12_5;\n- mov.u32 %r14, %ctaid.y;\n- mul.lo.s32 %r2, %r14, %r8;\n- mov.u32 %r15, %nctaid.y;\n- setp.lt.u32 %p2, %r15, 2;\n- setp.eq.s32 %p3, %r14, 0;\n- or.pred %p4, %p2, %p3;\n+ mov.u32 %r2, %ctaid.y;\n+ setp.eq.s32 %p2, %r2, 0;\nmov.f64 %fd9, 0d7FF0000000000000;\n- @%p4 bra BB12_3;\n+ @%p2 bra BB12_3;\n- add.s32 %r16, %r1, %r2;\n- add.s32 %r17, %r16, -1;\n+ add.s32 %r14, %r2, -1;\n+ mad.lo.s32 %r15, %r14, %r8, %r1;\ncvta.to.global.u64 %rd6, %rd3;\n- mul.wide.s32 %rd7, %r17, 8;\n+ mul.wide.u32 %rd7, %r15, 8;\nadd.s64 %rd8, %rd6, %rd7;\nld.global.f64 %fd9, [%rd8];\nBB12_3:\n- mad.lo.s32 %r18, %r2, %r9, %r1;\n- mul.wide.u32 %rd9, %r18, 8;\n+ mul.lo.s32 %r16, %r9, %r8;\n+ mad.lo.s32 %r17, %r16, %r2, %r1;\n+ mul.wide.u32 %rd9, %r17, 8;\nadd.s64 %rd10, %rd2, %rd9;\nld.global.f64 %fd7, [%rd10];\nmin.f64 %fd10, %fd9, %fd7;\nadd.s64 %rd11, %rd1, %rd9;\nst.global.f64 [%rd11], %fd10;\n- mad.lo.s32 %r19, %r9, %r8, %r18;\n- mul.lo.s32 %r20, %r8, %r7;\n- min.u32 %r3, %r19, %r20;\n- add.s32 %r21, %r18, %r8;\n- setp.ge.u32 %p5, %r21, %r3;\n- @%p5 bra BB12_5;\n+ mul.lo.s32 %r18, %r8, %r7;\n+ add.s32 %r19, %r17, %r16;\n+ min.u32 %r3, %r19, %r18;\n+ add.s32 %r20, %r17, %r8;\n+ setp.ge.u32 %p3, %r20, %r3;\n+ @%p3 bra BB12_5;\nBB12_4:\n- mul.wide.s32 %rd12, %r21, 8;\n+ mul.wide.s32 %rd12, %r20, 8;\nadd.s64 %rd13, %rd2, %rd12;\nld.global.f64 %fd8, [%rd13];\nmin.f64 %fd10, %fd10, %fd8;\nadd.s64 %rd14, %rd1, %rd12;\nst.global.f64 [%rd14], %fd10;\n- add.s32 %r21, %r21, %r8;\n- setp.lt.u32 %p6, %r21, %r3;\n- @%p6 bra BB12_4;\n+ add.s32 %r20, %r20, %r8;\n+ setp.lt.u32 %p4, %r20, %r3;\n+ @%p4 bra BB12_4;\nBB12_5:\nret;\n@@ -893,9 +860,9 @@ BB12_5:\n.param .u32 cumulative_min_down_sweep_f_param_5\n)\n{\n- .reg .pred %p<7>;\n+ .reg .pred %p<5>;\n.reg .f32 %f<11>;\n- .reg .b32 %r<22>;\n+ .reg .b32 %r<21>;\n.reg .b64 %rd<15>;\n@@ -915,47 +882,44 @@ BB12_5:\nsetp.gt.u32 %p1, %r1, %r13;\n@%p1 bra BB13_5;\n- mov.u32 %r14, %ctaid.y;\n- mul.lo.s32 %r2, %r14, %r8;\n- mov.u32 %r15, %nctaid.y;\n- setp.lt.u32 %p2, %r15, 2;\n- setp.eq.s32 %p3, %r14, 0;\n- or.pred %p4, %p2, %p3;\n+ mov.u32 %r2, %ctaid.y;\n+ setp.eq.s32 %p2, %r2, 0;\nmov.f32 %f9, 0f7F800000;\n- @%p4 bra BB13_3;\n+ @%p2 bra BB13_3;\n- add.s32 %r16, %r1, %r2;\n- add.s32 %r17, %r16, -1;\n+ add.s32 %r14, %r2, -1;\n+ mad.lo.s32 %r15, %r14, %r8, %r1;\ncvta.to.global.u64 %rd6, %rd3;\n- mul.wide.s32 %rd7, %r17, 4;\n+ mul.wide.u32 %rd7, 
%r15, 4;\nadd.s64 %rd8, %rd6, %rd7;\nld.global.f32 %f9, [%rd8];\nBB13_3:\n- mad.lo.s32 %r18, %r2, %r9, %r1;\n- mul.wide.u32 %rd9, %r18, 4;\n+ mul.lo.s32 %r16, %r9, %r8;\n+ mad.lo.s32 %r17, %r16, %r2, %r1;\n+ mul.wide.u32 %rd9, %r17, 4;\nadd.s64 %rd10, %rd2, %rd9;\nld.global.f32 %f7, [%rd10];\nmin.f32 %f10, %f9, %f7;\nadd.s64 %rd11, %rd1, %rd9;\nst.global.f32 [%rd11], %f10;\n- mad.lo.s32 %r19, %r9, %r8, %r18;\n- mul.lo.s32 %r20, %r8, %r7;\n- min.u32 %r3, %r19, %r20;\n- add.s32 %r21, %r18, %r8;\n- setp.ge.u32 %p5, %r21, %r3;\n- @%p5 bra BB13_5;\n+ mul.lo.s32 %r18, %r8, %r7;\n+ add.s32 %r19, %r17, %r16;\n+ min.u32 %r3, %r19, %r18;\n+ add.s32 %r20, %r17, %r8;\n+ setp.ge.u32 %p3, %r20, %r3;\n+ @%p3 bra BB13_5;\nBB13_4:\n- mul.wide.s32 %rd12, %r21, 4;\n+ mul.wide.s32 %rd12, %r20, 4;\nadd.s64 %rd13, %rd2, %rd12;\nld.global.f32 %f8, [%rd13];\nmin.f32 %f10, %f10, %f8;\nadd.s64 %rd14, %rd1, %rd12;\nst.global.f32 [%rd14], %f10;\n- add.s32 %r21, %r21, %r8;\n- setp.lt.u32 %p6, %r21, %r3;\n- @%p6 bra BB13_4;\n+ add.s32 %r20, %r20, %r8;\n+ setp.lt.u32 %p4, %r20, %r3;\n+ @%p4 bra BB13_4;\nBB13_5:\nret;\n@@ -970,7 +934,7 @@ BB13_5:\n.param .u32 cumulative_max_up_sweep_d_param_4\n)\n{\n- .reg .pred %p<5>;\n+ .reg .pred %p<4>;\n.reg .b32 %r<20>;\n.reg .f64 %fd<8>;\n.reg .b64 %rd<11>;\n@@ -988,7 +952,7 @@ BB13_5:\nmad.lo.s32 %r1, %r10, %r11, %r12;\nadd.s32 %r13, %r8, -1;\nsetp.gt.u32 %p1, %r1, %r13;\n- @%p1 bra BB14_5;\n+ @%p1 bra BB14_4;\nmov.u32 %r14, %ctaid.y;\nmul.lo.s32 %r2, %r14, %r8;\n@@ -1013,16 +977,13 @@ BB14_2:\n@%p3 bra BB14_2;\nBB14_3:\n- setp.ge.u32 %p4, %r9, %r7;\n- @%p4 bra BB14_5;\n-\nadd.s32 %r18, %r1, %r2;\ncvta.to.global.u64 %rd8, %rd2;\nmul.wide.u32 %rd9, %r18, 8;\nadd.s64 %rd10, %rd8, %rd9;\nst.global.f64 [%rd10], %fd7;\n-BB14_5:\n+BB14_4:\nret;\n}\n@@ -1035,7 +996,7 @@ BB14_5:\n.param .u32 cumulative_max_up_sweep_f_param_4\n)\n{\n- .reg .pred %p<5>;\n+ .reg .pred %p<4>;\n.reg .f32 %f<8>;\n.reg .b32 %r<20>;\n.reg .b64 %rd<11>;\n@@ -1053,7 +1014,7 @@ BB14_5:\nmad.lo.s32 %r1, %r10, %r11, %r12;\nadd.s32 %r13, %r8, -1;\nsetp.gt.u32 %p1, %r1, %r13;\n- @%p1 bra BB15_5;\n+ @%p1 bra BB15_4;\nmov.u32 %r14, %ctaid.y;\nmul.lo.s32 %r2, %r14, %r8;\n@@ -1078,16 +1039,13 @@ BB15_2:\n@%p3 bra BB15_2;\nBB15_3:\n- setp.ge.u32 %p4, %r9, %r7;\n- @%p4 bra BB15_5;\n-\nadd.s32 %r18, %r1, %r2;\ncvta.to.global.u64 %rd8, %rd2;\nmul.wide.u32 %rd9, %r18, 4;\nadd.s64 %rd10, %rd8, %rd9;\nst.global.f32 [%rd10], %f7;\n-BB15_5:\n+BB15_4:\nret;\n}\n@@ -1101,8 +1059,8 @@ BB15_5:\n.param .u32 cumulative_max_down_sweep_d_param_5\n)\n{\n- .reg .pred %p<7>;\n- .reg .b32 %r<22>;\n+ .reg .pred %p<5>;\n+ .reg .b32 %r<21>;\n.reg .f64 %fd<11>;\n.reg .b64 %rd<15>;\n@@ -1123,47 +1081,44 @@ BB15_5:\nsetp.gt.u32 %p1, %r1, %r13;\n@%p1 bra BB16_5;\n- mov.u32 %r14, %ctaid.y;\n- mul.lo.s32 %r2, %r14, %r8;\n- mov.u32 %r15, %nctaid.y;\n- setp.lt.u32 %p2, %r15, 2;\n- setp.eq.s32 %p3, %r14, 0;\n- or.pred %p4, %p2, %p3;\n+ mov.u32 %r2, %ctaid.y;\n+ setp.eq.s32 %p2, %r2, 0;\nmov.f64 %fd9, 0dFFF0000000000000;\n- @%p4 bra BB16_3;\n+ @%p2 bra BB16_3;\n- add.s32 %r16, %r1, %r2;\n- add.s32 %r17, %r16, -1;\n+ add.s32 %r14, %r2, -1;\n+ mad.lo.s32 %r15, %r14, %r8, %r1;\ncvta.to.global.u64 %rd6, %rd3;\n- mul.wide.s32 %rd7, %r17, 8;\n+ mul.wide.u32 %rd7, %r15, 8;\nadd.s64 %rd8, %rd6, %rd7;\nld.global.f64 %fd9, [%rd8];\nBB16_3:\n- mad.lo.s32 %r18, %r2, %r9, %r1;\n- mul.wide.u32 %rd9, %r18, 8;\n+ mul.lo.s32 %r16, %r9, %r8;\n+ mad.lo.s32 %r17, %r16, %r2, %r1;\n+ mul.wide.u32 %rd9, %r17, 8;\nadd.s64 %rd10, %rd2, %rd9;\nld.global.f64 %fd7, [%rd10];\nmax.f64 
%fd10, %fd9, %fd7;\nadd.s64 %rd11, %rd1, %rd9;\nst.global.f64 [%rd11], %fd10;\n- mad.lo.s32 %r19, %r9, %r8, %r18;\n- mul.lo.s32 %r20, %r8, %r7;\n- min.u32 %r3, %r19, %r20;\n- add.s32 %r21, %r18, %r8;\n- setp.ge.u32 %p5, %r21, %r3;\n- @%p5 bra BB16_5;\n+ mul.lo.s32 %r18, %r8, %r7;\n+ add.s32 %r19, %r17, %r16;\n+ min.u32 %r3, %r19, %r18;\n+ add.s32 %r20, %r17, %r8;\n+ setp.ge.u32 %p3, %r20, %r3;\n+ @%p3 bra BB16_5;\nBB16_4:\n- mul.wide.s32 %rd12, %r21, 8;\n+ mul.wide.s32 %rd12, %r20, 8;\nadd.s64 %rd13, %rd2, %rd12;\nld.global.f64 %fd8, [%rd13];\nmax.f64 %fd10, %fd10, %fd8;\nadd.s64 %rd14, %rd1, %rd12;\nst.global.f64 [%rd14], %fd10;\n- add.s32 %r21, %r21, %r8;\n- setp.lt.u32 %p6, %r21, %r3;\n- @%p6 bra BB16_4;\n+ add.s32 %r20, %r20, %r8;\n+ setp.lt.u32 %p4, %r20, %r3;\n+ @%p4 bra BB16_4;\nBB16_5:\nret;\n@@ -1179,9 +1134,9 @@ BB16_5:\n.param .u32 cumulative_max_down_sweep_f_param_5\n)\n{\n- .reg .pred %p<7>;\n+ .reg .pred %p<5>;\n.reg .f32 %f<11>;\n- .reg .b32 %r<22>;\n+ .reg .b32 %r<21>;\n.reg .b64 %rd<15>;\n@@ -1201,47 +1156,44 @@ BB16_5:\nsetp.gt.u32 %p1, %r1, %r13;\n@%p1 bra BB17_5;\n- mov.u32 %r14, %ctaid.y;\n- mul.lo.s32 %r2, %r14, %r8;\n- mov.u32 %r15, %nctaid.y;\n- setp.lt.u32 %p2, %r15, 2;\n- setp.eq.s32 %p3, %r14, 0;\n- or.pred %p4, %p2, %p3;\n+ mov.u32 %r2, %ctaid.y;\n+ setp.eq.s32 %p2, %r2, 0;\nmov.f32 %f9, 0fFF800000;\n- @%p4 bra BB17_3;\n+ @%p2 bra BB17_3;\n- add.s32 %r16, %r1, %r2;\n- add.s32 %r17, %r16, -1;\n+ add.s32 %r14, %r2, -1;\n+ mad.lo.s32 %r15, %r14, %r8, %r1;\ncvta.to.global.u64 %rd6, %rd3;\n- mul.wide.s32 %rd7, %r17, 4;\n+ mul.wide.u32 %rd7, %r15, 4;\nadd.s64 %rd8, %rd6, %rd7;\nld.global.f32 %f9, [%rd8];\nBB17_3:\n- mad.lo.s32 %r18, %r2, %r9, %r1;\n- mul.wide.u32 %rd9, %r18, 4;\n+ mul.lo.s32 %r16, %r9, %r8;\n+ mad.lo.s32 %r17, %r16, %r2, %r1;\n+ mul.wide.u32 %rd9, %r17, 4;\nadd.s64 %rd10, %rd2, %rd9;\nld.global.f32 %f7, [%rd10];\nmax.f32 %f10, %f9, %f7;\nadd.s64 %rd11, %rd1, %rd9;\nst.global.f32 [%rd11], %f10;\n- mad.lo.s32 %r19, %r9, %r8, %r18;\n- mul.lo.s32 %r20, %r8, %r7;\n- min.u32 %r3, %r19, %r20;\n- add.s32 %r21, %r18, %r8;\n- setp.ge.u32 %p5, %r21, %r3;\n- @%p5 bra BB17_5;\n+ mul.lo.s32 %r18, %r8, %r7;\n+ add.s32 %r19, %r17, %r16;\n+ min.u32 %r3, %r19, %r18;\n+ add.s32 %r20, %r17, %r8;\n+ setp.ge.u32 %p3, %r20, %r3;\n+ @%p3 bra BB17_5;\nBB17_4:\n- mul.wide.s32 %rd12, %r21, 4;\n+ mul.wide.s32 %rd12, %r20, 4;\nadd.s64 %rd13, %rd2, %rd12;\nld.global.f32 %f8, [%rd13];\nmax.f32 %f10, %f10, %f8;\nadd.s64 %rd14, %rd1, %rd12;\nst.global.f32 [%rd14], %f10;\n- add.s32 %r21, %r21, %r8;\n- setp.lt.u32 %p6, %r21, %r3;\n- @%p6 bra BB17_4;\n+ add.s32 %r20, %r20, %r8;\n+ setp.lt.u32 %p4, %r20, %r3;\n+ @%p4 bra BB17_4;\nBB17_5:\nret;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/cpp/kernels/cum_scan.cuh",
"new_path": "src/main/cpp/kernels/cum_scan.cuh",
"diff": "#pragma once\n/**\n- * Cumulative Scan - Applies <scanOp> to accumulate values over columns of an input matrix\n+ * Cumulative Scan - Applies <scanOp> to accumulate values over columns of an input matrix.\n+ * Up sweep writes per block accumulation results to offset buffer once\n* @param scanOp - Type of the functor object that implements the scan operation\n*/\n// --------------------------------------------------------\n@@ -35,8 +36,8 @@ __device__ void cumulative_scan_up_sweep(T *g_idata, T *g_tdata, uint rows, uint\nif (blockIdx.x * blockDim.x + threadIdx.x > cols - 1)\nreturn;\n- uint offset = blockIdx.y * cols * block_height + blockIdx.x * blockDim.x;\n- uint idx = offset + threadIdx.x;\n+ uint block_offset = blockIdx.y * cols * block_height + blockIdx.x * blockDim.x;\n+ uint idx = block_offset + threadIdx.x;\n// initial accumulator value\nT acc = g_idata[idx];\n@@ -49,14 +50,15 @@ __device__ void cumulative_scan_up_sweep(T *g_idata, T *g_tdata, uint rows, uint\nacc = scan_op(acc, g_idata[i]);\n// write out accumulated block offset\n- if (block_height < rows)\n- {\ng_tdata[blockIdx.y * cols + blockIdx.x * blockDim.x + threadIdx.x] = acc;\n- // if(threadIdx.x == 0)\n- // printf(\"blockIdx.y=%d, acc=%f\\n\", blockIdx.y, acc);\n- }\n}\n+// --------------------------------------------------------\n+/**\n+ * Cumulative Scan - Applies <scanOp> to accumulate values over columns of an input matrix.\n+ * Down sweep writes accumulated values to result buffer\n+ * @param scanOp - Type of the functor object that implements the scan operation\n+ */\n// --------------------------------------------------------\ntemplate<typename scanOp, typename NeutralElement, typename T>\n__device__ void cumulative_scan_down_sweep(T *g_idata, T *g_odata, T *g_tdata, uint rows, uint cols, uint block_height,\n@@ -67,16 +69,10 @@ __device__ void cumulative_scan_down_sweep(T *g_idata, T *g_odata, T *g_tdata, u\nreturn;\nuint idx = blockIdx.y * cols * block_height + blockIdx.x * blockDim.x + threadIdx.x;\n- int offset_idx = blockIdx.y * cols + blockIdx.x * blockDim.x + threadIdx.x;\n-\n- // initial accumulator value\n- T acc = (gridDim.y > 1) ? ((blockIdx.y > 0) ? g_tdata[offset_idx-1] : NeutralElement::get()) : NeutralElement::get();\n- // if(threadIdx.x == 0)\n- // {\n- // printf(\"gridDim.y=%d, blockIdx.y=%d, down sweep acc=%f\\n\", gridDim.y, blockIdx.y, acc);\n- // printf(\"gridDim.y=%d, blockIdx.y=%d, g_tdata[%d]=%f\\n\", gridDim.y, blockIdx.y, idx, g_tdata[offset_idx]);\n- // }\n+ // initial accumulator value: all but first row fetch from offset buffer\n+ T acc = (blockIdx.y > 0) ? g_tdata[(blockIdx.y -1) * cols + blockIdx.x * blockDim.x + threadIdx.x]\n+ : NeutralElement::get();\ng_odata[idx] = acc = scan_op(acc, g_idata[idx]);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2663, 2671] Resolve problem with failing cumulative aggregate tests (indexing issue) |
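Beneath the register-level PTX changes, the fixed kernels implement a classic two-pass scheme: the up-sweep now always writes one partial aggregate per block into the offset buffer, and the down-sweep seeds every block except the first from the previous block's offset. A sequential Java sketch of that scheme for a single cumsum column (illustration only, not the CUDA kernels; on the GPU the offset buffer is itself scanned between the two sweeps, which the running `carry` variable stands in for here):

```java
public class TwoPassCumsum {
    public static void main(String[] args) {
        double[] col = {1, 2, 3, 4, 5, 6, 7};
        int blockHeight = 3; // rows handled per "block"
        int nBlocks = (col.length + blockHeight - 1) / blockHeight;

        // up-sweep: one partial sum per block, written to the offset buffer
        double[] offsets = new double[nBlocks];
        for (int b = 0; b < nBlocks; b++)
            for (int r = b * blockHeight; r < Math.min((b + 1) * blockHeight, col.length); r++)
                offsets[b] += col[r];

        // down-sweep: block b starts from the total of blocks 0..b-1
        double[] out = new double[col.length];
        double carry = 0; // neutral element, used as-is by block 0
        for (int b = 0; b < nBlocks; b++) {
            double acc = carry;
            for (int r = b * blockHeight; r < Math.min((b + 1) * blockHeight, col.length); r++)
                out[r] = acc = acc + col[r];
            carry += offsets[b];
        }
        System.out.println(java.util.Arrays.toString(out));
        // [1.0, 3.0, 6.0, 10.0, 15.0, 21.0, 28.0]
    }
}
```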
49,689 | 26.09.2020 13:51:19 | -7,200 | a2c17e90ff7cd6930648fd7b4a03403f88b81f28 | [SYSTEMDS-2667] Fix groupid in pom | [
{
"change_type": "MODIFY",
"old_path": "dev/release/deploy.sh",
"new_path": "dev/release/deploy.sh",
"diff": "@@ -58,7 +58,8 @@ function checkout_code {\ncheckout_code\n# Remove SNAPSHOT from the version in pom\neval cd $RELEASE_WORK_DIR/systemds\n-sed -i \"s/<version>$RELEASE_VERSION-SNAPSHOT<\\/version>/<version>$RELEASE_VERSION<\\/version>/\" pom.xml\n+#sed -i \"s/<version>$RELEASE_VERSION-SNAPSHOT<\\/version>/<version>$RELEASE_VERSION<\\/version>/\" pom.xml\n+sed -i \"s/<version>$DEVELOPMENT_VERSION<\\/version>/<version>$RELEASE_VERSION<\\/version>/\" pom.xml\nGPG_OPTS=\"-Dgpg.keyname=$GPG_KEYID -Dgpg.passphrase=$GPG_PASSPHRASE\"\n# Deploy to /target folder for the next job to pick the artifacts up for there\nCMD=\"$MVN $PUBLISH_PROFILES deploy \\\n"
},
{
"change_type": "MODIFY",
"old_path": "pom.xml",
"new_path": "pom.xml",
"diff": "<artifactId>apache</artifactId>\n<version>18</version>\n</parent>\n- <groupId>org.apache.sysds</groupId>\n+ <groupId>org.apache.systemds</groupId>\n<version>2.1.0-SNAPSHOT</version>\n<artifactId>systemds</artifactId>\n<packaging>jar</packaging>\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2667] Fix groupid in pom |
49,706 | 26.09.2020 14:34:45 | -7,200 | d243180cf6019e8bb94238db3de1107c4bfdf132 | [MINOR] update readme on executing systemds for release | [
{
"change_type": "MODIFY",
"old_path": "src/assembly/bin/README.md",
"new_path": "src/assembly/bin/README.md",
"diff": "@@ -30,11 +30,12 @@ limitations under the License.\nRequirements for running SystemDS are a bash shell and OpenJDK 8 or a Spark 2 cluster installation (to run distributed jobs).\nThese requirements should be available via standard system packages in all major Linux distributions\n(make sure to have the right JDK version enabled, if you have multiple versions in your system).\n-For Windows, a bash comes with [git for windows](http://git-scm.com) and OpenJDK builds can be obtained at http://adoptopenjdk.net\n+For Windows, a bash comes with [git for windows](http://git-scm.com) and OpenJDK builds can be obtained at <http://adoptopenjdk.net>\n(tested version [jdk8u232-b09](https://adoptopenjdk.net/archive.html))\nTo start out with an example after having installed the requirements mentioned above, create a text file\n`hello.dml` in your unzipped SystemDS directory containing the following content:\n+\n```shell script\nX = rand(rows=$1, cols=$2, min=0, max=10, sparsity=$3)\nY = rand(rows=$2, cols=$1, min=0, max=10, sparsity=$3)\n@@ -44,46 +45,53 @@ print(toString(Z))\nwrite(Z, \"Z\")\n```\n-**Explaination:** The script takes three parameters for the creation of your matrices X and Y: rows, columns and degree\n+**Explanation:** The script takes three parameters for the creation of your matrices X and Y: rows, columns and degree\nof sparsity. As you can see, DML can access these parameters by specifying $1, $2, ... etc\n-\n**Execution:** Now run that first script you created by running one of the following commands depending on your operating system:\n-#### Running a script locally\n+### Running a script locally\n-```shell script\n-$ ./systemds hello.dml -args 10 10 1.0\n+``` bash\n+./bin/systemds hello.dml -args 10 10 1.0\n```\n-#### Running a script locally, providing your own SystemDS.jar file\n+### Running a script locally, providing your own SystemDS.jar file\nIf you compiled SystemDS from source, you can of course use the created JAR file with the run script.\n-```shell script\n-$ ./systemds path/to/the/SystemDS.jar hello.dml -args 10 10 1.0\n+``` bash\n+./bin/systemds path/to/the/SystemDS.jar hello.dml -args 10 10 1.0\n```\n-#### Running a script locally, in your SystemDS source environment\n+### Running a script locally, in your SystemDS source environment\n+\nIf you have cloned the SystemDS source repository and want to run your DML script with that, you can point the\nshell script to the source directory by setting the `SYSTEMDS_ROOT` environment variable.\n-```shell script\n-$ SYSTEMDS_ROOT=../../code/my-systemds/source ./systemds hello.dml -args 10 10 1.0\n+\n+``` bash\n+SYSTEMDS_ROOT=../../code/my-systemds/source\n+./bin/systemds hello.dml -args 10 10 1.0\n```\n-#### Running a script distributed on a Spark cluster\n+More about the environment setup can be found on : [running Systemds](http://apache.github.io/systemds/site/run).\n+\n+### Running a script distributed on a Spark cluster\n+\nFor running on a Spark cluster, the env variable SYSDS_DISTRIBUTED needs to be set (to something other than 0).\nPer default, SystemDS will run in hybrid mode, pushing some instructions to the cluster and running others locally.\nTo force cluster mode in this little test, we will increase the matrix size to give the worker nodes in the cluster\nsomething to do and force SystemDS to only generate Spark instructions by adding -exec spark to the command line\nparameters:\n-```shell script\n-$ SYSDS_DISTRIBUTED=1 ./systemds hello.dml -args 10000 10000 1.0 -exec spark\n+\n+``` 
bash\n+SYSDS_DISTRIBUTED=1\n+./bin/systemds hello.dml -args 10000 10000 1.0 -exec spark\n```\nThe output should read something similar to this (the warning can be safely ignored):\n-```shell script\n+``` bash\n20/03/09 16:40:29 INFO api.DMLScript: BEGIN DML run 03/09/2020 16:40:29\n20/03/09 16:40:30 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable\nYour hello world matrix contains:\n@@ -106,4 +114,4 @@ Total execution time: 0,122 sec.\n## Further reading\n-More documentation is available in the [docs directory of our github repository](https://github.com/apache/systemds/tree/master/docs)\n+More documentation is available in the [SystemDS Homepage](https://systemds.apache.org/documentation)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] update readme on executing systemds for release |
49,706 | 25.09.2020 19:04:21 | -7,200 | 1e7b6d416c5553ae09fe99d7472443dd02c79fad | [SYSTEMDS-2675+2676] Python Strings and printing
Add strings to the Python interface to enable calling SystemDS without
forcing a transfer and parsing into Python. | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/context/systemds_context.py",
"new_path": "src/main/python/systemds/context/systemds_context.py",
"diff": "@@ -37,7 +37,7 @@ from py4j.protocol import Py4JNetworkError\nfrom systemds.utils.consts import VALID_INPUT_TYPES\nfrom systemds.utils.helpers import get_module_dir\nfrom systemds.operator import OperationNode\n-\n+from systemds.script_building import OutputType\nclass SystemDSContext(object):\n\"\"\"A context with a connection to a java instance with which SystemDS operations are executed.\n@@ -276,3 +276,6 @@ class SystemDSContext(object):\ndef read(self, path: os.PathLike, **kwargs: Dict[str, VALID_INPUT_TYPES]):\nreturn OperationNode(self, 'read', [f'\"{path}\"'], named_input_nodes=kwargs)\n+\n+ def scalar(self, v: Dict[str, VALID_INPUT_TYPES]):\n+ return OperationNode(self, v, output_type=OutputType.SCALAR)\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/operator/operation_node.py",
"new_path": "src/main/python/systemds/operator/operation_node.py",
"diff": "@@ -161,6 +161,8 @@ class OperationNode(DAGNode):\nreturn f'{output}={self.operation}({inputs_comma_sep});'\nelif self.output_type == OutputType.NONE:\nreturn f'{self.operation}({inputs_comma_sep});'\n+ elif self.output_type == OutputType.SCALAR:\n+ return f'{var_name}={self.operation};'\nelse:\nreturn f'{var_name}={self.operation}({inputs_comma_sep});'\n@@ -339,12 +341,17 @@ class OperationNode(DAGNode):\nreturn OperationNode(self.sds_context, 'moment', unnamed_inputs, output_type=OutputType.DOUBLE)\ndef write(self, destination: str, format:str = \"binary\", **kwargs: Dict[str, VALID_INPUT_TYPES]) -> 'OperationNode':\n-\nunnamed_inputs = [self, f'\"{destination}\"']\nnamed_parameters = {\"format\":f'\"{format}\"'}\nnamed_parameters.update(kwargs)\nreturn OperationNode(self.sds_context, 'write', unnamed_inputs, named_parameters, output_type= OutputType.NONE)\n+ def to_string(self, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> 'OperationNode':\n+ return OperationNode(self.sds_context, 'toString', [self], kwargs, output_type= OutputType.DOUBLE)\n+\n+ def print(self, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> 'OperationNode':\n+ return OperationNode(self.sds_context, 'print', [self], kwargs, output_type= OutputType.NONE)\n+\ndef rev(self) -> 'OperationNode':\n\"\"\" Reverses the rows in a matrix\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/script_building/dag.py",
"new_path": "src/main/python/systemds/script_building/dag.py",
"diff": "@@ -33,6 +33,7 @@ if TYPE_CHECKING:\nclass OutputType(Enum):\nMATRIX = auto()\nDOUBLE = auto()\n+ SCALAR = auto()\nLIST = auto()\nNONE = auto()\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/python/tests/manual_tests/save_log_reg_mnist_sysds.py",
"diff": "+# -------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+# -------------------------------------------------------------\n+\n+from systemds.context import SystemDSContext\n+from systemds.matrix import Matrix\n+from systemds.examples.tutorials.mnist import DataManager\n+\n+d = DataManager()\n+\n+base_path = \"systemds/examples/tutorials/mnist/\"\n+with SystemDSContext() as sds:\n+ # Train Data\n+ X = Matrix(sds, d.get_train_data().reshape((60000, 28*28)))\n+ X.write(base_path + \"train_data\").compute()\n+ Y = Matrix(sds, d.get_train_labels()) + 1.0\n+ Y.write(base_path + \"train_labels\").compute()\n+ Xt = Matrix(sds, d.get_test_data().reshape((10000, 28*28)))\n+ Xt.write(base_path + \"test_data\").compute()\n+ Yt = Matrix(sds, d.get_test_labels()) + 1.0\n+ Yt.write(base_path + \"test_labels\").compute()\n+\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/python/tests/matrix/test_print.py",
"diff": "+# -------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+# -------------------------------------------------------------\n+\n+import math\n+import os\n+import random\n+import shutil\n+import sys\n+import unittest\n+\n+import numpy as np\n+import scipy.stats as st\n+from systemds.context import SystemDSContext\n+from systemds.matrix import Matrix\n+\n+\n+class TestPrint(unittest.TestCase):\n+\n+ sds: SystemDSContext = None\n+\n+ @classmethod\n+ def setUpClass(cls):\n+ cls.sds = SystemDSContext()\n+\n+ @classmethod\n+ def tearDownClass(cls):\n+ cls.sds.close()\n+\n+ def test_print_01(self):\n+ Matrix(self.sds, np.array([1])).to_string().print().compute()\n+ self.assertEquals('1.000',self.sds.get_stdout()[0])\n+\n+ def test_print_02(self):\n+ self.sds.scalar(1).print().compute()\n+ self.assertEquals('1', self.sds.get_stdout()[0])\n+\n+if __name__ == \"__main__\":\n+ unittest.main(exit=False)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/tests/matrix/test_write.py",
"new_path": "src/main/python/tests/matrix/test_write.py",
"diff": "@@ -56,6 +56,13 @@ class TestWrite(unittest.TestCase):\nres = NX.compute()\nself.assertTrue(np.allclose(original, res))\n+ def test_write_02(self):\n+ original = np.array([[1,2,3,4,5]])\n+ X = Matrix(self.sds, original)\n+ X.write(self.temp_dir + \"02\").compute()\n+ NX = self.sds.read(self.temp_dir + \"02\")\n+ res = NX.compute()\n+ self.assertTrue(np.allclose(original, res))\nif __name__ == \"__main__\":\nunittest.main(exit=False)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2675+2676] Python Strings and printing
Add strings to the Python interface to enable calling SystemDS without
forcing a transfer and parsing into Python. |
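The new ASSIGN output type changes only how one DML line is rendered. A hedged Java transliteration of the dispatch added to `OperationNode._code_line` (the real implementation is the Python shown above; the enum and method names here are illustrative) makes the three line shapes explicit:

```java
import java.util.List;

public class CodeLineSketch {
    enum OutputType { MATRIX, DOUBLE, SCALAR, ASSIGN, NONE }

    static String codeLine(OutputType type, String op, String varName, List<String> inputs) {
        String args = String.join(",", inputs);
        switch (type) {
            case NONE:   return op + "(" + args + ");";   // e.g. print(V0);
            case ASSIGN: return varName + "=" + op + ";"; // e.g. V0=1;
            default:     return varName + "=" + op + "(" + args + ");";
        }
    }

    public static void main(String[] args) {
        System.out.println(codeLine(OutputType.ASSIGN, "1", "V0", List.of()));
        System.out.println(codeLine(OutputType.NONE, "print", "V1", List.of("V0")));
    }
}
```

Running the sketch prints `V0=1;` and `print(V0);`, which is roughly the script that `sds.scalar(1).print().compute()` now sends to SystemDS (test_print_02 above asserts that the captured stdout then contains `1`).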
49,706 | 25.09.2020 19:42:46 | -7,200 | de50269f820b570841d5a79c60161b41c5b59db5 | Allow Reading unknown dimensions for algorithm input | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/context/systemds_context.py",
"new_path": "src/main/python/systemds/context/systemds_context.py",
"diff": "@@ -39,6 +39,7 @@ from systemds.utils.helpers import get_module_dir\nfrom systemds.operator import OperationNode\nfrom systemds.script_building import OutputType\n+\nclass SystemDSContext(object):\n\"\"\"A context with a connection to a java instance with which SystemDS operations are executed.\nThe java process is started and is running using a random tcp port for instruction parsing.\"\"\"\n@@ -274,8 +275,21 @@ class SystemDSContext(object):\nreturn OperationNode(self, 'rand', [], named_input_nodes=named_input_nodes)\n- def read(self, path: os.PathLike, **kwargs: Dict[str, VALID_INPUT_TYPES]):\n- return OperationNode(self, 'read', [f'\"{path}\"'], named_input_nodes=kwargs)\n+ def read(self, path: os.PathLike, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> 'OperationNode':\n+ \"\"\" Read an file from disk. Supportted types include:\n+ CSV, Matrix Market(coordinate), Text(i,j,v), SystemDS Binay\n+ See: http://apache.github.io/systemds/site/dml-language-reference#readwrite-built-in-functions for more details\n+ :return: an Operation Node, containing the read data.\n+ \"\"\"\n+ return OperationNode(self, 'read', [f'\"{path}\"'], named_input_nodes=kwargs, shape=(-1,))\n- def scalar(self, v: Dict[str, VALID_INPUT_TYPES]):\n- return OperationNode(self, v, output_type=OutputType.SCALAR)\n\\ No newline at end of file\n+ def scalar(self, v: Dict[str, VALID_INPUT_TYPES]) -> 'OperationNode':\n+ \"\"\" Construct an scalar value, this can contain str, float, double, integers and booleans.\n+ :return: An `OperationNode` containing the scalar value.\n+ \"\"\"\n+ if type(v) is str:\n+ if not ((v[0] == '\"' and v[-1] == '\"') or (v[0] == \"'\" and v[-1] == \"'\")):\n+ v = f'\"{v}\"'\n+ # output type assign simply assigns the given variable to the value\n+ # therefore the output type is assign.\n+ return OperationNode(self, v, output_type=OutputType.ASSIGN)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/operator/algorithm.py",
"new_path": "src/main/python/systemds/operator/algorithm.py",
"diff": "@@ -156,10 +156,14 @@ def multiLogReg(x: OperationNode, y: OperationNode, **kwargs: Dict[str, VALID_IN\nif y.shape[0] == 0:\nraise ValueError(\"Found array with 0 feature(s) (shape={s}) while a minimum of 1 is required.\"\n.format(s=y.shape))\n+ if -1 in x.shape:\n+ output_shape = (-1,)\n+ else:\n+ output_shape = (x.shape[1],)\nparams_dict = {'X': x, 'Y': y}\nparams_dict.update(kwargs)\n- return OperationNode(x.sds_context, 'multiLogReg', named_input_nodes=params_dict, shape = (x.shape[1],))\n+ return OperationNode(x.sds_context, 'multiLogReg', named_input_nodes=params_dict, shape = output_shape)\ndef multiLogRegPredict(x: OperationNode, b: OperationNode, y: OperationNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\n"
},
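The shape handling added to multiLogReg reduces to one rule: any unknown dimension (encoded as -1, e.g. from read() without metadata) makes the output shape unknown as well. An illustrative standalone restatement of that rule:

```python
# Illustrative restatement of the sentinel rule from the diff above.
def multilogreg_output_shape(x_shape):
    return (-1,) if -1 in x_shape else (x_shape[1],)

assert multilogreg_output_shape((-1,)) == (-1,)        # input read from disk, dims unknown
assert multilogreg_output_shape((100, 784)) == (784,)  # fully known input
```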
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/operator/operation_node.py",
"new_path": "src/main/python/systemds/operator/operation_node.py",
"diff": "@@ -161,7 +161,7 @@ class OperationNode(DAGNode):\nreturn f'{output}={self.operation}({inputs_comma_sep});'\nelif self.output_type == OutputType.NONE:\nreturn f'{self.operation}({inputs_comma_sep});'\n- elif self.output_type == OutputType.SCALAR:\n+ elif self.output_type == OutputType.ASSIGN:\nreturn f'{var_name}={self.operation};'\nelse:\nreturn f'{var_name}={self.operation}({inputs_comma_sep});'\n@@ -341,15 +341,30 @@ class OperationNode(DAGNode):\nreturn OperationNode(self.sds_context, 'moment', unnamed_inputs, output_type=OutputType.DOUBLE)\ndef write(self, destination: str, format:str = \"binary\", **kwargs: Dict[str, VALID_INPUT_TYPES]) -> 'OperationNode':\n+ \"\"\" Write input to disk.\n+ The written format is easily read by SystemDSContext.read().\n+ There is no return on write.\n+\n+ :param destination: The location which the file is stored. Defaulting to HDFS paths if available.\n+ :param format: The format which the file is saved in. Default is binary to improve SystemDS reading times.\n+ :param kwargs: Contains multiple extra specific arguments, can be seen at http://apache.github.io/systemds/site/dml-language-reference#readwrite-built-in-functions\n+ \"\"\"\nunnamed_inputs = [self, f'\"{destination}\"']\nnamed_parameters = {\"format\":f'\"{format}\"'}\nnamed_parameters.update(kwargs)\nreturn OperationNode(self.sds_context, 'write', unnamed_inputs, named_parameters, output_type= OutputType.NONE)\ndef to_string(self, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> 'OperationNode':\n- return OperationNode(self.sds_context, 'toString', [self], kwargs, output_type= OutputType.DOUBLE)\n+ \"\"\" Converts the input to a string representation.\n+ :return: `OperationNode` containing the string.\n+ \"\"\"\n+ return OperationNode(self.sds_context, 'toString', [self], kwargs, output_type= OutputType.SCALAR)\ndef print(self, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> 'OperationNode':\n+ \"\"\" Prints the given Operation Node.\n+ There is no return on calling.\n+ To get the returned string look at the stdout of SystemDSContext.\n+ \"\"\"\nreturn OperationNode(self.sds_context, 'print', [self], kwargs, output_type= OutputType.NONE)\ndef rev(self) -> 'OperationNode':\n"
},
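A hedged usage sketch for the documented write/print/to_string trio, with the same assumed import paths as the earlier round-trip sketch:

```python
# Sketch only: constructor and import paths are assumptions.
import numpy as np
from systemds.context import SystemDSContext
from systemds.operator import Matrix

sds = SystemDSContext()
X = Matrix(sds, np.eye(3))
X.write("temp/eye", format="csv").compute()  # write has no return value
X.print().compute()                          # text lands on the context's stdout
s = X.to_string().compute()                  # string representation returned to Python
```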
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/script_building/dag.py",
"new_path": "src/main/python/systemds/script_building/dag.py",
"diff": "@@ -34,6 +34,7 @@ class OutputType(Enum):\nMATRIX = auto()\nDOUBLE = auto()\nSCALAR = auto()\n+ ASSIGN = auto()\nLIST = auto()\nNONE = auto()\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/tests/examples/tutorials/test_mnist.py",
"new_path": "src/main/python/tests/examples/tutorials/test_mnist.py",
"diff": "@@ -36,6 +36,7 @@ class Test_DMLScript(unittest.TestCase):\nsds: SystemDSContext = None\nd: DataManager = None\n+ base_path = \"systemds/examples/tutorials/mnist/\"\n@classmethod\ndef setUpClass(cls):\n@@ -84,6 +85,29 @@ class Test_DMLScript(unittest.TestCase):\nself.assertGreater(acc, 80)\n+ def test_multi_log_reg_with_read(self):\n+ train_count = 100\n+ test_count = 100\n+ X = Matrix(self.sds, self.d.get_train_data().reshape(\n+ (60000, 28*28))[:train_count])\n+ X.write(self.base_path + \"train_data\").compute()\n+ Y = Matrix(self.sds, self.d.get_train_labels()[:train_count]) + 1\n+ Y.write(self.base_path + \"train_labels\").compute()\n+\n+ Xr = self.sds.read(self.base_path + \"train_data\")\n+ Yr = self.sds.read(self.base_path + \"train_labels\")\n+\n+ bias = multiLogReg(Xr, Yr, verbose=False)\n+ # Test data\n+ Xt = Matrix(self.sds, self.d.get_test_data().reshape(\n+ (10000, 28*28))[:test_count])\n+ Yt = Matrix(self.sds, self.d.get_test_labels()[:test_count])\n+ Yt = Yt + 1.0\n+\n+ [_, _, acc] = multiLogRegPredict(Xt, bias, Yt).compute(verbose=True)\n+\n+ self.assertGreater(acc, 70)\n+\nif __name__ == \"__main__\":\nunittest.main(exit=False)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2677] Allow Reading unknown dimensions for algorithm input |
49,693 | 29.09.2020 16:02:29 | -7,200 | 5461f427730656b1e18211907365eea94b32a052 | [MINOR][DOC] Added supported versions for MKL and CUDA to documentation | [
{
"change_type": "MODIFY",
"old_path": "docs/index.md",
"new_path": "docs/index.md",
"diff": "@@ -28,7 +28,8 @@ SystemDS's distinguishing characteristics are:\n2. **Multiple execution modes**, including Spark MLContext, Spark Batch, Standalone, and JMLC.\n3. **Automatic optimization** based on data and cluster characteristics to ensure both efficiency and scalability.\n-This version of SystemDS supports: Java 8+, Python 3.5+, Hadoop 2.6+ (Not 3.X), and Spark 2.1+ (Not 3.X).\n+This version of SystemDS supports: Java 8+, Python 3.5+, Hadoop 2.6+ (Not 3.X), and Spark 2.1+ (Not 3.X) Nvidia CUDA 10.2\n+ (CuDNN 7.x) Intel MKL (<=2019.x).\n# Links\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/site/run.md",
"new_path": "docs/site/run.md",
"diff": "@@ -92,8 +92,8 @@ bin/systemds Univar-Stats.dml -nvargs X=data/haberman.data TYPES=data/types.csv\n## Using Intel MKL native instructions\n-To use the MKL acceleration download and install the latest MKL library from [1],\n-set the environment variables with the MKL-provided script `$ compilervars.sh intel64` and set\n-the option `sysds.native.blas` in `SystemDS-config.xml`.\n+To use the MKL acceleration download and install the latest supported MKL library (<=2019.5) from [1],\n+set the environment variables with the MKL-provided script `. /opt/intel/bin/compilervars.sh intel64` (note the dot and\n+the default install location) and set the option `sysds.native.blas` in `SystemDS-config.xml` to mkl.\n[1]: https://software.intel.com/mkl \"Intel Math Kernel Library\"\n\\ No newline at end of file\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR][DOC] Added supported versions for MKL and CUDA to documentation |
49,689 | 30.09.2020 11:18:31 | -7,200 | 0d1d011f7cf9dcf9fb2013dba523081508cb99fc | Update bin.xml, source.xml and README | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -36,11 +36,8 @@ heterogeneous and nested schema.\n**Issue Tracker** [Jira Dashboard](https://issues.apache.org/jira/secure/Dashboard.jspa?selectPageId=12335852)\n-**Status and Build:** SystemDS is still in pre-alpha status. The original code base was forked from Apache SystemML 1.2 in\n-September 2018. We will continue to support linear algebra programs over matrices, while replacing the underlying data model\n-and compiler, as well as substantially extending the supported functionalities. Until the first release, you can build your own\n-snapshot via Apache Maven:\n- `mvn clean package -P distribution`.\n+**Status and Build:** SystemDS is renamed from SystemML which is an **Apache Top Level Project**.\n+To build from source visit [SystemDS Install from source](http://apache.github.io/systemds/site/install.html)\n[](https://github.com/apache/systemds/actions?query=workflow%3A%22Build%22+branch%3Amaster+event%3Apush)\n[](https://github.com/apache/systemds/actions?query=workflow%3ADocumentation+branch%3Amaster+event%3Apush)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/assembly/bin.xml",
"new_path": "src/assembly/bin.xml",
"diff": "<source>target/${artifactId}-${project.version}-extra.jar</source>\n<outputDirectory>./lib/</outputDirectory>\n</file>\n+ <file>\n+ <source>target/${artifactId}-${project.version}.jar</source>\n+ <outputDirectory>.</outputDirectory>\n+ </file>\n<file>\n<source>${basedir}/conf/log4j.properties.template</source>\n<outputDirectory>./conf/</outputDirectory>\n"
},
{
"change_type": "MODIFY",
"old_path": "src/assembly/source.xml",
"new_path": "src/assembly/source.xml",
"diff": "<exclude>./src/test/config/hadoop_bin_windows/**</exclude>\n<exclude>./src/main/cpp/lib/**</exclude>\n<exclude>pom.xml.versionsBackup</exclude>\n+ <exclude>./dev/Tasks-obsolete.txt</exclude>\n</excludes>\n</fileSet>\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2667] Update bin.xml, source.xml and README |
49,698 | 29.09.2020 09:18:04 | -19,080 | 8d1dfe92499e3138d2397423930865e64d77e91c | [MINOR][DOC] Update header links and image files
Fix relative path when inside site
closes | [
{
"change_type": "ADD",
"old_path": "docs/site/img/dml-language-reference/Conv2d.gif",
"new_path": "docs/site/img/dml-language-reference/Conv2d.gif",
"diff": "Binary files /dev/null and b/docs/site/img/dml-language-reference/Conv2d.gif differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/site/img/dml-language-reference/Conv2d1.gif",
"new_path": "docs/site/img/dml-language-reference/Conv2d1.gif",
"diff": "Binary files /dev/null and b/docs/site/img/dml-language-reference/Conv2d1.gif differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/site/img/dml-language-reference/Conv2d_backward_data.gif",
"new_path": "docs/site/img/dml-language-reference/Conv2d_backward_data.gif",
"diff": "Binary files /dev/null and b/docs/site/img/dml-language-reference/Conv2d_backward_data.gif differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/site/img/dml-language-reference/Conv2d_backward_data1.gif",
"new_path": "docs/site/img/dml-language-reference/Conv2d_backward_data1.gif",
"diff": "Binary files /dev/null and b/docs/site/img/dml-language-reference/Conv2d_backward_data1.gif differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/site/img/dml-language-reference/Conv2d_backward_data2.gif",
"new_path": "docs/site/img/dml-language-reference/Conv2d_backward_data2.gif",
"diff": "Binary files /dev/null and b/docs/site/img/dml-language-reference/Conv2d_backward_data2.gif differ\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR][DOC] Update header links and image files
Fix relative path when inside site
closes #1072 |
49,720 | 05.10.2020 14:06:40 | -7,200 | 8501fb325309249e6b4414aa468f69b1a2539153 | [MINOR] minor fixes in smote
replace the rbind/cbind with indexing
rand call is updated with a seed value | [
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/smote.dml",
"new_path": "scripts/builtin/smote.dml",
"diff": "@@ -46,41 +46,54 @@ return (Matrix[Double] Y) {\nprint(\"the number of samples should be an integral multiple of 100. Setting s = 100\")\ns = 100\n}\n+\n+ if(k < 1) {\n+ print(\"k should not be less than 1. Setting k value to default k = 1.\")\n+ k = 1\n+ }\n+\n# matrix to keep the index of KNN for each minority sample\n- knn_index = matrix(0,k,0)\n+ knn_index = matrix(0,k,nrow(X))\n# find nearest neighbour\nfor(i in 1:nrow(X))\n{\nknn = nn(X, X[i, ], k)\n- knn_index = cbind(knn_index, knn)\n+ knn_index[, i] = knn\n}\n# number of synthetic samples from each minority class sample\n- iter = (s/100)\n+ iter = 0\n+ iterLim = (s/100)\n# matrix to store synthetic samples\n- synthetic_samples = matrix(0, 0, ncol(X))\n- while(iter > 0)\n+ synthetic_samples = matrix(0, iterLim*ncol(knn_index), ncol(X))\n+\n+ # shuffle the nn indexes\n+ if(k < iterLim)\n+ rand_index = sample(k, iterLim, TRUE)\n+ else\n+ rand_index = sample(k, iterLim)\n+\n+ while(iter < iterLim)\n{\n- # generate a random number\n- # TODO avoid duplicate random numbers\n- rand_index = as.integer(as.scalar(Rand(rows=1, cols=1, min=1, max=k)))\n# pick the random NN\n- knn_sample = knn_index[rand_index,]\n+ knn_sample = knn_index[as.scalar(rand_index[iter+1]),]\n# generate sample\nfor(i in 1:ncol(knn_index))\n{\nindex = as.scalar(knn_sample[1,i])\nX_diff = X[index,] - X[i, ]\n- gap = as.scalar(Rand(rows=1, cols=1, min=0, max=1))\n+ gap = as.scalar(Rand(rows=1, cols=1, min=0, max=1, seed = 41))\nX_sys = X[i, ] + (gap*X_diff)\n- synthetic_samples = rbind(synthetic_samples, X_sys)\n+ synthetic_samples[iter*ncol(knn_index)+i,] = X_sys;\n}\n- iter = iter - 1\n+ iter = iter + 1\n}\nY = synthetic_samples\n+\nif(verbose)\nprint(nrow(Y)+ \" synthesized samples generated.\")\n+\n}\n"
},
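The indexing rewrite above keeps SMOTE's core step unchanged: each synthetic point is a convex combination of a minority sample and one of its k nearest neighbours. A NumPy restatement of that step (names are illustrative, not the DML API):

```python
import numpy as np

def smote_interpolate(X, knn_index, nn_row, rng):
    # X: minority samples (n x d); knn_index: (k x n) neighbour ids (1-based in DML);
    # nn_row: which of the k neighbours was drawn for this pass.
    synth = np.empty_like(X, dtype=float)
    for i in range(X.shape[0]):
        j = knn_index[nn_row, i]             # neighbour picked for sample i
        gap = rng.uniform(0.0, 1.0)          # the DML draws this via Rand(..., seed=42)
        synth[i] = X[i] + gap * (X[j] - X[i])
    return synth
```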
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinSmoteTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinSmoteTest.java",
"diff": "@@ -47,9 +47,14 @@ public class BuiltinSmoteTest extends AutomatedTestBase {\naddTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] {\"C\"}));\n}\n+ @Test\n+ public void testSmote0CP() {\n+ runSmoteTest(100, 1, LopProperties.ExecType.CP);\n+ }\n+\n@Test\npublic void testSmote1CP() {\n- runSmoteTest(300, 3, LopProperties.ExecType.CP);\n+ runSmoteTest(300, 10, LopProperties.ExecType.CP);\n}\n@Test\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] minor fixes in smote
replace the rbind/cbind with indexing
rand call is updated with a seed value |
49,720 | 05.10.2020 14:21:28 | -7,200 | 18628f07cd027b8dd9938b5f23fee7183e8357e7 | [MINOR] minor fixes in smote.dml
for loop converted to parfor
syntax reformations suggested by Arnab | [
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/smote.dml",
"new_path": "scripts/builtin/smote.dml",
"diff": "@@ -55,7 +55,7 @@ return (Matrix[Double] Y) {\n# matrix to keep the index of KNN for each minority sample\nknn_index = matrix(0,k,nrow(X))\n# find nearest neighbour\n- for(i in 1:nrow(X))\n+ parfor(i in 1:nrow(X))\n{\nknn = nn(X, X[i, ], k)\nknn_index[, i] = knn\n@@ -68,10 +68,7 @@ return (Matrix[Double] Y) {\nsynthetic_samples = matrix(0, iterLim*ncol(knn_index), ncol(X))\n# shuffle the nn indexes\n- if(k < iterLim)\n- rand_index = sample(k, iterLim, TRUE)\n- else\n- rand_index = sample(k, iterLim)\n+ rand_index = ifelse(k < iterLim, sample(k, iterLim, TRUE, 42), sample(k, iterLim, 42))\nwhile(iter < iterLim)\n{\n@@ -82,7 +79,7 @@ return (Matrix[Double] Y) {\n{\nindex = as.scalar(knn_sample[1,i])\nX_diff = X[index,] - X[i, ]\n- gap = as.scalar(Rand(rows=1, cols=1, min=0, max=1, seed = 41))\n+ gap = as.scalar(Rand(rows=1, cols=1, min=0, max=1, seed = 42))\nX_sys = X[i, ] + (gap*X_diff)\nsynthetic_samples[iter*ncol(knn_index)+i,] = X_sys;\n}\n"
},
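The ifelse in the shuffled index selection encodes one detail worth spelling out: when fewer neighbours exist than passes to draw (k < iterLim), sampling must happen with replacement. An equivalent NumPy check:

```python
import numpy as np

rng = np.random.default_rng(42)
k, iter_lim = 3, 5
replace = k < iter_lim  # with replacement only when k cannot cover all draws
rand_index = rng.choice(k, size=iter_lim, replace=replace)
```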
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/imputeFD.dml",
"new_path": "src/test/scripts/functions/builtin/imputeFD.dml",
"diff": "#-------------------------------------------------------------\n#\n-# Copyright 2020 Graz University of Technology\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n#\n#-------------------------------------------------------------\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] minor fixes in smote.dml
for loop converted to parfor
syntax reformations suggested by Arnab |
49,738 | 10.10.2020 14:40:20 | -7,200 | ab08fc5f45b74f4dbd57136ec2c735b0d37cd7a6 | [MINOR] Fix various warnings (UINT handling, generics, static) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/hops/LiteralOp.java",
"new_path": "src/main/java/org/apache/sysds/hops/LiteralOp.java",
"diff": "@@ -246,6 +246,7 @@ public class LiteralOp extends Hop\nswitch( getValueType() ) {\ncase BOOLEAN:\nreturn String.valueOf(value_boolean);\n+ case UINT8:\ncase INT32:\ncase INT64:\nreturn String.valueOf(value_long);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/parser/BuiltinFunctionExpression.java",
"new_path": "src/main/java/org/apache/sysds/parser/BuiltinFunctionExpression.java",
"diff": "@@ -608,6 +608,7 @@ public class BuiltinFunctionExpression extends DataIdentifier\nbreak;\ncase INT64:\ncase INT32:\n+ case UINT8:\ncase BOOLEAN:\noutput.setValueType(ValueType.INT64);\nbreak;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/data/LibTensorAgg.java",
"new_path": "src/main/java/org/apache/sysds/runtime/data/LibTensorAgg.java",
"diff": "@@ -245,7 +245,8 @@ public class LibTensorAgg {\nbreak;\n}\ncase INT64:\n- case INT32: {\n+ case INT32:\n+ case UINT8: {\nDenseBlock a = in.getDenseBlock();\nlong sum = 0;\nint[] ix = new int[a.numDims()];\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/data/TensorBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/data/TensorBlock.java",
"diff": "@@ -590,6 +590,8 @@ public class TensorBlock implements CacheBlock, Externalizable {\nlong size = 8 + 1;\nif (!bt.isSparse()) {\nswitch (bt._vt) {\n+ case UINT8:\n+ size += 1 * getLength(); break;\ncase INT32:\ncase FP32:\nsize += 4 * getLength(); break;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/util/DataConverter.java",
"new_path": "src/main/java/org/apache/sysds/runtime/util/DataConverter.java",
"diff": "@@ -1137,6 +1137,7 @@ public class DataConverter\nvalue = 0.0;\nsb.append(dfFormat(df, value));\nbreak;\n+ case UINT8:\ncase INT32:\ncase INT64:\nsb.append(tb.get(ix));\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/privacy/PrivacyPropagatorTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/privacy/PrivacyPropagatorTest.java",
"diff": "@@ -123,28 +123,6 @@ public class PrivacyPropagatorTest extends AutomatedTestBase {\nmmGeneralNoFineGrainedGeneralized(constraint1, constraint2, propagator);\n}\n-\n- private void mmGeneralNoFineGrainedGeneralized(PrivacyConstraint constraint1, PrivacyConstraint constraint2, MatrixMultiplicationPropagator propagator){\n- MatrixBlock inputMatrix1 = new MatrixBlock(10,20,15);\n- MatrixBlock inputMatrix2 = new MatrixBlock(20,30,12);\n- propagator.setFields(inputMatrix1, constraint1, inputMatrix2, constraint2);\n- PrivacyConstraint mergedConstraint = propagator.propagate();\n- assertTrue(\"Privacy should be set to Private\", mergedConstraint.hasPrivateElements());\n- assertFalse(\"Fine grained constraint should not be propagated\", mergedConstraint.hasFineGrainedConstraints());\n- }\n-\n- private void mmPropagationPrivateGeneralized(PrivacyLevel fineGrainedPrivacyLevel, MatrixMultiplicationPropagator propagator){\n- MatrixBlock inputMatrix1 = new MatrixBlock(10,20,15);\n- MatrixBlock inputMatrix2 = new MatrixBlock(20,30,12);\n- PrivacyConstraint constraint1 = new PrivacyConstraint(PrivacyLevel.Private);\n- constraint1.getFineGrainedPrivacy().put(new DataRange(new long[]{3,8},new long[]{2,5}), fineGrainedPrivacyLevel);\n- PrivacyConstraint constraint2 = new PrivacyConstraint();\n- propagator.setFields(inputMatrix1, constraint1, inputMatrix2, constraint2);\n- PrivacyConstraint mergedConstraint = propagator.propagate();\n- assertTrue(\"Privacy should be set to Private\", mergedConstraint.hasPrivateElements());\n- assertFalse(\"Fine grained constraint should not be propagated\", mergedConstraint.hasFineGrainedConstraints());\n- }\n-\n@Test\npublic void matrixMultiplicationPropagationTestPrivateGeneral(){\nMatrixMultiplicationPropagator propagator = new MatrixMultiplicationPropagatorPrivateFirst();\n@@ -181,32 +159,6 @@ public class PrivacyPropagatorTest extends AutomatedTestBase {\nmmPropagationPrivateGeneralized(PrivacyLevel.PrivateAggregation, propagator);\n}\n- private void mmPropagationTestPrivateFineGrainedGeneralized(MatrixMultiplicationPropagator propagator){\n- MatrixBlock inputMatrix1 = new MatrixBlock(4,3,2);\n- MatrixBlock inputMatrix2 = new MatrixBlock(3,3,4);\n- PrivacyConstraint constraint1 = new PrivacyConstraint();\n- constraint1.getFineGrainedPrivacy().put(new DataRange(new long[]{1,0},new long[]{1,1}), PrivacyLevel.Private);\n- PrivacyConstraint constraint2 = new PrivacyConstraint();\n- propagator.setFields(inputMatrix1, constraint1, inputMatrix2, constraint2);\n- PrivacyConstraint mergedConstraint = propagator.propagate();\n- assertTrue(\"Privacy should be set to Private\", mergedConstraint.hasPrivateElements());\n- assertTrue(\"Fine grained constraint should not be propagated\", mergedConstraint.hasFineGrainedConstraints());\n- assertTrue(\"Merged constraint should not contain privacy level PrivateAggregation\", mergedConstraint.getFineGrainedPrivacy().getDataRangesOfPrivacyLevel(PrivacyLevel.PrivateAggregation).length == 0);\n- Map<DataRange, PrivacyLevel> outputElement1 = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevelOfElement(new long[]{1,0});\n- Map<DataRange, PrivacyLevel> outputElement2 = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevelOfElement(new long[]{1,1});\n- Map<DataRange, PrivacyLevel> outputElement3 = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevelOfElement(new long[]{1,2});\n- assertEquals(1, outputElement1.size());\n- assertEquals(1, outputElement2.size());\n- assertEquals(1, outputElement3.size());\n- assertTrue(\"Privacy 
level of element 1 is Private\", outputElement1.containsValue(PrivacyLevel.Private));\n- assertTrue(\"Privacy level of element 2 is Private\", outputElement2.containsValue(PrivacyLevel.Private));\n- assertTrue(\"Privacy level of element 3 is Private\", outputElement3.containsValue(PrivacyLevel.Private));\n- Map<DataRange, PrivacyLevel> expectedEmpty = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevel(new DataRange(new long[]{2,0}, new long[]{3,2}));\n- assertTrue(\"Any other index has no privacy constraint\", expectedEmpty.isEmpty() ||\n- (!expectedEmpty.containsValue(PrivacyLevel.Private)\n- && !expectedEmpty.containsValue(PrivacyLevel.PrivateAggregation)));\n- }\n-\n@Test\npublic void matrixMultiplicationPropagationTestPrivateFineGrained(){\nMatrixMultiplicationPropagator propagator = new MatrixMultiplicationPropagatorPrivateFirst();\n@@ -225,25 +177,6 @@ public class PrivacyPropagatorTest extends AutomatedTestBase {\nmmPropagationTestPrivateFineGrainedGeneralized(propagator);\n}\n- private void mmPropagationTestPrivateFineGrained2Generalized(MatrixMultiplicationPropagator propagator){\n- MatrixBlock inputMatrix1 = new MatrixBlock(4,3,2);\n- MatrixBlock inputMatrix2 = new MatrixBlock(3,3,4);\n- PrivacyConstraint constraint1 = new PrivacyConstraint();\n- PrivacyConstraint constraint2 = new PrivacyConstraint();\n- constraint2.getFineGrainedPrivacy().put(new DataRange(new long[]{1,0},new long[]{1,1}), PrivacyLevel.Private);\n- propagator.setFields(inputMatrix1, constraint1, inputMatrix2, constraint2);\n- PrivacyConstraint mergedConstraint = propagator.propagate();\n- assertTrue(\"Privacy should be set to Private\", mergedConstraint.hasPrivateElements());\n- assertTrue(\"Fine grained constraint should not be propagated\", mergedConstraint.hasFineGrainedConstraints());\n- assertTrue(\"Merged constraint should not contain privacy level PrivateAggregation\", mergedConstraint.getFineGrainedPrivacy().getDataRangesOfPrivacyLevel(PrivacyLevel.PrivateAggregation).length == 0);\n- Map<DataRange, PrivacyLevel> outputRange = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevel(new DataRange(new long[]{0,0},new long[]{3,1}));\n- assertTrue(\"Privacy level is Private\", outputRange.containsValue(PrivacyLevel.Private));\n- Map<DataRange, PrivacyLevel> expectedEmpty = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevel(new DataRange(new long[]{0,2}, new long[]{3,2}));\n- assertTrue(\"Any other index has no privacy constraint\", expectedEmpty.isEmpty() ||\n- (!expectedEmpty.containsValue(PrivacyLevel.Private)\n- && !expectedEmpty.containsValue(PrivacyLevel.PrivateAggregation)));\n- }\n-\n@Test\npublic void matrixMultiplicationPropagationTestPrivateFineGrained2(){\nMatrixMultiplicationPropagator propagator = new MatrixMultiplicationPropagatorPrivateFirst();\n@@ -262,38 +195,6 @@ public class PrivacyPropagatorTest extends AutomatedTestBase {\nmmPropagationTestPrivateFineGrained2Generalized(propagator);\n}\n- private void mmPropagationTestPrivatePrivateAggregationFineGrainedGeneralized(MatrixMultiplicationPropagator propagator){\n- //Build\n- MatrixBlock inputMatrix1 = new MatrixBlock(4,3,2);\n- MatrixBlock inputMatrix2 = new MatrixBlock(3,3,4);\n- PrivacyConstraint constraint1 = new PrivacyConstraint();\n- constraint1.getFineGrainedPrivacy().put(new DataRange(new long[]{1,0},new long[]{1,1}), PrivacyLevel.Private);\n- PrivacyConstraint constraint2 = new PrivacyConstraint();\n- constraint2.getFineGrainedPrivacy().put(new DataRange(new long[]{1,0},new long[]{1,1}), 
PrivacyLevel.PrivateAggregation);\n-\n- //Execute\n- propagator.setFields(inputMatrix1, constraint1, inputMatrix2, constraint2);\n- PrivacyConstraint mergedConstraint = propagator.propagate();\n-\n- //Assert\n- assertTrue(\"Privacy should be set to Private\", mergedConstraint.hasPrivateElements());\n- assertTrue(\"Fine grained constraint should not be propagated\", mergedConstraint.hasFineGrainedConstraints());\n- assertTrue(\"Merged constraint should not contain privacy level PrivateAggregation\", mergedConstraint.getFineGrainedPrivacy().getDataRangesOfPrivacyLevel(PrivacyLevel.PrivateAggregation).length == 0);\n- Map<DataRange, PrivacyLevel> outputElement1 = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevelOfElement(new long[]{1,0});\n- Map<DataRange, PrivacyLevel> outputElement2 = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevelOfElement(new long[]{1,1});\n- Map<DataRange, PrivacyLevel> outputElement3 = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevelOfElement(new long[]{1,2});\n- assertEquals(1, outputElement1.size());\n- assertEquals(1, outputElement2.size());\n- assertEquals(1, outputElement3.size());\n- assertTrue(\"Privacy level of element 1 is Private\", outputElement1.containsValue(PrivacyLevel.Private));\n- assertTrue(\"Privacy level of element 2 is Private\", outputElement2.containsValue(PrivacyLevel.Private));\n- assertTrue(\"Privacy level of element 3 is Private\", outputElement3.containsValue(PrivacyLevel.Private));\n- Map<DataRange, PrivacyLevel> expectedEmpty = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevel(new DataRange(new long[]{2,0}, new long[]{3,2}));\n- assertTrue(\"Any other index has no privacy constraint\", expectedEmpty.isEmpty() ||\n- (!expectedEmpty.containsValue(PrivacyLevel.Private)\n- && !expectedEmpty.containsValue(PrivacyLevel.PrivateAggregation)));\n- }\n-\n@Test\npublic void matrixMultiplicationPropagationTestPrivatePrivateAggregationFineGrained(){\nMatrixMultiplicationPropagator propagator = new MatrixMultiplicationPropagatorPrivateFirst();\n@@ -337,21 +238,6 @@ public class PrivacyPropagatorTest extends AutomatedTestBase {\nassertEquals(\"All values except one should be OperatorType.Aggregate\", expected, actualArray[nonAggRow]);\n}\n- private void getOperatorTypesRowMultipleNonAggTestGeneralized(MatrixMultiplicationPropagator propagator){\n- int rows = 4;\n- int cols = 2;\n- int nonAggRow = 2;\n- MatrixBlock m1 = getMatrixBlock(rows, cols);\n- // Make two rows NNZ=1\n- m1.getDenseBlock().set(nonAggRow,0,0);\n- m1.getDenseBlock().set(nonAggRow+1,0,0);\n- propagator.setFields(m1, null, null, null);\n- OperatorType[] actualArray = propagator.getOperatorTypesRow();\n- OperatorType expected = OperatorType.NonAggregate;\n- assertEquals(\"All values except two should be OperatorType.Aggregate\", expected, actualArray[nonAggRow]);\n- assertEquals(\"All values except two should be OperatorType.Aggregate\", expected, actualArray[nonAggRow+1]);\n- }\n-\n@Test\npublic void getOperatorTypesRowMultipleNonAggTest(){\nMatrixMultiplicationPropagator propagator = new MatrixMultiplicationPropagatorPrivateFirst();\n@@ -384,37 +270,12 @@ public class PrivacyPropagatorTest extends AutomatedTestBase {\nassertEquals(\"All values except one should be OperatorType.Aggregate\", expected, actualArray[nonAggCol]);\n}\n- private void getOperatorTypesColMultipleNonAggTestGeneralized(MatrixMultiplicationPropagator propagator){\n- int rows = 2;\n- int cols = 3;\n- int nonAggCol = 1;\n- MatrixBlock m2 = getMatrixBlock(rows, cols);\n- // Make two cols 
NNZ=1\n- m2.getDenseBlock().set(0,nonAggCol,0);\n- m2.getDenseBlock().set(0,nonAggCol+1,0);\n- propagator.setFields(null, null, m2, null);\n- OperatorType[] actualArray = propagator.getOperatorTypesCol();\n- OperatorType expected = OperatorType.NonAggregate;\n- assertEquals(\"All values except two should be OperatorType.Aggregate\", expected, actualArray[nonAggCol]);\n- assertEquals(\"All values except two should be OperatorType.Aggregate\", expected, actualArray[nonAggCol+1]);\n- }\n-\n@Test\npublic void getOperatorTypesColMultipleNonAggTest(){\nMatrixMultiplicationPropagator propagator = new MatrixMultiplicationPropagatorPrivateFirst();\ngetOperatorTypesColMultipleNonAggTestGeneralized(propagator);\n}\n- private MatrixBlock getMatrixBlock(int rows, int cols){\n- DenseBlock denseM = new DenseBlockLFP64(new int[]{rows,cols});\n- for ( int r = 0; r < rows; r++ ){\n- for ( int c = 0; c < cols; c++ ){\n- denseM.set(r,c,r+c+1);\n- }\n- }\n- return new MatrixBlock(rows,cols,denseM);\n- }\n-\n@Test\npublic void matrixMultiplicationPropagationTestNonAgg(){\nMatrixMultiplicationPropagator propagator = new MatrixMultiplicationPropagatorPrivateFirst();\n@@ -451,27 +312,6 @@ public class PrivacyPropagatorTest extends AutomatedTestBase {\nNonAggGeneralizedTest(PrivacyLevel.Private, propagator);\n}\n- private void NonAggGeneralizedTest(PrivacyLevel privacyLevel, MatrixMultiplicationPropagator propagator){\n- int nonAggRow = 2;\n- MatrixBlock m1 = getMatrixBlock(4,2);\n- MatrixBlock m2 = getMatrixBlock(2, 3);\n- m1.getDenseBlock().set(nonAggRow,0,0);\n- PrivacyConstraint constraint1 = new PrivacyConstraint();\n- constraint1.getFineGrainedPrivacy().putRow(nonAggRow,2,privacyLevel);\n- PrivacyConstraint constraint2 = new PrivacyConstraint();\n- propagator.setFields(m1, constraint1, m2, constraint2);\n- PrivacyConstraint mergedPrivacyConstraint = propagator.propagate();\n- Map<DataRange, PrivacyLevel> constraints = mergedPrivacyConstraint.getFineGrainedPrivacy().getPrivacyLevel(new DataRange(new long[]{nonAggRow,0}, new long[]{nonAggRow,1}));\n- assertTrue(\"Output constraints should contain the privacy level \" + privacyLevel.toString(),\n- constraints.containsValue(privacyLevel));\n- if ( privacyLevel == PrivacyLevel.Private)\n- assertFalse(\"Output constraints should not contain the privacy level PrivateAggregation\",\n- constraints.containsValue(PrivacyLevel.PrivateAggregation));\n- else if ( privacyLevel == PrivacyLevel.PrivateAggregation )\n- assertFalse(\"Output constraints should not contain the privacy level Private\",\n- constraints.containsValue(PrivacyLevel.Private));\n- }\n-\n@Test\npublic void matrixMultiplicationPropagationTestNonAgg2(){\nMatrixMultiplicationPropagator propagator = new MatrixMultiplicationPropagatorPrivateFirst();\n@@ -508,27 +348,6 @@ public class PrivacyPropagatorTest extends AutomatedTestBase {\nNonAggGeneralizedColTest(PrivacyLevel.Private, propagator);\n}\n- private void NonAggGeneralizedColTest(PrivacyLevel privacyLevel, MatrixMultiplicationPropagator propagator){\n- int nonAggCol = 2;\n- MatrixBlock m1 = getMatrixBlock(4,2);\n- MatrixBlock m2 = getMatrixBlock(2, 3);\n- m2.getDenseBlock().set(0,nonAggCol,0);\n- PrivacyConstraint constraint1 = new PrivacyConstraint();\n- PrivacyConstraint constraint2 = new PrivacyConstraint();\n- constraint2.getFineGrainedPrivacy().putCol(nonAggCol,4,privacyLevel);\n- propagator.setFields(m1, constraint1, m2, constraint2);\n- PrivacyConstraint mergedPrivacyConstraint = propagator.propagate();\n- Map<DataRange, PrivacyLevel> constraints 
= mergedPrivacyConstraint.getFineGrainedPrivacy().getPrivacyLevel(new DataRange(new long[]{0,nonAggCol}, new long[]{3,nonAggCol}));\n- assertTrue(\"Output constraints should contain the privacy level \" + privacyLevel.toString(),\n- constraints.containsValue(privacyLevel));\n- if ( privacyLevel == PrivacyLevel.Private)\n- assertFalse(\"Output constraints should not contain the privacy level PrivateAggregation\",\n- constraints.containsValue(PrivacyLevel.PrivateAggregation));\n- else if ( privacyLevel == PrivacyLevel.PrivateAggregation )\n- assertFalse(\"Output constraints should not contain the privacy level Private\",\n- constraints.containsValue(PrivacyLevel.Private));\n- }\n-\n@Test\npublic void matrixMultiplicationPropagationTestNonAggRowColNA(){\nMatrixMultiplicationPropagator propagator = new MatrixMultiplicationPropagatorPrivateFirst();\n@@ -565,7 +384,187 @@ public class PrivacyPropagatorTest extends AutomatedTestBase {\nNonAggGeneralizedRowColTest(PrivacyLevel.PrivateAggregation, false, propagator);\n}\n- private void NonAggGeneralizedRowColTest(PrivacyLevel privacyLevel, boolean putElement, MatrixMultiplicationPropagator propagator){\n+ private static void mmGeneralNoFineGrainedGeneralized(PrivacyConstraint constraint1, PrivacyConstraint constraint2, MatrixMultiplicationPropagator propagator){\n+ MatrixBlock inputMatrix1 = new MatrixBlock(10,20,15);\n+ MatrixBlock inputMatrix2 = new MatrixBlock(20,30,12);\n+ propagator.setFields(inputMatrix1, constraint1, inputMatrix2, constraint2);\n+ PrivacyConstraint mergedConstraint = propagator.propagate();\n+ assertTrue(\"Privacy should be set to Private\", mergedConstraint.hasPrivateElements());\n+ assertFalse(\"Fine grained constraint should not be propagated\", mergedConstraint.hasFineGrainedConstraints());\n+ }\n+\n+ private static void mmPropagationPrivateGeneralized(PrivacyLevel fineGrainedPrivacyLevel, MatrixMultiplicationPropagator propagator){\n+ MatrixBlock inputMatrix1 = new MatrixBlock(10,20,15);\n+ MatrixBlock inputMatrix2 = new MatrixBlock(20,30,12);\n+ PrivacyConstraint constraint1 = new PrivacyConstraint(PrivacyLevel.Private);\n+ constraint1.getFineGrainedPrivacy().put(new DataRange(new long[]{3,8},new long[]{2,5}), fineGrainedPrivacyLevel);\n+ PrivacyConstraint constraint2 = new PrivacyConstraint();\n+ propagator.setFields(inputMatrix1, constraint1, inputMatrix2, constraint2);\n+ PrivacyConstraint mergedConstraint = propagator.propagate();\n+ assertTrue(\"Privacy should be set to Private\", mergedConstraint.hasPrivateElements());\n+ assertFalse(\"Fine grained constraint should not be propagated\", mergedConstraint.hasFineGrainedConstraints());\n+ }\n+\n+ private static void mmPropagationTestPrivateFineGrainedGeneralized(MatrixMultiplicationPropagator propagator){\n+ MatrixBlock inputMatrix1 = new MatrixBlock(4,3,2);\n+ MatrixBlock inputMatrix2 = new MatrixBlock(3,3,4);\n+ PrivacyConstraint constraint1 = new PrivacyConstraint();\n+ constraint1.getFineGrainedPrivacy().put(new DataRange(new long[]{1,0},new long[]{1,1}), PrivacyLevel.Private);\n+ PrivacyConstraint constraint2 = new PrivacyConstraint();\n+ propagator.setFields(inputMatrix1, constraint1, inputMatrix2, constraint2);\n+ PrivacyConstraint mergedConstraint = propagator.propagate();\n+ assertTrue(\"Privacy should be set to Private\", mergedConstraint.hasPrivateElements());\n+ assertTrue(\"Fine grained constraint should not be propagated\", mergedConstraint.hasFineGrainedConstraints());\n+ assertTrue(\"Merged constraint should not contain privacy level 
PrivateAggregation\", mergedConstraint.getFineGrainedPrivacy().getDataRangesOfPrivacyLevel(PrivacyLevel.PrivateAggregation).length == 0);\n+ Map<DataRange, PrivacyLevel> outputElement1 = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevelOfElement(new long[]{1,0});\n+ Map<DataRange, PrivacyLevel> outputElement2 = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevelOfElement(new long[]{1,1});\n+ Map<DataRange, PrivacyLevel> outputElement3 = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevelOfElement(new long[]{1,2});\n+ assertEquals(1, outputElement1.size());\n+ assertEquals(1, outputElement2.size());\n+ assertEquals(1, outputElement3.size());\n+ assertTrue(\"Privacy level of element 1 is Private\", outputElement1.containsValue(PrivacyLevel.Private));\n+ assertTrue(\"Privacy level of element 2 is Private\", outputElement2.containsValue(PrivacyLevel.Private));\n+ assertTrue(\"Privacy level of element 3 is Private\", outputElement3.containsValue(PrivacyLevel.Private));\n+ Map<DataRange, PrivacyLevel> expectedEmpty = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevel(new DataRange(new long[]{2,0}, new long[]{3,2}));\n+ assertTrue(\"Any other index has no privacy constraint\", expectedEmpty.isEmpty() ||\n+ (!expectedEmpty.containsValue(PrivacyLevel.Private)\n+ && !expectedEmpty.containsValue(PrivacyLevel.PrivateAggregation)));\n+ }\n+\n+ private static void mmPropagationTestPrivatePrivateAggregationFineGrainedGeneralized(MatrixMultiplicationPropagator propagator){\n+ //Build\n+ MatrixBlock inputMatrix1 = new MatrixBlock(4,3,2);\n+ MatrixBlock inputMatrix2 = new MatrixBlock(3,3,4);\n+ PrivacyConstraint constraint1 = new PrivacyConstraint();\n+ constraint1.getFineGrainedPrivacy().put(new DataRange(new long[]{1,0},new long[]{1,1}), PrivacyLevel.Private);\n+ PrivacyConstraint constraint2 = new PrivacyConstraint();\n+ constraint2.getFineGrainedPrivacy().put(new DataRange(new long[]{1,0},new long[]{1,1}), PrivacyLevel.PrivateAggregation);\n+\n+ //Execute\n+ propagator.setFields(inputMatrix1, constraint1, inputMatrix2, constraint2);\n+ PrivacyConstraint mergedConstraint = propagator.propagate();\n+\n+ //Assert\n+ assertTrue(\"Privacy should be set to Private\", mergedConstraint.hasPrivateElements());\n+ assertTrue(\"Fine grained constraint should not be propagated\", mergedConstraint.hasFineGrainedConstraints());\n+ assertTrue(\"Merged constraint should not contain privacy level PrivateAggregation\", mergedConstraint.getFineGrainedPrivacy().getDataRangesOfPrivacyLevel(PrivacyLevel.PrivateAggregation).length == 0);\n+ Map<DataRange, PrivacyLevel> outputElement1 = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevelOfElement(new long[]{1,0});\n+ Map<DataRange, PrivacyLevel> outputElement2 = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevelOfElement(new long[]{1,1});\n+ Map<DataRange, PrivacyLevel> outputElement3 = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevelOfElement(new long[]{1,2});\n+ assertEquals(1, outputElement1.size());\n+ assertEquals(1, outputElement2.size());\n+ assertEquals(1, outputElement3.size());\n+ assertTrue(\"Privacy level of element 1 is Private\", outputElement1.containsValue(PrivacyLevel.Private));\n+ assertTrue(\"Privacy level of element 2 is Private\", outputElement2.containsValue(PrivacyLevel.Private));\n+ assertTrue(\"Privacy level of element 3 is Private\", outputElement3.containsValue(PrivacyLevel.Private));\n+ Map<DataRange, PrivacyLevel> expectedEmpty = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevel(new DataRange(new 
long[]{2,0}, new long[]{3,2}));\n+ assertTrue(\"Any other index has no privacy constraint\", expectedEmpty.isEmpty() ||\n+ (!expectedEmpty.containsValue(PrivacyLevel.Private)\n+ && !expectedEmpty.containsValue(PrivacyLevel.PrivateAggregation)));\n+ }\n+\n+ private static void mmPropagationTestPrivateFineGrained2Generalized(MatrixMultiplicationPropagator propagator){\n+ MatrixBlock inputMatrix1 = new MatrixBlock(4,3,2);\n+ MatrixBlock inputMatrix2 = new MatrixBlock(3,3,4);\n+ PrivacyConstraint constraint1 = new PrivacyConstraint();\n+ PrivacyConstraint constraint2 = new PrivacyConstraint();\n+ constraint2.getFineGrainedPrivacy().put(new DataRange(new long[]{1,0},new long[]{1,1}), PrivacyLevel.Private);\n+ propagator.setFields(inputMatrix1, constraint1, inputMatrix2, constraint2);\n+ PrivacyConstraint mergedConstraint = propagator.propagate();\n+ assertTrue(\"Privacy should be set to Private\", mergedConstraint.hasPrivateElements());\n+ assertTrue(\"Fine grained constraint should not be propagated\", mergedConstraint.hasFineGrainedConstraints());\n+ assertTrue(\"Merged constraint should not contain privacy level PrivateAggregation\", mergedConstraint.getFineGrainedPrivacy().getDataRangesOfPrivacyLevel(PrivacyLevel.PrivateAggregation).length == 0);\n+ Map<DataRange, PrivacyLevel> outputRange = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevel(new DataRange(new long[]{0,0},new long[]{3,1}));\n+ assertTrue(\"Privacy level is Private\", outputRange.containsValue(PrivacyLevel.Private));\n+ Map<DataRange, PrivacyLevel> expectedEmpty = mergedConstraint.getFineGrainedPrivacy().getPrivacyLevel(new DataRange(new long[]{0,2}, new long[]{3,2}));\n+ assertTrue(\"Any other index has no privacy constraint\", expectedEmpty.isEmpty() ||\n+ (!expectedEmpty.containsValue(PrivacyLevel.Private)\n+ && !expectedEmpty.containsValue(PrivacyLevel.PrivateAggregation)));\n+ }\n+\n+ private static void getOperatorTypesRowMultipleNonAggTestGeneralized(MatrixMultiplicationPropagator propagator){\n+ int rows = 4;\n+ int cols = 2;\n+ int nonAggRow = 2;\n+ MatrixBlock m1 = getMatrixBlock(rows, cols);\n+ // Make two rows NNZ=1\n+ m1.getDenseBlock().set(nonAggRow,0,0);\n+ m1.getDenseBlock().set(nonAggRow+1,0,0);\n+ propagator.setFields(m1, null, null, null);\n+ OperatorType[] actualArray = propagator.getOperatorTypesRow();\n+ OperatorType expected = OperatorType.NonAggregate;\n+ assertEquals(\"All values except two should be OperatorType.Aggregate\", expected, actualArray[nonAggRow]);\n+ assertEquals(\"All values except two should be OperatorType.Aggregate\", expected, actualArray[nonAggRow+1]);\n+ }\n+\n+ private static void getOperatorTypesColMultipleNonAggTestGeneralized(MatrixMultiplicationPropagator propagator){\n+ int rows = 2;\n+ int cols = 3;\n+ int nonAggCol = 1;\n+ MatrixBlock m2 = getMatrixBlock(rows, cols);\n+ // Make two cols NNZ=1\n+ m2.getDenseBlock().set(0,nonAggCol,0);\n+ m2.getDenseBlock().set(0,nonAggCol+1,0);\n+ propagator.setFields(null, null, m2, null);\n+ OperatorType[] actualArray = propagator.getOperatorTypesCol();\n+ OperatorType expected = OperatorType.NonAggregate;\n+ assertEquals(\"All values except two should be OperatorType.Aggregate\", expected, actualArray[nonAggCol]);\n+ assertEquals(\"All values except two should be OperatorType.Aggregate\", expected, actualArray[nonAggCol+1]);\n+ }\n+\n+ private static MatrixBlock getMatrixBlock(int rows, int cols){\n+ DenseBlock denseM = new DenseBlockLFP64(new int[]{rows,cols});\n+ for ( int r = 0; r < rows; r++ ){\n+ for ( int c = 0; c < cols; c++ 
){\n+ denseM.set(r,c,r+c+1);\n+ }\n+ }\n+ return new MatrixBlock(rows,cols,denseM);\n+ }\n+\n+ private static void NonAggGeneralizedTest(PrivacyLevel privacyLevel, MatrixMultiplicationPropagator propagator){\n+ int nonAggRow = 2;\n+ MatrixBlock m1 = getMatrixBlock(4,2);\n+ MatrixBlock m2 = getMatrixBlock(2, 3);\n+ m1.getDenseBlock().set(nonAggRow,0,0);\n+ PrivacyConstraint constraint1 = new PrivacyConstraint();\n+ constraint1.getFineGrainedPrivacy().putRow(nonAggRow,2,privacyLevel);\n+ PrivacyConstraint constraint2 = new PrivacyConstraint();\n+ propagator.setFields(m1, constraint1, m2, constraint2);\n+ PrivacyConstraint mergedPrivacyConstraint = propagator.propagate();\n+ Map<DataRange, PrivacyLevel> constraints = mergedPrivacyConstraint.getFineGrainedPrivacy().getPrivacyLevel(new DataRange(new long[]{nonAggRow,0}, new long[]{nonAggRow,1}));\n+ assertTrue(\"Output constraints should contain the privacy level \" + privacyLevel.toString(),\n+ constraints.containsValue(privacyLevel));\n+ if ( privacyLevel == PrivacyLevel.Private)\n+ assertFalse(\"Output constraints should not contain the privacy level PrivateAggregation\",\n+ constraints.containsValue(PrivacyLevel.PrivateAggregation));\n+ else if ( privacyLevel == PrivacyLevel.PrivateAggregation )\n+ assertFalse(\"Output constraints should not contain the privacy level Private\",\n+ constraints.containsValue(PrivacyLevel.Private));\n+ }\n+\n+ private static void NonAggGeneralizedColTest(PrivacyLevel privacyLevel, MatrixMultiplicationPropagator propagator){\n+ int nonAggCol = 2;\n+ MatrixBlock m1 = getMatrixBlock(4,2);\n+ MatrixBlock m2 = getMatrixBlock(2, 3);\n+ m2.getDenseBlock().set(0,nonAggCol,0);\n+ PrivacyConstraint constraint1 = new PrivacyConstraint();\n+ PrivacyConstraint constraint2 = new PrivacyConstraint();\n+ constraint2.getFineGrainedPrivacy().putCol(nonAggCol,4,privacyLevel);\n+ propagator.setFields(m1, constraint1, m2, constraint2);\n+ PrivacyConstraint mergedPrivacyConstraint = propagator.propagate();\n+ Map<DataRange, PrivacyLevel> constraints = mergedPrivacyConstraint.getFineGrainedPrivacy().getPrivacyLevel(new DataRange(new long[]{0,nonAggCol}, new long[]{3,nonAggCol}));\n+ assertTrue(\"Output constraints should contain the privacy level \" + privacyLevel.toString(),\n+ constraints.containsValue(privacyLevel));\n+ if ( privacyLevel == PrivacyLevel.Private)\n+ assertFalse(\"Output constraints should not contain the privacy level PrivateAggregation\",\n+ constraints.containsValue(PrivacyLevel.PrivateAggregation));\n+ else if ( privacyLevel == PrivacyLevel.PrivateAggregation )\n+ assertFalse(\"Output constraints should not contain the privacy level Private\",\n+ constraints.containsValue(PrivacyLevel.Private));\n+ }\n+\n+ private static void NonAggGeneralizedRowColTest(PrivacyLevel privacyLevel, boolean putElement, MatrixMultiplicationPropagator propagator){\nint nonAgg = 2;\nMatrixBlock m1 = getMatrixBlock(4,2);\nMatrixBlock m2 = getMatrixBlock(2, 3);\n@@ -583,7 +582,7 @@ public class PrivacyPropagatorTest extends AutomatedTestBase {\nint privacyLevelSum = 0;\nDataRange levelRange = null;\nPrivacyLevel level = PrivacyLevel.None;\n- for ( Map.Entry constraint : constraints )\n+ for ( Map.Entry<DataRange, PrivacyLevel> constraint : constraints )\nif ( constraint.getValue() == privacyLevel ){\nprivacyLevelSum++;\nlevelRange = (DataRange)constraint.getKey();\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix various warnings (UINT handling, generics, static) |
49,694 | 10.10.2020 14:55:29 | -7,200 | f2f6dbdab9fd451cfe64093b1f3b8738c314fce7 | New ALS built-in functions (als, alsCG, alsDS)
AMLS project SS2020.
Closes | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/als.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+#\n+# This script computes an approximate factorization of a low-rank matrix X into two matrices U and V\n+# using different implementations of the Alternating-Least-Squares (ALS) algorithm.\n+# Matrices U and V are computed by minimizing a loss function (with regularization).\n+#\n+# INPUT PARAMETERS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# X String --- Location to read the input matrix X to be factorized\n+# rank Int 10 Rank of the factorization\n+# reg String \"L2\" Regularization:\n+# \"L2\" = L2 regularization;\n+# f (U, V) = 0.5 * sum (W * (U %*% V - X) ^ 2)\n+# + 0.5 * lambda * (sum (U ^ 2) + sum (V ^ 2))\n+# \"wL2\" = weighted L2 regularization\n+# f (U, V) = 0.5 * sum (W * (U %*% V - X) ^ 2)\n+# + 0.5 * lambda * (sum (U ^ 2 * row_nonzeros)\n+# + sum (V ^ 2 * col_nonzeros))\n+# lambda Double 0.000001 Regularization parameter, no regularization if 0.0\n+# maxi Int 50 Maximum number of iterations\n+# check Boolean TRUE Check for convergence after every iteration, i.e., updating U and V once\n+# thr Double 0.0001 Assuming check is set to TRUE, the algorithm stops and convergence is declared\n+# if the decrease in loss in any two consecutive iterations falls below this threshold;\n+# if check is FALSE thr is ignored\n+# ---------------------------------------------------------------------------------------------\n+# OUTPUT:\n+# 1- An m x r matrix U, where r is the factorization rank\n+# 2- An r x n matrix V\n+\n+m_als = function(Matrix[Double] X, Integer rank = 10, String reg = \"L2\", Double lambda = 0.000001,\n+ Integer maxi = 50, Boolean check = TRUE, Double thr = 0.0001, Boolean verbose = TRUE)\n+ return (Matrix[Double] U, Matrix[Double] V)\n+{\n+ N = 10000; # for large problems, use scalable alsCG\n+ if( reg != \"L2\" | nrow(X) > N | ncol(X) > N )\n+ [U, V] = alsCG(X=X, rank=rank, reg=reg, lambda=lambda,\n+ maxi=maxi, check=check, thr=thr, verbose=verbose);\n+ else\n+ [U, V] = alsDS(X=X, rank=rank, lambda=lambda, maxi=maxi,\n+ check=check, thr=thr, verbose=verbose);\n+}\n"
},
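For reference, the two losses the als header comments describe, written out with W as the 0/1 nonzero mask of X, n_i the nonzero count of row i, and m_j that of column j:

```latex
% L2 regularization
f(U,V) = \tfrac{1}{2}\sum_{i,j} W_{ij}\,\big((UV)_{ij}-X_{ij}\big)^2
       + \tfrac{\lambda}{2}\Big(\sum_{i,k} U_{ik}^2 + \sum_{k,j} V_{kj}^2\Big)

% weighted L2 ("wL2"): penalties scaled by per-row/column nonzero counts
f(U,V) = \tfrac{1}{2}\sum_{i,j} W_{ij}\,\big((UV)_{ij}-X_{ij}\big)^2
       + \tfrac{\lambda}{2}\Big(\sum_{i,k} U_{ik}^2\,n_i + \sum_{k,j} V_{kj}^2\,m_j\Big)
```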
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/alsCG.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+#\n+# This script computes an approximate factorization of a low-rank matrix X into two matrices U and V\n+# using the Alternating-Least-Squares (ALS) algorithm with conjugate gradient.\n+# Matrices U and V are computed by minimizing a loss function (with regularization).\n+#\n+# INPUT PARAMETERS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# X String --- Location to read the input matrix X to be factorized\n+# rank Int 10 Rank of the factorization\n+# reg String \"L2\" Regularization:\n+# \"L2\" = L2 regularization;\n+# f (U, V) = 0.5 * sum (W * (U %*% V - X) ^ 2)\n+# + 0.5 * lambda * (sum (U ^ 2) + sum (V ^ 2))\n+# \"wL2\" = weighted L2 regularization\n+# f (U, V) = 0.5 * sum (W * (U %*% V - X) ^ 2)\n+# + 0.5 * lambda * (sum (U ^ 2 * row_nonzeros)\n+# + sum (V ^ 2 * col_nonzeros))\n+# lambda Double 0.000001 Regularization parameter, no regularization if 0.0\n+# maxi Int 50 Maximum number of iterations\n+# check Boolean TRUE Check for convergence after every iteration, i.e., updating U and V once\n+# thr Double 0.0001 Assuming check is set to TRUE, the algorithm stops and convergence is declared\n+# if the decrease in loss in any two consecutive iterations falls below this threshold;\n+# if check is FALSE thr is ignored\n+# ---------------------------------------------------------------------------------------------\n+# OUTPUT:\n+# 1- An m x r matrix U, where r is the factorization rank\n+# 2- An r x n matrix V\n+\n+\n+m_alsCG = function(Matrix[Double] X, Integer rank = 10, String reg = \"L2\", Double lambda = 0.000001, Integer maxi = 50, Boolean check = TRUE, Double thr = 0.0001, Boolean verbose = TRUE)\n+ return (Matrix[Double] U, Matrix[Double] V)\n+{\n+ r = rank;\n+ max_iter = maxi;\n+\n+ ###### MAIN PART ######\n+ m = nrow (X);\n+ n = ncol (X);\n+\n+ # initializing factor matrices\n+ U = rand (rows = m, cols = r, min = -0.5, max = 0.5); # mxr\n+ V = rand (rows = n, cols = r, min = -0.5, max = 0.5); # nxr\n+\n+ W = (X != 0);\n+\n+ # check for regularization\n+ row_nonzeros = matrix(0,rows=1,cols=1);\n+ col_nonzeros = matrix(0,rows=1,cols=1);\n+ if( reg == \"L2\" ) {\n+ # Loss Function with L2:\n+ # f (U, V) = 0.5 * sum (W * (U %*% V - X) ^ 2)\n+ # + 0.5 * lambda * (sum (U ^ 2) + sum (V ^ 2))\n+ if( verbose )\n+ print (\"BEGIN ALS-CG SCRIPT WITH NONZERO SQUARED LOSS + L2 WITH LAMBDA - \" + lambda);\n+ row_nonzeros = matrix(1, nrow(W), 1);\n+ col_nonzeros = 
matrix(1, ncol(W), 1);\n+ }\n+ else if( reg == \"wL2\" ) {\n+ # Loss Function with weighted L2:\n+ # f (U, V) = 0.5 * sum (W * (U %*% V - X) ^ 2)\n+ # + 0.5 * lambda * (sum (U ^ 2 * row_nonzeros) + sum (V ^ 2 * col_nonzeros))\n+ if( verbose )\n+ print (\"BEGIN ALS-CG SCRIPT WITH NONZERO SQUARED LOSS + WEIGHTED L2 WITH LAMBDA - \" + lambda);\n+ row_nonzeros = rowSums(W);\n+ col_nonzeros = t(colSums(W));\n+ }\n+ else {\n+ stop (\"wrong regularization! \" + reg);\n+ }\n+\n+ is_U = TRUE; # start optimizing U, alternated\n+ maxinneriter = r ; # min (ncol (U), 15);\n+\n+ loss_init = 0.0; # only used if check is TRUE\n+ if( check ) {\n+ loss_init = 0.5 * sum( (X != 0) * (U %*% t(V) - X) ^ 2);\n+ loss_init = loss_init + 0.5 * lambda * (sum (U ^ 2 * row_nonzeros) + sum (V ^ 2 * col_nonzeros));\n+ if( verbose )\n+ print (\"----- Initial train loss: \" + loss_init + \" -----\");\n+ }\n+\n+ it = 0;\n+ converged = FALSE;\n+ while( as.integer(it/2) < max_iter & ! converged ) {\n+ it = it + 1;\n+ if( is_U )\n+ G = ((X != 0) * (U %*% t(V) - X)) %*% V + lambda * U * row_nonzeros;\n+ else\n+ G = t(t(U) %*% ((X != 0) * (U %*% t(V) - X))) + lambda * V * col_nonzeros;\n+\n+ R = -G;\n+ S = R;\n+ norm_G2 = sum (G ^ 2);\n+ norm_R2 = norm_G2;\n+\n+ inneriter = 1;\n+ tt = 0.000000001;\n+ while( norm_R2 > tt * norm_G2 & inneriter <= maxinneriter ) {\n+ if( is_U ) {\n+ HS = (W * (S %*% t(V))) %*% V + lambda * S * row_nonzeros;\n+ alpha = norm_R2 / sum (S * HS);\n+ U = U + alpha * S; # OK since U is not used in HS\n+ }\n+ else {\n+ HS = t(t(U) %*% (W * (U %*% t(S)))) + lambda * S * col_nonzeros;\n+ alpha = norm_R2 / sum (S * HS);\n+ V = V + alpha * S; # OK since V is not used in HS\n+ }\n+\n+ R = R - alpha * HS;\n+ old_norm_R2 = norm_R2;\n+ norm_R2 = sum (R ^ 2);\n+ S = R + (norm_R2 / old_norm_R2) * S;\n+ inneriter = inneriter + 1;\n+ }\n+\n+ is_U = ! is_U;\n+\n+ # check for convergence\n+ if( check & (it%%2 == 0) ) {\n+ loss_cur = 0.5 * sum( (X != 0) * (U %*% t(V) - X) ^ 2);\n+ loss_cur = loss_cur + 0.5 * lambda * (sum (U ^ 2 * row_nonzeros) + sum (V ^ 2 * col_nonzeros));\n+\n+ loss_dec = (loss_init - loss_cur) / loss_init;\n+ if( verbose )\n+ print (\"Train loss at iteration (\" + as.integer(it/2) + \"): \" + loss_cur + \" loss-dec \" + loss_dec);\n+ if( loss_dec >= 0 & loss_dec < thr | loss_init == 0 ) {\n+ if( verbose )\n+ print (\"----- ALS-CG converged after \" + as.integer(it/2) + \" iterations!\");\n+ converged = TRUE;\n+ }\n+ loss_init = loss_cur;\n+ }\n+ }\n+\n+ if(verbose) {\n+ if(check)\n+ print (\"----- Final train loss: \" + loss_init + \" -----\");\n+ if(!converged )\n+ print (\"Max iteration achieved but not converged!\");\n+ }\n+\n+ V = t(V);\n+}\n"
},
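A NumPy sketch of the gradient alsCG forms for the U half-step before entering the conjugate-gradient inner loop, mirroring G = ((X != 0) * (U %*% t(V) - X)) %*% V + lambda * U * row_nonzeros from the script (the function name is illustrative):

```python
import numpy as np

def grad_U(X, U, V, lam, weighted=False):
    # X: m x n ratings, U: m x r, V: n x r factor matrices.
    W = (X != 0).astype(float)                 # nonzero mask, m x n
    rn = (W.sum(axis=1, keepdims=True) if weighted
          else np.ones((X.shape[0], 1)))       # wL2 vs plain L2 row weights
    return (W * (U @ V.T - X)) @ V + lam * U * rn  # m x r, same shape as U
```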
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/alsDS.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+#\n+# Alternating-Least-Squares (ALS) algorithm using a direct solve method for\n+# individual least squares problems (reg=\"L2\"). This script computes an\n+# approximate factorization of a low-rank matrix V into two matrices L and R.\n+# Matrices L and R are computed by minimizing a loss function (with regularization).\n+#\n+# INPUT PARAMETERS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# V String --- Location to read the input matrix V to be factorized\n+# L String --- Location to write the factor matrix L\n+# R String --- Location to write the factor matrix R\n+# rank Int 10 Rank of the factorization\n+# lambda Double 0.000001 Regularization parameter, no regularization if 0.0\n+# maxi Int 50 Maximum number of iterations\n+# check Boolean FALSE Check for convergence after every iteration, i.e., updating L and R once\n+# thr Double 0.0001 Assuming check is set to TRUE, the algorithm stops and convergence is declared\n+# if the decrease in loss in any two consecutive iterations falls below this threshold;\n+# if check is FALSE thr is ignored\n+# ---------------------------------------------------------------------------------------------\n+# OUTPUT:\n+# 1- An m x r matrix L, where r is the factorization rank\n+# 2- An r x n matrix R\n+#\n+\n+m_alsDS = function(Matrix[Double] X, Integer rank = 10, Double lambda = 0.000001,\n+ Integer maxi = 50, Boolean check = FALSE, Double thr = 0.0001, Boolean verbose = TRUE)\n+ return (Matrix[Double] U, Matrix[Double] V)\n+{\n+ r = rank;\n+ max_iter = maxi;\n+\n+ # check the input matrix V, if some rows or columns contain only zeros remove them from V\n+ X_nonzero_ind = X != 0;\n+ row_nonzeros = rowSums (X_nonzero_ind);\n+ col_nonzeros = t (colSums (X_nonzero_ind));\n+ orig_nonzero_rows_ind = row_nonzeros != 0;\n+ orig_nonzero_cols_ind = col_nonzeros != 0;\n+ num_zero_rows = nrow (X) - sum (orig_nonzero_rows_ind);\n+ num_zero_cols = ncol (X) - sum (orig_nonzero_cols_ind);\n+ if (num_zero_rows > 0) {\n+ if( verbose )\n+ print (\"Matrix X contains empty rows! These rows will be removed.\");\n+ X = removeEmpty (target = X, margin = \"rows\");\n+ }\n+ if (num_zero_cols > 0) {\n+ if( verbose )\n+ print (\"Matrix X contains empty columns! 
These columns will be removed.\");\n+ X = removeEmpty (target = X, margin = \"cols\");\n+ }\n+ if (num_zero_rows > 0 | num_zero_cols > 0) {\n+ if( verbose )\n+ print (\"Recomputing nonzero rows and columns!\");\n+ X_nonzero_ind = X != 0;\n+ row_nonzeros = rowSums (X_nonzero_ind);\n+ col_nonzeros = t (colSums (X_nonzero_ind));\n+ }\n+\n+ ###### MAIN PART ######\n+ m = nrow (X);\n+ n = ncol (X);\n+\n+ # initializing factor matrices\n+ U = rand (rows = m, cols = r, min = -0.5, max = 0.5);\n+ V = rand (rows = n, cols = r, min = -0.5, max = 0.5);\n+\n+ # initializing transformed matrices\n+ Xt = t(X);\n+\n+ # check for regularization\n+ if ( verbose )\n+ print (\"BEGIN ALS SCRIPT WITH NONZERO SQUARED LOSS + L2 WITH LAMBDA - \" + lambda);\n+\n+ loss_init = 0.0; # only used if check is TRUE\n+ if (check) {\n+ loss_init = sum (X_nonzero_ind * (X - (U %*% t(V)))^2)\n+ + lambda * (sum ((U^2) * row_nonzeros) + sum ((V^2) * col_nonzeros));\n+ if( verbose )\n+ print (\"----- Initial train loss: \" + loss_init + \" -----\");\n+ }\n+\n+ lambda_I = diag (matrix (lambda, rows = r, cols = 1));\n+ it = 0;\n+ converged = FALSE;\n+ while ((it < max_iter) & (!converged)) {\n+ it = it + 1;\n+ # keep V fixed and update U\n+ parfor (i in 1:m) {\n+ V_nonzero_ind = t(X[i,] != 0);\n+ V_nonzero = removeEmpty (target=V * V_nonzero_ind, margin=\"rows\");\n+ A1 = (t(V_nonzero) %*% V_nonzero) + (as.scalar(row_nonzeros[i,1]) * lambda_I); # coefficient matrix\n+ U[i,] = t(solve (A1, t(X[i,] %*% V)));\n+ }\n+\n+ # keep U fixed and update V\n+ parfor (j in 1:n) {\n+ U_nonzero_ind = t(Xt[j,] != 0)\n+ U_nonzero = removeEmpty (target=U * U_nonzero_ind, margin=\"rows\");\n+ A2 = (t(U_nonzero) %*% U_nonzero) + (as.scalar(col_nonzeros[j,1]) * lambda_I); # coefficient matrix\n+ V[j,] = t(solve (A2, t(Xt[j,] %*% U)));\n+ }\n+\n+ # check for convergence\n+ if (check) {\n+ loss_cur = sum (X_nonzero_ind * (X - (U %*% t(V)))^2)\n+ + lambda * (sum ((U^2) * row_nonzeros) + sum ((V^2) * col_nonzeros));\n+ loss_dec = (loss_init - loss_cur) / loss_init;\n+ if( verbose )\n+ print (\"Train loss at iteration (X) \" + it + \": \" + loss_cur + \" loss-dec \" + loss_dec);\n+ if (loss_dec >= 0 & loss_dec < thr | loss_init == 0) {\n+ if( verbose )\n+ print (\"----- ALS converged after \" + it + \" iterations!\");\n+ converged = TRUE;\n+ }\n+ loss_init = loss_cur;\n+ }\n+ } # end of while loop\n+\n+ if(verbose) {\n+ if(check)\n+ print (\"----- Final train loss: \" + loss_init + \" -----\");\n+ if(!converged )\n+ print (\"Max iteration achieved but not converged!\");\n+ }\n+\n+ # inject 0s in U if original X had empty rows\n+ if (num_zero_rows > 0)\n+ U = removeEmpty (target = diag (orig_nonzero_rows_ind), margin = \"cols\") %*% U;\n+ # inject 0s in V if original X had empty rows\n+ if (num_zero_cols > 0)\n+ V = removeEmpty (target = diag (orig_nonzero_cols_ind), margin = \"cols\") %*% V;\n+ V = t(V);\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"new_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"diff": "@@ -40,6 +40,9 @@ public enum Builtins {\n//builtin functions\nABS(\"abs\", false),\nACOS(\"acos\", false),\n+ ALS(\"als\", true),\n+ ALS_CG(\"alsCG\", true),\n+ ALS_DS(\"alsDS\", true),\nASIN(\"asin\", false),\nATAN(\"atan\", false),\nAVG_POOL(\"avg_pool\", false),\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/TestUtils.java",
"new_path": "src/test/java/org/apache/sysds/test/TestUtils.java",
"diff": "@@ -2339,14 +2339,15 @@ public class TestUtils\n* @return computed result\n*/\npublic static double[][] performMatrixMultiplication(double[][] a, double[][] b) {\n- int rows = a.length;\n- int cols = b[0].length;\n- double[][] result = new double[rows][cols];\n+ int m = a.length;\n+ int n = a[0].length;\n+ int l = b[0].length;\n+ double[][] result = new double[m][l];\n- for (int i = 0; i < rows; i++) {\n- for (int j = 0; j < cols; j++) {\n+ for (int i = 0; i < m; i++) {\n+ for (int j = 0; j < l; j++) {\ndouble value = 0;\n- for (int k = 0; k < a[i].length; k++) {\n+ for (int k = 0; k < n; k++) {\nvalue += (a[i][k] * b[k][j]);\n}\nresult[i][j] = value;\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinALSTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.builtin;\n+\n+import org.apache.sysds.runtime.matrix.data.MatrixValue;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Test;\n+\n+import java.util.ArrayList;\n+import java.util.HashMap;\n+import java.util.List;\n+\n+public class BuiltinALSTest extends AutomatedTestBase {\n+\n+ private final static String TEST_NAME = \"als\";\n+ private final static String TEST_DIR = \"functions/builtin/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + BuiltinALSTest.class.getSimpleName() + \"/\";\n+\n+ private final static double eps = 0.00001;\n+ private final static int rows = 6;\n+ private final static int cols = 6;\n+\n+ @Override\n+ public void setUp() {\n+ addTestConfiguration(TEST_NAME,new TestConfiguration(TEST_CLASS_DIR, TEST_NAME,new String[]{\"B\"}));\n+ }\n+\n+ @Test\n+ public void testALSCG() {\n+ runtestALS(\"alsCG\");\n+ }\n+\n+ @Test\n+ public void testALSDS() {\n+ runtestALS(\"alsDS\");\n+ }\n+\n+ @Test\n+ public void testALS() {\n+ runtestALS(\"als\");\n+ }\n+\n+ private void runtestALS(String alg) {\n+ loadTestConfiguration(getTestConfiguration(TEST_NAME));\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ List<String> proArgs = new ArrayList<>();\n+ proArgs.add(\"-args\");\n+ proArgs.add(input(\"X\"));\n+ proArgs.add(alg);\n+ proArgs.add(output(\"U\"));\n+ proArgs.add(output(\"V\"));\n+ programArgs = proArgs.toArray(new String[proArgs.size()]);\n+\n+ double[][] X = {\n+ {7,1,1,2,2,1},{7,2,2,3,2,1},\n+ {7,3,1,4,1,1},{7,4,2,5,3,1},\n+ {7,5,3,6,5,1}, {7,6,5,1,4,1}};\n+ writeInputMatrixWithMTD(\"X\", X, true);\n+\n+ runTest(true, EXCEPTION_NOT_EXPECTED, null, -1);\n+\n+ //compare expected results\n+ HashMap<MatrixValue.CellIndex, Double> matrixV = readDMLMatrixFromHDFS(\"V\");\n+ HashMap<MatrixValue.CellIndex, Double> matrixU = readDMLMatrixFromHDFS(\"U\");\n+ double[][] doubleV = TestUtils.convertHashMapToDoubleArray(matrixV);\n+ double[][] doubleU = TestUtils.convertHashMapToDoubleArray(matrixU);\n+ double[][] result = TestUtils.performMatrixMultiplication(doubleU, doubleV);\n+\n+ TestUtils.compareMatrices(X, result, rows, cols, eps);\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/als.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read($1)\n+alg = $2\n+\n+if( alg == \"alsCG\" )\n+ [U, V] = alsCG(X=X)\n+else if( alg == \"alsDS\" )\n+ [U, V] = alsDS(X=X)\n+else if( alg == \"als\" )\n+ [U, V] = als(X=X)\n+\n+write(U, $3)\n+write(V, $4)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2680] New ALS built-in functions (als, alsCG, alsDS)
AMLS project SS2020.
Closes #1070. |
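Editor's note on the alsDS record above: with V fixed, each row of U has a closed-form least-squares solution over the entries that row actually observes, which is exactly what the `solve(A1, ...)` call inside the parfor computes. Below is a minimal NumPy sketch of that per-row update; all names are illustrative and nothing here is part of the SystemDS API.

```python
import numpy as np

def als_update_U(X, U, V, lam):
    """One ALS half-sweep updating U with V fixed (direct solve per row).

    Mirrors the alsDS inner loop: for each row i, solve
    (Vn^T Vn + nnz_i * lambda * I) u_i = Vn^T x_i over the observed entries.
    Assumes no empty rows, which alsDS guarantees by removing them first.
    """
    m, r = U.shape
    for i in range(m):
        nz = X[i, :] != 0                          # observed entries of row i
        Vn = V[nz, :]                              # factors of rated items only
        A = Vn.T @ Vn + nz.sum() * lam * np.eye(r)
        b = X[i, nz] @ Vn                          # equals (X[i,:] @ V), since zeros drop out
        U[i, :] = np.linalg.solve(A, b)
    return U

# toy usage
rng = np.random.default_rng(7)
X = rng.random((6, 6)) * (rng.random((6, 6)) > 0.3)
np.fill_diagonal(X, 1.0)                           # ensure no empty rows
U = rng.uniform(-0.5, 0.5, (6, 2))
V = rng.uniform(-0.5, 0.5, (6, 2))
U = als_update_U(X, U, V, lam=1e-6)
```

The same solve with the roles of U and V swapped gives the second half-sweep, matching the script's alternation.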
49,723 | 10.10.2020 19:38:10 | -7,200 | e929457e2f1850e6f1c2b5c490523f9526e51be5 | [SYSTEMDS-2682/3] New Lasso and PPCA built-in functions
AMLS project SS2020.
Closes | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/lasso.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# Builtin function for the SpaRSA algorithm to perform lasso regression\n+# (SpaRSA .. Sparse Reconstruction by Separable Approximation)\n+#\n+# INPUTS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# X Double --- input feature matrix\n+# y Double --- matrix Y columns of the design matrix\n+# tol Double 1e-15 target convergence tolerance\n+# M Integer 5 history length\n+# tau Double 1 regularization component\n+# maxi Integer 100 maximum number of iterations until convergence\n+# ---------------------------------------------------------------------------------------------\n+# OUTPUTS\n+# w Double --- model matrix\n+\n+\n+m_lasso = function(Matrix[Double] X, Matrix[Double] y, Double tol = 10^(-15),\n+ Integer M = 5, Double tau = 1, Integer maxi = 100, Boolean verbose = TRUE)\n+ return(Matrix[Double] w)\n+{\n+ n = nrow(X)\n+ m = ncol(X)\n+\n+ #constants\n+ eta = 2\n+ sigma = 0.01\n+ alpha_min = 10^(-30)\n+ alpha_max = 10^(30)\n+\n+ #init\n+ alpha = 1\n+ w = Rand(rows=m, cols=1, min=0, max=1, pdf=\"uniform\")\n+ history = -1e30 * matrix(1, rows=M, cols=1)\n+\n+ r = X %*% w - y\n+ g = t(X) %*% r\n+ obj = 0.5 * sum(r*r) + tau*sum(abs(w))\n+\n+ if( verbose )\n+ print(\"Initial OBJ=\" + obj)\n+\n+ history[M,1] = obj\n+\n+ inactive_set = matrix(1, rows=m, cols=1)\n+ iter = 0\n+ continue = TRUE\n+ while(iter < maxiter & continue) {\n+ dw = matrix(0, rows=m, cols=1)\n+ dg = matrix(0, rows=m, cols=1)\n+ relChangeObj = -1.0\n+\n+ inner_iter = 0\n+ inner_continue = TRUE\n+ inner_maxiter = 100\n+ while(inner_iter < inner_maxiter & inner_continue) {\n+ u = w - g/alpha\n+ lambda = tau/alpha\n+\n+ wnew = sign(u) * (abs(u) - lambda) * ((abs(u) - lambda) > 0)\n+ dw = wnew - w\n+ dw2 = sum(dw*dw)\n+\n+ r = X %*% wnew - y\n+ gnew = t(X) %*% r\n+ objnew = 0.5 * sum(r*r) + tau*sum(abs(wnew))\n+ obj_threshold = max(history) - 0.5*sigma*alpha*dw2\n+\n+ if(objnew <= obj_threshold) {\n+ w = wnew\n+ dg = gnew - g\n+ g = gnew\n+ inner_continue = FALSE\n+\n+ history[1:(M-1),] = history[2:M,]\n+ history[M,1] = objnew\n+ relChangeObj = abs(objnew - obj)/obj\n+ obj = objnew\n+ }\n+ else\n+ alpha = eta*alpha\n+\n+ inner_iter = inner_iter + 1\n+ }\n+\n+ if(inner_continue)\n+ print(\"Inner loop did not converge\")\n+\n+ alphanew = sum(dw*dg)/sum(dw*dw)\n+ alpha = max(alpha_min, min(alpha_max, alphanew))\n+\n+ old_inactive_set = inactive_set\n+ inactive_set = w != 0\n+ diff = sum(abs(old_inactive_set 
- inactive_set))\n+\n+ if(diff == 0 & relChangeObj < tol)\n+ continue = FALSE\n+\n+ num_inactive = sum(w != 0)\n+ if( verbose )\n+ print(\"ITER=\" + iter + \" OBJ=\" + obj + \" relative change=\" + relChangeObj + \" num_inactive=\" + num_inactive)\n+ iter = iter + 1\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/ppca.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# This script performs Probabilistic Principal Component Analysis (PCA) on the given input data.\n+# It is based on paper: sPCA: Scalable Principal Component Analysis for Big Data on Distributed\n+# Platforms. Tarek Elgamal et.al.\n+\n+# INPUT PARAMETERS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# X Matrix --- n x m input feature matrix\n+# k Integer --- indicates dimension of the new vector space constructed from eigen vectors\n+# maxi Integer --- maximum number of iterations until convergence\n+# tolobj Double 0.00001 objective function tolerance value to stop ppca algorithm\n+# tolrecerr Double 0.02 reconstruction error tolerance value to stop the algorithm\n+# verbose Boolen TRUE verbose debug output\n+# ---------------------------------------------------------------------------------------------\n+# OUTPUT PARAMETERS:\n+# ---------------------------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ---------------------------------------------------------------------------------------------\n+# Xout Matrix --- Output feature matrix with K columns\n+# Mout Matrix --- Output dominant eigen vectors (can be used for projections)\n+\n+\n+m_ppca = function(Matrix[Double] X, Integer K=2, Integer maxi = 10,\n+ Double tolobj = 0.00001, Double tolrecerr = 0.02, Boolean verbose = TRUE)\n+ return(Matrix[Double] Xout, Matrix[Double] Mout)\n+{\n+ n = nrow(X);\n+ m = ncol(X);\n+\n+ #initializing principal components matrix\n+ C = rand(rows=m, cols=K, pdf=\"normal\");\n+ ss = rand(rows=1, cols=1, pdf=\"normal\");\n+ ss = as.scalar(ss);\n+ ssPrev = ss;\n+\n+ # best selected principle components - with the lowest reconstruction error\n+ PC = C;\n+\n+ # initilizing reconstruction error\n+ RE = tolrecerr+1;\n+ REBest = RE;\n+\n+ Z = matrix(0,rows=1,cols=1);\n+\n+ #Objective function value\n+ ObjRelChng = tolobj+1;\n+\n+ # mean centered input matrix - dim -> [n,m]\n+ Xm = X - colMeans(X);\n+\n+ #I -> k x k\n+ ITMP = matrix(1,rows=K,cols=1);\n+ I = diag(ITMP);\n+\n+ i = 0;\n+ while (i < maxi & ObjRelChng > tolobj & RE > tolrecerr){\n+ #Estimation step - Covariance matrix\n+ #M -> k x k\n+ M = t(C) %*% C + I*ss;\n+\n+ #Auxilary matrix with n latent variables\n+ # Z -> n x k\n+ Z = Xm %*% (C %*% inv(M));\n+\n+ #ZtZ -> k x k\n+ ZtZ = t(Z) %*% Z + inv(M)*ss;\n+\n+ #XtZ -> m x k\n+ XtZ = t(Xm) %*% Z;\n+\n+ #Maximization step\n+ #C 
-> m x k\n+ ZtZ_sum = sum(ZtZ); #+n*inv(M));\n+ C = XtZ/ZtZ_sum;\n+\n+ #ss2 -> 1 x 1\n+ ss2 = trace(ZtZ * (t(C) %*% C));\n+\n+ #ss3 -> 1 x 1\n+ ss3 = sum((Z %*% t(C)) %*% t(Xm));\n+\n+ #Frobenius norm of reconstruction error -> Euclidean norm\n+ #Fn -> 1 x 1\n+ Fn = sum(Xm*Xm);\n+\n+ #ss -> 1 x 1\n+ ss = (Fn + ss2 - 2*ss3)/(n*m);\n+\n+ #calculating objective function relative change\n+ ObjRelChng = abs(1 - ss/ssPrev);\n+ #print(\"Objective Relative Change: \" + ObjRelChng + \", Objective: \" + ss);\n+\n+ #Reconstruction error\n+ R = ((Z %*% t(C)) - Xm);\n+\n+ #calculate the error\n+ #TODO rethink calculation of reconstruction error ....\n+ #1-Norm of reconstruction error - a big dense matrix\n+ #RE -> n x m\n+ RE = abs(sum(R)/sum(Xm));\n+ if (RE < REBest){\n+ PC = C;\n+ REBest = RE;\n+ }\n+ #print(\"ss: \" + ss +\" = Fn( \"+ Fn +\" ) + ss2( \" + ss2 +\" ) - 2*ss3( \" + ss3 + \" ), Reconstruction Error: \" + RE);\n+\n+ ssPrev = ss;\n+ i = i+1;\n+ }\n+ if( verbose )\n+ print(\"Objective Relative Change: \" + ObjRelChng);\n+ if( verbose )\n+ print (\"Number of iterations: \" + i + \", Reconstruction Err: \" + REBest);\n+\n+ # reconstructs data\n+ # RD -> n x k\n+ Xout = X %*% PC;\n+\n+ # calculate eigenvalues - principle component variance\n+ RDMean = colMeans(Xout);\n+ V = t(colMeans(Xout^2) - (RDMean^2));\n+\n+ # sorting eigenvalues and eigenvectors in decreasing order\n+ V_decr_idx = order(target=V,by=1,decreasing=TRUE,index.return=TRUE);\n+ VF_decr = table(seq(1,nrow(V)),V_decr_idx);\n+ Mout = PC %*% VF_decr; # vectors (values via VF_decr %*% V)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"new_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"diff": "@@ -120,6 +120,7 @@ public enum Builtins {\nISINF(\"is.infinite\", false),\nKMEANS(\"kmeans\", true),\nL2SVM(\"l2svm\", true),\n+ LASSO(\"lasso\", true),\nLENGTH(\"length\", false),\nLINEAGE(\"lineage\", false),\nLIST(\"list\", false), //note: builtin and parbuiltin\n@@ -155,6 +156,7 @@ public enum Builtins {\nOUTLIER_IQR(\"outlierByIQR\", true),\nPCA(\"pca\", true),\nPNMF(\"pnmf\", true),\n+ PPCA(\"ppca\", true),\nPPRED(\"ppred\", false),\nPROD(\"prod\", false),\nQR(\"qr\", false, ReturnType.MULTI_RETURN),\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinLassoTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.builtin;\n+\n+import org.apache.sysds.runtime.meta.MatrixCharacteristics;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.junit.Assert;\n+import org.junit.Test;\n+\n+import java.util.ArrayList;\n+import java.util.List;\n+\n+public class BuiltinLassoTest extends AutomatedTestBase {\n+ private final static String TEST_NAME = \"lasso\";\n+ private final static String TEST_DIR = \"functions/builtin/\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + BuiltinLassoTest.class.getSimpleName() + \"/\";\n+\n+ private final static int rows = 100;\n+ private final static int cols = 10;\n+\n+ @Override\n+ public void setUp() {\n+ addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[]{\"B\"}));\n+ }\n+\n+ @Test\n+ public void testLasso() {\n+ runLassoTest();\n+ }\n+\n+ private void runLassoTest() {\n+ loadTestConfiguration(getTestConfiguration(TEST_NAME));\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ List<String> proArgs = new ArrayList<>();\n+\n+ proArgs.add(\"-args\");\n+ proArgs.add(input(\"X\"));\n+ proArgs.add(input(\"y\"));\n+ proArgs.add(output(\"w\"));\n+ programArgs = proArgs.toArray(new String[proArgs.size()]);\n+ double[][] X = getRandomMatrix(rows, cols, 0, 1, 0.8, -1);\n+ double[][] y = getRandomMatrix(rows, 1, 0, 1, 0.8, -1);\n+ writeInputMatrixWithMTD(\"X\", X, true);\n+ writeInputMatrixWithMTD(\"y\", y, true);\n+\n+ runTest(true, EXCEPTION_NOT_EXPECTED, null, -1);\n+ MatrixCharacteristics mc = readDMLMetaDataFile(\"w\");\n+ Assert.assertEquals(cols, mc.getRows());\n+ Assert.assertEquals(1, mc.getCols());\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinPPCATest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.builtin;\n+\n+import org.apache.sysds.runtime.meta.MatrixCharacteristics;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.junit.Assert;\n+import org.junit.Test;\n+\n+import java.util.ArrayList;\n+import java.util.List;\n+\n+public class BuiltinPPCATest extends AutomatedTestBase {\n+ private final static String TEST_NAME = \"ppca\";\n+ private final static String TEST_DIR = \"functions/builtin/\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + BuiltinPPCATest.class.getSimpleName() + \"/\";\n+\n+ private final static int rows = 200;\n+ private final static int cols = 20;\n+\n+ @Override\n+ public void setUp() {\n+ addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[]{\"PC\",\"V\"}));\n+ }\n+\n+ @Test\n+ public void testPpca4() {\n+ runPPCATest(4);\n+ }\n+\n+ @Test\n+ public void testPpca16() {\n+ runPPCATest(16);\n+ }\n+\n+ private void runPPCATest(int k) {\n+ loadTestConfiguration(getTestConfiguration(TEST_NAME));\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ List<String> proArgs = new ArrayList<>();\n+\n+ proArgs.add(\"-args\");\n+ proArgs.add(input(\"X\"));\n+ proArgs.add(String.valueOf(k));\n+ proArgs.add(output(\"PC\"));\n+ proArgs.add(output(\"V\"));\n+ programArgs = proArgs.toArray(new String[proArgs.size()]);\n+ double[][] X = getRandomMatrix(rows, cols, 0, 1, 0.8, -1);\n+ writeInputMatrixWithMTD(\"X\", X, true);\n+\n+ runTest(true, EXCEPTION_NOT_EXPECTED, null, -1);\n+ MatrixCharacteristics mc = readDMLMetaDataFile(\"PC\");\n+ Assert.assertEquals(rows, mc.getRows());\n+ Assert.assertEquals(k, mc.getCols());\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/lasso.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read($1)\n+y = read($2)\n+w = lasso(X = X, y = y)\n+write(w, $3)\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/ppca.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read($1)\n+k = $2;\n+[PC, V] = ppca(X=X, K=k)\n+write(PC, $3)\n+write(V, $4)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2682/3] New Lasso and PPCA built-in functions
AMLS project SS2020.
Closes #1071. |
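Editor's note on the lasso record above: the heart of the SpaRSA iteration in lasso.dml is the soft-thresholding (shrinkage) step `wnew = sign(u) * (|u| - lambda) * ((|u| - lambda) > 0)`. A small NumPy sketch of that operator and one trial step; the variable names are illustrative, not the script's API:

```python
import numpy as np

def soft_threshold(u, lam):
    """Proximal operator of lam * ||.||_1: sign(u) * max(|u| - lam, 0)."""
    return np.sign(u) * np.maximum(np.abs(u) - lam, 0.0)

# one SpaRSA-style trial step: gradient step of size 1/alpha, then shrinkage
rng = np.random.default_rng(0)
w, g = rng.random(5), rng.random(5)
alpha, tau = 1.0, 0.1
w_new = soft_threshold(w - g / alpha, tau / alpha)
print(w_new)  # entries with |w - g/alpha| <= tau/alpha are driven exactly to zero
```

Driving small coordinates exactly to zero is what produces the sparse `inactive_set` that the script's outer loop tracks for convergence.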
49,700 | 14.10.2020 13:10:25 | -7,200 | 76a79629db740572a0f7af116fce5afddc9f4f28 | Move Privacy Post-Processing To Be Before Lineage Cache | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/ProgramBlock.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/ProgramBlock.java",
"diff": "@@ -45,6 +45,7 @@ import org.apache.sysds.runtime.instructions.cp.StringObject;\nimport org.apache.sysds.runtime.lineage.LineageCache;\nimport org.apache.sysds.runtime.lineage.LineageCacheConfig.ReuseCacheType;\nimport org.apache.sysds.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysds.runtime.privacy.propagation.PrivacyPropagator;\nimport org.apache.sysds.utils.Statistics;\nimport java.util.ArrayList;\n@@ -260,6 +261,9 @@ public abstract class ProgramBlock implements ParseInfo\n}\n}\n+ // propagate input privacy constraints to output\n+ PrivacyPropagator.postProcessInstruction(tmp, ec);\n+\n// optional trace information (instruction and runtime)\nif( LOG.isTraceEnabled() ) {\nlong t1 = System.nanoTime();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"diff": "@@ -225,6 +225,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n_hdfsFileName = that._hdfsFileName;\n_hdfsFileExists = that._hdfsFileExists;\n_gpuObjects = that._gpuObjects;\n+ _privacyConstraint = that._privacyConstraint;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/Instruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/Instruction.java",
"diff": "@@ -27,7 +27,6 @@ import org.apache.sysds.parser.DataIdentifier;\nimport org.apache.sysds.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysds.runtime.matrix.operators.Operator;\nimport org.apache.sysds.runtime.privacy.PrivacyConstraint;\n-import org.apache.sysds.runtime.privacy.propagation.PrivacyPropagator;\npublic abstract class Instruction\n{\n@@ -251,7 +250,5 @@ public abstract class Instruction\n*\n* @param ec execution context\n*/\n- public void postprocessInstruction(ExecutionContext ec) {\n- PrivacyPropagator.postProcessInstruction(this, ec);\n- }\n+ public void postprocessInstruction(ExecutionContext ec) {}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/privacy/propagation/PrivacyPropagator.java",
"new_path": "src/main/java/org/apache/sysds/runtime/privacy/propagation/PrivacyPropagator.java",
"diff": "@@ -150,6 +150,13 @@ public class PrivacyPropagator\nreturn mergedPrivacyConstraint;\n}\n+ /**\n+ * Propagate privacy constraints from input to output CPOperands\n+ * in case the privacy constraints of the input are activated.\n+ * @param inst instruction for which the privacy constraints are propagated\n+ * @param ec execution context\n+ * @return instruction with propagated privacy constraints (usually the same instance as the input inst)\n+ */\npublic static Instruction preprocessInstruction(Instruction inst, ExecutionContext ec){\nswitch ( inst.getType() ){\ncase CONTROL_PROGRAM:\n@@ -284,6 +291,7 @@ public class PrivacyPropagator\nMatrixBlock input2 = ec.getMatrixInput(inst.input2.getName());\nPropagator propagator = new MatrixMultiplicationPropagatorPrivateFirst(input1, privacyConstraint1, input2, privacyConstraint2);\nmergedPrivacyConstraint = propagator.propagate();\n+ ec.releaseMatrixInput(inst.input1.getName(), inst.input2.getName());\n}\nelse {\nmergedPrivacyConstraint = mergeNary(new PrivacyConstraint[]{privacyConstraint1,privacyConstraint2},\n@@ -562,13 +570,20 @@ public class PrivacyPropagator\n}\n}\n+ /**\n+ * Propagate privacy constraints to output variables\n+ * based on privacy constraint of CPOperand output in instruction\n+ * which has been set during privacy propagation preprocessing.\n+ * @param inst instruction for which privacy constraints are propagated\n+ * @param ec execution context\n+ */\npublic static void postProcessInstruction(Instruction inst, ExecutionContext ec){\n// if inst has output\nList<CPOperand> instOutputs = getOutputOperands(inst);\nif (!instOutputs.isEmpty()){\nfor ( CPOperand output : instOutputs ){\nPrivacyConstraint outputPrivacyConstraint = output.getPrivacyConstraint();\n- if ( privacyConstraintActivated(outputPrivacyConstraint) )\n+ if ( privacyConstraintActivated(outputPrivacyConstraint) || (outputPrivacyConstraint != null && outputPrivacyConstraint.hasFineGrainedConstraints()))\nsetOutputPrivacyConstraint(ec, outputPrivacyConstraint, output.getName());\n}\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/privacy/PrivacyLineageTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.privacy;\n+\n+import org.apache.sysds.parser.DataExpression;\n+import org.apache.sysds.runtime.meta.MatrixCharacteristics;\n+import org.apache.sysds.runtime.privacy.PrivacyConstraint;\n+import org.apache.sysds.runtime.privacy.finegrained.DataRange;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.junit.Assert;\n+import org.junit.Test;\n+\n+public class PrivacyLineageTest extends AutomatedTestBase {\n+\n+ private static final String TEST_DIR = \"functions/privacy/\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + PrivacyLineageTest.class.getSimpleName() + \"/\";\n+\n+ @Override public void setUp() {\n+ addTestConfiguration(\"LineageReuse\",\n+ new TestConfiguration(TEST_CLASS_DIR, \"PrivacyLineageTest\", new String[]{\"c\"}));\n+ }\n+\n+ @Test\n+ public void propagatePrivacyWithLineageFullReuseTest() {\n+ propagationWithLineage(PrivacyConstraint.PrivacyLevel.PrivateAggregation);\n+ }\n+\n+ private void propagationWithLineage(PrivacyConstraint.PrivacyLevel privacyLevel) {\n+\n+ TestConfiguration config = availableTestConfigurations.get(\"LineageReuse\");\n+ loadTestConfiguration(config);\n+ fullDMLScriptName = SCRIPT_DIR + TEST_DIR + config.getTestScript() + \".dml\";\n+\n+ int n = 20;\n+ int m = 20;\n+ double[][] a = getRandomMatrix(m, n, -1, 1, 1, -1);\n+ int k = 20;\n+ double[][] b = getRandomMatrix(n, k, -1, 1, 1, -1);\n+\n+ PrivacyConstraint privacyConstraint = new PrivacyConstraint(privacyLevel);\n+ privacyConstraint.getFineGrainedPrivacy().put(new DataRange(new long[]{0,0}, new long[]{4,4}), PrivacyConstraint.PrivacyLevel.Private);\n+ MatrixCharacteristics aCharacteristics = new MatrixCharacteristics(m, n, k, k);\n+ MatrixCharacteristics bCharacteristics = new MatrixCharacteristics(n, k, k, k);\n+\n+ writeInputMatrixWithMTD(\"A\", a, false, aCharacteristics, privacyConstraint);\n+ writeInputMatrixWithMTD(\"B\", b, false, bCharacteristics);\n+\n+ programArgs = new String[]{\"-lineage\", \"reuse_full\", \"-nvargs\",\n+ \"A=\" + input(\"A\"), \"B=\" + input(\"B\"), \"C=\" + output(\"C\")};\n+\n+ runTest(true,false,null,-1);\n+\n+ finegrainedAssertions();\n+\n+ programArgs = new String[]{\"-nvargs\",\n+ \"A=\" + input(\"A\"), \"B=\" + input(\"B\"), \"C=\" + output(\"C\")};\n+ runTest(true,false,null,-1);\n+\n+ finegrainedAssertions();\n+ }\n+\n+ private void finegrainedAssertions(){\n+ String outputFineGrained = readDMLMetaDataValueCatchException(\"C\", OUTPUT_DIR, DataExpression.FINE_GRAINED_PRIVACY);\n+ Assert.assertEquals(\n+ 
\"{\\\"Private\\\":[[[0,0],[0,19]],[[1,0],[1,19]],[[2,0],[2,19]],[[3,0],[3,19]],[[4,0],[4,19]]],\\\"PrivateAggregation\\\":[]}\",\n+ outputFineGrained);\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/privacy/PrivacyLineageTest.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = read($A)\n+B = read($B)\n+C = A %*% B;\n+for (i in 1:10)\n+ C = A %*% B;\n+write(C, $C);\n+\n"
}
] | Java | Apache License 2.0 | apache/systemds | Move Privacy Post-Processing To Be Before Lineage Cache |
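Editor's note on the privacy record above: the point of moving `PrivacyPropagator.postProcessInstruction` into `ProgramBlock` (before the lineage cache can store the result) and of copying `_privacyConstraint` in the `CacheableData` copy constructor is ordering. If constraints are attached only after a result has been cached, any reused copy comes back without them. A deliberately simplified Python toy of that failure mode — this is not SystemDS code, just the ordering argument:

```python
# Toy reuse cache: metadata attached *after* caching is lost on the reuse path.
cache = {}

def execute(key, compute, attach_before_cache):
    if key in cache:
        return cache[key]              # reuse path skips any later post-processing
    out = compute()
    if attach_before_cache:
        out["privacy"] = "Private"     # post-process first ...
    cache[key] = dict(out)             # ... then cache a copy carrying the constraint
    if not attach_before_cache:
        out["privacy"] = "Private"     # too late: the cached copy lacks it
    return out

first = execute("A%*%B", lambda: {"value": 42}, attach_before_cache=False)
reused = execute("A%*%B", lambda: {"value": 42}, attach_before_cache=False)
print(first.get("privacy"), reused.get("privacy"))  # Private None  -> the bug
```

With `attach_before_cache=True`, both the first and the reused result carry the constraint, which is what the new PrivacyLineageTest asserts across the lineage-reuse and plain runs.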
49,704 | 18.10.2020 12:40:46 | -7,200 | 0dc27cfa8572d20abd697797440da45c5c261e2c | [MINOR] Fix runtime plan cost estimator (createvar instruction)
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/hops/cost/CostEstimator.java",
"new_path": "src/main/java/org/apache/sysds/hops/cost/CostEstimator.java",
"diff": "@@ -215,7 +215,7 @@ public abstract class CostEstimator\nlong rlen = Long.parseLong(parts[6]);\nlong clen = Long.parseLong(parts[7]);\nint blen = Integer.parseInt(parts[8]);\n- long nnz = Long.parseLong(parts[10]);\n+ long nnz = Long.parseLong(parts[9]);\nVarStats vs = new VarStats(rlen, clen, blen, nnz, false);\nstats.put(varname, vs);\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix runtime plan cost estimator (createvar instruction)
Closes #1079. |
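Editor's note on the one-index fix above: the estimator splits the createvar instruction string into fields and reads the matrix characteristics at fixed positions — rlen at parts[6], clen at parts[7], blocksize at parts[8], and (after the fix) nnz at parts[9]. A sketch of that indexing; the surrounding field values below are placeholders, not the real instruction layout:

```python
# Hypothetical parts layout; only indices 6..9 mirror what the estimator reads.
parts = ["CP", "createvar", "_mVar1", "scratch_space/_mVar1", "true",
         "MATRIX", "1000", "500", "1000", "123456"]

rlen = int(parts[6])   # 1000 rows
clen = int(parts[7])   # 500 columns
blen = int(parts[8])   # block size 1000
nnz  = int(parts[9])   # 123456 non-zeros -- previously read from parts[10]
# parts[10] would raise IndexError in this sketch; in the real instruction
# string it simply pointed one field past the nnz value, i.e. an off-by-one.
print(rlen, clen, blen, nnz)
```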
49,706 | 18.10.2020 19:12:41 | -7,200 | 0ea51487153fc7bfa64d7fcd77c16fa300222030 | [MINOR] Fix sparse transpose self multiply | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/matrix/data/LibMatrixMult.java",
"new_path": "src/main/java/org/apache/sysds/runtime/matrix/data/LibMatrixMult.java",
"diff": "@@ -1932,7 +1932,7 @@ public class LibMatrixMult\nif( alen == n ) { //dense row\nfor( int i=rl; i<ru; i++ ) {\nvectMultiplyAdd(avals[i], avals,\n- c.values(i), i, c.pos(i), n-i);\n+ c.values(i), i, c.pos(i) + i, n-i);\n}\n}\nelse { //non-full sparse row\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix sparse transpose self multiply |
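Editor's note, one plausible reading of the kernel fixed above: in the transpose-self matrix multiply t(X) %*% X only the upper triangle of the symmetric result is computed, so for output row i the accumulation must start at column i — hence `c.pos(i) + i` rather than `c.pos(i)` in the dense-row case. A NumPy sketch of the corrected row-wise accumulation (a simplification of the Java `vectMultiplyAdd` loop):

```python
import numpy as np

def tsmm_upper(X):
    """Upper triangle of X.T @ X, accumulated row-wise like the fixed kernel:
    for each input row a and each i, add a[i] * a[i:] into C[i, i:]."""
    n = X.shape[1]
    C = np.zeros((n, n))
    for a in X:                        # one dense row of X
        for i in range(n):
            C[i, i:] += a[i] * a[i:]   # start at column i (the '+ i' offset)
    return C

X = np.arange(6.0).reshape(2, 3)
assert np.allclose(np.triu(tsmm_upper(X)), np.triu(X.T @ X))
```

Without the `+ i` offset, each partial product would land at the start of the output row, i.e. in the wrong columns.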
49,706 | 19.10.2020 13:33:25 | -7,200 | 3560a1e0a632e3c1f712d68392342648a787003c | [MINOR] Install guide for MacOS
closes | [
{
"change_type": "MODIFY",
"old_path": "docs/site/install.md",
"new_path": "docs/site/install.md",
"diff": "@@ -23,11 +23,17 @@ limitations under the License.\nThis guide helps in the install and setup of SystemDS from source code.\n+- [Windows Guide](#Windows)\n+- [Ubuntu/Linux Guide](#Ubuntu%2020.04)\n+- [Mac](#Mac)\n+\n## Windows\nTODO\n-## Build from source on Ubuntu 20.04\n+---\n+\n+## Ubuntu 20.04\n### Java and Maven\n@@ -72,6 +78,54 @@ sudo add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-\nsudo apt install r-base\n```\n+See [Build the project](#Build%20the%20project) to compile the code from here.\n+\n+---\n+\n+## MAC\n+\n+Prerequisite install homebrew on the device.\n+\n+```bash\n+# To allow relative paths:\n+brew install coreutils\n+# To install open jdk 8.\n+brew tap adoptopenjdk/openjdk\n+brew cask install adoptopenjdk8\n+# Install maven to enable compilation of SystemDS.\n+brew install maven\n+```\n+\n+Then afterwards verify the install:\n+\n+```bash\n+java --version\n+mvn --version\n+```\n+\n+This should print something like:\n+\n+```bash\n+Java version: 1.8.0_242, vendor: AdoptOpenJDK, runtime: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre\n+Default locale: ru_UA, platform encoding: UTF-8\n+OS name: \"mac os x\", version: \"10.15.5\", arch: \"x86_64\", family: \"mac\"\n+\n+Apache Maven 3.6.3 (cecedd343002696d0abb50b32b541b8a6ba2883f)\n+Maven home: /usr/local/Cellar/maven/3.6.3_1/libexec\n+```\n+\n+Note that if you have multiple __java__ versions installed then you have to change the used version to 8, on __both java and javadoc__. This is done by setting the environment variable JAVA_HOME to the install path of open JDK 8 :\n+\n+``` bash\n+export JAVA_HOME=`/usr/libexec/java_home -v 1.8`\n+```\n+\n+For running all tests [r-base](https://cran.r-project.org/bin/macosx/) has to be installed as well since this is used as a secondary system to verify the correctness of our code, but it is not a requirement to enable building the project.\n+\n+See [Build the project](#Build%20the%20project) to compile the code from here.\n+\n+---\n+\n## Build the project\nTo compile the project use:\n@@ -80,8 +134,6 @@ To compile the project use:\nmvn package -P distribution\n```\n-After some time it should return with:\n-\n```bash\n[INFO] ------------------------------------------------------------------------\n[INFO] BUILD SUCCESS\n"
},
{
"change_type": "DELETE",
"old_path": "docs/site/run_issues.md",
"new_path": null,
"diff": "-Error: Could not find or load main class org.apache.sysds.api.DMLScript\n-\n-Solution for macOS: Install `realpath` with Homebrew\n-```bash\n-brew install coreutils\n-```\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Install guide for MacOS
Closes #1076. |
49,706 | 19.10.2020 14:13:10 | -7,200 | 47c52f185bb193a8d20a6a04e00a16e1881378de | [MINOR] Remove debug printing in python tests | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/tests/examples/tutorials/test_mnist.py",
"new_path": "src/main/python/tests/examples/tutorials/test_mnist.py",
"diff": "@@ -104,7 +104,7 @@ class Test_DMLScript(unittest.TestCase):\nYt = Matrix(self.sds, self.d.get_test_labels()[:test_count])\nYt = Yt + 1.0\n- [_, _, acc] = multiLogRegPredict(Xt, bias, Yt).compute(verbose=True)\n+ [_, _, acc] = multiLogRegPredict(Xt, bias, Yt).compute(verbose=False)\nself.assertGreater(acc, 70)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/tests/matrix/test_print.py",
"new_path": "src/main/python/tests/matrix/test_print.py",
"diff": "@@ -46,11 +46,11 @@ class TestPrint(unittest.TestCase):\ndef test_print_01(self):\nMatrix(self.sds, np.array([1])).to_string().print().compute()\n- self.assertEquals('1.000',self.sds.get_stdout()[0])\n+ self.assertEqual('1.000',self.sds.get_stdout()[0])\ndef test_print_02(self):\nself.sds.scalar(1).print().compute()\n- self.assertEquals('1', self.sds.get_stdout()[0])\n+ self.assertEqual('1', self.sds.get_stdout()[0])\nif __name__ == \"__main__\":\nunittest.main(exit=False)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Remove debug printing in python tests |
49,706 | 20.10.2020 15:08:22 | -7,200 | c436af04f5ff28604ff2a72cea46a0736084a50d | [MINOR] Fix links install guide | [
{
"change_type": "MODIFY",
"old_path": "docs/site/install.md",
"new_path": "docs/site/install.md",
"diff": "@@ -23,9 +23,9 @@ limitations under the License.\nThis guide helps in the install and setup of SystemDS from source code.\n-- [Windows Guide](#Windows)\n-- [Ubuntu/Linux Guide](#Ubuntu%2020.04)\n-- [Mac](#Mac)\n+- [Windows Guide](#windows)\n+- [Ubuntu/Linux Guide](#ubuntu%2020.04)\n+- [Mac](#mac)\n## Windows\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix links install guide |
49,706 | 20.10.2020 15:16:14 | -7,200 | 87d3a497c4eb08b4048ed07219882b92c8bae7cc | [MINOR] Second fix for install guide | [
{
"change_type": "MODIFY",
"old_path": "docs/site/install.md",
"new_path": "docs/site/install.md",
"diff": "@@ -24,8 +24,8 @@ limitations under the License.\nThis guide helps in the install and setup of SystemDS from source code.\n- [Windows Guide](#windows)\n-- [Ubuntu/Linux Guide](#ubuntu%2020.04)\n-- [Mac](#mac)\n+- [Ubuntu/Linux Guide](#ubuntu-2004)\n+- [Mac Guide](#mac)\n## Windows\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Second fix for install guide |
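Editor's note on the two anchor-link records above: both fixes follow from how GitHub-flavored markdown derives heading anchors — lowercase the text, drop most punctuation (so `20.04` becomes `2004`), and turn spaces into hyphens. A simplified sketch of that rule (real renderers handle more edge cases):

```python
import re

def github_anchor(heading):
    """Approximate GitHub heading -> anchor slug: lowercase, strip
    punctuation, spaces to hyphens."""
    s = heading.strip().lower()
    s = re.sub(r"[^\w\- ]", "", s)   # drops '.', keeps letters/digits/_/-/space
    return s.replace(" ", "-")

print(github_anchor("Ubuntu 20.04"))  # ubuntu-2004
print(github_anchor("Mac"))           # mac
```

This is why the percent-encoded form `#ubuntu%2020.04` never resolved, while `#ubuntu-2004` does.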
49,706 | 23.10.2020 11:43:45 | -7,200 | bc3b4f59c3f4e8fb99fcd3b9d8de8bb3fda3a85f | [MINOR] Python Left hand side ops
This commit adds the left-hand-side ops to enable operations like:
3 > M
3 * M
Furthermore, this commit fixes a :bug: in transpose that did not set
the shape correctly, and the same for matrix multiply. | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/operator/operation_node.py",
"new_path": "src/main/python/systemds/operator/operation_node.py",
"diff": "@@ -180,38 +180,73 @@ class OperationNode(DAGNode):\ndef __add__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '+', [self, other], shape=self.shape)\n+ # Left hand side\n+ def __radd__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\n+ return OperationNode(self.sds_context, '+', [other, self], shape=self.shape)\n+\ndef __sub__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '-', [self, other], shape=self.shape)\n+ # Left hand side\n+ def __rsub__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\n+ return OperationNode(self.sds_context, '-', [other, self], shape=self.shape)\n+\ndef __mul__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '*', [self, other], shape=self.shape)\n+ def __rmul__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\n+ return OperationNode(self.sds_context, '*', [other, self], shape=self.shape)\n+\ndef __truediv__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '/', [self, other], shape=self.shape)\n+ def __rtruediv__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\n+ return OperationNode(self.sds_context, '/', [other, self], shape=self.shape)\n+\ndef __floordiv__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '//', [self, other], shape=self.shape)\n+ def __rfloordiv__(self, other: VALID_ARITHMETIC_TYPES) -> 'OperationNode':\n+ return OperationNode(self.sds_context, '//', [other, self], shape=self.shape)\n+\ndef __lt__(self, other) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '<', [self, other], shape=self.shape)\n+ def __rlt__(self, other) -> 'OperationNode':\n+ return OperationNode(self.sds_context, '<', [other, self], shape=self.shape)\n+\ndef __le__(self, other) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '<=', [self, other], shape=self.shape)\n+ def __rle__(self, other) -> 'OperationNode':\n+ return OperationNode(self.sds_context, '<=', [other, self], shape=self.shape)\n+\ndef __gt__(self, other) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '>', [self, other], shape=self.shape)\n+ def __rgt__(self, other) -> 'OperationNode':\n+ return OperationNode(self.sds_context, '>', [other, self], shape=self.shape)\n+\ndef __ge__(self, other) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '>=', [self, other], shape=self.shape)\n+ def __rge__(self, other) -> 'OperationNode':\n+ return OperationNode(self.sds_context, '>=', [other, self], shape=self.shape)\n+\ndef __eq__(self, other) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '==', [self, other], shape=self.shape)\n+ def __req__(self, other) -> 'OperationNode':\n+ return OperationNode(self.sds_context, '==', [other, self], shape=self.shape)\n+\ndef __ne__(self, other) -> 'OperationNode':\nreturn OperationNode(self.sds_context, '!=', [self, other], shape=self.shape)\n+ def __rne__(self, other) -> 'OperationNode':\n+ return OperationNode(self.sds_context, '!=', [other, self], shape=self.shape)\n+\ndef __matmul__(self, other: 'OperationNode') -> 'OperationNode':\n- return OperationNode(self.sds_context, '%*%', [self, other], shape=(self.shape[0], other.shape[0]))\n+ return OperationNode(self.sds_context, '%*%', [self, other], shape=(self.shape[0], other.shape[1]))\ndef sum(self, axis: int = None) -> 'OperationNode':\n\"\"\"Calculate sum of matrix.\n@@ -331,7 +366,7 @@ 
class OperationNode(DAGNode):\n\"\"\"\nreturn OperationNode(self.sds_context, 'tanh', [self], shape=self.shape)\n- def moment(self, moment, weights: DAGNode = None) -> 'OperationNode':\n+ def moment(self, moment: int, weights: DAGNode = None) -> 'OperationNode':\n# TODO write tests\nself._check_matrix_op()\nunnamed_inputs = [self]\n@@ -374,7 +409,7 @@ class OperationNode(DAGNode):\n\"\"\"\nself._check_matrix_op()\n- return OperationNode(self.sds_context, 'rev', [self])\n+ return OperationNode(self.sds_context, 'rev', [self], shape=self.shape)\ndef order(self, by: int = 1, decreasing: bool = False,\nindex_return: bool = False) -> 'OperationNode':\n@@ -396,7 +431,7 @@ class OperationNode(DAGNode):\nnamed_input_nodes = {'target': self, 'by': by, 'decreasing': str(decreasing).upper(),\n'index.return': str(index_return).upper()}\n- return OperationNode(self.sds_context, 'order', [], named_input_nodes=named_input_nodes)\n+ return OperationNode(self.sds_context, 'order', [], named_input_nodes=named_input_nodes, shape=self.shape)\ndef t(self) -> 'OperationNode':\n\"\"\" Transposes the input matrix\n@@ -405,7 +440,11 @@ class OperationNode(DAGNode):\n\"\"\"\nself._check_matrix_op()\n- return OperationNode(self.sds_context, 't', [self])\n+ if(len(self.shape) > 1):\n+ shape = (self.shape[1], self.shape[0])\n+ else:\n+ shape = (0, self.shape[0])\n+ return OperationNode(self.sds_context, 't', [self], shape=shape)\ndef cholesky(self, safe: bool = False) -> 'OperationNode':\n\"\"\" Computes the Cholesky decomposition of a symmetric, positive definite matrix\n@@ -429,7 +468,7 @@ class OperationNode(DAGNode):\nif not np.allclose(self._np_array, self._np_array.transpose()):\nraise ValueError(\"Matrix is not symmetric\")\n- return OperationNode(self.sds_context, 'cholesky', [self])\n+ return OperationNode(self.sds_context, 'cholesky', [self], shape=self.shape)\ndef to_one_hot(self, num_classes: int) -> 'OperationNode':\n\"\"\" OneHot encode the matrix.\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/tests/matrix/test_binary_op.py",
"new_path": "src/main/python/tests/matrix/test_binary_op.py",
"diff": "@@ -63,31 +63,57 @@ class TestBinaryOp(unittest.TestCase):\nself.assertTrue(np.allclose(\n(Matrix(self.sds, m1) / Matrix(self.sds, m2)).compute(), m1 / m2))\n- # TODO arithmetic with numpy rhs\n-\n- # TODO arithmetic with numpy lhs\n-\n- def test_plus3(self):\n+ def test_plus3_rhs(self):\nself.assertTrue(np.allclose(\n(Matrix(self.sds, m1) + s).compute(), m1 + s))\n- def test_minus3(self):\n+ def test_plus3_lhs(self):\n+ self.assertTrue(np.allclose(\n+ (s + Matrix(self.sds, m1) ).compute(), s + m1))\n+\n+ def test_minus3_rhs(self):\nself.assertTrue(np.allclose(\n(Matrix(self.sds, m1) - s).compute(), m1 - s))\n- def test_mul3(self):\n+ def test_minus3_lhs(self):\n+ self.assertTrue(np.allclose(\n+ (s - Matrix(self.sds, m1)).compute(), s - m1 ))\n+\n+ def test_mul3_rhs(self):\nself.assertTrue(np.allclose(\n(Matrix(self.sds, m1) * s).compute(), m1 * s))\n- def test_div3(self):\n+ def test_mul3_lhs(self):\n+ self.assertTrue(np.allclose(\n+ (s * Matrix(self.sds, m1)).compute(), s * m1))\n+\n+ def test_div3_rhs(self):\nself.assertTrue(np.allclose(\n(Matrix(self.sds, m1) / s).compute(), m1 / s))\n+ def test_div3_lhs(self):\n+ self.assertTrue(np.allclose(\n+ (s / Matrix(self.sds, m1) ).compute(), s / m1))\n+\ndef test_matmul(self):\nself.assertTrue(np.allclose(\n(Matrix(self.sds, m1) @ Matrix(self.sds, m2)).compute(), m1.dot(m2)))\n- # TODO arithmetic with scala lhs\n+ def test_matmul_chain(self):\n+ m3 = np.ones((m2.shape[1], 10), dtype=np.uint8)\n+ m = Matrix(self.sds, m1) @ Matrix(self.sds, m2) @ Matrix(\n+ self.sds, m3)\n+ res = (m).compute()\n+ np_res = m1.dot(m2).dot(m3)\n+ self.assertTrue(np.allclose(res, np_res))\n+ self.assertTrue(np.allclose(m.shape, np_res.shape))\n+\n+ def test_matmul_self(self):\n+ m = Matrix(self.sds, m1).t() @ Matrix(self.sds, m1)\n+ res = (m).compute()\n+ np_res = np.transpose(m1).dot(m1)\n+ self.assertTrue(np.allclose(res, np_res))\n+ self.assertTrue(np.allclose(m.shape, np_res.shape))\ndef test_lt(self):\nself.assertTrue(np.allclose(\n@@ -109,6 +135,25 @@ class TestBinaryOp(unittest.TestCase):\nself.assertTrue(np.allclose(\nMatrix(self.sds, m1).abs().compute(), np.abs(m1)))\n+ def test_lt3_rhs(self):\n+ self.assertTrue(np.allclose(\n+ (Matrix(self.sds, m1) <3).compute(), m1 < 3))\n+\n+ def test_lt3_lhs(self):\n+ self.assertTrue(np.allclose(\n+ (3 < Matrix(self.sds, m1)).compute(), 3 < m1 ))\n+\n+ def test_gt3_rhs(self):\n+ self.assertTrue(np.allclose(\n+ (3 > Matrix(self.sds, m1)).compute(), 3 > m1 ))\n+\n+ def test_le3_rhs(self):\n+ self.assertTrue(np.allclose(\n+ (3<= Matrix(self.sds, m1) ).compute(), 3 <= m1 ))\n+\n+ def test_ge3_rhs(self):\n+ self.assertTrue(np.allclose(\n+ (3 >= Matrix(self.sds, m1)).compute(), 3>= m1))\nif __name__ == \"__main__\":\nunittest.main(exit=False)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Python Left hand side ops
This commit adds the left-hand-side ops to enable operations like:
3 > M
3 * M
Furthermore, this commit fixes a :bug: in transpose that did not set
the shape correctly, and the same for matrix multiply. |
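Editor's note on the mechanism behind the `__radd__`/`__rmul__` additions in this record: Python's reflected-operator protocol means `3 * M` first tries `(3).__mul__(M)`, which returns `NotImplemented` for an unknown type, so the interpreter falls back to `M.__rmul__(3)`. A standalone toy class (not the systemds `OperationNode`):

```python
class Node:
    """Toy expression node demonstrating reflected-operator dispatch."""
    def __init__(self, expr):
        self.expr = expr
    def __mul__(self, other):      # Node * x
        return Node(f"({self.expr} * {other})")
    def __rmul__(self, other):     # x * Node, after x.__mul__ returns NotImplemented
        return Node(f"({other} * {self.expr})")
    def __gt__(self, other):       # Node > x
        return Node(f"({self.expr} > {other})")
    def __lt__(self, other):       # also reached by x > Node (see below)
        return Node(f"({self.expr} < {other})")

M = Node("M")
print((3 * M).expr)   # (3 * M)  -- dispatched via __rmul__
print((3 > M).expr)   # (M < 3)  -- comparisons reflect by swapping operands
```

Worth noting: the data model reflects comparisons by calling the swapped operator on the right operand (`3 > M` dispatches to `M.__lt__(3)`), so methods named `__rgt__` or `__rlt__` as added in the diff are never invoked by the interpreter itself.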
49,706 | 23.10.2020 11:49:34 | -7,200 | d7dab73c2f4af9156dbf672d4165f06e59988d88 | [MINOR] Fix python pca args | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/operator/algorithm.py",
"new_path": "src/main/python/systemds/operator/algorithm.py",
"diff": "@@ -113,18 +113,6 @@ def pca(x: OperationNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNo\nraise ValueError(\n\"Invalid number of dimensions in PCA, number must be integer above 0\")\n- if 'scale' in kwargs.keys():\n- if kwargs.get('scale') == True:\n- kwargs.set('scale', \"TRUE\")\n- elif kwargs.get('scale' == False):\n- kwargs.set('scale', \"FALSE\")\n-\n- if 'center' in kwargs.keys():\n- if kwargs.get('center') == True:\n- kwargs.set('center', \"TRUE\")\n- elif kwargs.get('center' == False):\n- kwargs.set('center', \"FALSE\")\n-\nparams_dict = {'X': x}\nparams_dict.update(kwargs)\nreturn OperationNode(x.sds_context, 'pca', named_input_nodes=params_dict, output_type=OutputType.LIST, number_of_outputs=2)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix python pca args |
49,706 | 23.10.2020 13:08:22 | -7,200 | bcac553e00173fcd8ec222146b476909b138d54e | KMeans Predict builtin
This commit includes tests in DML and adds the predict function in Python. | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/builtin/kmeansPredict.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-----------------------------------------------------------------------------\n+\n+# Builtin function that does predictions based on a set of centroids provided.\n+#\n+# INPUT PARAMETERS:\n+# ----------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ----------------------------------------------------------------------------\n+# X Double --- The input Matrix to do KMeans on.\n+# C Double --- The input Centroids to map X onto.\n+#\n+# RETURN VALUES\n+# ----------------------------------------------------------------------------\n+# NAME TYPE DEFAULT MEANING\n+# ----------------------------------------------------------------------------\n+# Y String \"Y.mtx\" The mapping of records to centroids\n+# ----------------------------------------------------------------------------\n+\n+\n+m_kmeansPredict = function(Matrix[Double] X, Matrix[Double] C)\n+ return (Matrix[Double] Y)\n+{\n+\n+ D = -2 * (X %*% t(C)) + t(rowSums (C ^ 2));\n+ P = (D <= rowMins (D));\n+ aggr_P = t(cumsum (t(P)));\n+ Y = rowSums (aggr_P == 0) + 1\n+\n+}\n+\n"
},
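Editor's note on the kmeansPredict.dml diff above: the assignment trick expands the squared distance ||x - c||^2 = ||x||^2 - 2*x.c + ||c||^2 and drops the ||x||^2 term, which is constant per row and cannot change which centroid is closest. A NumPy equivalent that returns the same 1-indexed ids as the DML output:

```python
import numpy as np

def kmeans_predict(X, C):
    """Map each row of X to the nearest centroid in C (1-indexed ids)."""
    # ||x||^2 is constant per row, so -2 X C^T + ||c||^2 suffices for argmin
    D = -2.0 * (X @ C.T) + np.sum(C * C, axis=1)  # broadcasts the row of norms
    return np.argmin(D, axis=1) + 1               # DML ids start at 1

X = np.array([[1.0, 1.0], [-1.0, -1.0]])
C = np.array([[1.0, 1.0], [-1.0, 1.0], [-1.0, -1.0], [1.0, -1.0]])
print(kmeans_predict(X, C))  # [1 3]
```

The DML's cumsum-based formulation (`rowSums(aggr_P == 0) + 1`) is just a vectorized way of taking the index of the first row minimum, which matches the argmin here.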
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"new_path": "src/main/java/org/apache/sysds/common/Builtins.java",
"diff": "@@ -119,6 +119,7 @@ public enum Builtins {\nISNAN(\"is.nan\", false),\nISINF(\"is.infinite\", false),\nKMEANS(\"kmeans\", true),\n+ KMEANSPREDICT(\"kmeansPredict\", true),\nL2SVM(\"l2svm\", true),\nLASSO(\"lasso\", true),\nLENGTH(\"length\", false),\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/operator/algorithm.py",
"new_path": "src/main/python/systemds/operator/algorithm.py",
"diff": "@@ -92,6 +92,21 @@ def kmeans(x: OperationNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> Operatio\nparams_dict.update(kwargs)\nreturn OperationNode(x.sds_context, 'kmeans', named_input_nodes=params_dict, output_type=OutputType.LIST, number_of_outputs=2)\n+def kmeansPredict(X: OperationNode, C: OperationNode) -> OperationNode:\n+ \"\"\"\n+ Perform Kmeans Predict, note that the Ids returned are 1 indexed.\n+\n+ :param X: The matrix to classify.\n+ :param Y: The Clusters to use for classification into.\n+ :return: `OperationNode` containing a matrix of classifications of Id's of specific clusters in C.\n+ \"\"\"\n+ X._check_matrix_op()\n+ C._check_matrix_op()\n+\n+ params_dict = {'X' : X, 'C' : C}\n+ return OperationNode(X.sds_context, 'kmeansPredict', named_input_nodes=params_dict, output_type=OutputType.MATRIX, shape=(1, X.shape[0]))\n+\n+\ndef pca(x: OperationNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:\n\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/tests/algorithms/test_kmeans.py",
"new_path": "src/main/python/tests/algorithms/test_kmeans.py",
"diff": "@@ -24,7 +24,7 @@ import unittest\nimport numpy as np\nfrom systemds.context import SystemDSContext\nfrom systemds.matrix import Matrix\n-from systemds.operator.algorithm import kmeans\n+from systemds.operator.algorithm import kmeans, kmeansPredict\nclass TestKMeans(unittest.TestCase):\n@@ -59,6 +59,29 @@ class TestKMeans(unittest.TestCase):\ncorners.add(\"nn\")\nself.assertTrue(len(corners) == 4)\n+ def test_500x2(self):\n+ \"\"\"\n+ This test is based on statistics, that if we run kmeans, on a normal distributed dataset, centered around 0\n+ and use 4 clusters then they will be located in each one corner.\n+ This test uses the prediction builtin.\n+ \"\"\"\n+ features = self.generate_matrices_for_k_means((500, 2), seed=1304)\n+ [c, _] = kmeans(features, k=4).compute()\n+ C = Matrix(self.sds, c)\n+ elm = Matrix(self.sds, np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]]))\n+ res = kmeansPredict(elm, C).compute()\n+ corners = set()\n+ for x in res:\n+ if x == 1:\n+ corners.add(\"pp\")\n+ elif x == 2:\n+ corners.add(\"pn\")\n+ elif x == 3:\n+ corners.add(\"np\")\n+ else:\n+ corners.add(\"nn\")\n+ self.assertTrue(len(corners) == 4)\n+\ndef test_invalid_input_1(self):\nfeatures = Matrix(self.sds, np.array([]))\nwith self.assertRaises(ValueError) as context:\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysds/test/functions/builtin/BuiltinKmeansPredictTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.test.functions.builtin;\n+\n+import java.util.HashMap;\n+\n+import org.apache.sysds.api.DMLScript;\n+import org.apache.sysds.common.Types;\n+import org.apache.sysds.hops.OptimizerUtils;\n+import org.apache.sysds.lops.LopProperties;\n+import org.apache.sysds.runtime.matrix.data.MatrixValue.CellIndex;\n+import org.apache.sysds.test.AutomatedTestBase;\n+import org.apache.sysds.test.TestConfiguration;\n+import org.apache.sysds.test.TestUtils;\n+import org.junit.Assert;\n+import org.junit.Test;\n+\n+public class BuiltinKmeansPredictTest extends AutomatedTestBase {\n+ private final static String TEST_NAME = \"kmeansPredict\";\n+ private final static String TEST_DIR = \"functions/builtin/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + BuiltinKmeansPredictTest.class.getSimpleName() + \"/\";\n+ private final static double eps = 1e-10;\n+ private final static int rows = 1320;\n+ private final static int cols = 32;\n+ private final static double spSparse = 0.3;\n+ private final static double spDense = 0.7;\n+ private final static double max_iter = 50;\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] {\"res\"}));\n+ }\n+\n+ @Test\n+ public void testKMeansDenseBinSingleRewritesCP() {\n+ runKMeansTest(false, 2, 1, true, LopProperties.ExecType.CP);\n+ }\n+\n+ private void runKMeansTest(boolean sparse, int centroids, int runs, boolean rewrites,\n+ LopProperties.ExecType instType) {\n+ Types.ExecMode platformOld = setExecMode(instType);\n+\n+ boolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+\n+ try {\n+ loadTestConfiguration(getTestConfiguration(TEST_NAME));\n+\n+ double sparsity = sparse ? 
spSparse : spDense;\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[] {\"-nvargs\", \"X=\" + input(\"X\"), \"res=\" + output(\"res\"), \"k=\" + centroids,\n+ \"runs=\" + runs, \"eps=\" + eps, \"max_iter=\" + max_iter};\n+\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = rewrites;\n+\n+ // generate actual datasets\n+ double[][] X = getRandomMatrix(rows, cols, 0, 1, sparsity, 714);\n+ writeInputMatrixWithMTD(\"X\", X, true);\n+\n+ runTest(null);\n+ HashMap<CellIndex, Double> res = readDMLScalarFromHDFS(\"res\");\n+ Assert.assertTrue(res.values().size() == 1);\n+ Assert.assertEquals(res.values().toArray()[0] , 1.);\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = oldFlag;\n+ OptimizerUtils.ALLOW_AUTO_VECTORIZATION = true;\n+ OptimizerUtils.ALLOW_OPERATOR_FUSION = true;\n+ }\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/builtin/kmeansPredict.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read($X)\n+\n+[C, Y] = kmeans(X, $k, $runs, $max_iter, $eps, TRUE, 50)\n+Y_1 = kmeansPredict(X, C)\n+\n+res = mean(Y==Y_1)\n+write(res, $res)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2701] KMeans Predict builtin
This commit include tests in dml and the addition of
the predict in python. |
49,706 | 23.10.2020 13:58:35 | -7,200 | 1622e241340619916831568090d15144e0f39f94 | Row & Column Bind Python | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/operator/operation_node.py",
"new_path": "src/main/python/systemds/operator/operation_node.py",
"diff": "@@ -489,3 +489,33 @@ class OperationNode(DAGNode):\nnamed_input_nodes = {\"X\": self, \"numClasses\": num_classes}\nreturn OperationNode(self.sds_context, 'toOneHot', named_input_nodes=named_input_nodes, shape=(self.shape[0], num_classes))\n+\n+ def rbind(self, other) -> 'OperationNode':\n+ \"\"\"\n+ Row-wise matrix concatenation, by concatenating the second matrix as additional rows to the first matrix.\n+ :param: The other matrix to bind to the right hand side\n+ :return: The OperationNode containing the concatenated matrices.\n+ \"\"\"\n+\n+ self._check_matrix_op()\n+ other._check_matrix_op()\n+\n+ if self.shape[1] != other.shape[1]:\n+ raise ValueError(\"The input matrices to rbind does not have the same number of columns\")\n+\n+ return OperationNode(self.sds_context, 'rbind', [self, other], shape=(self.shape[0] + other.shape[0], self.shape[1]))\n+\n+ def cbind(self, other) -> 'OperationNode':\n+ \"\"\"\n+ Column-wise matrix concatenation, by concatenating the second matrix as additional columns to the first matrix.\n+ :param: The other matrix to bind to the right hand side.\n+ :return: The OperationNode containing the concatenated matrices.\n+ \"\"\"\n+\n+ self._check_matrix_op()\n+ other._check_matrix_op()\n+\n+ if self.shape[0] != other.shape[0]:\n+ raise ValueError(\"The input matrices to cbind does not have the same number of columns\")\n+\n+ return OperationNode(self.sds_context, 'cbind', [self, other], shape=(self.shape[0], self.shape[1] + other.shape[1]))\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/python/tests/matrix/test_r_c_bind.py",
"diff": "+# -------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+# -------------------------------------------------------------\n+\n+import math\n+import os\n+import random\n+import sys\n+import unittest\n+\n+import numpy as np\n+import scipy.stats as st\n+from systemds.context import SystemDSContext\n+from systemds.matrix import Matrix\n+\n+\n+class TestRBind(unittest.TestCase):\n+\n+ sds: SystemDSContext = None\n+\n+ @classmethod\n+ def setUpClass(cls):\n+ cls.sds = SystemDSContext()\n+\n+ @classmethod\n+ def tearDownClass(cls):\n+ cls.sds.close()\n+\n+ def test_r_bind(self):\n+ m1 = Matrix(self.sds, np.zeros((10, 1)))\n+ m2 = Matrix(self.sds, np.ones((10, 1)))\n+ res = m1.rbind(m2).compute()\n+ npres = np.vstack((np.zeros((10, 1)), np.ones((10, 1))))\n+ self.assertTrue(np.allclose(res, npres))\n+\n+ def test_r_bind_shape(self):\n+ m1 = Matrix(self.sds, np.zeros((10, 3)))\n+ m2 = Matrix(self.sds, np.ones((11, 3)))\n+ res = m1.rbind(m2).shape\n+ npres = np.vstack((np.zeros((10, 3)), np.ones((11, 3)))).shape\n+ self.assertTrue(np.allclose(res, npres))\n+\n+ def test_c_bind(self):\n+ m1 = Matrix(self.sds, np.zeros((10, 6)))\n+ m2 = Matrix(self.sds, np.ones((10, 7)))\n+ res = m1.cbind(m2).compute()\n+ npres = np.hstack((np.zeros((10, 6)), np.ones((10, 7))))\n+ self.assertTrue(np.allclose(res, npres))\n+\n+ def test_c_bind_shape(self):\n+ m1 = Matrix(self.sds, np.zeros((10, 3)))\n+ m2 = Matrix(self.sds, np.ones((10, 4)))\n+ res = m1.cbind(m2).shape\n+ npres = np.hstack((np.zeros((10, 3)), np.ones((10, 4)))).shape\n+ self.assertTrue(np.allclose(res, npres))\n+\n+\n+if __name__ == \"__main__\":\n+ unittest.main(exit=False)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2702] Row & Column Bind Python |
49,689 | 25.10.2020 10:55:49 | -3,600 | c05f8e2538ec2a05ccd99658f02a4beeb2823604 | [MINOR] Lineage trace and cache transform frame operations | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/ParameterizedBuiltinCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/ParameterizedBuiltinCPInstruction.java",
"diff": "@@ -444,7 +444,8 @@ public class ParameterizedBuiltinCPInstruction extends ComputationCPInstruction\nreturn Pair.of(output.getName(), new LineageItem(getOpcode(),\nLineageItemUtils.getLineage(ec, target, max, dir, cast, ignore)));\n}\n- else if (opcode.equalsIgnoreCase(\"transformdecode\")) {\n+ else if (opcode.equalsIgnoreCase(\"transformdecode\") ||\n+ opcode.equalsIgnoreCase(\"transformapply\")) {\nCPOperand target = getTargetOperand();\nCPOperand meta = getLiteral(\"meta\", ValueType.UNKNOWN, DataType.FRAME);\nCPOperand spec = getStringLiteral(\"spec\");\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"diff": "@@ -39,7 +39,8 @@ public class LineageCacheConfig\n\"rightIndex\", \"leftIndex\", \"groupedagg\", \"r'\", \"solve\", \"spoof\",\n\"uamean\", \"max\", \"min\", \"ifelse\", \"-\", \"sqrt\", \">\", \"uak+\", \"<=\",\n\"^\", \"uamax\", \"uark+\", \"uacmean\", \"eigen\", \"ctableexpand\", \"replace\",\n- \"^2\", \"uack+\", \"tak+*\", \"uacsqk+\", \"uark+\", \"n+\", \"uarimax\", \"qsort\", \"qpick\"\n+ \"^2\", \"uack+\", \"tak+*\", \"uacsqk+\", \"uark+\", \"n+\", \"uarimax\", \"qsort\",\n+ \"qpick\", \"transformencode\", \"transformapply\"\n//TODO: Reuse everything.\n};\nprivate static String[] REUSE_OPCODES = new String[] {};\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Lineage trace and cache transform frame operations |
49,689 | 25.10.2020 10:58:21 | -3,600 | a047fe5ad223f889de46373d851573fcf9123fba | Fix python distribution setup files | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/setup.py",
"new_path": "src/main/python/setup.py",
"diff": "@@ -37,7 +37,7 @@ ARTIFACT_VERSION_SHORT = ARTIFACT_VERSION.split(\"-\")[0]\nREQUIRED_PACKAGES = [\n'numpy >= 1.8.2',\n- 'py4j >= 0.10.0',\n+ 'py4j >= 0.10.9',\n'jinja2 >= 2.11.2',\n'onnx >= 1.7.0',\n'requests >= 2.24.0'\n@@ -54,7 +54,7 @@ setup(\nlong_description=open('README.md').read(),\nurl='https://github.com/apache/systemds',\nauthor='SystemDS',\n- author_email='[email protected]',\n+ author_email='[email protected]',\npackages=find_packages(),\ninstall_requires=REQUIRED_PACKAGES,\ninclude_package_data=True,\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemds/project_info.py",
"new_path": "src/main/python/systemds/project_info.py",
"diff": "# via string substitutions using the maven-resources-plugin\n__project_group_id__ = 'org.apache.systemds'\n__project_artifact_id__ = 'systemds'\n-__project_version__ = '2.0.0-SNAPSHOT'\n+__project_version__ = '2.1.0-SNAPSHOT'\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMDS-2667] Fix python distribution setup files |
49,689 | 27.10.2020 11:07:23 | -3,600 | ad590ddebfa3328a4260c5320f957115cc203211 | [MINOR] Lineage tests cleanup
This patch removes unnecessary prints of lineage traces.
This also removes transformencode from reusable instruction list
as we don't support caching of frames yet. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"new_path": "src/main/java/org/apache/sysds/runtime/lineage/LineageCacheConfig.java",
"diff": "@@ -40,7 +40,7 @@ public class LineageCacheConfig\n\"uamean\", \"max\", \"min\", \"ifelse\", \"-\", \"sqrt\", \">\", \"uak+\", \"<=\",\n\"^\", \"uamax\", \"uark+\", \"uacmean\", \"eigen\", \"ctableexpand\", \"replace\",\n\"^2\", \"uack+\", \"tak+*\", \"uacsqk+\", \"uark+\", \"n+\", \"uarimax\", \"qsort\",\n- \"qpick\", \"transformencode\", \"transformapply\"\n+ \"qpick\", \"transformapply\"\n//TODO: Reuse everything.\n};\nprivate static String[] REUSE_OPCODES = new String[] {};\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/lineage/LineageTraceBuiltin.dml",
"new_path": "src/test/scripts/functions/lineage/LineageTraceBuiltin.dml",
"diff": "@@ -24,9 +24,11 @@ X = rand(rows=$3, cols=$4, seed=7);\nR = X;\nfor(i in 1:7) {\nR = R + 1 / 2;\n- print(lineage(R));\n+ #print(lineage(R));\n+ linR = lineage(R);\n}\nR = R + X;\n-print(lineage(R));\n+#print(lineage(R));\n+linR = lineage(R);\nwrite(R, $2);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/lineage/LineageTraceFun1.dml",
"new_path": "src/test/scripts/functions/lineage/LineageTraceFun1.dml",
"diff": "@@ -37,5 +37,5 @@ Y = X^2;\n[R, S] = foo(Y, X);\nR = R - S;\n-print(lineage(R));\n+#print(lineage(R));\nwrite(R, $2);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/lineage/LineageTraceFun2.dml",
"new_path": "src/test/scripts/functions/lineage/LineageTraceFun2.dml",
"diff": "@@ -36,5 +36,5 @@ Y = X^2;\n[R, S] = foo(Y, X);\nR = R - S;\n-print(lineage(R));\n+#print(lineage(R));\nwrite(R, $2);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/lineage/LineageTraceParfor1.dml",
"new_path": "src/test/scripts/functions/lineage/LineageTraceParfor1.dml",
"diff": "@@ -28,5 +28,5 @@ parfor(i in 1:ncol(X)) {\nR[,i] = rowSums(Xi)+colSums(Xi);\n}\n-print(lineage(R));\n+#print(lineage(R));\nwrite(R, $1);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/lineage/LineageTraceParfor2.dml",
"new_path": "src/test/scripts/functions/lineage/LineageTraceParfor2.dml",
"diff": "@@ -28,5 +28,5 @@ parfor(i in 1:ncol(X), check=0, opt=None, mode=REMOTE_SPARK) {\nR[,i] = rowSums(Xi)+colSums(Xi);\n}\n-print(lineage(R));\n+#print(lineage(R));\nwrite(R, $1);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/lineage/LineageTraceParfor3.dml",
"new_path": "src/test/scripts/functions/lineage/LineageTraceParfor3.dml",
"diff": "@@ -27,5 +27,5 @@ parfor(i in 1:ncol(X), check=0, opt=None, mode=REMOTE_SPARK) {\nR[1, i] = sum(Xi)\n}\n-print(lineage(R));\n+#print(lineage(R));\nwrite(R, $1);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/lineage/LineageTraceSpark5.dml",
"new_path": "src/test/scripts/functions/lineage/LineageTraceSpark5.dml",
"diff": "@@ -33,6 +33,6 @@ for (v in 1:10, check=0) { # parallelizable\nms[,(v-1)*3+1:v*3] = mv\n}\n-print(lineage(ms));\n+#print(lineage(ms));\nwrite(ms, $2);\nwrite(X, $3);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Lineage tests cleanup
This patch removes unnecessary prints of lineage traces.
This also removes transformencode from reusable instruction list
as we don't support caching of frames yet. |
49,706 | 29.10.2020 10:36:53 | -3,600 | 23cc33ab2f0dfa7cbb0db6b5087a5b255dea6bfa | [MINOR] Fix fed local write
This commit fixes the federated write, such that it does not
pull the data to master, if a write is called with the federated
file format.
(This commit still does not allow writing federated modified matrices) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"diff": "@@ -482,6 +482,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\nif( _data==null && isEmpty(true) ) {\ntry {\nif( isFederated() ) {\n+ LOG.error(\"Federated pull all data\");\n_data = readBlobFromFederated( _fedMapping );\n//mark for initial local write despite read operation\n@@ -747,7 +748,6 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\nif( LOG.isTraceEnabled() )\nLOG.trace(\"Export data \"+hashCode()+\" \"+fName);\nlong t0 = DMLScript.STATISTICS ? System.nanoTime() : 0;\n-\n//prevent concurrent modifications\nif ( !isAvailableToRead() )\nthrow new DMLRuntimeException(\"MatrixObject not available to read.\");\n@@ -783,6 +783,9 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n{\n// CASE 1: dirty in-mem matrix or pWrite w/ different format (write matrix to fname; load into memory if evicted)\n// a) get the matrix\n+ boolean federatedWrite = outputFormat.contains(\"federated\");\n+ if( ! federatedWrite){\n+\nif( isEmpty(true))\n{\n//read data from HDFS if required (never read before), this applies only to pWrite w/ different output formats\n@@ -793,8 +796,9 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n_data = readBlobFromHDFS( _hdfsFileName );\nelse if( getRDDHandle() != null )\n_data = readBlobFromRDD( getRDDHandle(), new MutableBoolean() );\n- else\n+ else {\n_data = readBlobFromFederated( getFedMapping() );\n+ }\nsetDirty(false);\n}\n@@ -806,6 +810,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\nif( _data == null )\ngetCache();\nacquire( false, _data==null ); //incl. read matrix if evicted\n+ }\n// b) write the matrix\ntry {\n@@ -818,6 +823,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\nthrow new DMLRuntimeException(\"Export to \" + fName + \" failed.\", e);\n}\nfinally {\n+ if(!federatedWrite)\nrelease();\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/FEDInstructionUtils.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/FEDInstructionUtils.java",
"diff": "@@ -36,6 +36,7 @@ import org.apache.sysds.runtime.instructions.cp.MultiReturnParameterizedBuiltinC\nimport org.apache.sysds.runtime.instructions.cp.ParameterizedBuiltinCPInstruction;\nimport org.apache.sysds.runtime.instructions.cp.ReorgCPInstruction;\nimport org.apache.sysds.runtime.instructions.cp.VariableCPInstruction;\n+import org.apache.sysds.runtime.instructions.cp.VariableCPInstruction.VariableOperationCode;\nimport org.apache.sysds.runtime.instructions.spark.AggregateUnarySPInstruction;\nimport org.apache.sysds.runtime.instructions.spark.AppendGAlignedSPInstruction;\nimport org.apache.sysds.runtime.instructions.spark.AppendGSPInstruction;\n@@ -126,6 +127,16 @@ public class FEDInstructionUtils {\nif( mo.isFederated() )\nfedinst = ReorgFEDInstruction.parseInstruction(rinst.getInstructionString());\n}\n+ else if(inst instanceof VariableCPInstruction ){\n+ VariableCPInstruction ins = (VariableCPInstruction) inst;\n+\n+ if(ins.getVariableOpcode() == VariableOperationCode.Write\n+ && ins.getInput3().getName().contains(\"federated\")){\n+ fedinst = VariableFEDInstruction.parseInstruction(ins);\n+ }\n+\n+ }\n+\n//set thread id for federated context management\nif( fedinst != null ) {\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/VariableFEDInstruction.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysds.runtime.instructions.fed;\n+\n+import org.apache.commons.lang3.tuple.Pair;\n+import org.apache.commons.logging.Log;\n+import org.apache.commons.logging.LogFactory;\n+import org.apache.sysds.runtime.DMLRuntimeException;\n+import org.apache.sysds.runtime.controlprogram.context.ExecutionContext;\n+import org.apache.sysds.runtime.instructions.cp.VariableCPInstruction;\n+import org.apache.sysds.runtime.instructions.cp.VariableCPInstruction.VariableOperationCode;\n+import org.apache.sysds.runtime.lineage.LineageItem;\n+import org.apache.sysds.runtime.lineage.LineageTraceable;\n+\n+public class VariableFEDInstruction extends FEDInstruction implements LineageTraceable {\n+ private static final Log LOG = LogFactory.getLog(VariableFEDInstruction.class.getName());\n+\n+ private final VariableCPInstruction _in;\n+\n+ protected VariableFEDInstruction(VariableCPInstruction in) {\n+ super(null, in.getOperator(), in.getOpcode(), in.getInstructionString());\n+ _in = in;\n+ }\n+\n+ public static VariableFEDInstruction parseInstruction(VariableCPInstruction cpInstruction) {\n+ return new VariableFEDInstruction(cpInstruction);\n+ }\n+\n+ @Override\n+ public void processInstruction(ExecutionContext ec) {\n+ VariableOperationCode opcode = _in.getVariableOpcode();\n+ switch(opcode) {\n+\n+ case Write:\n+ processWriteInstruction(ec);\n+ break;\n+\n+ default:\n+ throw new DMLRuntimeException(\"Unsupported Opcode for federated Variable Instruction : \" + opcode);\n+ }\n+ }\n+\n+ private void processWriteInstruction(ExecutionContext ec) {\n+ LOG.error(\"processing write command federated\");\n+ // TODO Add write command to the federated site if the matrix has been modified\n+ // this has to be done while appending some string to the federated output file.\n+ // furthermore the outputted file on the federated sites path should be returned\n+ // the controller.\n+ _in.processInstruction(ec);\n+ }\n+\n+ @Override\n+ public Pair<String, LineageItem> getLineageItem(ExecutionContext ec) {\n+ return _in.getLineageItem(ec);\n+ }\n+\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/federated/io/FederatedWriterTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/federated/io/FederatedWriterTest.java",
"diff": "@@ -21,6 +21,8 @@ package org.apache.sysds.test.functions.federated.io;\nimport java.util.Arrays;\nimport java.util.Collection;\n+import org.apache.commons.logging.Log;\n+import org.apache.commons.logging.LogFactory;\nimport org.apache.sysds.api.DMLScript;\nimport org.apache.sysds.common.Types;\nimport org.apache.sysds.runtime.meta.MatrixCharacteristics;\n@@ -36,7 +38,7 @@ import org.junit.runners.Parameterized;\[email protected]\npublic class FederatedWriterTest extends AutomatedTestBase {\n- // private static final Log LOG = LogFactory.getLog(FederatedWriterTest.class.getName());\n+ private static final Log LOG = LogFactory.getLog(FederatedWriterTest.class.getName());\nprivate final static String TEST_DIR = \"functions/federated/\";\nprivate final static String TEST_NAME = \"FederatedWriterTest\";\nprivate final static String TEST_CLASS_DIR = TEST_DIR + FederatedWriterTest.class.getSimpleName() + \"/\";\n@@ -96,8 +98,10 @@ public class FederatedWriterTest extends AutomatedTestBase {\n// Run reader and write a federated json to enable the rest of the test\nfullDMLScriptName = SCRIPT_DIR + \"functions/federated/io/FederatedReaderTestCreate.dml\";\nprogramArgs = new String[] {\"-stats\", \"-explain\",\"-args\", input(\"X1\"), input(\"X2\"), port1 + \"\", port2 + \"\", input(\"X.json\")};\n- // String writer = runTest(null).toString();\n- runTest(null);\n+ String writer = runTest(null).toString();\n+ // runTest(null);\n+ LOG.error(writer);\n+ LOG.error(\"Writing Done\");\n// Run reference dml script with normal matrix\nfullDMLScriptName = SCRIPT_DIR + \"functions/federated/io/FederatedReaderTest.dml\";\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix fed local write
This commit fixes the federated write, such that it does not
pull the data to master, if a write is called with the federated
file format.
(This commit still does not allow writing federated modified matrices) |
49,706 | 28.10.2020 18:12:54 | -3,600 | c71a4a658009c2765b6111c99d67e31c4c0b6c74 | [MINOR] Add seed to kmeans | [
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/kmeans.dml",
"new_path": "scripts/builtin/kmeans.dml",
"diff": "m_kmeans = function(Matrix[Double] X, Integer k = 10, Integer runs = 10, Integer max_iter = 1000,\n- Double eps = 0.000001, Boolean is_verbose = FALSE, Integer avg_sample_size_per_centroid = 50)\n+ Double eps = 0.000001, Boolean is_verbose = FALSE, Integer avg_sample_size_per_centroid = 50,\n+ Integer seed = -1)\nreturn (Matrix[Double] C, Matrix[Double] Y)\n{\n+ if( is_verbose )\nprint (\"BEGIN K-MEANS SCRIPT\");\n+\nnum_records = nrow (X);\nnum_features = ncol (X);\nnum_centroids = k;\nnum_runs = runs;\n- if(is_verbose == TRUE)\n+ if(is_verbose)\nprint(\"dim X=\" + nrow(X) + \"x\" + ncol(X))\nsumXsq = sum (X ^ 2);\n# STEP 1: INITIALIZE CENTROIDS FOR ALL RUNS FROM DATA SAMPLES:\n+ if( is_verbose )\nprint (\"Taking data samples for initialization...\");\n[sample_maps, samples_vs_runs_map, sample_block_size] = get_sample_maps(\n@@ -69,7 +73,9 @@ m_kmeans = function(Matrix[Double] X, Integer k = 10, Integer runs = 10, Integer\nX_samples = sample_maps %*% X;\nX_samples_sq_norms = rowSums (X_samples ^ 2);\n+ if( is_verbose )\nprint (\"Initializing the centroids for all runs...\");\n+\nAll_Centroids = matrix (0, num_runs * num_centroids, num_features);\n# We select centroids according to the k-Means++ heuristic applied to a sample of X\n@@ -86,7 +92,7 @@ m_kmeans = function(Matrix[Double] X, Integer k = 10, Integer runs = 10, Integer\n# Select the i-th centroid in each sample as a random sample row id with\n# probability ~ min_distances:\n- random_row = rand(rows = 1, cols = num_runs);\n+ random_row = rand(rows = 1, cols = num_runs, seed = seed);\nthreshold_matrix = random_row * cdf_min_distances [sample_block_size, ];\ncentroid_ids = t(colSums (cdf_min_distances < threshold_matrix)) + 1;\n@@ -112,6 +118,7 @@ m_kmeans = function(Matrix[Double] X, Integer k = 10, Integer runs = 10, Integer\nfinal_wcss = matrix (0, rows = num_runs, cols = 1);\nnum_iterations = matrix (0, rows = num_runs, cols = 1);\n+ if( is_verbose )\nprint (\"Performing k-means iterations for all runs...\");\nparfor (run_index in 1 : num_runs, check = 0)\n@@ -177,9 +184,12 @@ m_kmeans = function(Matrix[Double] X, Integer k = 10, Integer runs = 10, Integer\ntermination_bitmap [, 1 : ncol(termination_bitmap_raw)] = termination_bitmap_raw;\ntermination_stats = colSums (termination_bitmap);\n+ if(is_verbose){\n+\nprint (\"Number of successful runs = \" + as.integer (as.scalar (termination_stats [1, 1])));\nprint (\"Number of incomplete runs = \" + as.integer (as.scalar (termination_stats [1, 2])));\nprint (\"Number of failed runs (with lost centroids) = \" + as.integer (as.scalar (termination_stats [1, 3])));\n+ }\nnum_successful_runs = as.scalar (termination_stats [1, 1]);\n@@ -193,6 +203,7 @@ m_kmeans = function(Matrix[Double] X, Integer k = 10, Integer runs = 10, Integer\naggr_best_index_vector = cumsum (best_index_vector);\nbest_index = as.integer (sum (aggr_best_index_vector == 0) + 1);\n+ if(is_verbose)\nprint (\"Successful runs: Best run is \" + best_index + \" with Centroid WCSS = \" + best_wcss\n+ \"; Avg WCSS = \" + avg_wcss + \"; Worst WCSS = \" + worst_wcss);\n@@ -202,12 +213,16 @@ m_kmeans = function(Matrix[Double] X, Integer k = 10, Integer runs = 10, Integer\naggr_P = t(cumsum (t(P)));\nY = rowSums (aggr_P == 0) + 1\n- if(is_verbose == TRUE)\n+ if(is_verbose)\nprint(\"dim C=\" + nrow(C) + \"x\" + ncol(C) + \", dim Y=\" + nrow(Y) + \"x\" + ncol(Y))\n- print (\"DONE.\");\n+\n}\n- else\n- stop (\"No output is produced. 
Try increasing the number of iterations and/or runs.\");\n+ else{\n+ print (\"K-means: No output is produced. Try increasing the number of iterations and/or lower eps.\");\n+ C = matrix(0, num_centroids, num_records)\n+ Y = matrix(-1, 1, num_records)\n+ }\n+\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/kmeans.dml",
"new_path": "src/test/scripts/functions/builtin/kmeans.dml",
"diff": "#-------------------------------------------------------------\nX = read($X)\n-[C, Y] = kmeans(X, $k, $runs, $max_iter, $eps, TRUE, 50)\n+[C, Y] = kmeans(X, $k, $runs, $max_iter, $eps, TRUE, 50, 1845774737537)\nwrite(C, $C)\nwrite(Y, $Y)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Add seed to kmeans |
49,706 | 29.10.2020 15:59:08 | -3,600 | 966908b60c40861a2bec84607510a4163f75396f | [MINOR] Fix error in exportData
Fix error in logic of export data, introduces while making
federated write instruction.
This error resulted in null matrices, if the matrices were not fully
generated, which some of our tests exploit. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"diff": "@@ -482,7 +482,6 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\nif( _data==null && isEmpty(true) ) {\ntry {\nif( isFederated() ) {\n- LOG.error(\"Federated pull all data\");\n_data = readBlobFromFederated( _fedMapping );\n//mark for initial local write despite read operation\n@@ -783,8 +782,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n{\n// CASE 1: dirty in-mem matrix or pWrite w/ different format (write matrix to fname; load into memory if evicted)\n// a) get the matrix\n- boolean federatedWrite = outputFormat.contains(\"federated\");\n- if( ! federatedWrite){\n+ boolean federatedWrite = (outputFormat != null ) && outputFormat.contains(\"federated\");\nif( isEmpty(true))\n{\n@@ -796,9 +794,8 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n_data = readBlobFromHDFS( _hdfsFileName );\nelse if( getRDDHandle() != null )\n_data = readBlobFromRDD( getRDDHandle(), new MutableBoolean() );\n- else {\n+ else if(!federatedWrite)\n_data = readBlobFromFederated( getFedMapping() );\n- }\nsetDirty(false);\n}\n@@ -807,6 +804,8 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n}\n}\n//get object from cache\n+ if(!federatedWrite){\n+\nif( _data == null )\ngetCache();\nacquire( false, _data==null ); //incl. read matrix if evicted\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/VariableFEDInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/fed/VariableFEDInstruction.java",
"diff": "@@ -58,7 +58,7 @@ public class VariableFEDInstruction extends FEDInstruction implements LineageTra\n}\nprivate void processWriteInstruction(ExecutionContext ec) {\n- LOG.error(\"processing write command federated\");\n+ LOG.warn(\"Processing write command federated\");\n// TODO Add write command to the federated site if the matrix has been modified\n// this has to be done while appending some string to the federated output file.\n// furthermore the outputted file on the federated sites path should be returned\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysds/test/functions/federated/io/FederatedWriterTest.java",
"new_path": "src/test/java/org/apache/sysds/test/functions/federated/io/FederatedWriterTest.java",
"diff": "@@ -21,8 +21,6 @@ package org.apache.sysds.test.functions.federated.io;\nimport java.util.Arrays;\nimport java.util.Collection;\n-import org.apache.commons.logging.Log;\n-import org.apache.commons.logging.LogFactory;\nimport org.apache.sysds.api.DMLScript;\nimport org.apache.sysds.common.Types;\nimport org.apache.sysds.runtime.meta.MatrixCharacteristics;\n@@ -38,7 +36,7 @@ import org.junit.runners.Parameterized;\[email protected]\npublic class FederatedWriterTest extends AutomatedTestBase {\n- private static final Log LOG = LogFactory.getLog(FederatedWriterTest.class.getName());\n+ // private static final Log LOG = LogFactory.getLog(FederatedWriterTest.class.getName());\nprivate final static String TEST_DIR = \"functions/federated/\";\nprivate final static String TEST_NAME = \"FederatedWriterTest\";\nprivate final static String TEST_CLASS_DIR = TEST_DIR + FederatedWriterTest.class.getSimpleName() + \"/\";\n@@ -97,11 +95,12 @@ public class FederatedWriterTest extends AutomatedTestBase {\n// Run reader and write a federated json to enable the rest of the test\nfullDMLScriptName = SCRIPT_DIR + \"functions/federated/io/FederatedReaderTestCreate.dml\";\n- programArgs = new String[] {\"-stats\", \"-explain\",\"-args\", input(\"X1\"), input(\"X2\"), port1 + \"\", port2 + \"\", input(\"X.json\")};\n- String writer = runTest(null).toString();\n- // runTest(null);\n- LOG.error(writer);\n- LOG.error(\"Writing Done\");\n+ programArgs = new String[] {\"-stats\", \"-explain\", \"-args\", input(\"X1\"), input(\"X2\"), port1 + \"\", port2 + \"\",\n+ input(\"X.json\")};\n+ // String writer = runTest(null).toString();\n+ runTest(null);\n+ // LOG.error(writer);\n+ // LOG.error(\"Writing Done\");\n// Run reference dml script with normal matrix\nfullDMLScriptName = SCRIPT_DIR + \"functions/federated/io/FederatedReaderTest.dml\";\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix error in exportData
Fix error in logic of export data, introduces while making
federated write instruction.
This error resulted in null matrices, if the matrices were not fully
generated, which some of our tests exploit. |
49,706 | 29.10.2020 17:02:13 | -3,600 | 19266fdeffd622c7d10eaeae217d53791e87d9e3 | [MINOR] Fix federated write
Unfortunately the last fix made the previous fix break.
This commit should now finally make the write work for federated. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"new_path": "src/main/java/org/apache/sysds/runtime/controlprogram/caching/CacheableData.java",
"diff": "@@ -784,7 +784,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n// a) get the matrix\nboolean federatedWrite = (outputFormat != null ) && outputFormat.contains(\"federated\");\n- if( isEmpty(true))\n+ if( isEmpty(true) && !federatedWrite)\n{\n//read data from HDFS if required (never read before), this applies only to pWrite w/ different output formats\n//note: for large rdd outputs, we compile dedicated writespinstructions (no need to handle this here)\n@@ -978,6 +978,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n// Federated read\nprotected T readBlobFromFederated(FederationMap fedMap) throws IOException {\n+ LOG.info(\"Pulling data from federated sites\");\nMetaDataFormat iimd = (MetaDataFormat) _metaData;\nDataCharacteristics dc = iimd.getDataCharacteristics();\nreturn readBlobFromFederated(fedMap, dc.getDims());\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix federated write
Unfortunately the last fix made the previous fix break.
This commit should now finally make the write work for federated. |
49,706 | 30.10.2020 11:40:16 | -3,600 | 4898c955d54bdb44be32e1f5d228f737fffadc88 | [DOCS] Kmeans update docs | [
{
"change_type": "MODIFY",
"old_path": "docs/site/builtins-reference.md",
"new_path": "docs/site/builtins-reference.md",
"diff": "@@ -568,7 +568,7 @@ The kmeans() implements the KMeans Clustering algorithm.\n### Usage\n```r\n-kmeans(X = X, k = 20, runs = 10, max_iter = 5000, eps = 0.000001, is_verbose = FALSE, avg_sample_size_per_centroid = 50)\n+kmeans(X = X, k = 20, runs = 10, max_iter = 5000, eps = 0.000001, is_verbose = FALSE, avg_sample_size_per_centroid = 50, seed = -1)\n```\n### Arguments\n@@ -581,6 +581,8 @@ kmeans(X = X, k = 20, runs = 10, max_iter = 5000, eps = 0.000001, is_verbose = F\n| max_iter | Int | `100` |Max no. of iterations allowed |\n| eps | Double | `0.000001` | Tolerance (epsilon) for WCSS change ratio |\n| is_verbose | Boolean | FALSE | do not print per-iteration stats |\n+| avg_sample_size_per_centroid | int | 50 | Number of samples to make in the initialization |\n+| seed | int | -1 | The seed used for initial sampling. If set to -1 random seeds are selected. |\n### Returns\n@@ -593,7 +595,7 @@ kmeans(X = X, k = 20, runs = 10, max_iter = 5000, eps = 0.000001, is_verbose = F\n```r\nX = rand (rows = 3972, cols = 972)\n-kmeans(X = X, k = 20, runs = 10, max_iter = 5000, eps = 0.000001, is_verbose = FALSE, avg_sample_size_per_centroid = 50)\n+kmeans(X = X, k = 20, runs = 10, max_iter = 5000, eps = 0.000001, is_verbose = FALSE, avg_sample_size_per_centroid = 50, seed = -1)\n```\n## `lm`-Function\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/kmeans.dml",
"new_path": "scripts/builtin/kmeans.dml",
"diff": "# eps Double 0.000001 Tolerance (epsilon) for WCSS change ratio\n# is_verbose Boolean FALSE do not print per-iteration stats\n# avg_sample_size_per_centroid Int 50 Average number of records per centroid in data samples\n+# seed Int -1 The seed used for initial sampling. If set to -1 random seeds are selected.\n#\n#\n# RETURN VALUES\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/kmeansPredict.dml",
"new_path": "src/test/scripts/functions/builtin/kmeansPredict.dml",
"diff": "X = read($X)\n-[C, Y] = kmeans(X, $k, $runs, $max_iter, $eps, TRUE, 50)\n+[C, Y] = kmeans(X, $k, $runs, $max_iter, $eps, TRUE, 50, 1324)\nY_1 = kmeansPredict(X, C)\nres = mean(Y==Y_1)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/scripts/functions/builtin/smote.dml",
"new_path": "src/test/scripts/functions/builtin/smote.dml",
"diff": "@@ -30,7 +30,7 @@ T = read($T);\nA_B = rbind(A, B)\nn = nrow(A_B)\n# group data into k=2 clusters\n-[C, Y] = kmeans(rbind(A_B, T), 2, 10, 100, 0.000001, FALSE, 50)\n+[C, Y] = kmeans(rbind(A_B, T), 2, 10, 100, 0.000001, FALSE, 50, 314)\n# check if the instances of A and B fall in same cluster\ncheck = matrix(as.scalar(Y[1,1]), n, 1)\ntestSum = sum(check - Y[1:n,])\n"
}
] | Java | Apache License 2.0 | apache/systemds | [DOCS] Kmeans update docs |
49,689 | 30.10.2020 14:40:51 | -3,600 | 079a57b3f844a712b0ff529e166e0ad1c713ba01 | [MINOR] Fix lineage tracing of SAMPLE | [
{
"change_type": "MODIFY",
"old_path": "scripts/builtin/smote.dml",
"new_path": "scripts/builtin/smote.dml",
"diff": "@@ -68,7 +68,11 @@ return (Matrix[Double] Y) {\nsynthetic_samples = matrix(0, iterLim*ncol(knn_index), ncol(X))\n# shuffle the nn indexes\n- rand_index = ifelse(k < iterLim, sample(k, iterLim, TRUE, 42), sample(k, iterLim, 42))\n+ #rand_index = ifelse(k < iterLim, sample(k, iterLim, TRUE, 42), sample(k, iterLim, 42))\n+ if (k < iterLim)\n+ rand_index = sample(k, iterLim, TRUE, 42);\n+ else\n+ rand_index = sample(k, iterLim, 42);\nwhile(iter < iterLim)\n{\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/DataGenCPInstruction.java",
"new_path": "src/main/java/org/apache/sysds/runtime/instructions/cp/DataGenCPInstruction.java",
"diff": "@@ -409,8 +409,11 @@ public class DataGenCPInstruction extends UnaryCPInstruction {\n}\n//replace output variable name with a placeholder\ntmpInstStr = InstructionUtils.replaceOperandName(tmpInstStr);\n- tmpInstStr = replaceNonLiteral(tmpInstStr, rows, 2, ec);\n- tmpInstStr = replaceNonLiteral(tmpInstStr, cols, 3, ec);\n+ tmpInstStr = method.name().equalsIgnoreCase(\"rand\") ?\n+ replaceNonLiteral(tmpInstStr, rows, 2, ec) :\n+ replaceNonLiteral(tmpInstStr, rows, 3, ec);\n+ tmpInstStr = method.name().equalsIgnoreCase(\"rand\") ?\n+ replaceNonLiteral(tmpInstStr, cols, 3, ec) : tmpInstStr;\nbreak;\n}\ncase SEQ: {\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix lineage tracing of SAMPLE |