Dataset schema (one record per commit; field, type, observed value range):

author            int64    658 to 755k
date              string   fixed length 19
timezone          int64    -46,800 to 43.2k
hash              string   fixed length 40
message           string   length 5 to 490
mods              list     of file changes (change_type, old_path, new_path, diff)
language          string   20 distinct classes
license           string   3 distinct classes
repo              string   length 5 to 68
original_message  string   length 12 to 491
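
Reading one record is straightforward once the field names above are known. The sketch below is a minimal example, assuming the dump is exported as JSON Lines and using the placeholder file name commits.jsonl for the real export; the timezone column appears to be a UTC offset in seconds (e.g., 28,800 = UTC+8, -3,600 = UTC-1).

```python
import json

# Placeholder path -- the actual export location/format is an assumption.
with open("commits.jsonl", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        # Cleaned message; original_message additionally keeps ticket
        # prefixes such as [SYSTEMML-1990] and PR references.
        print(record["hash"][:8], record["message"][:60])
        # "mods" is a list of file-level changes; depending on the export
        # it may be an already-parsed list or a JSON-encoded string.
        mods = record["mods"]
        if isinstance(mods, str):
            mods = json.loads(mods)
        for mod in mods:
            # change_type is MODIFY/ADD/...; old_path or new_path can be
            # null (e.g., ADD has no old_path); diff is a unified diff.
            print("  ", mod["change_type"], mod["new_path"] or mod["old_path"])
```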

author: 49,738
date: 08.11.2017 17:41:35
timezone: 28,800
hash: f366c46960aac412a862c20e07e5f844b58b05a7
message: New rewrite for order operation chains This patch introduces a new rewrite for merging subsequent order operations (data, scalar order-by column, and with consistent descending configuration and single consumers) into a single order operation with multiple order-by columns.
mods:
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "diff": "@@ -67,6 +67,7 @@ import org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.instructions.cp.ScalarObject;\nimport org.apache.sysml.runtime.instructions.cp.ScalarObjectFactory;\n+import org.apache.sysml.runtime.instructions.cp.StringInitCPInstruction;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n@@ -199,6 +200,17 @@ public class HopRewriteUtils\n&& getDoubleValueSafe((LiteralOp)hop)==val);\n}\n+ public static boolean isLiteralOfValue( Hop hop, boolean val ) {\n+ try {\n+ return (hop instanceof LiteralOp\n+ && (hop.getValueType()==ValueType.BOOLEAN)\n+ && ((LiteralOp)hop).getBooleanValue()==val);\n+ }\n+ catch(HopsException ex) {\n+ throw new RuntimeException(ex);\n+ }\n+ }\n+\npublic static ScalarObject getScalarObject( LiteralOp op )\n{\ntry {\n@@ -481,6 +493,32 @@ public class HopRewriteUtils\nreturn datagen;\n}\n+ public static Hop createDataGenOpByVal( ArrayList<LiteralOp> values, long rows, long cols )\n+ throws HopsException\n+ {\n+ StringBuilder sb = new StringBuilder();\n+ for(LiteralOp lit : values) {\n+ if(sb.length()>0)\n+ sb.append(StringInitCPInstruction.DELIM);\n+ sb.append(lit.getStringValue());\n+ }\n+ LiteralOp str = new LiteralOp(sb.toString());\n+\n+ HashMap<String, Hop> params = new HashMap<>();\n+ params.put(DataExpression.RAND_ROWS, new LiteralOp(rows));\n+ params.put(DataExpression.RAND_COLS, new LiteralOp(cols));\n+ params.put(DataExpression.RAND_MIN, str);\n+ params.put(DataExpression.RAND_MAX, str);\n+ params.put(DataExpression.RAND_SEED, new LiteralOp(DataGenOp.UNSPECIFIED_SEED));\n+\n+ Hop datagen = new DataGenOp(DataGenMethod.SINIT, new DataIdentifier(\"tmp\"), params);\n+ int blksz = ConfigurationManager.getBlocksize();\n+ datagen.setOutputBlocksizes(blksz, blksz);\n+ copyLineNumbers(values.get(0), datagen);\n+\n+ return datagen;\n+ }\n+\npublic static boolean isDataGenOp(Hop hop, DataGenMethod... 
ops) {\nreturn (hop instanceof DataGenOp\n&& ArrayUtils.contains(ops, ((DataGenOp)hop).getOp()));\n@@ -506,14 +544,21 @@ public class HopRewriteUtils\nreturn createReorg(input, ReOrgOp.TRANSPOSE);\n}\n- public static ReorgOp createReorg(Hop input, ReOrgOp rop)\n- {\n- ReorgOp transpose = new ReorgOp(input.getName(), input.getDataType(), input.getValueType(), rop, input);\n- transpose.setOutputBlocksizes(input.getRowsInBlock(), input.getColsInBlock());\n- copyLineNumbers(input, transpose);\n- transpose.refreshSizeInformation();\n+ public static ReorgOp createReorg(Hop input, ReOrgOp rop) {\n+ ReorgOp reorg = new ReorgOp(input.getName(), input.getDataType(), input.getValueType(), rop, input);\n+ reorg.setOutputBlocksizes(input.getRowsInBlock(), input.getColsInBlock());\n+ copyLineNumbers(input, reorg);\n+ reorg.refreshSizeInformation();\n+ return reorg;\n+ }\n- return transpose;\n+ public static ReorgOp createReorg(ArrayList<Hop> inputs, ReOrgOp rop) {\n+ Hop main = inputs.get(0);\n+ ReorgOp reorg = new ReorgOp(main.getName(), main.getDataType(), main.getValueType(), rop, inputs);\n+ reorg.setOutputBlocksizes(main.getRowsInBlock(), main.getColsInBlock());\n+ copyLineNumbers(main, reorg);\n+ reorg.refreshSizeInformation();\n+ return reorg;\n}\npublic static UnaryOp createUnary(Hop input, OpOp1 type)\n@@ -831,8 +876,17 @@ public class HopRewriteUtils\nreturn ret;\n}\n+ public static boolean isReorg(Hop hop, ReOrgOp type) {\n+ return hop instanceof ReorgOp && ((ReorgOp)hop).getOp()==type;\n+ }\n+\n+ public static boolean isReorg(Hop hop, ReOrgOp... types) {\n+ return ( hop instanceof ReorgOp\n+ && ArrayUtils.contains(types, ((ReorgOp) hop).getOp()));\n+ }\n+\npublic static boolean isTransposeOperation(Hop hop) {\n- return (hop instanceof ReorgOp && ((ReorgOp)hop).getOp()==ReOrgOp.TRANSPOSE);\n+ return isReorg(hop, ReOrgOp.TRANSPOSE);\n}\npublic static boolean isTransposeOperation(Hop hop, int maxParents) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "diff": "@@ -171,6 +171,7 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nhi = simplifySlicedMatrixMult(hop, hi, i); //e.g., (X%*%Y)[1,1] -> X[1,] %*% Y[,1];\nhi = simplifyConstantSort(hop, hi, i); //e.g., order(matrix())->matrix/seq;\nhi = simplifyOrderedSort(hop, hi, i); //e.g., order(matrix())->seq;\n+ hi = fuseOrderOperationChain(hi); //e.g., order(order(X,2),1) -> order(X,(12))\nhi = removeUnnecessaryReorgOperation(hop, hi, i); //e.g., t(t(X))->X; rev(rev(X))->X potentially introduced by other rewrites\nhi = simplifyTransposeAggBinBinaryChains(hop, hi, i);//e.g., t(t(A)%*%t(B)+C) -> B%*%A+t(C)\nhi = removeUnnecessaryMinus(hop, hi, i); //e.g., -(-X)->X; potentially introduced by simplify binary or dyn rewrites\n@@ -1481,6 +1482,63 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nreturn hi;\n}\n+ private static Hop fuseOrderOperationChain(Hop hi)\n+ throws HopsException\n+ {\n+ //order(order(X,2),1) -> order(X, (12)),\n+ if( HopRewriteUtils.isReorg(hi, ReOrgOp.SORT)\n+ && hi.getInput().get(1) instanceof LiteralOp //scalar by\n+ && hi.getInput().get(2) instanceof LiteralOp //scalar desc\n+ && HopRewriteUtils.isLiteralOfValue(hi.getInput().get(3), false) ) //not ixret\n+ {\n+ LiteralOp by = (LiteralOp) hi.getInput().get(1);\n+ boolean desc = 
HopRewriteUtils.getBooleanValue((LiteralOp)hi.getInput().get(2));\n+\n+ //find chain of order operations with same desc/ixret configuration and single consumers\n+ ArrayList<LiteralOp> byList = new ArrayList<LiteralOp>();\n+ byList.add(by);\n+ Hop input = hi.getInput().get(0);\n+ while( HopRewriteUtils.isReorg(input, ReOrgOp.SORT)\n+ && input.getInput().get(1) instanceof LiteralOp //scalar by\n+ && HopRewriteUtils.isLiteralOfValue(input.getInput().get(2), desc)\n+ && HopRewriteUtils.isLiteralOfValue(hi.getInput().get(3), false)\n+ && input.getParent().size() == 1 )\n+ {\n+ byList.add((LiteralOp)input.getInput().get(1));\n+ input = input.getInput().get(0);\n+ }\n+\n+ //merge order chain if at least two instances\n+ if( byList.size() >= 2 ) {\n+ //create new order operations\n+ ArrayList<Hop> inputs = new ArrayList<>();\n+ inputs.add(input);\n+ inputs.add(HopRewriteUtils.createDataGenOpByVal(byList, 1, byList.size()));\n+ inputs.add(new LiteralOp(desc));\n+ inputs.add(new LiteralOp(false));\n+ Hop hnew = HopRewriteUtils.createReorg(inputs, ReOrgOp.SORT);\n+\n+ //cleanup references recursively\n+ Hop current = hi;\n+ while(current != input ) {\n+ Hop tmp = current.getInput().get(0);\n+ HopRewriteUtils.removeAllChildReferences(current);\n+ current = tmp;\n+ }\n+\n+ //rewire all parents (avoid anomalies with replicated datagen)\n+ List<Hop> parents = new ArrayList<>(hi.getParent());\n+ for( Hop p : parents )\n+ HopRewriteUtils.replaceChildReference(p, hi, hnew);\n+\n+ hi = hnew;\n+ LOG.debug(\"Applied fuseOrderOperationChain (line \"+hi.getBeginLine()+\").\");\n+ }\n+ }\n+\n+ return hi;\n+ }\n+\n/**\n* Patterns: t(t(A)%*%t(B)+C) -> B%*%A+t(C)\n*\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/StringInitCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/StringInitCPInstruction.java", "diff": "@@ -30,7 +30,7 @@ import org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.operators.Operator;\npublic class StringInitCPInstruction extends UnaryCPInstruction {\n- private static final String DELIM = \" \";\n+ public static final String DELIM = \" \";\nprivate final long _rlen;\nprivate final long _clen;\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/reorg/MultipleOrderByColsTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/reorg/MultipleOrderByColsTest.java", "diff": "@@ -21,6 +21,7 @@ package org.apache.sysml.test.integration.functions.reorg;\nimport java.util.HashMap;\n+import org.junit.Assert;\nimport org.junit.Test;\nimport org.apache.sysml.api.DMLScript;\n@@ -30,10 +31,12 @@ import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\nimport org.apache.sysml.test.integration.AutomatedTestBase;\nimport org.apache.sysml.test.integration.TestConfiguration;\nimport org.apache.sysml.test.utils.TestUtils;\n+import org.apache.sysml.utils.Statistics;\npublic class MultipleOrderByColsTest extends AutomatedTestBase\n{\nprivate final static String TEST_NAME1 = \"OrderMultiBy\";\n+ private final static String TEST_NAME2 = \"OrderMultiBy2\";\nprivate final static String TEST_DIR = \"functions/reorg/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + MultipleOrderByColsTest.class.getSimpleName() + \"/\";\n@@ -48,6 +51,7 @@ public class MultipleOrderByColsTest extends AutomatedTestBase\npublic void setUp() {\nTestUtils.clearAssertionInformation();\naddTestConfiguration(TEST_NAME1, 
new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1,new String[]{\"B\"}));\n+ addTestConfiguration(TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2,new String[]{\"B\"}));\n}\n@Test\n@@ -90,6 +94,26 @@ public class MultipleOrderByColsTest extends AutomatedTestBase\nrunOrderTest(TEST_NAME1, true, true, true, ExecType.CP);\n}\n+ @Test\n+ public void testOrder2DenseAscDataCP() {\n+ runOrderTest(TEST_NAME2, false, false, false, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testOrder2DenseDescDataCP() {\n+ runOrderTest(TEST_NAME2, false, true, false, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testOrder2SparseAscDataCP() {\n+ runOrderTest(TEST_NAME2, true, false, false, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testOrder2SparseDescDataCP() {\n+ runOrderTest(TEST_NAME2, true, true, false, ExecType.CP);\n+ }\n+\n//TODO enable together with additional spark sort runtime\n// @Test\n// public void testOrderDenseAscDataSP() {\n@@ -152,7 +176,7 @@ public class MultipleOrderByColsTest extends AutomatedTestBase\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + TEST_NAME + \".dml\";\n- programArgs = new String[]{\"-explain\",\"-args\", input(\"A\"),\n+ programArgs = new String[]{\"-stats\",\"-args\", input(\"A\"),\nString.valueOf(desc).toUpperCase(), String.valueOf(ixret).toUpperCase(), output(\"B\") };\nfullRScriptName = HOME + TEST_NAME + \".R\";\n@@ -170,6 +194,10 @@ public class MultipleOrderByColsTest extends AutomatedTestBase\nHashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(\"B\");\nHashMap<CellIndex, Double> rfile = readRMatrixFromFS(\"B\");\nTestUtils.compareMatrices(dmlfile, rfile, eps, \"Stat-DML\", \"Stat-R\");\n+\n+ //check for applied rewrite\n+ if( testname.equals(TEST_NAME2) && !ixret )\n+ Assert.assertTrue(Statistics.getCPHeavyHitterCount(\"rsort\")==1);\n}\nfinally {\nrtplatform = platformOld;\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/reorg/OrderMultiBy.dml", "new_path": "src/test/scripts/functions/reorg/OrderMultiBy.dml", "diff": "A = read($1);\nix = matrix(\"3 7 14\", rows=1, cols=3)\n-\n-#B = order(target=A, by=14, decreasing=$2, index.return=$3);\n-#B = order(target=B, by=7, decreasing=$2, index.return=$3);\n-#B = order(target=B, by=3, decreasing=$2, index.return=$3);\n-\nB = order(target=A, by=ix, decreasing=$2, index.return=$3);\nwrite(B, $4, format=\"text\");\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/reorg/OrderMultiBy2.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+args <- commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+A = readMM(paste(args[1], \"A.mtx\", sep=\"\"))\n+desc = as.logical(args[2]);\n+ixret = as.logical(args[3]);\n+\n+col1 = A[,3];\n+col2 = A[,7];\n+col3 = A[,14];\n+\n+\n+if( ixret ) {\n+ B = order(col1, col2, col3, decreasing=desc);\n+} else {\n+ B = A[order(col1, col2, col3, decreasing=desc),];\n+}\n+\n+writeMM(as(B,\"CsparseMatrix\"), paste(args[4], \"B\", sep=\"\"))\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/reorg/OrderMultiBy2.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+A = read($1);\n+\n+B = order(target=A, by=14, decreasing=$2, index.return=$3);\n+B = order(target=B, by=7, decreasing=$2, index.return=$3);\n+B = order(target=B, by=3, decreasing=$2, index.return=$3);\n+\n+write(B, $4, format=\"text\");\n" } ]
language: Java
license: Apache License 2.0
repo: apache/systemds
original_message: [SYSTEMML-1990] New rewrite for order operation chains This patch introduces a new rewrite for merging subsequent order operations (data, scalar order-by column, and with consistent descending configuration and single consumers) into a single order operation with multiple order-by columns.
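
As a reading aid for this record's rewrite, order(order(X,2),1) -> order(X,(1,2)): chaining stable single-column sorts is equivalent to one multi-key sort, which is the property the fusion relies on. A small NumPy sketch (illustration only, not SystemML code):

```python
import numpy as np

X = np.array([[2, 1], [1, 2], [2, 0], [1, 0]], dtype=float)

# Chain: inner sort by the second column, then stable sort by the first.
chained = X[np.argsort(X[:, 1], kind="stable")]
chained = chained[np.argsort(chained[:, 0], kind="stable")]

# Fused: one multi-key sort; lexsort treats its last key as primary.
fused = X[np.lexsort((X[:, 1], X[:, 0]))]

assert np.array_equal(chained, fused)
```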

author: 49,738
date: 08.11.2017 19:41:32
timezone: 28,800
hash: a66126d49d6f64136d86074cfb6ec666d7c6375a
message: [MINOR] Fix robustness empty filename handling for JMLC and MLContext
mods:
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/util/MapReduceTool.java", "new_path": "src/main/java/org/apache/sysml/runtime/util/MapReduceTool.java", "diff": "@@ -105,7 +105,7 @@ public class MapReduceTool\npublic static boolean existsFileOnHDFS(String fname) {\n//robustness for empty strings (e.g., JMLC, MLContext)\n- if( fname == null || fname.isEmpty() )\n+ if( fname == null || fname.isEmpty() || fname.trim().isEmpty() )\nreturn false;\ntry {\n@@ -121,7 +121,7 @@ public class MapReduceTool\npublic static boolean isDirectory(String fname) {\n//robustness for empty strings (e.g., JMLC, MLContext)\n- if( fname == null || fname.isEmpty() )\n+ if( fname == null || fname.isEmpty() || fname.trim().isEmpty() )\nreturn false;\ntry {\n@@ -176,7 +176,7 @@ public class MapReduceTool\npublic static boolean isHDFSFileEmpty(String dir) throws IOException {\n//robustness for empty strings (e.g., JMLC, MLContext)\n- if( dir == null || dir.isEmpty() )\n+ if( dir == null || dir.isEmpty() || dir.trim().isEmpty())\nreturn false;\nPath path = new Path(dir);\nFileSystem fs = IOUtilFunctions.getFileSystem(path);\n" } ]
language: Java
license: Apache License 2.0
repo: apache/systemds
original_message: [MINOR] Fix robustness empty filename handling for JMLC and MLContext

author: 49,738
date: 09.11.2017 11:39:52
timezone: 28,800
hash: bd139a575de8a57af40b5f0e8ae10403cd3c060e
message: [HOTFIX][SYSTEMML-1990] Fix cbind/rbind folding test (exec type)
mods:
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteFoldRCBindTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteFoldRCBindTest.java", "diff": "@@ -21,7 +21,10 @@ package org.apache.sysml.test.integration.functions.misc;\nimport org.junit.Assert;\nimport org.junit.Test;\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\nimport org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.lops.LopProperties.ExecType;\nimport org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\nimport org.apache.sysml.test.integration.AutomatedTestBase;\nimport org.apache.sysml.test.integration.TestConfiguration;\n@@ -48,26 +51,37 @@ public class RewriteFoldRCBindTest extends AutomatedTestBase\n@Test\npublic void testRewriteFoldCBindNoRewrite() {\n- testRewriteFoldRCBind( TEST_NAME1, false );\n+ testRewriteFoldRCBind( TEST_NAME1, false, ExecType.CP );\n}\n@Test\npublic void testRewriteFoldCBindRewrite() {\n- testRewriteFoldRCBind( TEST_NAME1, true );\n+ testRewriteFoldRCBind( TEST_NAME1, true, ExecType.CP );\n}\n@Test\npublic void testRewriteFoldRBindNoRewrite() {\n- testRewriteFoldRCBind( TEST_NAME2, false );\n+ testRewriteFoldRCBind( TEST_NAME2, false, ExecType.CP );\n}\n@Test\npublic void testRewriteFoldRBindRewrite() {\n- testRewriteFoldRCBind( TEST_NAME2, true );\n+ testRewriteFoldRCBind( TEST_NAME2, true, ExecType.CP );\n}\n- private void testRewriteFoldRCBind( String testname, boolean rewrites )\n+ private void testRewriteFoldRCBind( String testname, boolean rewrites, ExecType et )\n{\n+ RUNTIME_PLATFORM platformOld = rtplatform;\n+ switch( et ){\n+ case MR: rtplatform = RUNTIME_PLATFORM.HADOOP; break;\n+ case SPARK: rtplatform = RUNTIME_PLATFORM.SPARK; break;\n+ default: rtplatform = RUNTIME_PLATFORM.HYBRID_SPARK; break;\n+ }\n+\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK || rtplatform == RUNTIME_PLATFORM.HYBRID_SPARK )\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n+\nboolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\ntry {\n@@ -96,6 +110,8 @@ public class RewriteFoldRCBindTest extends AutomatedTestBase\n}\nfinally {\nOptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = oldFlag;\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ rtplatform = platformOld;\n}\n}\n}\n" } ]
language: Java
license: Apache License 2.0
repo: apache/systemds
original_message: [HOTFIX][SYSTEMML-1990] Fix cbind/rbind folding test (exec type)

author: 49,737
date: 09.11.2017 20:42:49
timezone: -3,600
hash: d69686273da8bf4dc09441ec34ef3863eb437629
message: Add PCA to Performance Test Suite Closes
mods:
[ { "change_type": "MODIFY", "old_path": "scripts/perftest/python/datagen.py", "new_path": "scripts/perftest/python/datagen.py", "diff": "@@ -215,6 +215,23 @@ def stats2_datagen(matrix_dim, matrix_type, datagen_dir, config_dir):\nreturn save_path\n+def dimreduction_datagen(matrix_dim, matrix_type, datagen_dir, config_dir):\n+\n+ path_name = '.'.join(['dimreduction', matrix_type, str(matrix_dim)])\n+ datagen_write = join(datagen_dir, path_name)\n+ save_path = join(config_dir, path_name)\n+ row, col = split_rowcol(matrix_dim)\n+\n+ R = row\n+ C = col\n+ OUT = join(datagen_write, 'X.data')\n+\n+ config = dict(R=R, C=C, OUT=OUT, FMT=DATA_FORMAT)\n+\n+ config_writer(save_path + '.json', config)\n+ return save_path\n+\n+\ndef config_packets_datagen(algo_payload, matrix_type, matrix_shape, datagen_dir, dense_algos, config_dir):\n\"\"\"\nThis function has two responsibilities. Generate the configuration files for\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/run_perftest.py", "new_path": "scripts/perftest/python/run_perftest.py", "diff": "@@ -47,7 +47,8 @@ ML_ALGO = {'binomial': ['MultiLogReg', 'l2-svm', 'm-svm'],\n'regression1': ['LinearRegDS', 'LinearRegCG'],\n'regression2': ['GLM_poisson', 'GLM_gamma', 'GLM_binomial'],\n'stats1': ['Univar-Stats', 'bivar-stats'],\n- 'stats2': ['stratstats']}\n+ 'stats2': ['stratstats'],\n+ 'dimreduction': ['PCA']}\nML_GENDATA = {'binomial': 'genRandData4LogisticRegression',\n'clustering': 'genRandData4Kmeans',\n@@ -55,7 +56,8 @@ ML_GENDATA = {'binomial': 'genRandData4LogisticRegression',\n'regression1': 'genRandData4LogisticRegression',\n'regression2': 'genRandData4LogisticRegression',\n'stats1': 'genRandData4DescriptiveStats',\n- 'stats2': 'genRandData4StratStats'}\n+ 'stats2': 'genRandData4StratStats',\n+ 'dimreduction': 'genRandData4PCA'}\nML_TRAIN = {'GLM_poisson': 'GLM',\n'GLM_gamma': 'GLM',\n@@ -69,7 +71,8 @@ ML_TRAIN = {'GLM_poisson': 'GLM',\n'm-svm': 'm-svm',\n'l2-svm': 'l2-svm',\n'MultiLogReg': 'MultiLogReg',\n- 'naive-bayes': 'naive-bayes'}\n+ 'naive-bayes': 'naive-bayes',\n+ 'PCA': 'PCA'}\nML_PREDICT = {'Kmeans': 'Kmeans-predict',\n'LinearRegCG': 'GLM-predict',\n@@ -82,7 +85,7 @@ ML_PREDICT = {'Kmeans': 'Kmeans-predict',\n'GLM_gamma': 'GLM-predict',\n'GLM_binomial': 'GLM-predict'}\n-DENSE_TYPE_ALGOS = ['clustering', 'stats1', 'stats2']\n+DENSE_TYPE_ALGOS = ['clustering', 'stats1', 'stats2', 'dimreduction']\n# Responsible for execution and metric logging\n" }, { "change_type": "MODIFY", "old_path": "scripts/perftest/python/train.py", "new_path": "scripts/perftest/python/train.py", "diff": "@@ -338,6 +338,21 @@ def regression2_glm_poisson_train(save_folder_name, datagen_dir, train_dir, conf\nreturn data_folders\n+def dimreduction_pca_train(save_folder_name, datagen_dir, train_dir, config_dir):\n+ save_path = join(config_dir, save_folder_name)\n+ train_write = join(train_dir, save_folder_name)\n+\n+ INPUT = join(datagen_dir, 'X.data')\n+ SCALE = '1'\n+ PROJDATA = '1'\n+ OUTPUT = join(train_write, 'Output.data')\n+\n+ config = dict(INPUT=INPUT, SCALE=SCALE, PROJDATA=PROJDATA, OUTPUT=OUTPUT, OFMT=DATA_FORMAT)\n+ config_writer(save_path + '.json', config)\n+\n+ return [save_path]\n+\n+\ndef config_packets_train(algo_payload, matrix_type, matrix_shape, datagen_dir, train_dir, dense_algos, config_dir):\n\"\"\"\nThis function has two responsibilities. Generate the configuration files for\n" } ]
language: Java
license: Apache License 2.0
repo: apache/systemds
original_message: [SYSTEMML-1978] Add PCA to Performance Test Suite Closes #694

author: 49,738
date: 09.11.2017 16:31:58
timezone: 28,800
hash: c96143248349b6c68253ef9b3777afd5e5ed62f2
message: Generalized ctable rewrites (seq-table, const inputs) This patch generalized the existing rewrite for table(seq(),X,...) to rexpand(X,...) to handle cases with unknown dimensions, including common scenarios with column indexing on X. Additionally, this patch also introduces a new rewrite for table with constant matrix inputs (i.e., table(X, matrix(7)) -> table(X,7)).
mods:
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java", "diff": "@@ -965,6 +965,15 @@ public class HopRewriteUtils\n|| isLiteralOfValue(hop.getInput().get(1), val));\n}\n+ public static boolean isTernary(Hop hop, OpOp3 type) {\n+ return hop instanceof TernaryOp && ((TernaryOp)hop).getOp()==type;\n+ }\n+\n+ public static boolean isTernary(Hop hop, OpOp3... types) {\n+ return ( hop instanceof TernaryOp\n+ && ArrayUtils.contains(types, ((TernaryOp) hop).getOp()));\n+ }\n+\npublic static boolean containsInput(Hop current, Hop probe) {\nreturn rContainsInput(current, probe, new HashSet<Long>());\n}\n@@ -1052,6 +1061,15 @@ public class HopRewriteUtils\nreturn true;\n}\n+ public static boolean isColumnRightIndexing(Hop hop) {\n+ return hop instanceof IndexingOp\n+ && ((IndexingOp) hop).isColLowerEqualsUpper()\n+ && ((hop.dimsKnown() && hop.getDim1() == hop.getInput().get(0).getDim1())\n+ || (isLiteralOfValue(hop.getInput().get(1), 1)\n+ && isUnary(hop.getInput().get(2), OpOp1.NROW)\n+ && hop.getInput().get(2).getInput().get(0)==hop.getInput().get(0)));\n+ }\n+\npublic static boolean isFullColumnIndexing(LeftIndexingOp hop) {\nreturn hop.isColLowerEqualsUpper()\n&& isLiteralOfValue(hop.getInput().get(2), 1)\n@@ -1112,9 +1130,7 @@ public class HopRewriteUtils\nHop to = dgop.getInput().get(dgop.getParamIndex(Statement.SEQ_TO));\nHop incr = dgop.getInput().get(dgop.getParamIndex(Statement.SEQ_INCR));\nreturn isLiteralOfValue(from, 1) && isLiteralOfValue(incr, 1)\n- && (isLiteralOfValue(to, row?input.getDim1():input.getDim2())\n- || (to instanceof UnaryOp && ((UnaryOp)to).getOp()==(row?\n- OpOp1.NROW:OpOp1.NCOL) && to.getInput().get(0)==input));\n+ && isSizeExpressionOf(to, input, row);\n}\nreturn false;\n}\n@@ -1149,6 +1165,11 @@ public class HopRewriteUtils\nthrow new HopsException(\"Failed to retrieve 'to' argument from basic 1-N sequence.\");\n}\n+ public static boolean isSizeExpressionOf(Hop size, Hop input, boolean row) {\n+ return (input.dimsKnown() && isLiteralOfValue(size, row?input.getDim1():input.getDim2()))\n+ || ((row ? 
isUnary(size, OpOp1.NROW) : isUnary(size, OpOp1.NCOL)) && (size.getInput().get(0)==input\n+ || (isColumnRightIndexing(input) && size.getInput().get(0)==input.getInput().get(0))));\n+ }\npublic static boolean hasOnlyWriteParents( Hop hop, boolean inclTransient, boolean inclPersistent )\n{\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationDynamic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationDynamic.java", "diff": "@@ -2540,15 +2540,14 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\n//pattern: table(seq(1,nrow(v)), v, nrow(v), m) -> rexpand(v, max=m, dir=row, ignore=false, cast=true)\n//note: this rewrite supports both left/right sequence\nif( hi instanceof TernaryOp && hi.getInput().size()==5 //table without weights\n- && HopRewriteUtils.isLiteralOfValue(hi.getInput().get(2), 1) //i.e., weight of 1\n- && hi.getInput().get(3) instanceof LiteralOp && hi.getInput().get(4) instanceof LiteralOp)\n+ && HopRewriteUtils.isLiteralOfValue(hi.getInput().get(2), 1) ) //i.e., weight of 1\n{\nHop first = hi.getInput().get(0);\nHop second = hi.getInput().get(1);\n//pattern a: table(seq(1,nrow(v)), v, nrow(v), m, 1)\n- if( HopRewriteUtils.isBasic1NSequence(first, second, true) && second.dimsKnown()\n- && HopRewriteUtils.isLiteralOfValue(hi.getInput().get(3), second.getDim1()) )\n+ if( HopRewriteUtils.isBasic1NSequence(first, second, true)\n+ && HopRewriteUtils.isSizeExpressionOf(hi.getInput().get(3), second, true) )\n{\n//setup input parameter hops\nHashMap<String,Hop> args = new HashMap<>();\n@@ -2568,8 +2567,8 @@ public class RewriteAlgebraicSimplificationDynamic extends HopRewriteRule\nLOG.debug(\"Applied simplifyTableSeqExpand1 (line \"+hi.getBeginLine()+\")\");\n}\n//pattern b: table(v, seq(1,nrow(v)), m, nrow(v))\n- else if( HopRewriteUtils.isBasic1NSequence(second, first, true) && first.dimsKnown()\n- && HopRewriteUtils.isLiteralOfValue(hi.getInput().get(4), first.getDim1()) )\n+ else if( HopRewriteUtils.isBasic1NSequence(second, first, true)\n+ && HopRewriteUtils.isSizeExpressionOf(hi.getInput().get(4), first, true) )\n{\n//setup input parameter hops\nHashMap<String,Hop> args = new HashMap<>();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "diff": "@@ -152,6 +152,7 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nhi = foldMultipleAppendOperations(hi); //e.g., cbind(X,cbind(Y,Z)) -> cbind(X,Y,Z)\nhi = simplifyBinaryToUnaryOperation(hop, hi, i); //e.g., X*X -> X^2 (pow2), X+X -> X*2, (X>0)-(X<0) -> sign(X)\nhi = canonicalizeMatrixMultScalarAdd(hi); //e.g., eps+U%*%t(V) -> U%*%t(V)+eps, U%*%t(V)-eps -> U%*%t(V)+(-eps)\n+ hi = simplifyCTableWithConstMatrixInputs(hi); //e.g., table(X, matrix(1,...)) -> table(X, 1)\nhi = simplifyReverseOperation(hop, hi, i); //e.g., table(seq(1,nrow(X),1),seq(nrow(X),1,-1)) %*% X -> rev(X)\nif(OptimizerUtils.ALLOW_OPERATOR_FUSION)\nhi = simplifyMultiBinaryToBinaryOperation(hi); //e.g., 1-X*Y -> X 1-* Y\n@@ -672,6 +673,25 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nreturn hi;\n}\n+ private static Hop simplifyCTableWithConstMatrixInputs( Hop hi )\n+ throws HopsException\n+ {\n+ //pattern: table(X, matrix(1,...), matrix(7, ...)) -> table(X, 1, 7)\n+ if( HopRewriteUtils.isTernary(hi, 
OpOp3.CTABLE) ) {\n+ //note: the first input always expected to be a matrix\n+ for( int i=1; i<hi.getInput().size(); i++ ) {\n+ Hop inCurr = hi.getInput().get(i);\n+ if( HopRewriteUtils.isDataGenOpWithConstantValue(inCurr) ) {\n+ Hop inNew = ((DataGenOp)inCurr).getInput(DataExpression.RAND_MIN);\n+ HopRewriteUtils.replaceChildReference(hi, inCurr, inNew, i);\n+ LOG.debug(\"Applied simplifyCTableWithConstMatrixInputs\"\n+ + i + \" (line \"+hi.getBeginLine()+\").\");\n+ }\n+ }\n+ }\n+ return hi;\n+ }\n+\n/**\n* NOTE: this would be by definition a dynamic rewrite; however, we apply it as a static\n* rewrite in order to apply it before splitting dags which would hide the table information\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteCTableToRExpandTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteCTableToRExpandTest.java", "diff": "@@ -22,6 +22,7 @@ package org.apache.sysml.test.integration.functions.misc;\nimport org.junit.Test;\nimport org.junit.Assert;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.test.integration.AutomatedTestBase;\nimport org.apache.sysml.test.integration.TestConfiguration;\n@@ -33,6 +34,8 @@ public class RewriteCTableToRExpandTest extends AutomatedTestBase\nprivate static final String TEST_NAME2 = \"RewriteCTableToRExpandRightPos\";\nprivate static final String TEST_NAME3 = \"RewriteCTableToRExpandLeftNeg\";\nprivate static final String TEST_NAME4 = \"RewriteCTableToRExpandRightNeg\";\n+ private static final String TEST_NAME5 = \"RewriteCTableToRExpandLeftUnknownPos\";\n+ private static final String TEST_NAME6 = \"RewriteCTableToRExpandRightUnknownPos\";\nprivate static final String TEST_DIR = \"functions/misc/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + RewriteCTableToRExpandTest.class.getSimpleName() + \"/\";\n@@ -52,6 +55,8 @@ public class RewriteCTableToRExpandTest extends AutomatedTestBase\naddTestConfiguration( TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME3, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME3, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME4, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME4, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME5, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME5, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME6, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME6, new String[] { \"R\" }) );\n}\n@Test\n@@ -94,6 +99,25 @@ public class RewriteCTableToRExpandTest extends AutomatedTestBase\ntestRewriteCTableRExpand( TEST_NAME4, CropType.PAD );\n}\n+ @Test\n+ public void testRewriteCTableRExpandLeftUnknownDenseCrop() {\n+ testRewriteCTableRExpand( TEST_NAME5, CropType.CROP );\n+ }\n+\n+ @Test\n+ public void testRewriteCTableRExpandLeftUnknownDensePad() {\n+ testRewriteCTableRExpand( TEST_NAME5, CropType.PAD );\n+ }\n+\n+ @Test\n+ public void testRewriteCTableRExpandRightUnknownDenseCrop() {\n+ testRewriteCTableRExpand( TEST_NAME6, CropType.CROP );\n+ }\n+\n+ @Test\n+ public void testRewriteCTableRExpandRightUnknownDensePad() {\n+ testRewriteCTableRExpand( TEST_NAME6, CropType.PAD );\n+ }\nprivate void testRewriteCTableRExpand( String testname, CropType type )\n{\n@@ -101,10 +125,18 @@ public class RewriteCTableToRExpandTest extends AutomatedTestBase\nloadTestConfiguration(config);\nint outDim 
= maxVal + ((type==CropType.CROP) ? -7 : 7);\n+ boolean unknownTests = ( testname.equals(TEST_NAME5) || testname.equals(TEST_NAME6) );\n+\n+ RUNTIME_PLATFORM platformOld = rtplatform;\n+ if( unknownTests )\n+ rtplatform = RUNTIME_PLATFORM.SINGLE_NODE;\n+\n+ try\n+ {\nString HOME = SCRIPT_DIR + TEST_DIR;\nfullDMLScriptName = HOME + testname + \".dml\";\n- programArgs = new String[]{ \"-stats\",\"-args\",\n+ programArgs = new String[]{ \"-explain\",\"-stats\",\"-args\",\ninput(\"A\"), String.valueOf(outDim), output(\"R\") };\nfullRScriptName = HOME + testname + \".R\";\n@@ -117,14 +149,21 @@ public class RewriteCTableToRExpandTest extends AutomatedTestBase\nrunTest(true, false, null, -1);\n//compare output meta data\n- boolean left = (testname.equals(TEST_NAME1) || testname.equals(TEST_NAME3));\n+ boolean left = (testname.equals(TEST_NAME1) || testname.equals(TEST_NAME3)\n+ || testname.equals(TEST_NAME5) || testname.equals(TEST_NAME6));\nboolean pos = (testname.equals(TEST_NAME1) || testname.equals(TEST_NAME2));\nint rrows = (left && pos) ? rows : outDim;\nint rcols = (!left && pos) ? rows : outDim;\n+ if( !unknownTests )\ncheckDMLMetaDataFile(\"R\", new MatrixCharacteristics(rrows, rcols, 1, 1));\n//check for applied rewrite\n- Assert.assertEquals(Boolean.valueOf(testname.equals(TEST_NAME1) || testname.equals(TEST_NAME2)),\n+ Assert.assertEquals(Boolean.valueOf(testname.equals(TEST_NAME1)\n+ || testname.equals(TEST_NAME2) || unknownTests),\nBoolean.valueOf(heavyHittersContainsSubString(\"rexpand\")));\n}\n+ finally {\n+ rtplatform = platformOld;\n+ }\n+ }\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/RewriteCTableToRExpandLeftUnknownPos.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = read($1);\n+\n+T = matrix(1, nrow(A), 2);\n+A2 = rand(rows=sum(T)/2, cols=100, min=1, max=10);\n+R = table(seq(1,nrow(A2)), A2[,1], nrow(A2), $2);\n+\n+write(R, $3);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/RewriteCTableToRExpandRightUnknownPos.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = read($1);\n+\n+T = matrix(1, nrow(A), 2);\n+A2 = rand(rows=sum(T)/2, cols=100, min=1, max=10);\n+R = table(A2[,1], seq(1,nrow(A2)), $2, nrow(A2));\n+\n+write(R, $3);\n" } ]
language: Java
license: Apache License 2.0
repo: apache/systemds
original_message: [SYSTEMML-1990] Generalized ctable rewrites (seq-table, const inputs) This patch generalized the existing rewrite for table(seq(),X,...) to rexpand(X,...) to handle cases with unknown dimensions, including common scenarios with column indexing on X. Additionally, this patch also introduces a new rewrite for table with constant matrix inputs (i.e., table(X, matrix(7)) -> table(X,7)).
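
As a reading aid for the seq-table pattern in this record: table(seq(1,nrow(v)), v) places a 1 at position (i, v[i]), i.e., it one-hot expands v, which is exactly what rexpand computes. A small NumPy sketch (illustration only, not SystemML code; categories assumed to be 1-based positive integers):

```python
import numpy as np

v = np.array([2, 1, 3, 2])       # category vector, 1-based
n, m = len(v), int(v.max())
R = np.zeros((n, m))
R[np.arange(n), v - 1] = 1       # row i gets a 1 in column v[i]
```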

author: 49,738
date: 10.11.2017 16:42:58
timezone: 28,800
hash: 223066eebdf86a89dc2feb72ff4bd32ca2ed5155
message: [MINOR] Reduced instruction footprint of ctable and outer operations
mods:
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/functionobjects/CTable.java", "new_path": "src/main/java/org/apache/sysml/runtime/functionobjects/CTable.java", "diff": "@@ -42,6 +42,15 @@ public class CTable extends ValueFunction\nreturn singleObj;\n}\n+ public void execute(double v1, double v2, double w, boolean ignoreZeros, CTableMap resultMap, MatrixBlock resultBlock)\n+ throws DMLRuntimeException\n+ {\n+ if( resultBlock != null )\n+ execute(v1, v2, w, ignoreZeros, resultBlock);\n+ else\n+ execute(v1, v2, w, ignoreZeros, resultMap);\n+ }\n+\npublic void execute(double v1, double v2, double w, boolean ignoreZeros, CTableMap resultMap)\nthrows DMLRuntimeException\n{\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixBincell.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixBincell.java", "diff": "@@ -784,44 +784,39 @@ public class LibMatrixBincell\nmbOut.allocateDenseBlock();\ndouble c[] = mbOut.getDenseBlock();\n+ //pre-materialize various types used in inner loop\n+ boolean scanType1 = (bOp.fn instanceof LessThan || bOp.fn instanceof Equals\n+ || bOp.fn instanceof NotEquals || bOp.fn instanceof GreaterThanEquals);\n+ boolean scanType2 = (bOp.fn instanceof LessThanEquals || bOp.fn instanceof Equals\n+ || bOp.fn instanceof NotEquals || bOp.fn instanceof GreaterThan);\n+ boolean lt = (bOp.fn instanceof LessThan), lte = (bOp.fn instanceof LessThanEquals);\n+ boolean gt = (bOp.fn instanceof GreaterThan), gte = (bOp.fn instanceof GreaterThanEquals);\n+ boolean eqNeq = (bOp.fn instanceof Equals || bOp.fn instanceof NotEquals);\n+\nlong lnnz = 0;\nfor( int r=0, off=0; r<rlen; r++, off+=clen ) {\ndouble value = mbLeft.quickGetValue(r, 0);\nint ixPos1 = Arrays.binarySearch(b, value);\nint ixPos2 = ixPos1;\n-\nif( ixPos1 >= 0 ) { //match, scan to next val\n- if(bOp.fn instanceof LessThan || bOp.fn instanceof GreaterThanEquals\n- || bOp.fn instanceof Equals || bOp.fn instanceof NotEquals)\n- while( ixPos1<b.length && value==b[ixPos1] ) ixPos1++;\n- if(bOp.fn instanceof GreaterThan || bOp.fn instanceof LessThanEquals\n- || bOp.fn instanceof Equals || bOp.fn instanceof NotEquals)\n- while( ixPos2 > 0 && value==b[ixPos2-1]) --ixPos2;\n- } else {\n- ixPos2 = ixPos1 = Math.abs(ixPos1) - 1;\n+ if(scanType1) while( ixPos1<b.length && value==b[ixPos1] ) ixPos1++;\n+ if(scanType2) while( ixPos2 > 0 && value==b[ixPos2-1]) --ixPos2;\n}\n+ else\n+ ixPos2 = ixPos1 = Math.abs(ixPos1) - 1;\n+ int start = lt ? ixPos1 : (lte||eqNeq) ? ixPos2 : 0;\n+ int end = gt ? ixPos2 : (gte||eqNeq) ? ixPos1 : clen;\n- int start = 0, end = clen;\n- if(bOp.fn instanceof LessThan || bOp.fn instanceof LessThanEquals)\n- start = (bOp.fn instanceof LessThan) ? ixPos1 : ixPos2;\n- else if(bOp.fn instanceof GreaterThan || bOp.fn instanceof GreaterThanEquals)\n- end = (bOp.fn instanceof GreaterThan) ? 
ixPos2 : ixPos1;\n- else if(bOp.fn instanceof Equals || bOp.fn instanceof NotEquals) {\n- start = ixPos2;\n- end = ixPos1;\n- }\n- if(start < end || bOp.fn instanceof NotEquals) {\nif (bOp.fn instanceof NotEquals) {\nArrays.fill(c, off, off+start, 1.0);\nArrays.fill(c, off+end, off+clen, 1.0);\nlnnz += (start+(clen-end));\n}\n- else {\n+ else if( start < end ) {\nArrays.fill(c, off+start, off+end, 1.0);\nlnnz += (end-start);\n}\n}\n- }\nmbOut.setNonZeros(lnnz);\nmbOut.examSparsity();\n}\n@@ -835,13 +830,9 @@ public class LibMatrixBincell\nif( atype == BinaryAccessType.MATRIX_COL_VECTOR ) //MATRIX - COL_VECTOR\n{\n- for(int r=0; r<rlen; r++)\n- {\n- //replicated value\n+ for(int r=0; r<rlen; r++) {\ndouble v2 = m2.quickGetValue(r, 0);\n-\n- for(int c=0; c<clen; c++)\n- {\n+ for(int c=0; c<clen; c++) {\ndouble v1 = m1.quickGetValue(r, c);\ndouble v = op.fn.execute( v1, v2 );\nret.appendValue(r, c, v);\n@@ -851,8 +842,7 @@ public class LibMatrixBincell\nelse if( atype == BinaryAccessType.MATRIX_ROW_VECTOR ) //MATRIX - ROW_VECTOR\n{\nfor(int r=0; r<rlen; r++)\n- for(int c=0; c<clen; c++)\n- {\n+ for(int c=0; c<clen; c++) {\ndouble v1 = m1.quickGetValue(r, c);\ndouble v2 = m2.quickGetValue(0, c);\ndouble v = op.fn.execute( v1, v2 );\n@@ -870,8 +860,7 @@ public class LibMatrixBincell\nelse {\nfor(int r=0; r<rlen; r++) {\ndouble v1 = m1.quickGetValue(r, 0);\n- for(int c=0; c<clen2; c++)\n- {\n+ for(int c=0; c<clen2; c++) {\ndouble v2 = m2.quickGetValue(0, c);\ndouble v = op.fn.execute( v1, v2 );\nret.appendValue(r, c, v);\n@@ -889,18 +878,18 @@ public class LibMatrixBincell\ndouble[] a = m1.denseBlock;\ndouble[] b = m2.denseBlock;\ndouble[] c = ret.denseBlock;\n+ int lnnz = 0;\nfor( int i=0; i<rlen; i++ ) {\nc[i] = op.fn.execute( a[i], b[i] );\n- if( c[i] != 0 )\n- ret.nonZeros++;\n+ lnnz += (c[i] != 0) ? 
1 : 0;\n}\n+ ret.nonZeros = lnnz;\n}\n//general case\nelse\n{\nfor(int r=0; r<rlen; r++)\n- for(int c=0; c<clen; c++)\n- {\n+ for(int c=0; c<clen; c++) {\ndouble v1 = m1.quickGetValue(r, c);\ndouble v2 = m2.quickGetValue(r, c);\ndouble v = op.fn.execute( v1, v2 );\n@@ -923,6 +912,8 @@ public class LibMatrixBincell\nthrow new DMLRuntimeException(\"Unsupported safe binary scalar operations over different input/output representation: \"+m1.sparse+\" \"+ret.sparse);\nboolean copyOnes = (op.fn instanceof NotEquals && op.getConstant()==0);\n+ boolean allocExact = (op.fn instanceof Multiply\n+ || op.fn instanceof Multiply2 || op.fn instanceof Power2);\nif( m1.sparse ) //SPARSE <- SPARSE\n{\n@@ -954,10 +945,8 @@ public class LibMatrixBincell\n}\nelse { //GENERAL CASE\n//create sparse row without repeated resizing for specific ops\n- if( op.fn instanceof Multiply || op.fn instanceof Multiply2\n- || op.fn instanceof Power2 ) {\n+ if( allocExact )\nc.allocate(r, alen);\n- }\nfor(int j=apos; j<apos+alen; j++) {\ndouble val = op.executeScalar(avals[j]);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -5210,26 +5210,17 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n//sparse-unsafe ctable execution\n//(because input values of 0 are invalid and have to result in errors)\n- if ( resultBlock == null ) {\nfor( int i=0; i<rlen; i++ )\n- for( int j=0; j<clen; j++ )\n- {\n- double v1 = this.quickGetValue(i, j);\n- double w = that2.quickGetValue(i, j);\n- ctable.execute(v1, v2, w, false, resultMap);\n- }\n- }\n- else {\n- for( int i=0; i<rlen; i++ )\n- for( int j=0; j<clen; j++ )\n- {\n+ for( int j=0; j<clen; j++ ) {\ndouble v1 = this.quickGetValue(i, j);\ndouble w = that2.quickGetValue(i, j);\n- ctable.execute(v1, v2, w, false, resultBlock);\n+ ctable.execute(v1, v2, w, false, resultMap, resultBlock);\n}\n+\n+ //maintain nnz (if necessary)\n+ if( resultBlock!=null )\nresultBlock.recomputeNonZeros();\n}\n- }\n/**\n* D = ctable(A,v2,w)\n@@ -5250,24 +5241,16 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n//sparse-unsafe ctable execution\n//(because input values of 0 are invalid and have to result in errors)\n- if ( resultBlock == null ) {\nfor( int i=0; i<rlen; i++ )\n- for( int j=0; j<clen; j++ )\n- {\n- double v1 = this.quickGetValue(i, j);\n- ctable.execute(v1, v2, w, false, resultMap);\n- }\n- }\n- else {\n- for( int i=0; i<rlen; i++ )\n- for( int j=0; j<clen; j++ )\n- {\n+ for( int j=0; j<clen; j++ ) {\ndouble v1 = this.quickGetValue(i, j);\n- ctable.execute(v1, v2, w, false, resultBlock);\n+ ctable.execute(v1, v2, w, false, resultMap, resultBlock);\n}\n+\n+ //maintain nnz (if necessary)\n+ if( resultBlock!=null )\nresultBlock.recomputeNonZeros();\n}\n- }\n/**\n* Specific ctable case of ctable(seq(...),X), where X is the only\n@@ -5286,30 +5269,19 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n//sparse-unsafe ctable execution\n//(because input values of 0 are invalid and have to result in errors)\n- if( resultBlock == null) {\nfor( int i=0; i<rlen; i++ )\n- for( int j=0; j<clen; j++ )\n- {\n- double v1 = this.quickGetValue(i, j);\n- if( left )\n- ctable.execute(offset+i+1, v1, w, false, resultMap);\n- else\n- ctable.execute(v1, offset+i+1, w, false, resultMap);\n- }\n- }\n- else {\n- for( int i=0; i<rlen; i++ )\n- for( int j=0; j<clen; 
j++ )\n- {\n+ for( int j=0; j<clen; j++ ) {\ndouble v1 = this.quickGetValue(i, j);\nif( left )\n- ctable.execute(offset+i+1, v1, w, false, resultBlock);\n+ ctable.execute(offset+i+1, v1, w, false, resultMap, resultBlock);\nelse\n- ctable.execute(v1, offset+i+1, w, false, resultBlock);\n+ ctable.execute(v1, offset+i+1, w, false, resultMap, resultBlock);\n}\n+\n+ //maintain nnz (if necessary)\n+ if( resultBlock!=null )\nresultBlock.recomputeNonZeros();\n}\n- }\n/**\n* D = ctable(A,B,w)\n@@ -5344,25 +5316,16 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nSparseBlock a = this.sparseBlock;\nSparseBlock b = that.sparseBlock;\n- for( int i=0; i<rlen; i++ )\n- {\n- if( !a.isEmpty(i) )\n- {\n+ for( int i=0; i<rlen; i++ ) {\n+ if( a.isEmpty(i) ) continue;\nint alen = a.size(i);\nint apos = a.pos(i);\ndouble[] avals = a.values(i);\nint bpos = b.pos(i);\ndouble[] bvals = b.values(i);\n-\n- if( resultBlock == null ) {\n- for( int j=0; j<alen; j++ )\n- ctable.execute(avals[apos+j], bvals[bpos+j], w, ignoreZeros, resultMap);\n- }\n- else {\nfor( int j=0; j<alen; j++ )\n- ctable.execute(avals[apos+j], bvals[bpos+j], w, ignoreZeros, resultBlock);\n- }\n- }\n+ ctable.execute(avals[apos+j], bvals[bpos+j],\n+ w, ignoreZeros, resultMap, resultBlock);\n}\n}\nelse //SPARSE-UNSAFE | GENERIC INPUTS\n@@ -5370,14 +5333,10 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n//sparse-unsafe ctable execution\n//(because input values of 0 are invalid and have to result in errors)\nfor( int i=0; i<rlen; i++ )\n- for( int j=0; j<clen; j++ )\n- {\n+ for( int j=0; j<clen; j++ ) {\ndouble v1 = this.quickGetValue(i, j);\ndouble v2 = that.quickGetValue(i, j);\n- if( resultBlock == null )\n- ctable.execute(v1, v2, w, ignoreZeros, resultMap);\n- else\n- ctable.execute(v1, v2, w, ignoreZeros, resultBlock);\n+ ctable.execute(v1, v2, w, ignoreZeros, resultMap, resultBlock);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/binary/matrix/UaggOuterChainTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/binary/matrix/UaggOuterChainTest.java", "diff": "@@ -1359,10 +1359,10 @@ public class UaggOuterChainTest extends AutomatedTestBase\n//check statistics for right operator in cp and spark\nif( instType == ExecType.CP ) {\n- Assert.assertTrue(\"Missing opcode sp_uaggouerchain\", Statistics.getCPHeavyHitterOpCodes().contains(UAggOuterChain.OPCODE));\n+ Assert.assertTrue(\"Missing opcode sp_uaggouterchain\", Statistics.getCPHeavyHitterOpCodes().contains(UAggOuterChain.OPCODE));\n}\nelse if( instType == ExecType.SPARK ) {\n- Assert.assertTrue(\"Missing opcode sp_uaggouerchain\",\n+ Assert.assertTrue(\"Missing opcode sp_uaggouterchain\",\nStatistics.getCPHeavyHitterOpCodes().contains(Instruction.SP_INST_PREFIX+UAggOuterChain.OPCODE));\n}\n}\n" } ]
language: Java
license: Apache License 2.0
repo: apache/systemds
original_message: [MINOR] Reduced instruction footprint of ctable and outer operations

author: 49,738
date: 11.11.2017 14:17:00
timezone: 28,800
hash: 4ae6beee527569886770fa8cfa7b9d3f185fde6f
message: [HOTFIX][SYSTEMML-2010] Fix merge of blocks w/ external functions
mods:
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteMergeBlockSequence.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteMergeBlockSequence.java", "diff": "@@ -29,6 +29,8 @@ import org.apache.sysml.hops.FunctionOp;\nimport org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.Hop.DataOpTypes;\nimport org.apache.sysml.hops.HopsException;\n+import org.apache.sysml.parser.ExternalFunctionStatement;\n+import org.apache.sysml.parser.FunctionStatementBlock;\nimport org.apache.sysml.parser.StatementBlock;\nimport org.apache.sysml.parser.VariableSet;\n@@ -66,6 +68,7 @@ public class RewriteMergeBlockSequence extends StatementBlockRewriteRule\nif( HopRewriteUtils.isLastLevelStatementBlock(sb1)\n&& HopRewriteUtils.isLastLevelStatementBlock(sb2)\n&& !sb1.isSplitDag() && !sb2.isSplitDag()\n+ && !(hasExternalFunctionOpRoot(sb1) && hasExternalFunctionOpRoot(sb2))\n&& (!hasFunctionOpRoot(sb1) || !hasFunctionIOConflict(sb1,sb2))\n&& (!hasFunctionOpRoot(sb2) || !hasFunctionIOConflict(sb2,sb1)) )\n{\n@@ -167,6 +170,22 @@ public class RewriteMergeBlockSequence extends StatementBlockRewriteRule\nreturn ret;\n}\n+ private static boolean hasExternalFunctionOpRoot(StatementBlock sb)\n+ throws HopsException {\n+ if( sb == null || sb.get_hops() == null )\n+ return false;\n+ for( Hop root : sb.get_hops() )\n+ if( root instanceof FunctionOp ) {\n+ FunctionStatementBlock fsb = sb.getDMLProg()\n+ .getFunctionStatementBlock(((FunctionOp)root).getFunctionKey());\n+ //note: in case of builtin multi-return functions such as qr (namespace _internal),\n+ //there is no function statement block and hence we need to check for null\n+ if( fsb != null && fsb.getStatement(0) instanceof ExternalFunctionStatement )\n+ return true;\n+ }\n+ return false;\n+ }\n+\nprivate static boolean hasFunctionIOConflict(StatementBlock sb1, StatementBlock sb2)\nthrows HopsException\n{\n" } ]
language: Java
license: Apache License 2.0
repo: apache/systemds
original_message: [HOTFIX][SYSTEMML-2010] Fix merge of blocks w/ external functions

author: 49,738
date: 12.11.2017 14:59:19
timezone: 28,800
hash: f264bebcd187c61d64e21fb9ccc26cb3e86968ee
message: [MINOR] Fix flaky JMLC MSVM scoring test (avoid unnecessary R runs) This is an attempt to fix the flaky JMLC MSVM scoring test that keeps failing occasionally on our jenkins infrastructure, which is however not reproducible in a local dev environment.
mods:
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/MulticlassSVMScoreTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/MulticlassSVMScoreTest.java", "diff": "@@ -110,8 +110,8 @@ public class MulticlassSVMScoreTest extends AutomatedTestBase\nwriteInputMatrix(\"W\", W, true);\n//for each input data set\n- for( int i=0; i<nRuns; i++ )\n- {\n+ int lnRuns = CHECK_IN_OUT ? 1 : nRuns;\n+ for( int i=0; i<lnRuns; i++ ) {\n//write input data\nwriteInputMatrix(\"X\", Xset.get(i), true);\n" } ]
language: Java
license: Apache License 2.0
repo: apache/systemds
original_message: [MINOR] Fix flaky JMLC MSVM scoring test (avoid unnecessary R runs) This is an attempt to fix the flaky JMLC MSVM scoring test that keeps failing occasionally on our jenkins infrastructure, which is however not reproducible in a local dev environment.

author: 49,738
date: 13.11.2017 19:01:29
timezone: 28,800
hash: 4fea3c6d42ccecadb826be597a4066b33ec77bd6
message: New optional sideeffect parameter for external UDFs This patch introduces a new optional sideeffect parameter for external UDFs that write or read data. Making sideeffects explicit allows more aggressive rewrites wrt merging statement blocks that create more opportunities for common subexpression elimination and rewrites.
mods:
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteMergeBlockSequence.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteMergeBlockSequence.java", "diff": "@@ -68,7 +68,8 @@ public class RewriteMergeBlockSequence extends StatementBlockRewriteRule\nif( HopRewriteUtils.isLastLevelStatementBlock(sb1)\n&& HopRewriteUtils.isLastLevelStatementBlock(sb2)\n&& !sb1.isSplitDag() && !sb2.isSplitDag()\n- && !(hasExternalFunctionOpRoot(sb1) && hasExternalFunctionOpRoot(sb2))\n+ && !(hasExternalFunctionOpRootWithSideEffect(sb1)\n+ && hasExternalFunctionOpRootWithSideEffect(sb2))\n&& (!hasFunctionOpRoot(sb1) || !hasFunctionIOConflict(sb1,sb2))\n&& (!hasFunctionOpRoot(sb2) || !hasFunctionIOConflict(sb2,sb1)) )\n{\n@@ -170,7 +171,7 @@ public class RewriteMergeBlockSequence extends StatementBlockRewriteRule\nreturn ret;\n}\n- private static boolean hasExternalFunctionOpRoot(StatementBlock sb)\n+ private static boolean hasExternalFunctionOpRootWithSideEffect(StatementBlock sb)\nthrows HopsException {\nif( sb == null || sb.getHops() == null )\nreturn false;\n@@ -180,7 +181,8 @@ public class RewriteMergeBlockSequence extends StatementBlockRewriteRule\n.getFunctionStatementBlock(((FunctionOp)root).getFunctionKey());\n//note: in case of builtin multi-return functions such as qr (namespace _internal),\n//there is no function statement block and hence we need to check for null\n- if( fsb != null && fsb.getStatement(0) instanceof ExternalFunctionStatement )\n+ if( fsb != null && fsb.getStatement(0) instanceof ExternalFunctionStatement\n+ && ((ExternalFunctionStatement)fsb.getStatement(0)).hasSideEffects() )\nreturn true;\n}\nreturn false;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/ExternalFunctionStatement.java", "new_path": "src/main/java/org/apache/sysml/parser/ExternalFunctionStatement.java", "diff": "@@ -23,22 +23,20 @@ import java.util.HashMap;\npublic class ExternalFunctionStatement extends FunctionStatement\n{\n-\n//valid attribute names\npublic static final String CLASS_NAME = \"classname\";\npublic static final String EXEC_TYPE = \"exectype\";\n- //public static final String EXEC_LOCATION = \"execlocation\"; //MB: obsolete\npublic static final String CONFIG_FILE = \"configfile\";\n+ public static final String SIDE_EFFECTS = \"sideeffect\";\n+\n- //valid attribute values for execlocation and\n+ //valid attribute values for execlocation\npublic static final String FILE_BASED = \"file\";\npublic static final String IN_MEMORY = \"mem\";\n- //public static final String MASTER = \"master\"; //MB: obsolete\n- //public static final String WORKER = \"worker\"; //MB: obsolete\n//default values for optional attributes\npublic static final String DEFAULT_EXEC_TYPE = FILE_BASED;\n- //public static final String DEFAULT_EXEC_LOCATION = MASTER; //MB: obsolete\n+ public static final String DEFAULT_SIDE_EFFECTS = \"false\";\n//all parameters\nprivate HashMap<String,String> _otherParams;\n@@ -56,6 +54,11 @@ public class ExternalFunctionStatement extends FunctionStatement\nreturn _otherParams;\n}\n+ public boolean hasSideEffects() {\n+ return _otherParams.containsKey(SIDE_EFFECTS)\n+ && Boolean.parseBoolean(_otherParams.get(SIDE_EFFECTS));\n+ }\n+\n/**\n* Validates all attributes and attribute values.\n*\n@@ -69,7 +72,7 @@ public class ExternalFunctionStatement extends FunctionStatement\n//warnings for all not defined attributes\nfor( String varName : _otherParams.keySet() )\nif( !(varName.equals(CLASS_NAME) || 
varName.equals(EXEC_TYPE)\n- || varName.equals(CONFIG_FILE) ) )\n+ || varName.equals(CONFIG_FILE) || varName.equals(SIDE_EFFECTS) ) )\n{\nLOG.warn( printWarningLocation() + \"External function specifies undefined attribute type '\"+varName+\"'.\");\n}\n@@ -83,19 +86,25 @@ public class ExternalFunctionStatement extends FunctionStatement\n}\n//exec type (optional, default: file)\n- if( _otherParams.containsKey( EXEC_TYPE ) )\n- {\n+ if( _otherParams.containsKey( EXEC_TYPE ) ) {\n//check specified values\nString execType = _otherParams.get(EXEC_TYPE);\n- if( !(execType.equals(FILE_BASED) || execType.equals(IN_MEMORY)) ) { //always unconditional (invalid parameter)\n- sb.raiseValidateError(\"External function specifies invalid value for (optional) attribute '\"+EXEC_TYPE+\"' (valid values: \"+FILE_BASED+\",\"+IN_MEMORY+\").\", false);\n+ if( !(execType.equals(FILE_BASED) || execType.equals(IN_MEMORY)) ) //always unconditional (invalid parameter)\n+ sb.raiseValidateError(\"External function specifies invalid value for (optional) attribute '\"\n+ + EXEC_TYPE+\"' (valid values: \"+FILE_BASED+\",\"+IN_MEMORY+\").\", false);\n}\n- }\n- else\n- {\n+ else {\n//put default values\n_otherParams.put(EXEC_TYPE, DEFAULT_EXEC_TYPE);\n}\n+\n+ //side effects\n+ if( _otherParams.containsKey( SIDE_EFFECTS ) ) {\n+ String sideeffect = _otherParams.get(SIDE_EFFECTS);\n+ if( !(sideeffect.equals(\"true\") || sideeffect.equals(\"false\")) ) //always unconditional (invalid parameter)\n+ sb.raiseValidateError(\"External function specifies invalid value for (optional) attribute '\"\n+ + SIDE_EFFECTS+\"' (valid values: true, false).\", false);\n+ }\n}\n@Override\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/external/DynReadWrite.dml", "new_path": "src/test/scripts/functions/external/DynReadWrite.dml", "diff": "dynRead = externalFunction(String fname, Integer rows, Integer cols, String format)\nreturn (Matrix[Double] M)\n-implemented in (classname=\"org.apache.sysml.udf.lib.DynamicReadMatrixCP\",exectype=\"mem\")\n+implemented in (classname=\"org.apache.sysml.udf.lib.DynamicReadMatrixCP\",exectype=\"mem\", sideeffect=\"true\")\ndynWrite = externalFunction(Matrix[Double] input, String fname, String format)\nreturn(Boolean success)\n-implemented in (classname=\"org.apache.sysml.udf.lib.DynamicWriteMatrixCP\",exectype=\"mem\")\n+implemented in (classname=\"org.apache.sysml.udf.lib.DynamicWriteMatrixCP\",exectype=\"mem\", sideeffect=\"true\")\nX = read($1, rows=$2, cols=$3, format=\"text\");\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2014] New optional sideeffect parameter for external UDFs This patch introduces a new optional sideeffect parameter for external UDFs that read or write data. Making side effects explicit allows more aggressive rewrites with respect to merging statement blocks, which creates more opportunities for common subexpression elimination and other rewrites.
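Below is a minimal, self-contained Java sketch of the merge guard that the sideeffect attribute enables (class and method names are illustrative, not SystemML's actual API): two last-level blocks may be fused unless both root side-effecting external UDFs, and the attribute defaults to false when absent, matching hasSideEffects() in the diff above.

```java
import java.util.Map;

public class SideEffectGuard {
    // Mirrors ExternalFunctionStatement.hasSideEffects(): the attribute is
    // optional and defaults to false when absent.
    static boolean hasSideEffects(Map<String, String> attrs) {
        return attrs.containsKey("sideeffect")
            && Boolean.parseBoolean(attrs.get("sideeffect"));
    }

    // Merging is only blocked if BOTH blocks root side-effecting UDFs,
    // matching the relaxed condition in RewriteMergeBlockSequence.
    static boolean canMerge(Map<String, String> fn1, Map<String, String> fn2) {
        return !(hasSideEffects(fn1) && hasSideEffects(fn2));
    }

    public static void main(String[] args) {
        Map<String, String> pure = Map.of("classname", "org.example.Udf");
        Map<String, String> io = Map.of("classname", "org.example.Writer",
                                        "sideeffect", "true");
        System.out.println(canMerge(pure, io)); // true
        System.out.println(canMerge(io, io));   // false
    }
}
```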
49,738
13.11.2017 21:56:42
28,800
ac7990ef803327f442d4ef62a4d72ec1ae43e188
[MINOR] Cleanup applied rewrites in optimization level 1 This patch cleans up an inconsistency of not disabling the rewrites for (1) matrix multiplication chain optimization and (2) the removal of unnecessary casts under optimization level 1, which is supposed to disable all rewrites.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/ProgramRewriter.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/ProgramRewriter.java", "diff": "@@ -87,6 +87,7 @@ public class ProgramRewriter\n_dagRuleSet.add( new RewriteTransientWriteParentHandling() );\n_dagRuleSet.add( new RewriteRemoveReadAfterWrite() ); //dependency: before blocksize\n_dagRuleSet.add( new RewriteBlockSizeAndReblock() );\n+ if( OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION )\n_dagRuleSet.add( new RewriteRemoveUnnecessaryCasts() );\nif( OptimizerUtils.ALLOW_COMMON_SUBEXPRESSION_ELIMINATION )\n_dagRuleSet.add( new RewriteCommonSubexpressionElimination() );\n@@ -120,12 +121,11 @@ public class ProgramRewriter\n// DYNAMIC REWRITES (which do require size information)\nif( dynamicRewrites )\n{\n+ if ( OptimizerUtils.ALLOW_SUM_PRODUCT_REWRITES) {\n_dagRuleSet.add( new RewriteMatrixMultChainOptimization() ); //dependency: cse\n- if ( OptimizerUtils.ALLOW_SUM_PRODUCT_REWRITES)\n_dagRuleSet.add( new RewriteElementwiseMultChainOptimization() ); //dependency: cse\n-\n- if( OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION )\n- {\n+ }\n+ if( OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION ) {\n_dagRuleSet.add( new RewriteAlgebraicSimplificationDynamic() ); //dependencies: cse\n_dagRuleSet.add( new RewriteAlgebraicSimplificationStatic() ); //dependencies: cse\n}\n@@ -133,6 +133,7 @@ public class ProgramRewriter\n// cleanup after all rewrites applied\n// (newly introduced operators, introduced redundancy after rewrites w/ multiple parents)\n+ if( OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION )\n_dagRuleSet.add( new RewriteRemoveUnnecessaryCasts() );\nif( OptimizerUtils.ALLOW_COMMON_SUBEXPRESSION_ELIMINATION )\n_dagRuleSet.add( new RewriteCommonSubexpressionElimination(true) );\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Cleanup applied rewrites in optimization level 1 This patch cleans up an inconsistency of not disabling the rewrites for (1) matrix multiplication chain optimization and (2) the removal of unnecessary casts under optimization level 1, which is supposed to disable all rewrites.
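As a rough illustration of the gating pattern this cleanup enforces, the sketch below (flag and rule names are stand-ins, not SystemML's classes) registers rewrite rules only when the corresponding optimizer flag is set, so an optimization level that clears all flags applies no rewrites at all.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.UnaryOperator;

public class GatedRuleSet {
    // Stand-ins for OptimizerUtils flags; optimization level 1 clears both.
    static final boolean ALLOW_SUM_PRODUCT_REWRITES = false;
    static final boolean ALLOW_ALGEBRAIC_SIMPLIFICATION = false;

    public static void main(String[] args) {
        List<UnaryOperator<String>> rules = new ArrayList<>();
        if (ALLOW_SUM_PRODUCT_REWRITES)
            rules.add(p -> p + "+mmchain");       // matrix mult chain opt
        if (ALLOW_ALGEBRAIC_SIMPLIFICATION)
            rules.add(p -> p + "+remove-casts");  // unnecessary cast removal
        String plan = "plan";
        for (UnaryOperator<String> r : rules)
            plan = r.apply(plan);
        System.out.println(plan); // "plan": no rewrite applied
    }
}
```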
49,736
14.11.2017 15:59:17
28,800
4bc1fea872096e912045c1ea5d2d5e54b3206793
[MINOR] Fixed incorrect memory estimates in Caffe2DML summary for a network with separate label and feature data layers Also added a warning message when the user tries to run SystemML with less free memory than the local memory budget.
[ { "change_type": "MODIFY", "old_path": "src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala", "new_path": "src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala", "diff": "@@ -309,18 +309,26 @@ class Caffe2DML(val sc: SparkContext,\ndef getTestAlgo(): String = if (inputs.containsKey(\"$test_algo\")) inputs.get(\"$test_algo\") else \"minibatch\"\nprivate def getMemInBytes(l:CaffeLayer, batchSize:Int, isTraining:Boolean):Long = {\n+ val numLayerInput = if(!l.isInstanceOf[Data]) l.bottomLayerOutputShape._1.toLong * l.bottomLayerOutputShape._2.toLong * l.bottomLayerOutputShape._3.toLong * batchSize else 0\nval numLayerOutput = l.outputShape._1.toLong * l.outputShape._2.toLong * l.outputShape._3.toLong * batchSize\nval numLayerError = numLayerOutput\nval numLayerWeights = if(l.weightShape != null) l.weightShape()(0).toLong * l.weightShape()(1).toLong else 0\nval numLayerBias = if(l.biasShape != null)l.biasShape()(0).toLong * l.biasShape()(1).toLong else 0\nval numLayerGradients = (numLayerWeights + numLayerBias) * batchSize\n- if(isTraining) (numLayerOutput + numLayerError + numLayerWeights + numLayerBias + numLayerGradients)*Double.BYTES\n- else (numLayerOutput + numLayerWeights + numLayerBias)*Double.BYTES\n+ if(isTraining) (numLayerInput + numLayerOutput + numLayerError + numLayerWeights + numLayerBias + numLayerGradients)*Double.BYTES\n+ else (numLayerInput + numLayerOutput + numLayerWeights + numLayerBias)*Double.BYTES\n}\ndef summary(sparkSession: org.apache.spark.sql.SparkSession): Unit = {\nval layers = net.getLayers .map(l => (l, net.getCaffeLayer(l)))\nval numDataLayers = layers.filter(l => l._2.isInstanceOf[Data]).length\n- val batchSize = if(numDataLayers == 1) layers.filter(l => l._2.isInstanceOf[Data]).map(l => l._2.param.getDataParam.getBatchSize).get(0) else -1\n+ val batchSizes = layers.filter(l => l._2.isInstanceOf[Data]).map(l => l._2.param.getDataParam.getBatchSize).distinct\n+ if(batchSizes.size > 1) {\n+ Caffe2DML.LOG.warn(\"Multiple data layers with different batch sizes:\" + batchSizes.mkString(\",\") + \". 
Using the batch size:\" + batchSizes.get(0))\n+ }\n+ else if(batchSizes.size == 0) {\n+ Caffe2DML.LOG.warn(\"No data layers found and hence ignoring the memory computation.\")\n+ }\n+ val batchSize = if(batchSizes.size > 0) batchSizes.get(0) else -1\nval header = Seq(\"Name\", \"Type\", \"Output\", \"Weight\", \"Bias\", \"Top\", \"Bottom\", \"Memory* (train/test)\")\nval entries = layers\n.map(l => {\n@@ -347,19 +355,7 @@ class Caffe2DML(val sc: SparkContext,\nval crspq = convLayers.map(l => l.numChannels.toLong*l.kernel_h.toLong*l.kernel_w.toLong*l.outputShape._2.toLong*l.outputShape._3.toLong)\nval kpq = convLayers.map(l => l.outputShape._1.toLong*l.outputShape._2.toLong*l.outputShape._3.toLong)\n- if(getTrainAlgo().equals(\"minibatch\") && getTestAlgo().equals(\"minibatch\")) {\n- System.out.println(\"Total number of layer outputs/errors/weights/bias/gradients: \" + numLayerOutput + \"/\" + numLayerError +\n- \"/\" + numLayerWeights + \"/\" + numLayerBias + \"/\" + numLayerGradients)\n- System.out.println(\"Total memory requirements for parameters* for train/test: \" +\n- OptimizerUtils.toMB(layers.map(l => getMemInBytes(l._2, batchSize, true)).sum) + \"/\" +\n- OptimizerUtils.toMB(layers.map(l => getMemInBytes(l._2, batchSize, false)).sum))\n- System.out.println(\"[Advanced] Key network statistics to compute intermediate CP overhead \" +\n- \"batchSize/maxThreads/1-thread im2col*(sum, max)/1-thread reshape_col*(sum, max): \" +\n- batchSize + \"/\" + OptimizerUtils.getConstrainedNumThreads(-1) + \"/(\" +\n- OptimizerUtils.toMB(crspq.sum*Double.BYTES) + \", \" + OptimizerUtils.toMB(crspq.max*Double.BYTES) + \")/(\" +\n- OptimizerUtils.toMB(kpq.sum*Double.BYTES) + \", \" + OptimizerUtils.toMB(kpq.max*Double.BYTES) + \").\")\n- }\n- System.out.println(\"* => memory in megabytes assuming the parameters are in double precision and in dense format.\")\n+ System.out.println(\"* => memory in megabytes assuming the parameters (input, output activations, weights and backpropagation errors) are in double precision and in dense format.\")\n}\n// ================================================================================================\n" }, { "change_type": "MODIFY", "old_path": "src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala", "new_path": "src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala", "diff": "package org.apache.sysml.api.ml\n+import org.apache.commons.logging.LogFactory;\nimport org.apache.spark.api.java.JavaSparkContext\nimport org.apache.spark.rdd.RDD\n+\nimport java.io.File\n+\nimport org.apache.spark.SparkContext\nimport org.apache.spark.ml.{ Estimator, Model }\nimport org.apache.spark.sql.types.StructType\n@@ -30,12 +33,17 @@ import org.apache.sysml.runtime.matrix.MatrixCharacteristics\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock\nimport org.apache.sysml.runtime.DMLRuntimeException\nimport org.apache.sysml.runtime.instructions.spark.utils.{ RDDConverterUtils, RDDConverterUtilsExt }\n+import org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.api.mlcontext._\nimport org.apache.sysml.api.mlcontext.ScriptFactory._\nimport org.apache.spark.sql._\nimport org.apache.sysml.api.mlcontext.MLContext.ExplainLevel\n+import org.apache.sysml.hops.OptimizerUtils;\n+\nimport java.util.HashMap\n+\nimport scala.collection.JavaConversions._\n+\nimport java.util.Random\n/****************************************************\n@@ -118,10 +126,18 @@ trait BaseSystemMLEstimatorOrModel {\ndef setStatisticsMaxHeavyHitters(statisticsMaxHeavyHitters1: 
Int): BaseSystemMLEstimatorOrModel = { statisticsMaxHeavyHitters = statisticsMaxHeavyHitters1; this }\ndef setConfigProperty(key: String, value: String): BaseSystemMLEstimatorOrModel = { config.put(key, value); this }\ndef updateML(ml: MLContext): Unit = {\n+ System.gc();\nml.setGPU(enableGPU); ml.setForceGPU(forceGPU);\nml.setExplain(explain); ml.setExplainLevel(explainLevel);\nml.setStatistics(statistics); ml.setStatisticsMaxHeavyHitters(statisticsMaxHeavyHitters);\nconfig.map(x => ml.setConfigProperty(x._1, x._2))\n+ // Since this is an approximate information, the check below only warns the users of unintended side effects\n+ // (for example: holding too many strong references) and is not added as a safeguard.\n+ val freeMem = Runtime.getRuntime().freeMemory();\n+ if(freeMem < OptimizerUtils.getLocalMemBudget()) {\n+ val LOG = LogFactory.getLog(classOf[BaseSystemMLEstimatorOrModel].getName())\n+ LOG.warn(\"SystemML local memory budget:\" + OptimizerUtils.toMB(OptimizerUtils.getLocalMemBudget()) + \" mb. Approximate free memory available on the driver JVM:\" + OptimizerUtils.toMB(freeMem) + \" mb.\");\n+ }\n}\ndef copyProperties(other: BaseSystemMLEstimatorOrModel): BaseSystemMLEstimatorOrModel = {\nother.setGPU(enableGPU); other.setForceGPU(forceGPU);\n@@ -236,6 +252,13 @@ trait BaseSystemMLClassifierModel extends BaseSystemMLEstimatorModel {\n.in(\"C\", C)\n.in(\"H\", H)\n.in(\"W\", W)\n+\n+ System.gc();\n+ val freeMem = Runtime.getRuntime().freeMemory();\n+ if(freeMem < OptimizerUtils.getLocalMemBudget()) {\n+ val LOG = LogFactory.getLog(classOf[BaseSystemMLClassifierModel].getName())\n+ LOG.warn(\"SystemML local memory budget:\" + OptimizerUtils.toMB(OptimizerUtils.getLocalMemBudget()) + \" mb. Approximate free memory abailable:\" + OptimizerUtils.toMB(freeMem));\n+ }\nval ret = (new MLContext(sc)).execute(script1).getMatrix(\"Prediction\").toMatrixBlock\nif (ret.getNumColumns != 1 && H == 1 && W == 1) {\n@@ -251,6 +274,7 @@ trait BaseSystemMLClassifierModel extends BaseSystemMLEstimatorModel {\nval script = getPredictionScript(isSingleNode)\n// Uncomment for debugging\n// ml.setExplainLevel(ExplainLevel.RECOMPILE_RUNTIME)\n+\nval modelPredict = ml.execute(script._1.in(script._2, X, new MatrixMetadata(X.getNumRows, X.getNumColumns, X.getNonZeros)))\nreturn modelPredict.getMatrix(probVar)\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fixed incorrect memory estimates in Caffe2DML summary for a network with separate label and feature data layers - Also added a warning message when the user tries to run SystemML with less free memory than the local memory budget.
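The corrected estimate now also charges each non-data layer for its input activations. A back-of-the-envelope Java sketch of that formula follows (simplified from the Scala code in the diff; all names are illustrative, and dense double precision is assumed, as in the summary's footnote).

```java
public class LayerMemEstimate {
    // Cell counts per layer; error and gradients are charged only during
    // training, mirroring getMemInBytes in the Scala diff.
    static long memInBytes(long in, long out, long weights, long bias,
                           int batchSize, boolean training) {
        long input  = in * batchSize;   // newly added input-activation term
        long output = out * batchSize;
        long error  = output;                        // backprop error
        long grads  = (weights + bias) * batchSize;  // gradients
        long cells  = training
            ? input + output + error + weights + bias + grads
            : input + output + weights + bias;
        return cells * Double.BYTES;    // dense, double precision
    }

    public static void main(String[] args) {
        // e.g. a 1000 -> 500 fully-connected layer at batch size 64
        System.out.println(memInBytes(1000, 500, 1000L * 500, 500, 64, true));
    }
}
```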
49,738
14.11.2017 17:32:38
28,800
27cabbc4730377d9e8e34d06855106687123c240
New spark order operations w/ multiple order-by cols This patch adds runtime support for distributed Spark operations for the recently added order operation with multiple order-by columns. We now also enable the related automatic rewrite of consecutive order calls for CP and Spark execution types.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ReorgOp.java", "new_path": "src/main/java/org/apache/sysml/hops/ReorgOp.java", "diff": "@@ -372,7 +372,8 @@ public class ReorgOp extends Hop implements MultiThreadedHop\n}\n}\nelse if( et==ExecType.SPARK ) {\n- boolean sortRewrite = !FORCE_DIST_SORT_INDEXES && isSortSPRewriteApplicable();\n+ boolean sortRewrite = !FORCE_DIST_SORT_INDEXES\n+ && isSortSPRewriteApplicable() && by.getDataType().isScalar();\nLop transform1 = constructCPOrSparkSortLop(input, by, desc, ixret, et, sortRewrite);\nsetOutputDimensions(transform1);\nsetLineNumbers(transform1);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "diff": "@@ -1509,7 +1509,8 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nif( HopRewriteUtils.isReorg(hi, ReOrgOp.SORT)\n&& hi.getInput().get(1) instanceof LiteralOp //scalar by\n&& hi.getInput().get(2) instanceof LiteralOp //scalar desc\n- && HopRewriteUtils.isLiteralOfValue(hi.getInput().get(3), false) ) //not ixret\n+ && HopRewriteUtils.isLiteralOfValue(hi.getInput().get(3), false) //not ixret\n+ && !OptimizerUtils.isHadoopExecutionMode() )\n{\nLiteralOp by = (LiteralOp) hi.getInput().get(1);\nboolean desc = HopRewriteUtils.getBooleanValue((LiteralOp)hi.getInput().get(2));\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/ReorgSPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/ReorgSPInstruction.java", "diff": "@@ -25,12 +25,14 @@ import java.util.Iterator;\nimport org.apache.spark.api.java.JavaPairRDD;\nimport org.apache.spark.api.java.function.Function;\nimport org.apache.spark.api.java.function.PairFlatMapFunction;\n+import org.apache.spark.api.java.function.PairFunction;\nimport scala.Tuple2;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.caching.MatrixObject.UpdateType;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\nimport org.apache.sysml.runtime.functionobjects.DiagIndex;\n@@ -40,6 +42,7 @@ import org.apache.sysml.runtime.functionobjects.SwapIndex;\nimport org.apache.sysml.runtime.instructions.InstructionUtils;\nimport org.apache.sysml.runtime.instructions.cp.CPOperand;\nimport org.apache.sysml.runtime.instructions.spark.functions.FilterDiagBlocksFunction;\n+import org.apache.sysml.runtime.instructions.spark.functions.IsBlockInList;\nimport org.apache.sysml.runtime.instructions.spark.functions.IsBlockInRange;\nimport org.apache.sysml.runtime.instructions.spark.utils.RDDAggregateUtils;\nimport org.apache.sysml.runtime.instructions.spark.utils.RDDSortUtils;\n@@ -53,6 +56,7 @@ import org.apache.sysml.runtime.matrix.mapred.IndexedMatrixValue;\nimport org.apache.sysml.runtime.matrix.operators.Operator;\nimport org.apache.sysml.runtime.matrix.operators.ReorgOperator;\nimport org.apache.sysml.runtime.util.DataConverter;\n+import org.apache.sysml.runtime.util.IndexRange;\nimport org.apache.sysml.runtime.util.UtilFunctions;\npublic class ReorgSPInstruction extends UnarySPInstruction {\n@@ -162,33 +166,46 @@ public class ReorgSPInstruction extends 
UnarySPInstruction {\nboolean desc = ec.getScalarInput(_desc.getName(), _desc.getValueType(), _desc.isLiteral()).getBooleanValue();\nboolean ixret = ec.getScalarInput(_ixret.getName(), _ixret.getValueType(), _ixret.isLiteral()).getBooleanValue();\nboolean singleCol = (mcIn.getCols() == 1);\n+ out = in1;\n- //error handling unsupported operations\n- //TODO additional spark sort runtime with multiple order columns\n- if( cols.length > 1 )\n- LOG.warn(\"Unsupported sort with multiple order-by columns. Falling back first sort column.\");\n- long col = cols[0];\n+ if( cols.length > mcIn.getColsPerBlock() )\n+ LOG.warn(\"Unsupported sort with number of order-by columns large than blocksize: \"+cols.length);\n+ if( singleCol || cols.length==1 ) {\n// extract column (if necessary) and sort\n- out = in1;\n- if( !singleCol ){\n- out = out.filter(new IsBlockInRange(1, mcIn.getRows(), col, col, mcIn))\n- .mapValues(new ExtractColumn((int)UtilFunctions.computeCellInBlock(col, mcIn.getColsPerBlock())));\n- }\n+ if( !singleCol )\n+ out = out.filter(new IsBlockInRange(1, mcIn.getRows(), cols[0], cols[0], mcIn))\n+ .mapValues(new ExtractColumn((int)UtilFunctions.computeCellInBlock(cols[0], mcIn.getColsPerBlock())));\n//actual index/data sort operation\n- if( ixret ) { //sort indexes\n+ if( ixret ) //sort indexes\nout = RDDSortUtils.sortIndexesByVal(out, !desc, mcIn.getRows(), mcIn.getRowsPerBlock());\n- }\n- else if( singleCol && !desc) { //sort single-column matrix\n+ else if( singleCol && !desc) //sort single-column matrix\nout = RDDSortUtils.sortByVal(out, mcIn.getRows(), mcIn.getRowsPerBlock());\n- }\n- else { //sort multi-column matrix\n- if (! _bSortIndInMem)\n+ else if( !_bSortIndInMem ) //sort multi-column matrix w/ rewrite\nout = RDDSortUtils.sortDataByVal(out, in1, !desc, mcIn.getRows(), mcIn.getCols(), mcIn.getRowsPerBlock(), mcIn.getColsPerBlock());\n- else\n+ else //sort multi-column matrix\nout = RDDSortUtils.sortDataByValMemSort(out, in1, !desc, mcIn.getRows(), mcIn.getCols(), mcIn.getRowsPerBlock(), mcIn.getColsPerBlock(), sec, (ReorgOperator) _optr);\n}\n+ else { //general case: multi-column sort\n+ // extract columns (if necessary)\n+ if( cols.length < mcIn.getCols() )\n+ out = out.filter(new IsBlockInList(cols, mcIn))\n+ .mapToPair(new ExtractColumns(cols, mcIn));\n+\n+ // append extracted columns (if necessary)\n+ if( mcIn.getCols() > mcIn.getColsPerBlock() )\n+ out = RDDAggregateUtils.mergeByKey(out);\n+\n+ //actual index/data sort operation\n+ if( ixret ) //sort indexes\n+ out = RDDSortUtils.sortIndexesByVals(out, !desc, mcIn.getRows(), (long)cols.length, mcIn.getRowsPerBlock());\n+ else if( cols.length==mcIn.getCols() && !desc) //sort single-column matrix\n+ out = RDDSortUtils.sortByVals(out, mcIn.getRows(), cols.length, mcIn.getRowsPerBlock());\n+ else //sort multi-column matrix\n+ out = RDDSortUtils.sortDataByVals(out, in1, !desc, mcIn.getRows(), mcIn.getCols(),\n+ cols.length, mcIn.getRowsPerBlock(), mcIn.getColsPerBlock());\n+ }\n}\nelse {\nthrow new DMLRuntimeException(\"Error: Incorrect opcode in ReorgSPInstruction:\" + opcode);\n@@ -323,5 +340,34 @@ public class ReorgSPInstruction extends UnarySPInstruction {\nreturn arg0.sliceOperations(0, arg0.getNumRows()-1, _col, _col, new MatrixBlock());\n}\n}\n+\n+ private static class ExtractColumns implements PairFunction<Tuple2<MatrixIndexes, MatrixBlock>,MatrixIndexes,MatrixBlock>\n+ {\n+ private static final long serialVersionUID = 2902729186431711506L;\n+\n+ private final long[] _cols;\n+ private final int _brlen, _bclen;\n+\n+ 
public ExtractColumns(long[] cols, MatrixCharacteristics mc) {\n+ _cols = cols;\n+ _brlen = mc.getRowsPerBlock();\n+ _bclen = mc.getColsPerBlock();\n+ }\n+\n+ public Tuple2<MatrixIndexes, MatrixBlock> call(Tuple2<MatrixIndexes, MatrixBlock> arg0)\n+ throws Exception\n+ {\n+ MatrixIndexes ix = arg0._1();\n+ MatrixBlock in = arg0._2();\n+ MatrixBlock out = new MatrixBlock(in.getNumRows(), _cols.length, true);\n+ for(int i=0; i<_cols.length; i++)\n+ if( UtilFunctions.isInBlockRange(ix, _brlen, _bclen, new IndexRange(1, Long.MAX_VALUE, _cols[i], _cols[i])) ) {\n+ int index = UtilFunctions.computeCellInBlock(_cols[i], _bclen);\n+ out.leftIndexingOperations(in.sliceOperations(0, in.getNumRows()-1, index, index, new MatrixBlock()),\n+ 0, in.getNumRows()-1, i, i, out, UpdateType.INPLACE);\n+ }\n+ return new Tuple2<>(new MatrixIndexes(ix.getRowIndex(), 1), out);\n+ }\n+ }\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/functions/IsBlockInList.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.runtime.instructions.spark.functions;\n+\n+import org.apache.spark.api.java.function.Function;\n+\n+import scala.Tuple2;\n+\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.matrix.data.MatrixIndexes;\n+import org.apache.sysml.runtime.util.UtilFunctions;\n+\n+public class IsBlockInList implements Function<Tuple2<MatrixIndexes,MatrixBlock>, Boolean>\n+{\n+ private static final long serialVersionUID = -1956151588590369875L;\n+\n+ private final long[] _cols;\n+ private final int _brlen, _bclen;\n+\n+ public IsBlockInList(long[] cols, MatrixCharacteristics mc) {\n+ _cols = cols;\n+ _brlen = mc.getRowsPerBlock();\n+ _bclen = mc.getColsPerBlock();\n+ }\n+\n+ @Override\n+ public Boolean call(Tuple2<MatrixIndexes, MatrixBlock> kv)\n+ throws Exception\n+ {\n+ for( int i=0; i<_cols.length; i++ )\n+ if( UtilFunctions.isInBlockRange(kv._1(), _brlen, _bclen, 1, Long.MAX_VALUE, _cols[i], _cols[i]) )\n+ return true;\n+ return false;\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/functions/IsBlockInRange.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/functions/IsBlockInRange.java", "diff": "@@ -30,7 +30,6 @@ import org.apache.sysml.runtime.util.UtilFunctions;\npublic class IsBlockInRange implements Function<Tuple2<MatrixIndexes,MatrixBlock>, Boolean>\n{\n-\nprivate static final long serialVersionUID = 5849687296021280540L;\nprivate long _rl; long _ru; long _cl; long _cu;\n" }, { "change_type": "MODIFY", "old_path": 
"src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDSortUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDSortUtils.java", "diff": "@@ -35,16 +35,19 @@ import org.apache.spark.broadcast.Broadcast;\nimport scala.Tuple2;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.caching.MatrixObject.UpdateType;\nimport org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\nimport org.apache.sysml.runtime.functionobjects.SortIndex;\nimport org.apache.sysml.runtime.instructions.spark.data.PartitionedBlock;\nimport org.apache.sysml.runtime.instructions.spark.data.RowMatrixBlock;\nimport org.apache.sysml.runtime.instructions.spark.functions.ReplicateVectorFunction;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixIndexes;\nimport org.apache.sysml.runtime.matrix.operators.ReorgOperator;\nimport org.apache.sysml.runtime.util.DataConverter;\n+import org.apache.sysml.runtime.util.SortUtils;\nimport org.apache.sysml.runtime.util.UtilFunctions;\npublic class RDDSortUtils\n@@ -93,6 +96,28 @@ public class RDDSortUtils\nreturn ret;\n}\n+ public static JavaPairRDD<MatrixIndexes, MatrixBlock> sortByVals(\n+ JavaPairRDD<MatrixIndexes, MatrixBlock> in, long rlen, long clen, int brlen )\n+ {\n+ //create value-index rdd from inputs\n+ JavaRDD<MatrixBlock> dvals = in.values()\n+ .flatMap(new ExtractRowsFunction());\n+\n+ //sort (creates sorted range per partition)\n+ int numPartitions = SparkUtils.getNumPreferredPartitions(\n+ new MatrixCharacteristics(rlen, clen, brlen, brlen), in);\n+ JavaRDD<MatrixBlock> sdvals = dvals\n+ .sortBy(new CreateDoubleKeysFunction(), true, numPartitions);\n+\n+ //create binary block output\n+ JavaPairRDD<MatrixIndexes, MatrixBlock> ret = sdvals\n+ .zipWithIndex()\n+ .mapPartitionsToPair(new ConvertToBinaryBlockFunction5(rlen, brlen));\n+ ret = RDDAggregateUtils.mergeByKey(ret, false);\n+\n+ return ret;\n+ }\n+\npublic static JavaPairRDD<MatrixIndexes, MatrixBlock> sortIndexesByVal( JavaPairRDD<MatrixIndexes, MatrixBlock> val,\nboolean asc, long rlen, int brlen )\n{\n@@ -116,6 +141,29 @@ public class RDDSortUtils\nreturn ret;\n}\n+ public static JavaPairRDD<MatrixIndexes, MatrixBlock> sortIndexesByVals( JavaPairRDD<MatrixIndexes, MatrixBlock> in,\n+ boolean asc, long rlen, long clen, int brlen )\n+ {\n+ //create value-index rdd from inputs\n+ JavaPairRDD<ValuesIndexPair, double[]> dvals = in\n+ .flatMapToPair(new ExtractDoubleValuesWithIndexFunction2(brlen));\n+\n+ //sort (creates sorted range per partition)\n+ int numPartitions = SparkUtils.getNumPreferredPartitions(\n+ new MatrixCharacteristics(rlen, clen+1, brlen, brlen));\n+ JavaRDD<ValuesIndexPair> sdvals = dvals\n+ .sortByKey(new IndexComparator2(asc), true, numPartitions)\n+ .keys(); //workaround for index comparator\n+\n+ //create binary block output\n+ JavaPairRDD<MatrixIndexes, MatrixBlock> ret = sdvals\n+ .zipWithIndex()\n+ .mapPartitionsToPair(new ConvertToBinaryBlockFunction6(rlen, brlen));\n+ ret = RDDAggregateUtils.mergeByKey(ret, false);\n+\n+ return ret;\n+ }\n+\npublic static JavaPairRDD<MatrixIndexes, MatrixBlock> sortDataByVal( JavaPairRDD<MatrixIndexes, MatrixBlock> val,\nJavaPairRDD<MatrixIndexes, MatrixBlock> data, boolean asc, long rlen, long clen, int brlen, int bclen )\n{\n@@ -131,7 
+179,6 @@ public class RDDSortUtils\n.keys(); //workaround for index comparator\n//create target indexes by original index\n- long numRep = (long)Math.ceil((double)clen/bclen);\nJavaPairRDD<MatrixIndexes, MatrixBlock> ixmap = sdvals\n.zipWithIndex()\n.mapToPair(new ExtractIndexFunction())\n@@ -139,7 +186,40 @@ public class RDDSortUtils\n.mapPartitionsToPair(new ConvertToBinaryBlockFunction4(rlen, brlen));\nixmap = RDDAggregateUtils.mergeByKey(ixmap, false);\n+ //actual data sort\n+ return sortDataByIx(data, ixmap, rlen, clen, brlen, bclen);\n+ }\n+\n+ public static JavaPairRDD<MatrixIndexes, MatrixBlock> sortDataByVals( JavaPairRDD<MatrixIndexes, MatrixBlock> val,\n+ JavaPairRDD<MatrixIndexes, MatrixBlock> data, boolean asc, long rlen, long clen, long clen2, int brlen, int bclen )\n+ {\n+ //create value-index rdd from inputs\n+ JavaPairRDD<ValuesIndexPair, double[]> dvals = val\n+ .flatMapToPair(new ExtractDoubleValuesWithIndexFunction2(brlen));\n+\n+ //sort (creates sorted range per partition)\n+ int numPartitions = SparkUtils.getNumPreferredPartitions(\n+ new MatrixCharacteristics(rlen, clen2+1, brlen, brlen));\n+ JavaRDD<ValuesIndexPair> sdvals = dvals\n+ .sortByKey(new IndexComparator2(asc), true, numPartitions)\n+ .keys(); //workaround for index comparator\n+\n+ //create target indexes by original index\n+ JavaPairRDD<MatrixIndexes, MatrixBlock> ixmap = sdvals\n+ .zipWithIndex()\n+ .mapToPair(new ExtractIndexFunction2())\n+ .sortByKey()\n+ .mapPartitionsToPair(new ConvertToBinaryBlockFunction4(rlen, brlen));\n+ ixmap = RDDAggregateUtils.mergeByKey(ixmap, false);\n+\n+ //actual data sort\n+ return sortDataByIx(data, ixmap, rlen, clen, brlen, bclen);\n+ }\n+\n+ public static JavaPairRDD<MatrixIndexes, MatrixBlock> sortDataByIx(JavaPairRDD<MatrixIndexes,MatrixBlock> data,\n+ JavaPairRDD<MatrixIndexes,MatrixBlock> ixmap, long rlen, long clen, int brlen, int bclen) {\n//replicate indexes for all column blocks\n+ long numRep = (long)Math.ceil((double)clen/bclen);\nJavaPairRDD<MatrixIndexes, MatrixBlock> rixmap = ixmap\n.flatMapToPair(new ReplicateVectorFunction(false, numRep));\n@@ -200,12 +280,25 @@ public class RDDSortUtils\n@Override\npublic Iterator<Double> call(MatrixBlock arg0)\n- throws Exception\n- {\n+ throws Exception {\nreturn DataConverter.convertToDoubleList(arg0).iterator();\n}\n}\n+ private static class ExtractRowsFunction implements FlatMapFunction<MatrixBlock,MatrixBlock>\n+ {\n+ private static final long serialVersionUID = -2786968469468554974L;\n+\n+ @Override\n+ public Iterator<MatrixBlock> call(MatrixBlock arg0)\n+ throws Exception {\n+ ArrayList<MatrixBlock> rows = new ArrayList<>();\n+ for(int i=0; i<arg0.getNumRows(); i++)\n+ rows.add(arg0.sliceOperations(i, i, 0, arg0.getNumColumns()-1, new MatrixBlock()));\n+ return rows.iterator();\n+ }\n+ }\n+\nprivate static class ExtractDoubleValuesFunction2 implements FlatMapFunction<Tuple2<MatrixBlock,MatrixBlock>,DoublePair>\n{\nprivate static final long serialVersionUID = 2132672563825289022L;\n@@ -257,6 +350,35 @@ public class RDDSortUtils\n}\n}\n+ private static class ExtractDoubleValuesWithIndexFunction2 implements PairFlatMapFunction<Tuple2<MatrixIndexes,MatrixBlock>,ValuesIndexPair,double[]>\n+ {\n+ private static final long serialVersionUID = 8358254634903633283L;\n+\n+ private final int _brlen;\n+\n+ public ExtractDoubleValuesWithIndexFunction2(int brlen) {\n+ _brlen = brlen;\n+ }\n+\n+ @Override\n+ public Iterator<Tuple2<ValuesIndexPair,double[]>> call(Tuple2<MatrixIndexes,MatrixBlock> arg0)\n+ throws Exception\n+ 
{\n+ ArrayList<Tuple2<ValuesIndexPair,double[]>> ret = new ArrayList<>();\n+ MatrixIndexes ix = arg0._1();\n+ MatrixBlock mb = arg0._2();\n+\n+ long ixoffset = (ix.getRowIndex()-1)*_brlen;\n+ for( int i=0; i<mb.getNumRows(); i++) {\n+ double[] vals = DataConverter.convertToDoubleVector(\n+ mb.sliceOperations(i, i, 0, mb.getNumColumns()-1, new MatrixBlock()));\n+ ret.add(new Tuple2<>(new ValuesIndexPair(vals,ixoffset+i+1), vals));\n+ }\n+\n+ return ret.iterator();\n+ }\n+ }\n+\nprivate static class CreateDoubleKeyFunction implements Function<Double,Double>\n{\nprivate static final long serialVersionUID = 2021786334763247835L;\n@@ -281,17 +403,32 @@ public class RDDSortUtils\n}\n}\n- private static class ExtractIndexFunction implements PairFunction<Tuple2<ValueIndexPair,Long>,Long,Long>\n+ private static class CreateDoubleKeysFunction implements Function<MatrixBlock,double[]>\n{\n+ private static final long serialVersionUID = 4316858496746520340L;\n+\n+ @Override\n+ public double[] call(MatrixBlock row) throws Exception {\n+ return DataConverter.convertToDoubleVector(row);\n+ }\n+ }\n+\n+ private static class ExtractIndexFunction implements PairFunction<Tuple2<ValueIndexPair,Long>,Long,Long> {\nprivate static final long serialVersionUID = -4553468724131249535L;\n@Override\n- public Tuple2<Long, Long> call(Tuple2<ValueIndexPair,Long> arg0)\n- throws Exception\n- {\n+ public Tuple2<Long, Long> call(Tuple2<ValueIndexPair,Long> arg0) throws Exception {\nreturn new Tuple2<>(arg0._1().ix, arg0._2());\n}\n+ }\n+\n+ private static class ExtractIndexFunction2 implements PairFunction<Tuple2<ValuesIndexPair,Long>,Long,Long> {\n+ private static final long serialVersionUID = -1366455446597907270L;\n+ @Override\n+ public Tuple2<Long, Long> call(Tuple2<ValuesIndexPair,Long> arg0) throws Exception {\n+ return new Tuple2<>(arg0._1().ix, arg0._2());\n+ }\n}\nprivate static class ConvertToBinaryBlockFunction implements PairFlatMapFunction<Iterator<Tuple2<Double,Long>>,MatrixIndexes,MatrixBlock>\n@@ -485,6 +622,98 @@ public class RDDSortUtils\n}\n}\n+ private static class ConvertToBinaryBlockFunction5 implements PairFlatMapFunction<Iterator<Tuple2<MatrixBlock,Long>>,MatrixIndexes,MatrixBlock>\n+ {\n+ private static final long serialVersionUID = 6357994683868091724L;\n+\n+ private long _rlen = -1;\n+ private int _brlen = -1;\n+\n+ public ConvertToBinaryBlockFunction5(long rlen, int brlen)\n+ {\n+ _rlen = rlen;\n+ _brlen = brlen;\n+ }\n+\n+ public Iterator<Tuple2<MatrixIndexes, MatrixBlock>> call(Iterator<Tuple2<MatrixBlock,Long>> arg0)\n+ throws Exception\n+ {\n+ ArrayList<Tuple2<MatrixIndexes,MatrixBlock>> ret = new ArrayList<>();\n+ MatrixIndexes ix = null;\n+ MatrixBlock mb = null;\n+\n+ while( arg0.hasNext() )\n+ {\n+ Tuple2<MatrixBlock,Long> val = arg0.next();\n+ long valix = val._2 + 1;\n+ long rix = UtilFunctions.computeBlockIndex(valix, _brlen);\n+ int pos = UtilFunctions.computeCellInBlock(valix, _brlen);\n+\n+ if( ix == null || ix.getRowIndex() != rix ) {\n+ if( ix !=null )\n+ ret.add(new Tuple2<>(ix,mb));\n+ long len = UtilFunctions.computeBlockSize(_rlen, rix, _brlen);\n+ ix = new MatrixIndexes(rix,1);\n+ mb = new MatrixBlock((int)len, val._1.getNumColumns(), false);\n+ }\n+\n+ mb.leftIndexingOperations(val._1, pos, pos, 0, val._1.getNumColumns()-1, mb, UpdateType.INPLACE);\n+ }\n+\n+ //flush last block\n+ if( mb!=null && mb.getNonZeros() != 0 )\n+ ret.add(new Tuple2<>(ix,mb));\n+ return ret.iterator();\n+ }\n+ }\n+\n+ private static class ConvertToBinaryBlockFunction6 implements 
PairFlatMapFunction<Iterator<Tuple2<ValuesIndexPair,Long>>,MatrixIndexes,MatrixBlock>\n+ {\n+ private static final long serialVersionUID = 5351649694631911694L;\n+\n+ private long _rlen = -1;\n+ private int _brlen = -1;\n+\n+ public ConvertToBinaryBlockFunction6(long rlen, int brlen)\n+ {\n+ _rlen = rlen;\n+ _brlen = brlen;\n+ }\n+\n+ public Iterator<Tuple2<MatrixIndexes, MatrixBlock>> call(Iterator<Tuple2<ValuesIndexPair,Long>> arg0)\n+ throws Exception\n+ {\n+ ArrayList<Tuple2<MatrixIndexes,MatrixBlock>> ret = new ArrayList<>();\n+\n+ MatrixIndexes ix = null;\n+ MatrixBlock mb = null;\n+\n+ while( arg0.hasNext() )\n+ {\n+ Tuple2<ValuesIndexPair,Long> val = arg0.next();\n+ long valix = val._2 + 1;\n+ long rix = UtilFunctions.computeBlockIndex(valix, _brlen);\n+ int pos = UtilFunctions.computeCellInBlock(valix, _brlen);\n+\n+ if( ix == null || ix.getRowIndex() != rix ) {\n+ if( ix !=null )\n+ ret.add(new Tuple2<>(ix,mb));\n+ long len = UtilFunctions.computeBlockSize(_rlen, rix, _brlen);\n+ ix = new MatrixIndexes(rix,1);\n+ mb = new MatrixBlock((int)len, 1, false);\n+ }\n+\n+ mb.quickSetValue(pos, 0, val._1.ix);\n+ }\n+\n+ //flush last block\n+ if( mb!=null && mb.getNonZeros() != 0 )\n+ ret.add(new Tuple2<>(ix,mb));\n+\n+ return ret.iterator();\n+ }\n+ }\n+\nprivate static class ShuffleMatrixBlockRowsFunction implements PairFlatMapFunction<Iterator<Tuple2<MatrixIndexes,Tuple2<MatrixBlock,MatrixBlock>>>,MatrixIndexes,RowMatrixBlock>\n{\nprivate static final long serialVersionUID = 6885207719329119646L;\n@@ -690,6 +919,19 @@ public class RDDSortUtils\n}\n}\n+ private static class ValuesIndexPair implements Serializable\n+ {\n+ private static final long serialVersionUID = 4297433409147784971L;\n+\n+ public double[] vals;\n+ public long ix;\n+\n+ public ValuesIndexPair(double[] dvals, long lix) {\n+ vals = dvals;\n+ ix = lix;\n+ }\n+ }\n+\npublic static class IndexComparator implements Comparator<ValueIndexPair>, Serializable\n{\nprivate static final long serialVersionUID = 5154839870549241343L;\n@@ -700,18 +942,32 @@ public class RDDSortUtils\n}\n@Override\n- public int compare(ValueIndexPair o1, ValueIndexPair o2)\n- {\n- //note: use conversion to Double and Long instead of native\n- //compare for compatibility with jdk 6\n- int retVal = Double.valueOf(o1.val).compareTo(o2.val);\n- if(retVal != 0) {\n+ public int compare(ValueIndexPair o1, ValueIndexPair o2) {\n+ int retVal = Double.compare(o1.val, o2.val);\n+ if(retVal != 0)\nreturn (_asc ? retVal : -1*retVal);\n+ else //for stable sort\n+ return Long.compare(o1.ix, o2.ix);\n}\n- else {\n- //for stable sort\n- return Long.valueOf(o1.ix).compareTo(o2.ix);\n}\n+\n+ public static class IndexComparator2 implements Comparator<ValuesIndexPair>, Serializable\n+ {\n+ private static final long serialVersionUID = 5531987863790922691L;\n+\n+ private boolean _asc;\n+ public IndexComparator2(boolean asc) {\n+ _asc = asc;\n+ }\n+\n+ @Override\n+ public int compare(ValuesIndexPair o1, ValuesIndexPair o2)\n+ {\n+ int retVal = SortUtils.compare(o1.vals, o2.vals);\n+ if(retVal != 0)\n+ return (_asc ? 
retVal : -1*retVal);\n+ else //for stable sort\n+ return Long.compare(o1.ix, o2.ix);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/util/SortUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/util/SortUtils.java", "diff": "@@ -51,6 +51,18 @@ public class SortUtils\nisSorted(0, in.getNumRows()*in.getNumColumns(), in.getDenseBlock());\n}\n+ public static int compare(double[] d1, double[] d2) {\n+ if( d1 == null || d2 == null )\n+ throw new RuntimeException(\"Invalid invocation w/ null parameter.\");\n+ int ret = Long.compare(d1.length, d2.length);\n+ if( ret != 0 ) return ret;\n+ for(int i=0; i<d1.length; i++) {\n+ ret = Double.compare(d1[i], d2[i]);\n+ if( ret != 0 ) return ret;\n+ }\n+ return 0;\n+ }\n+\n/**\n* In-place sort of two arrays, only indexes is used for comparison and values\n* of same position are sorted accordingly.\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/reorg/MultipleOrderByColsTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/reorg/MultipleOrderByColsTest.java", "diff": "@@ -114,46 +114,45 @@ public class MultipleOrderByColsTest extends AutomatedTestBase\nrunOrderTest(TEST_NAME2, true, true, false, ExecType.CP);\n}\n-//TODO enable together with additional spark sort runtime\n-// @Test\n-// public void testOrderDenseAscDataSP() {\n-// runOrderTest(TEST_NAME1, false, false, false, ExecType.SPARK);\n-// }\n-//\n-// @Test\n-// public void testOrderDenseAscIxSP() {\n-// runOrderTest(TEST_NAME1, false, false, true, ExecType.SPARK);\n-// }\n-//\n-// @Test\n-// public void testOrderDenseDescDataSP() {\n-// runOrderTest(TEST_NAME1, false, true, false, ExecType.SPARK);\n-// }\n-//\n-// @Test\n-// public void testOrderDenseDescIxSP() {\n-// runOrderTest(TEST_NAME1, false, true, true, ExecType.SPARK);\n-// }\n-//\n-// @Test\n-// public void testOrderSparseAscDataSP() {\n-// runOrderTest(TEST_NAME1, true, false, false, ExecType.SPARK);\n-// }\n-//\n-// @Test\n-// public void testOrderSparseAscIxSP() {\n-// runOrderTest(TEST_NAME1, true, false, true, ExecType.SPARK);\n-// }\n-//\n-// @Test\n-// public void testOrderSparseDescDataSP() {\n-// runOrderTest(TEST_NAME1, true, true, false, ExecType.SPARK);\n-// }\n-//\n-// @Test\n-// public void testOrderSparseDescIxSP() {\n-// runOrderTest(TEST_NAME1, true, true, true, ExecType.SPARK);\n-// }\n+ @Test\n+ public void testOrderDenseAscDataSP() {\n+ runOrderTest(TEST_NAME1, false, false, false, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testOrderDenseAscIxSP() {\n+ runOrderTest(TEST_NAME1, false, false, true, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testOrderDenseDescDataSP() {\n+ runOrderTest(TEST_NAME1, false, true, false, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testOrderDenseDescIxSP() {\n+ runOrderTest(TEST_NAME1, false, true, true, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testOrderSparseAscDataSP() {\n+ runOrderTest(TEST_NAME1, true, false, false, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testOrderSparseAscIxSP() {\n+ runOrderTest(TEST_NAME1, true, false, true, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testOrderSparseDescDataSP() {\n+ runOrderTest(TEST_NAME1, true, true, false, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testOrderSparseDescIxSP() {\n+ runOrderTest(TEST_NAME1, true, true, true, ExecType.SPARK);\n+ }\nprivate void runOrderTest( String testname, boolean sparse, boolean desc, boolean ixret, ExecType et)\n{\n@@ -161,11 +160,11 
@@ public class MultipleOrderByColsTest extends AutomatedTestBase\nswitch( et ){\ncase MR: rtplatform = RUNTIME_PLATFORM.HADOOP; break;\ncase SPARK: rtplatform = RUNTIME_PLATFORM.SPARK; break;\n- default: rtplatform = RUNTIME_PLATFORM.HYBRID; break;\n+ default: rtplatform = RUNTIME_PLATFORM.HYBRID_SPARK; break;\n}\nboolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n- if( rtplatform == RUNTIME_PLATFORM.SPARK )\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK || rtplatform == RUNTIME_PLATFORM.HYBRID_SPARK )\nDMLScript.USE_LOCAL_SPARK_CONFIG = true;\ntry\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2007] New spark order operations w/ multiple order-by cols This patch adds runtime support for distributed Spark operations for the recently added order operation with multiple order-by columns. We now also enable the related automatic rewrite of consecutive order calls for CP and Spark execution types.
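At the core of the new multi-column runtime is a lexicographic comparison of extracted row vectors, mirroring the SortUtils.compare added in the diff. The standalone sketch below (illustrative class name) shows how such a comparator orders rows by the first order-by column and breaks ties with subsequent columns.

```java
import java.util.Arrays;

public class MultiColSort {
    // Lexicographic comparison of two rows, as in SortUtils.compare:
    // the first differing column decides; equal rows compare as 0 so a
    // stable sort can tie-break by original row index.
    static int compare(double[] a, double[] b) {
        int ret = Long.compare(a.length, b.length);
        if (ret != 0) return ret;
        for (int i = 0; i < a.length; i++) {
            ret = Double.compare(a[i], b[i]);
            if (ret != 0) return ret;
        }
        return 0;
    }

    public static void main(String[] args) {
        double[][] rows = { {2, 1}, {1, 9}, {1, 3} };
        Arrays.sort(rows, MultiColSort::compare);
        for (double[] r : rows)
            System.out.println(Arrays.toString(r));
        // [1.0, 3.0] then [1.0, 9.0] then [2.0, 1.0]
    }
}
```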
49,738
14.11.2017 18:13:45
28,800
5df6ab6dd3f2d879a98d5adcc7dd98fba6245cf2
[MINOR] Performance sparse cbind/rbind (shallow copy on empty rhs)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -3545,6 +3545,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nfinal int m = cbind ? rlen : rlen+Arrays.stream(that).mapToInt(mb -> mb.rlen).sum();\nfinal int n = cbind ? clen+Arrays.stream(that).mapToInt(mb -> mb.clen).sum() : clen;\nfinal long nnz = nonZeros+Arrays.stream(that).mapToLong(mb -> mb.nonZeros).sum();\n+ boolean shallowCopy = (nonZeros == nnz);\nboolean sp = evalSparseFormatInMemory(m, n, nnz);\n//init result matrix\n@@ -3577,7 +3578,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n//adjust sparse rows if required\nresult.allocateSparseRowsBlock();\n//allocate sparse rows once for cbind\n- if( cbind && nnz > rlen && result.getSparseBlock() instanceof SparseBlockMCSR ) {\n+ if( cbind && nnz > rlen && !shallowCopy && result.getSparseBlock() instanceof SparseBlockMCSR ) {\nSparseBlock sblock = result.getSparseBlock();\nfor( int i=0; i<result.rlen; i++ ) {\nfinal int row = i; //workaround for lambda compile issue\n@@ -3588,7 +3589,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n}\n//core append operation\n- result.appendToSparse(this, 0, 0);\n+ result.appendToSparse(this, 0, 0, !shallowCopy);\nif( cbind ) {\nfor(int i=0, off=clen; i<that.length; i++) {\nresult.appendToSparse(that[i], 0, off);\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Performance sparse cbind/rbind (shallow copy on empty rhs)
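The one-line optimization above hinges on a simple invariant: if the appended right-hand-side blocks are all empty, the result's nonzero count equals the left-hand side's, so sparse rows can be shared rather than deep-copied. A tiny illustrative sketch of that condition (hypothetical values, not SystemML classes):

```java
public class ShallowAppendCheck {
    public static void main(String[] args) {
        long lhsNnz = 1234;                 // nonzeros of the left input
        long[] rhsNnz = {0, 0};             // all appended blocks are empty
        long totalNnz = lhsNnz;
        for (long n : rhsNnz)
            totalNnz += n;
        boolean shallowCopy = (lhsNnz == totalNnz);
        System.out.println(shallowCopy);    // true: rows can be shared
    }
}
```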
49,736
14.11.2017 22:10:09
28,800
62b5725d82c5a24f05d6178a7498a9124746bf36
Allow user to specify custom path to BLAS libraries This feature is useful in cloud environments where the user does not have sudo permission or where setting environment variables such as LD_LIBRARY_PATH is difficult.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java", "new_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java", "diff": "@@ -48,6 +48,7 @@ public class NativeHelper {\npublic static String blasType;\nprivate static int maxNumThreads = -1;\nprivate static boolean setMaxNumThreads = false;\n+ private static String customLibPath = null;\nstatic {\n// Note: we only support 64 bit Java on x86 and AMD machine\nsupportedArchitectures.put(\"x86_64\", \"x86_64\");\n@@ -58,15 +59,20 @@ public class NativeHelper {\nprivate static String hintOnFailures = \"\";\n+ public static void setBLASPath(String path) {\n+ customLibPath = path;\n+ init(true);\n+ }\n+\n// Performing loading in a method instead of a static block will throw a detailed stack trace in case of fatal errors\n- private static void init() {\n+ private static void init(boolean forcedInit) {\n// Only Linux supported for BLAS\nif(!SystemUtils.IS_OS_LINUX)\nreturn;\n// attemptedLoading variable ensures that we don't try to load SystemML and other dependencies\n// again and again especially in the parfor (hence the double-checking with synchronized).\n- if(!attemptedLoading) {\n+ if(!attemptedLoading || forcedInit) {\nDMLConfig dmlConfig = ConfigurationManager.getDMLConfig();\n// -------------------------------------------------------------------------------------\n// We allow BLAS to be enabled or disabled or explicitly selected in one of the two ways:\n@@ -85,7 +91,7 @@ public class NativeHelper {\nreturn;\n}\nsynchronized(NativeHelper.class) {\n- if(!attemptedLoading) {\n+ if(!attemptedLoading || forcedInit) {\n// -----------------------------------------------------------------------------\n// =============================================================================\n// By default, we will native.blas=true and we will attempt to load MKL first.\n@@ -152,7 +158,7 @@ public class NativeHelper {\n}\npublic static boolean isNativeLibraryLoaded() {\n- init();\n+ init(false);\nif(maxNumThreads == -1)\nmaxNumThreads = OptimizerUtils.getConstrainedNumThreads(-1);\nif(isSystemMLLoaded && !setMaxNumThreads && maxNumThreads != -1) {\n@@ -183,6 +189,22 @@ public class NativeHelper {\n}\nprivate static boolean loadBLAS(String blas, String optionalMsg) {\n+ // First attempt to load from custom library path\n+ if(customLibPath != null) {\n+ String libPath = customLibPath + File.separator + System.mapLibraryName(blas);\n+ try {\n+ System.load(libPath);\n+ // Print to stdout as this feature is intended for cloud environment\n+ System.out.println(\"Loaded the library:\" + libPath);\n+ return true;\n+ }\n+ catch (UnsatisfiedLinkError e1) {\n+ // Print to stdout as this feature is intended for cloud environment\n+ System.out.println(\"Unable to load \" + libPath + \":\" + e1.getMessage());\n+ }\n+ }\n+\n+ // Then try loading using loadLibrary\ntry {\nSystem.loadLibrary(blas);\nreturn true;\n@@ -198,6 +220,7 @@ public class NativeHelper {\n}\n}\n+\nprivate static boolean loadLibraryHelper(String path) {\nInputStream in = null; OutputStream out = null;\ntry {\n" }, { "change_type": "MODIFY", "old_path": "src/main/python/systemml/mlcontext.py", "new_path": "src/main/python/systemml/mlcontext.py", "diff": "# Methods to create Script object\nscript_factory_methods = [ 'dml', 'pydml', 'dmlFromResource', 'pydmlFromResource', 'dmlFromFile', 'pydmlFromFile', 'dmlFromUrl', 'pydmlFromUrl' ]\n# Utility methods\n-util_methods = [ 'jvm_stdout', '_java2py', 'getHopDAG' ]\n+util_methods = [ 'jvm_stdout', 
'_java2py', 'getHopDAG', 'setBLASPath' ]\n__all__ = ['MLResults', 'MLContext', 'Script', 'Matrix' ] + script_factory_methods + util_methods\nimport os\n@@ -64,6 +64,24 @@ def _get_spark_context():\nelse:\nraise Exception('Expected spark context to be created.')\n+\n+\n+def setBLASPath(path):\n+ \"\"\"\n+ This method useful in the cloud environment where the user\n+ doesnot have sudo permission or where setting environment variables\n+ such as LD_LIBRARY_PATH is difficult.\n+\n+ Parameters\n+ ----------\n+ path: String\n+ Custom path where the BLAS libraries where located.\n+ \"\"\"\n+ sc = _get_spark_context()\n+ sc._jvm.org.apache.sysml.utils.NativeHelper.setBLASPath(path)\n+\n+\n+\n# This is useful utility class to get the output of the driver JVM from within a Jupyter notebook\n# Example usage:\n# with jvm_stdout():\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1630] Allow user to specify custom path to BLAS libraries - This feature is useful in cloud environments where the user does not have sudo permission or where setting environment variables such as LD_LIBRARY_PATH is difficult.
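The loading strategy in the diff is a two-step probe: try an absolute path under the user-supplied directory first, then fall back to the default library search path. A self-contained Java sketch of that pattern follows (the directory and library name are examples only).

```java
import java.io.File;

public class BlasLoader {
    static boolean loadBLAS(String customLibPath, String blas) {
        // Step 1: absolute path under the custom directory, e.g.
        // /opt/blas/libmkl_rt.so on Linux (mapLibraryName adds lib/.so).
        if (customLibPath != null) {
            String libPath = customLibPath + File.separator
                + System.mapLibraryName(blas);
            try {
                System.load(libPath);
                return true;
            } catch (UnsatisfiedLinkError e) {
                System.out.println("Unable to load " + libPath);
            }
        }
        // Step 2: fall back to java.library.path / LD_LIBRARY_PATH.
        try {
            System.loadLibrary(blas);
            return true;
        } catch (UnsatisfiedLinkError e) {
            return false;
        }
    }

    public static void main(String[] args) {
        System.out.println(loadBLAS("/opt/blas", "mkl_rt"));
    }
}
```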
49,736
15.11.2017 10:04:34
28,800
2f87565d8ce5ad385870b8af12f3c3fda53470f2
[MINOR] Updated setBLASPath to provide the BLAS type along with the path
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java", "new_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java", "diff": "@@ -48,7 +48,6 @@ public class NativeHelper {\npublic static String blasType;\nprivate static int maxNumThreads = -1;\nprivate static boolean setMaxNumThreads = false;\n- private static String customLibPath = null;\nstatic {\n// Note: we only support 64 bit Java on x86 and AMD machine\nsupportedArchitectures.put(\"x86_64\", \"x86_64\");\n@@ -59,31 +58,24 @@ public class NativeHelper {\nprivate static String hintOnFailures = \"\";\n- public static void setBLASPath(String path) {\n- customLibPath = path;\n- init(true);\n+ public static void setBLASPath(String customLibPath, String userSpecifiedBLAS) {\n+ init(customLibPath, userSpecifiedBLAS);\n}\n// Performing loading in a method instead of a static block will throw a detailed stack trace in case of fatal errors\n- private static void init(boolean forcedInit) {\n+ private static void init(String customLibPath, String userSpecifiedBLAS) {\n// Only Linux supported for BLAS\nif(!SystemUtils.IS_OS_LINUX)\nreturn;\n// attemptedLoading variable ensures that we don't try to load SystemML and other dependencies\n// again and again especially in the parfor (hence the double-checking with synchronized).\n- if(!attemptedLoading || forcedInit) {\n- DMLConfig dmlConfig = ConfigurationManager.getDMLConfig();\n+ if(!attemptedLoading || customLibPath != null) {\n// -------------------------------------------------------------------------------------\n- // We allow BLAS to be enabled or disabled or explicitly selected in one of the two ways:\n- // 1. DML Configuration: native.blas (boolean flag)\n- // 2. Environment variable: SYSTEMML_BLAS (can be set to mkl, openblas or none)\n- // The option 1 will be removed in later SystemML versions.\n- // The option 2 is useful for two reasons:\n- // - Developer testing of different BLAS\n- // - Provides fine-grained control. Certain machines could use mkl while others use openblas, etc.\n- String userSpecifiedBLAS = (dmlConfig == null) ? \"auto\" : dmlConfig.getTextValue(DMLConfig.NATIVE_BLAS).trim().toLowerCase();\n-\n+ if(userSpecifiedBLAS == null) {\n+ DMLConfig dmlConfig = ConfigurationManager.getDMLConfig();\n+ userSpecifiedBLAS = (dmlConfig == null) ? \"auto\" : dmlConfig.getTextValue(DMLConfig.NATIVE_BLAS).trim().toLowerCase();\n+ }\nif(userSpecifiedBLAS.equals(\"auto\") || userSpecifiedBLAS.equals(\"mkl\") || userSpecifiedBLAS.equals(\"openblas\")) {\nlong start = System.nanoTime();\nif(!supportedArchitectures.containsKey(SystemUtils.OS_ARCH)) {\n@@ -91,24 +83,24 @@ public class NativeHelper {\nreturn;\n}\nsynchronized(NativeHelper.class) {\n- if(!attemptedLoading || forcedInit) {\n+ if(!attemptedLoading || customLibPath != null) {\n// -----------------------------------------------------------------------------\n// =============================================================================\n// By default, we will native.blas=true and we will attempt to load MKL first.\n// If MKL is not enabled then we try to load OpenBLAS.\n// If both MKL and OpenBLAS are not available we fall back to Java BLAS.\nif(userSpecifiedBLAS.equals(\"auto\")) {\n- blasType = isMKLAvailable() ? \"mkl\" : isOpenBLASAvailable() ? \"openblas\" : null;\n+ blasType = isMKLAvailable(customLibPath) ? \"mkl\" : isOpenBLASAvailable(customLibPath) ? 
\"openblas\" : null;\nif(blasType == null)\nLOG.info(\"Unable to load either MKL or OpenBLAS due to \" + hintOnFailures);\n}\nelse if(userSpecifiedBLAS.equals(\"mkl\")) {\n- blasType = isMKLAvailable() ? \"mkl\" : null;\n+ blasType = isMKLAvailable(customLibPath) ? \"mkl\" : null;\nif(blasType == null)\nLOG.info(\"Unable to load MKL due to \" + hintOnFailures);\n}\nelse if(userSpecifiedBLAS.equals(\"openblas\")) {\n- blasType = isOpenBLASAvailable() ? \"openblas\" : null;\n+ blasType = isOpenBLASAvailable(customLibPath) ? \"openblas\" : null;\nif(blasType == null)\nLOG.info(\"Unable to load OpenBLAS due to \" + hintOnFailures);\n}\n@@ -158,7 +150,14 @@ public class NativeHelper {\n}\npublic static boolean isNativeLibraryLoaded() {\n- init(false);\n+ // We allow BLAS to be enabled or disabled or explicitly selected in one of the two ways:\n+ // 1. DML Configuration: native.blas (boolean flag)\n+ // 2. Environment variable: SYSTEMML_BLAS (can be set to mkl, openblas or none)\n+ // The option 1 will be removed in later SystemML versions.\n+ // The option 2 is useful for two reasons:\n+ // - Developer testing of different BLAS\n+ // - Provides fine-grained control. Certain machines could use mkl while others use openblas, etc.\n+ init(null, null);\nif(maxNumThreads == -1)\nmaxNumThreads = OptimizerUtils.getConstrainedNumThreads(-1);\nif(isSystemMLLoaded && !setMaxNumThreads && maxNumThreads != -1) {\n@@ -178,17 +177,17 @@ public class NativeHelper {\n}\n- private static boolean isMKLAvailable() {\n- return loadBLAS(\"mkl_rt\", null);\n+ private static boolean isMKLAvailable(String customLibPath) {\n+ return loadBLAS(customLibPath, \"mkl_rt\", null);\n}\n- private static boolean isOpenBLASAvailable() {\n- if(!loadBLAS(\"gomp\", \"gomp required for loading OpenBLAS-enabled SystemML library\"))\n+ private static boolean isOpenBLASAvailable(String customLibPath) {\n+ if(!loadBLAS(customLibPath, \"gomp\", \"gomp required for loading OpenBLAS-enabled SystemML library\"))\nreturn false;\n- return loadBLAS(\"openblas\", null);\n+ return loadBLAS(customLibPath, \"openblas\", null);\n}\n- private static boolean loadBLAS(String blas, String optionalMsg) {\n+ private static boolean loadBLAS(String customLibPath, String blas, String optionalMsg) {\n// First attempt to load from custom library path\nif(customLibPath != null) {\nString libPath = customLibPath + File.separator + System.mapLibraryName(blas);\n" }, { "change_type": "MODIFY", "old_path": "src/main/python/systemml/mlcontext.py", "new_path": "src/main/python/systemml/mlcontext.py", "diff": "@@ -66,7 +66,7 @@ def _get_spark_context():\n-def setBLASPath(path):\n+def setBLASPath(path, blas='auto'):\n\"\"\"\nThis method useful in the cloud environment where the user\ndoesnot have sudo permission or where setting environment variables\n@@ -75,10 +75,13 @@ def setBLASPath(path):\nParameters\n----------\npath: String\n- Custom path where the BLAS libraries where located.\n+ Custom path to the directory where the BLAS shared libraries are located.\n+\n+ blas: String\n+ Can be auto, openblas or mkl\n\"\"\"\nsc = _get_spark_context()\n- sc._jvm.org.apache.sysml.utils.NativeHelper.setBLASPath(path)\n+ sc._jvm.org.apache.sysml.utils.NativeHelper.setBLASPath(str(path), blas)\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Updated setBLASPath to provide the BLAS type along with the path
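A hypothetical driver-side usage of the updated two-argument entry point, assuming the SystemML jar is on the classpath and /opt/blas is a stand-in directory:

```java
public class CustomBlasExample {
    public static void main(String[] args) {
        // Restrict probing to OpenBLAS under a user-writable directory;
        // equivalent to the Python wrapper's setBLASPath(str(path), blas).
        org.apache.sysml.utils.NativeHelper.setBLASPath("/opt/blas", "openblas");
        System.out.println("native BLAS loaded: "
            + org.apache.sysml.utils.NativeHelper.isNativeLibraryLoaded());
    }
}
```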
49,736
15.11.2017 12:47:40
28,800
1336d32a08d2458476be6bc38fa42367e27c3918
[MINOR] Integrated setting of the BLAS directory with a SystemML configuration property
[ { "change_type": "MODIFY", "old_path": "conf/SystemML-config.xml.template", "new_path": "conf/SystemML-config.xml.template", "diff": "<!-- enables native blas for matrix multiplication and convolution, experimental feature (options: auto, mkl, openblas, none) -->\n<sysml.native.blas>none</sysml.native.blas>\n+ <!-- custom directory where BLAS libraries are available, experimental feature (options: absolute directory path or none). If set to none, we use standard LD_LIBRARY_PATH. -->\n+ <sysml.native.blas.directory>none</sysml.native.blas.directory>\n+\n<!-- prints finegrained statistics information (includes extra GPU information and extra statistics information for Deep Neural Networks done in CP mode) -->\n<sysml.stats.finegrained>false</sysml.stats.finegrained>\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java", "new_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java", "diff": "@@ -30,6 +30,7 @@ import org.apache.sysml.runtime.controlprogram.Program;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.instructions.gpu.context.GPUContext;\nimport org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\n+import org.apache.sysml.utils.NativeHelper;\nimport org.apache.sysml.utils.Statistics;\npublic class ScriptExecutorUtils {\n@@ -77,6 +78,12 @@ public class ScriptExecutorUtils {\nDMLScript.SYNCHRONIZE_GPU = dmlconf.getBooleanValue(DMLConfig.SYNCHRONIZE_GPU);\nDMLScript.EAGER_CUDA_FREE = dmlconf.getBooleanValue(DMLConfig.EAGER_CUDA_FREE);\nDMLScript.STATISTICS_MAX_WRAP_LEN = dmlconf.getIntValue(DMLConfig.STATS_MAX_WRAP_LEN);\n+\n+ String customLibPath = dmlconf.getTextValue(DMLConfig.NATIVE_BLAS_DIR);\n+ if(!customLibPath.equalsIgnoreCase(\"none\")) {\n+ NativeHelper.initializeCustomBLAS(customLibPath, dmlconf.getTextValue(DMLConfig.NATIVE_BLAS));\n+ }\n+\nif(DMLScript.USE_ACCELERATOR) {\nDMLScript.FLOATING_POINT_PRECISION = dmlconf.getTextValue(DMLConfig.FLOATING_POINT_PRECISION);\norg.apache.sysml.runtime.matrix.data.LibMatrixCUDA.resetFloatingPointPrecision();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "diff": "@@ -76,6 +76,7 @@ public class DMLConfig\npublic static final String CP_PARALLEL_IO = \"sysml.cp.parallel.io\";\npublic static final String COMPRESSED_LINALG = \"sysml.compressed.linalg\"; //auto, true, false\npublic static final String NATIVE_BLAS = \"sysml.native.blas\";\n+ public static final String NATIVE_BLAS_DIR = \"sysml.native.blas.directory\";\npublic static final String CODEGEN = \"sysml.codegen.enabled\"; //boolean\npublic static final String CODEGEN_COMPILER = \"sysml.codegen.compiler\"; //see SpoofCompiler.CompilerType\npublic static final String CODEGEN_OPTIMIZER = \"sysml.codegen.optimizer\"; //see SpoofCompiler.PlanSelector\n@@ -130,6 +131,7 @@ public class DMLConfig\n_defaultVals.put(CODEGEN_PLANCACHE, \"true\" );\n_defaultVals.put(CODEGEN_LITERALS, \"1\" );\n_defaultVals.put(NATIVE_BLAS, \"none\" );\n+ _defaultVals.put(NATIVE_BLAS_DIR, \"none\" );\n_defaultVals.put(EXTRA_FINEGRAINED_STATS,\"false\" );\n_defaultVals.put(STATS_MAX_WRAP_LEN, \"30\" );\n_defaultVals.put(GPU_MEMORY_UTILIZATION_FACTOR, \"0.9\" );\n@@ -415,7 +417,7 @@ public class DMLConfig\nLOCAL_TMP_DIR,SCRATCH_SPACE,OPTIMIZATION_LEVEL,\nNUM_REDUCERS, DEFAULT_BLOCK_SIZE,\nYARN_APPMASTER, YARN_APPMASTERMEM, YARN_MAPREDUCEMEM,\n- 
CP_PARALLEL_OPS, CP_PARALLEL_IO, NATIVE_BLAS,\n+ CP_PARALLEL_OPS, CP_PARALLEL_IO, NATIVE_BLAS, NATIVE_BLAS_DIR,\nCOMPRESSED_LINALG,\nCODEGEN, CODEGEN_COMPILER, CODEGEN_OPTIMIZER, CODEGEN_PLANCACHE, CODEGEN_LITERALS,\nEXTRA_FINEGRAINED_STATS, STATS_MAX_WRAP_LEN,\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java", "new_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java", "diff": "@@ -36,6 +36,7 @@ import org.apache.commons.lang.SystemUtils;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.conf.DMLConfig;\nimport org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.runtime.DMLRuntimeException;\n/**\n* This class helps in loading native library.\n@@ -58,9 +59,18 @@ public class NativeHelper {\nprivate static String hintOnFailures = \"\";\n- public static void setBLASPath(String customLibPath, String userSpecifiedBLAS) {\n+ public static void initializeCustomBLAS(String customLibPath, String userSpecifiedBLAS) throws DMLRuntimeException {\n+ if(attemptedLoading && blasType != null && isSupportedBLAS(userSpecifiedBLAS) && !blasType.equalsIgnoreCase(userSpecifiedBLAS) ) {\n+ throw new DMLRuntimeException(\"Cannot replace previously loaded blas \\\"\" + blasType + \"\\\" with \\\"\" + userSpecifiedBLAS + \"\\\".\");\n+ }\n+ else {\ninit(customLibPath, userSpecifiedBLAS);\n}\n+ }\n+\n+ private static boolean isSupportedBLAS(String userSpecifiedBLAS) {\n+ return userSpecifiedBLAS.equalsIgnoreCase(\"auto\") || userSpecifiedBLAS.equalsIgnoreCase(\"mkl\") || userSpecifiedBLAS.equalsIgnoreCase(\"openblas\");\n+ }\n// Performing loading in a method instead of a static block will throw a detailed stack trace in case of fatal errors\nprivate static void init(String customLibPath, String userSpecifiedBLAS) {\n@@ -76,7 +86,7 @@ public class NativeHelper {\nDMLConfig dmlConfig = ConfigurationManager.getDMLConfig();\nuserSpecifiedBLAS = (dmlConfig == null) ? \"auto\" : dmlConfig.getTextValue(DMLConfig.NATIVE_BLAS).trim().toLowerCase();\n}\n- if(userSpecifiedBLAS.equals(\"auto\") || userSpecifiedBLAS.equals(\"mkl\") || userSpecifiedBLAS.equals(\"openblas\")) {\n+ if(isSupportedBLAS(userSpecifiedBLAS)) {\nlong start = System.nanoTime();\nif(!supportedArchitectures.containsKey(SystemUtils.OS_ARCH)) {\nLOG.info(\"Unsupported architecture for native BLAS:\" + SystemUtils.OS_ARCH);\n@@ -89,17 +99,17 @@ public class NativeHelper {\n// By default, we will native.blas=true and we will attempt to load MKL first.\n// If MKL is not enabled then we try to load OpenBLAS.\n// If both MKL and OpenBLAS are not available we fall back to Java BLAS.\n- if(userSpecifiedBLAS.equals(\"auto\")) {\n+ if(userSpecifiedBLAS.equalsIgnoreCase(\"auto\")) {\nblasType = isMKLAvailable(customLibPath) ? \"mkl\" : isOpenBLASAvailable(customLibPath) ? \"openblas\" : null;\nif(blasType == null)\nLOG.info(\"Unable to load either MKL or OpenBLAS due to \" + hintOnFailures);\n}\n- else if(userSpecifiedBLAS.equals(\"mkl\")) {\n+ else if(userSpecifiedBLAS.equalsIgnoreCase(\"mkl\")) {\nblasType = isMKLAvailable(customLibPath) ? \"mkl\" : null;\nif(blasType == null)\nLOG.info(\"Unable to load MKL due to \" + hintOnFailures);\n}\n- else if(userSpecifiedBLAS.equals(\"openblas\")) {\n+ else if(userSpecifiedBLAS.equalsIgnoreCase(\"openblas\")) {\nblasType = isOpenBLASAvailable(customLibPath) ? 
\"openblas\" : null;\nif(blasType == null)\nLOG.info(\"Unable to load OpenBLAS due to \" + hintOnFailures);\n@@ -143,7 +153,7 @@ public class NativeHelper {\nLOG.warn(\"Time to load native blas: \" + timeToLoadInMilliseconds + \" milliseconds.\");\n}\nelse {\n- LOG.debug(\"Using internal Java BLAS as native BLAS support the configuration 'native.blas'=\" + userSpecifiedBLAS + \".\");\n+ LOG.debug(\"Using internal Java BLAS as native BLAS support the configuration 'sysml.native.blas'=\" + userSpecifiedBLAS + \".\");\n}\nattemptedLoading = true;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/python/systemml/mlcontext.py", "new_path": "src/main/python/systemml/mlcontext.py", "diff": "# Methods to create Script object\nscript_factory_methods = [ 'dml', 'pydml', 'dmlFromResource', 'pydmlFromResource', 'dmlFromFile', 'pydmlFromFile', 'dmlFromUrl', 'pydmlFromUrl' ]\n# Utility methods\n-util_methods = [ 'jvm_stdout', '_java2py', 'getHopDAG', 'setBLASPath' ]\n+util_methods = [ 'jvm_stdout', '_java2py', 'getHopDAG' ]\n__all__ = ['MLResults', 'MLContext', 'Script', 'Matrix' ] + script_factory_methods + util_methods\nimport os\n@@ -66,25 +66,6 @@ def _get_spark_context():\n-def setBLASPath(path, blas='auto'):\n- \"\"\"\n- This method useful in the cloud environment where the user\n- doesnot have sudo permission or where setting environment variables\n- such as LD_LIBRARY_PATH is difficult.\n-\n- Parameters\n- ----------\n- path: String\n- Custom path to the directory where the BLAS shared libraries are located.\n-\n- blas: String\n- Can be auto, openblas or mkl\n- \"\"\"\n- sc = _get_spark_context()\n- sc._jvm.org.apache.sysml.utils.NativeHelper.setBLASPath(str(path), blas)\n-\n-\n-\n# This is useful utility class to get the output of the driver JVM from within a Jupyter notebook\n# Example usage:\n# with jvm_stdout():\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1630] [MINOR] Integrated setting of blas directory with SystemML configuration property
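A minimal sketch of how the new `sysml.native.blas.directory` property could be set programmatically, assuming an `MLContext` built from an existing `SparkSession`; the directory path is a placeholder, and `setConfigProperty` is the same mechanism the documentation commit further down demonstrates in Python:

```java
import org.apache.spark.sql.SparkSession;
import org.apache.sysml.api.mlcontext.MLContext;

public class CustomBlasDirExample {
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder()
      .appName("custom-blas-dir").master("local[*]").getOrCreate();
    MLContext ml = new MLContext(spark);
    // Placeholder path: wherever libopenblas/libmkl_rt and their dependencies live.
    ml.setConfigProperty("sysml.native.blas.directory", "/opt/blas-libs");
    // With the default value "none", SystemML keeps resolving BLAS libraries
    // via the standard LD_LIBRARY_PATH instead.
    ml.setConfigProperty("sysml.native.blas", "openblas");
  }
}
```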
49,738
15.11.2017 17:45:25
28,800
2e0c72f0e6916666dd048071a33de43486e6a4d3
[MINOR] Fix explain output (missing line numbers for block w/ writes)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/OutputStatement.java", "new_path": "src/main/java/org/apache/sysml/parser/OutputStatement.java", "diff": "@@ -54,11 +54,13 @@ public class OutputStatement extends Statement\npublic OutputStatement(ParserRuleContext ctx, DataIdentifier t, DataOp op, String filename) {\n_id = t;\n_paramsExpr = new DataExpression(ctx, op, new HashMap<String, Expression>(), filename);\n+ setCtxValuesAndFilename(ctx, filename);\n}\npublic OutputStatement(DataIdentifier t, DataOp op, ParseInfo parseInfo) {\n_id = t;\n_paramsExpr = new DataExpression(op, new HashMap<String, Expression>(), parseInfo);\n+ setParseInfo(parseInfo);\n}\npublic static boolean isValidParamName(String key){\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix explain output (missing line numbers for block w/ writes)
49,736
15.11.2017 19:12:41
28,800
3b4656f5eed84860a65f09c4af2f955d0d112e37
[MINOR] [DOC] Updated the native BLAS documentation. Updated the documentation to suggest using a released version of OpenBLAS. Also corrected the documentation to use 'sysml.native.blas' rather than 'native.blas'. Also explained how to use the 'sysml.native.blas.directory' property.
[ { "change_type": "MODIFY", "old_path": "docs/native-backend.md", "new_path": "docs/native-backend.md", "diff": "@@ -37,7 +37,7 @@ rather than SystemML's internal Java library for performing single-node\noperations such matrix multiplication, convolution, etc.\nTo allow SystemML to use native BLAS rather than internal Java library,\n-please set the configuration property `native.blas` to `auto`.\n+please set the configuration property `sysml.native.blas` to `auto`.\nOther possible options are: `mkl`, `openblas` and `none`.\nThe first two options will only attempt to use the respective BLAS libraries.\n@@ -50,6 +50,8 @@ The current version of SystemML only supports BLAS on **Linux** machines.\n## Step 1: Install BLAS\n+If BLAS is already installed, please skip this step.\n+\n### Option 1: Install Intel MKL\nDownload and install the [community version of Intel MKL](https://software.intel.com/sites/campaigns/nest/).\n@@ -60,14 +62,16 @@ with license key. Since we use MKL DNN primitives, we depend on Intel MKL versio\n### Option 2: Install OpenBLAS\n+The default OpenBLAS (via yum/apt-get) uses its internal threading rather than OpenMP,\n+which can lead to performance degradation when using SystemML. So, instead we recommend that you\n+compile OpenBLAS from the source instead of installing it with `yum` or `apt-get`.\n+\n+The steps to install OpenBLAS v0.2.20:\n+\n```bash\n-# The default OpenBLAS (via yum/apt-get) uses its internal threading rather than OpenMP,\n-# which can lead to performance degradation when using SystemML. So, instead we recommend that you\n-# compile OpenBLAS from the source.\n-# RedHat / CentOS: sudo yum install openblas\n-# Ubuntu: sudo apt-get install openblas\n-git clone https://github.com/xianyi/OpenBLAS.git\n-cd OpenBLAS/\n+wget https://github.com/xianyi/OpenBLAS/archive/v0.2.20.tar.gz\n+tar -xzf v0.2.20.tar.gz\n+cd OpenBLAS-0.2.20/\nmake clean\nmake USE_OPENMP=1\nsudo make install\n@@ -79,20 +83,15 @@ To find the location of `gomp` on your system, please use the command `ldconfig\nIf gomp is available as `/lib64/libgomp.so.1` instead of `/lib64/libgomp.so`,\nplease add a softlink to it:\n-```bash\n-sudo ln -s /lib64/libgomp.so.1 /lib64/libgomp.so\n-```\n-\n-## Step 2: Install other dependencies\n-\n```bash\n# Centos/RedHat\nsudo yum install gcc-c++\n# Ubuntu\nsudo apt-get install g++\n+sudo ln -s /lib64/libgomp.so.1 /lib64/libgomp.so\n```\n-## Step 3: Provide the location of the native libraries\n+## Step 2: Provide the location of the native libraries\n1. Pass the location of the native libraries using command-line options:\n@@ -107,6 +106,22 @@ If you want to use SystemML with Spark, please add the following line to `spark-\nexport LD_LIBRARY_PATH=/path/to/blas-n-other-dependencies\n+In cloud environment where you may not be able to set `LD_LIBRARY_PATH` or `spark.executorEnv.LD_LIBRARY_PATH`\n+before starting spark, you can use set the configuration property `sysml.native.blas.directory`. For example:\n+\n+```python\n+mlCtx.setConfigProperty(\"sysml.native.blas.directory\", \"/path/to/blas-n-other-dependencies\")\n+```\n+\n+## Step 3: Set configuration property to enable native BLAS\n+\n+The configuration property `sysml.native.blas` can be either set in the file `SystemML-config.xml`\n+or using `setConfigProperty` method of `MLContext` or `mllearn` classes. For example:\n+\n+```python\n+mlCtx.setConfigProperty(\"sysml.native.blas\", \"openblas\")\n+```\n+\n## Common issues on Linux\n- Unable to load `gomp`.\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] [DOC] Updated the native BLAS documentation - Updated the documentation to suggest using a released version of OpenBLAS. - Also corrected the documentation to use 'sysml.native.blas' rather than 'native.blas'. - Also explained how to use 'sysml.native.blas.directory' property.
49,736
16.11.2017 14:12:29
28,800
fe9b023c750939f5a3e8e74cbc8202aca9b14fe9
Allow setting and resetting of the sysml.native.blas property for different executions
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java", "new_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java", "diff": "@@ -78,11 +78,7 @@ public class ScriptExecutorUtils {\nDMLScript.SYNCHRONIZE_GPU = dmlconf.getBooleanValue(DMLConfig.SYNCHRONIZE_GPU);\nDMLScript.EAGER_CUDA_FREE = dmlconf.getBooleanValue(DMLConfig.EAGER_CUDA_FREE);\nDMLScript.STATISTICS_MAX_WRAP_LEN = dmlconf.getIntValue(DMLConfig.STATS_MAX_WRAP_LEN);\n-\n- String customLibPath = dmlconf.getTextValue(DMLConfig.NATIVE_BLAS_DIR);\n- if(!customLibPath.equalsIgnoreCase(\"none\")) {\n- NativeHelper.initializeCustomBLAS(customLibPath, dmlconf.getTextValue(DMLConfig.NATIVE_BLAS));\n- }\n+ NativeHelper.initialize(dmlconf.getTextValue(DMLConfig.NATIVE_BLAS_DIR), dmlconf.getTextValue(DMLConfig.NATIVE_BLAS).trim());\nif(DMLScript.USE_ACCELERATOR) {\nDMLScript.FLOATING_POINT_PRECISION = dmlconf.getTextValue(DMLConfig.FLOATING_POINT_PRECISION);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java", "new_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java", "diff": "@@ -24,7 +24,6 @@ import java.io.IOException;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n-import java.util.HashMap;\nimport java.util.Vector;\nimport java.io.InputStream;\nimport java.io.OutputStream;\n@@ -43,86 +42,188 @@ import org.apache.sysml.runtime.DMLRuntimeException;\n* By default, it first tries to load Intel MKL, else tries to load OpenBLAS.\n*/\npublic class NativeHelper {\n- private static boolean isSystemMLLoaded = false;\n+\n+ public static enum NativeBlasState {\n+ NOT_ATTEMPTED_LOADING_NATIVE_BLAS,\n+ SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_IN_USE,\n+ SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_NOT_IN_USE,\n+ ATTEMPTED_LOADING_NATIVE_BLAS_UNSUCCESSFULLY\n+ };\n+\n+ public static NativeBlasState CURRENT_NATIVE_BLAS_STATE = NativeBlasState.NOT_ATTEMPTED_LOADING_NATIVE_BLAS;\n+ private static String blasType;\nprivate static final Log LOG = LogFactory.getLog(NativeHelper.class.getName());\n- private static HashMap<String, String> supportedArchitectures = new HashMap<>();\n- public static String blasType;\n+\n+ // Useful for deciding whether to use native BLAS in parfor environment.\nprivate static int maxNumThreads = -1;\nprivate static boolean setMaxNumThreads = false;\n- static {\n- // Note: we only support 64 bit Java on x86 and AMD machine\n- supportedArchitectures.put(\"x86_64\", \"x86_64\");\n- supportedArchitectures.put(\"amd64\", \"x86_64\");\n- }\n- private static boolean attemptedLoading = false;\n+ /**\n+ * Called by Statistics to print the loaded BLAS.\n+ *\n+ * @return empty string or the BLAS that is loaded\n+ */\n+ public static String getCurrentBLAS() {\n+ return blasType != null ? blasType : \"\";\n+ }\n- private static String hintOnFailures = \"\";\n+ /**\n+ * Called by runtime to check if the BLAS is available for exploitation\n+ *\n+ * @return true if CURRENT_NATIVE_BLAS_STATE is SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_NOT_IN_USE else false\n+ */\n+ public static boolean isNativeLibraryLoaded() {\n+ if(!isBLASLoaded()) {\n+ DMLConfig dmlConfig = ConfigurationManager.getDMLConfig();\n+ String userSpecifiedBLAS = (dmlConfig == null) ? \"auto\" : dmlConfig.getTextValue(DMLConfig.NATIVE_BLAS).trim().toLowerCase();\n+ String customLibPath = (dmlConfig == null) ? 
\"none\" : dmlConfig.getTextValue(DMLConfig.NATIVE_BLAS_DIR).trim().toLowerCase();\n+ performLoading(customLibPath, userSpecifiedBLAS);\n+ }\n+ if(maxNumThreads == -1)\n+ maxNumThreads = OptimizerUtils.getConstrainedNumThreads(-1);\n+ if(CURRENT_NATIVE_BLAS_STATE == NativeBlasState.SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_IN_USE && !setMaxNumThreads && maxNumThreads != -1) {\n+ // This method helps us decide whether to use GetPrimitiveArrayCritical or GetDoubleArrayElements in JNI as each has different tradeoffs.\n+ // In current implementation, we always use GetPrimitiveArrayCritical as it has proven to be fastest.\n+ // We can revisit this decision later and hence I would not recommend removing this method.\n+ setMaxNumThreads(maxNumThreads);\n+ setMaxNumThreads = true;\n+ }\n+ return CURRENT_NATIVE_BLAS_STATE == NativeBlasState.SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_IN_USE;\n+ }\n- public static void initializeCustomBLAS(String customLibPath, String userSpecifiedBLAS) throws DMLRuntimeException {\n- if(attemptedLoading && blasType != null && isSupportedBLAS(userSpecifiedBLAS) && !blasType.equalsIgnoreCase(userSpecifiedBLAS) ) {\n+ /**\n+ * Initialize the native library before executing the DML program\n+ *\n+ * @param customLibPath specified by sysml.native.blas.directory\n+ * @param userSpecifiedBLAS specified by sysml.native.blas\n+ * @throws DMLRuntimeException if error\n+ */\n+ public static void initialize(String customLibPath, String userSpecifiedBLAS) throws DMLRuntimeException {\n+ if(isBLASLoaded() && isSupportedBLAS(userSpecifiedBLAS) && !blasType.equalsIgnoreCase(userSpecifiedBLAS)) {\nthrow new DMLRuntimeException(\"Cannot replace previously loaded blas \\\"\" + blasType + \"\\\" with \\\"\" + userSpecifiedBLAS + \"\\\".\");\n}\n- else {\n- init(customLibPath, userSpecifiedBLAS);\n+ else if(isBLASLoaded() && userSpecifiedBLAS.equalsIgnoreCase(\"none\")) {\n+ CURRENT_NATIVE_BLAS_STATE = NativeBlasState.SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_NOT_IN_USE;\n+ }\n+ else if(isBLASLoaded() && userSpecifiedBLAS.equalsIgnoreCase(blasType)) {\n+ CURRENT_NATIVE_BLAS_STATE = NativeBlasState.SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_IN_USE;\n+ }\n+ else if(!isBLASLoaded() && isSupportedBLAS(userSpecifiedBLAS)) {\n+ performLoading(customLibPath, userSpecifiedBLAS);\n}\n}\n+ /**\n+ * Return true if the given BLAS type is supported.\n+ *\n+ * @param userSpecifiedBLAS BLAS type specified via sysml.native.blas property\n+ * @return true if the userSpecifiedBLAS is auto | mkl | openblas, else false\n+ */\nprivate static boolean isSupportedBLAS(String userSpecifiedBLAS) {\n- return userSpecifiedBLAS.equalsIgnoreCase(\"auto\") || userSpecifiedBLAS.equalsIgnoreCase(\"mkl\") || userSpecifiedBLAS.equalsIgnoreCase(\"openblas\");\n+ return userSpecifiedBLAS.equalsIgnoreCase(\"auto\") ||\n+ userSpecifiedBLAS.equalsIgnoreCase(\"mkl\") ||\n+ userSpecifiedBLAS.equalsIgnoreCase(\"openblas\");\n+ }\n+\n+ /**\n+ * Note: we only support 64 bit Java on x86 and AMD machine\n+ *\n+ * @return true if the hardware architecture is supported\n+ */\n+ private static boolean isSupportedArchitecture() {\n+ if(SystemUtils.OS_ARCH.equals(\"x86_64\") || SystemUtils.OS_ARCH.equals(\"amd64\")) {\n+ return true;\n+ }\n+ LOG.info(\"Unsupported architecture for native BLAS:\" + SystemUtils.OS_ARCH);\n+ return false;\n+ }\n+\n+ /**\n+ * Check if native BLAS libraries have been successfully loaded\n+ * @return true if CURRENT_NATIVE_BLAS_STATE is SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_IN_USE or SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_NOT_IN_USE\n+ 
*/\n+ private static boolean isBLASLoaded() {\n+ return CURRENT_NATIVE_BLAS_STATE == NativeBlasState.SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_IN_USE ||\n+ CURRENT_NATIVE_BLAS_STATE == NativeBlasState.SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_NOT_IN_USE;\n+ }\n+\n+ /**\n+ * Check if we should attempt to perform loading.\n+ * If custom library path is provided, we should attempt to load again if not already loaded.\n+ *\n+ * @param customLibPath custom library path\n+ * @return true if we should attempt to load blas again\n+ */\n+ private static boolean shouldReload(String customLibPath) {\n+ boolean isValidBLASDirectory = customLibPath != null && !customLibPath.equalsIgnoreCase(\"none\");\n+ return CURRENT_NATIVE_BLAS_STATE == NativeBlasState.NOT_ATTEMPTED_LOADING_NATIVE_BLAS ||\n+ (isValidBLASDirectory && !isBLASLoaded());\n}\n// Performing loading in a method instead of a static block will throw a detailed stack trace in case of fatal errors\n- private static void init(String customLibPath, String userSpecifiedBLAS) {\n+ private static void performLoading(String customLibPath, String userSpecifiedBLAS) {\n// Only Linux supported for BLAS\nif(!SystemUtils.IS_OS_LINUX)\nreturn;\n// attemptedLoading variable ensures that we don't try to load SystemML and other dependencies\n// again and again especially in the parfor (hence the double-checking with synchronized).\n- if(!attemptedLoading || customLibPath != null) {\n- // -------------------------------------------------------------------------------------\n- if(userSpecifiedBLAS == null) {\n- DMLConfig dmlConfig = ConfigurationManager.getDMLConfig();\n- userSpecifiedBLAS = (dmlConfig == null) ? \"auto\" : dmlConfig.getTextValue(DMLConfig.NATIVE_BLAS).trim().toLowerCase();\n- }\n- if(isSupportedBLAS(userSpecifiedBLAS)) {\n+ if(shouldReload(customLibPath) && isSupportedBLAS(userSpecifiedBLAS) && isSupportedArchitecture()) {\nlong start = System.nanoTime();\n- if(!supportedArchitectures.containsKey(SystemUtils.OS_ARCH)) {\n- LOG.info(\"Unsupported architecture for native BLAS:\" + SystemUtils.OS_ARCH);\n- return;\n- }\nsynchronized(NativeHelper.class) {\n- if(!attemptedLoading || customLibPath != null) {\n- // -----------------------------------------------------------------------------\n- // =============================================================================\n- // By default, we will native.blas=true and we will attempt to load MKL first.\n- // If MKL is not enabled then we try to load OpenBLAS.\n- // If both MKL and OpenBLAS are not available we fall back to Java BLAS.\n+ if(shouldReload(customLibPath)) {\n+ // Set attempted loading unsuccessful in case of exception\n+ CURRENT_NATIVE_BLAS_STATE = NativeBlasState.ATTEMPTED_LOADING_NATIVE_BLAS_UNSUCCESSFULLY;\n+ String [] blas = new String[] { userSpecifiedBLAS };\nif(userSpecifiedBLAS.equalsIgnoreCase(\"auto\")) {\n- blasType = isMKLAvailable(customLibPath) ? \"mkl\" : isOpenBLASAvailable(customLibPath) ? \"openblas\" : null;\n- if(blasType == null)\n- LOG.info(\"Unable to load either MKL or OpenBLAS due to \" + hintOnFailures);\n- }\n- else if(userSpecifiedBLAS.equalsIgnoreCase(\"mkl\")) {\n- blasType = isMKLAvailable(customLibPath) ? \"mkl\" : null;\n- if(blasType == null)\n- LOG.info(\"Unable to load MKL due to \" + hintOnFailures);\n- }\n- else if(userSpecifiedBLAS.equalsIgnoreCase(\"openblas\")) {\n- blasType = isOpenBLASAvailable(customLibPath) ? 
\"openblas\" : null;\n- if(blasType == null)\n- LOG.info(\"Unable to load OpenBLAS due to \" + hintOnFailures);\n- }\n- else {\n- // Only thrown at development time.\n- throw new RuntimeException(\"Unsupported BLAS:\" + userSpecifiedBLAS);\n- }\n- // =============================================================================\n- if(blasType != null && loadLibraryHelper(\"libsystemml_\" + blasType + \"-Linux-x86_64.so\")) {\n+ blas = new String[] { \"mkl\", \"openblas\" };\n+ }\n+ if(checkAndLoadBLAS(customLibPath, blas) && loadLibraryHelper(\"libsystemml_\" + blasType + \"-Linux-x86_64.so\")) {\n+ LOG.info(\"Using native blas: \" + blasType + getNativeBLASPath());\n+ CURRENT_NATIVE_BLAS_STATE = NativeBlasState.SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_IN_USE;\n+ }\n+ }\n+ }\n+ double timeToLoadInMilliseconds = (System.nanoTime()-start)*1e-6;\n+ if(timeToLoadInMilliseconds > 1000)\n+ LOG.warn(\"Time to load native blas: \" + timeToLoadInMilliseconds + \" milliseconds.\");\n+ }\n+ else if(LOG.isDebugEnabled() && !isSupportedBLAS(userSpecifiedBLAS)) {\n+ LOG.debug(\"Using internal Java BLAS as native BLAS support the configuration 'sysml.native.blas'=\" + userSpecifiedBLAS + \".\");\n+ }\n+ }\n+\n+ private static boolean checkAndLoadBLAS(String customLibPath, String [] listBLAS) {\n+ if(customLibPath != null && customLibPath.equalsIgnoreCase(\"none\"))\n+ customLibPath = null;\n+\n+ boolean isLoaded = false;\n+ for(int i = 0; i < listBLAS.length; i++) {\n+ String blas = listBLAS[i];\n+ if(blas.equalsIgnoreCase(\"mkl\")) {\n+ isLoaded = loadBLAS(customLibPath, \"mkl_rt\", null);\n+ }\n+ else if(blas.equalsIgnoreCase(\"openblas\")) {\n+ boolean isGompLoaded = loadBLAS(customLibPath, \"gomp\", \"gomp required for loading OpenBLAS-enabled SystemML library\");\n+ if(isGompLoaded) {\n+ isLoaded = loadBLAS(customLibPath, \"openblas\", null);\n+ }\n+ }\n+ if(isLoaded) {\n+ blasType = blas;\n+ break;\n+ }\n+ }\n+ return isLoaded;\n+ }\n+\n+ /**\n+ * Useful method for debugging.\n+ *\n+ * @return empty string (if !LOG.isDebugEnabled()) or the path from where openblas or mkl is loaded.\n+ */\n+ private static String getNativeBLASPath() {\nString blasPathAndHint = \"\";\n- // ------------------------------------------------------------\n- // This logic gets the list of native libraries that are loaded\nif(LOG.isDebugEnabled()) {\n// Only perform the checking of library paths when DEBUG is enabled to avoid runtime overhead.\ntry {\n@@ -141,43 +242,7 @@ public class NativeHelper {\nLOG.debug(\"Error while finding list of native libraries:\" + e.getMessage());\n}\n}\n- // ------------------------------------------------------------\n-\n- LOG.info(\"Using native blas: \" + blasType + blasPathAndHint);\n- isSystemMLLoaded = true;\n- }\n- }\n- }\n- double timeToLoadInMilliseconds = (System.nanoTime()-start)*1e-6;\n- if(timeToLoadInMilliseconds > 1000)\n- LOG.warn(\"Time to load native blas: \" + timeToLoadInMilliseconds + \" milliseconds.\");\n- }\n- else {\n- LOG.debug(\"Using internal Java BLAS as native BLAS support the configuration 'sysml.native.blas'=\" + userSpecifiedBLAS + \".\");\n- }\n- attemptedLoading = true;\n- }\n- }\n-\n- public static boolean isNativeLibraryLoaded() {\n- // We allow BLAS to be enabled or disabled or explicitly selected in one of the two ways:\n- // 1. DML Configuration: native.blas (boolean flag)\n- // 2. 
Environment variable: SYSTEMML_BLAS (can be set to mkl, openblas or none)\n- // The option 1 will be removed in later SystemML versions.\n- // The option 2 is useful for two reasons:\n- // - Developer testing of different BLAS\n- // - Provides fine-grained control. Certain machines could use mkl while others use openblas, etc.\n- init(null, null);\n- if(maxNumThreads == -1)\n- maxNumThreads = OptimizerUtils.getConstrainedNumThreads(-1);\n- if(isSystemMLLoaded && !setMaxNumThreads && maxNumThreads != -1) {\n- // This method helps us decide whether to use GetPrimitiveArrayCritical or GetDoubleArrayElements in JNI as each has different tradeoffs.\n- // In current implementation, we always use GetPrimitiveArrayCritical as it has proven to be fastest.\n- // We can revisit this decision later and hence I would not recommend removing this method.\n- setMaxNumThreads(maxNumThreads);\n- setMaxNumThreads = true;\n- }\n- return isSystemMLLoaded;\n+ return blasPathAndHint;\n}\npublic static int getMaxNumThreads() {\n@@ -186,17 +251,14 @@ public class NativeHelper {\nreturn maxNumThreads;\n}\n-\n- private static boolean isMKLAvailable(String customLibPath) {\n- return loadBLAS(customLibPath, \"mkl_rt\", null);\n- }\n-\n- private static boolean isOpenBLASAvailable(String customLibPath) {\n- if(!loadBLAS(customLibPath, \"gomp\", \"gomp required for loading OpenBLAS-enabled SystemML library\"))\n- return false;\n- return loadBLAS(customLibPath, \"openblas\", null);\n- }\n-\n+ /**\n+ * Attempts to load native BLAS\n+ *\n+ * @param customLibPath can be null (if we only want to use LD_LIBRARY_PATH), else the\n+ * @param blas can be gomp, openblas or mkl_rt\n+ * @param optionalMsg message for debugging\n+ * @return true if successfully loaded BLAS\n+ */\nprivate static boolean loadBLAS(String customLibPath, String blas, String optionalMsg) {\n// First attempt to load from custom library path\nif(customLibPath != null) {\n@@ -219,8 +281,6 @@ public class NativeHelper {\nreturn true;\n}\ncatch (UnsatisfiedLinkError e) {\n- if(!hintOnFailures.contains(blas))\n- hintOnFailures = hintOnFailures + blas + \" \";\nif(optionalMsg != null)\nLOG.debug(\"Unable to load \" + blas + \"(\" + optionalMsg + \"):\" + e.getMessage());\nelse\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "new_path": "src/main/java/org/apache/sysml/utils/Statistics.java", "diff": "@@ -774,8 +774,8 @@ public class Statistics\n//show extended caching/compilation statistics\nif( DMLScript.STATISTICS )\n{\n- if(NativeHelper.blasType != null) {\n- String blas = NativeHelper.blasType != null ? NativeHelper.blasType : \"\";\n+ if(NativeHelper.CURRENT_NATIVE_BLAS_STATE == NativeHelper.NativeBlasState.SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_IN_USE) {\n+ String blas = NativeHelper.getCurrentBLAS();\nsb.append(\"Native \" + blas + \" calls (dense mult/conv/bwdF/bwdD):\\t\" + numNativeLibMatrixMultCalls.longValue() + \"/\" +\nnumNativeConv2dCalls.longValue() + \"/\" + numNativeConv2dBwdFilterCalls.longValue()\n+ \"/\" + numNativeConv2dBwdDataCalls.longValue() + \".\\n\");\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1630] Allow setting and resetting of sysml.native.blas property for different execution
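A sketch of the semantics encoded by the state checks in `initialize` above, assuming the first OpenBLAS load succeeds: a loaded native library cannot be unloaded from the JVM, so switching to a different BLAS fails, while disabling and re-enabling the same BLAS only toggles the IN_USE/NOT_IN_USE state:

```java
import org.apache.sysml.runtime.DMLRuntimeException;
import org.apache.sysml.utils.NativeHelper;

public class BlasToggleSketch {
  public static void main(String[] args) throws DMLRuntimeException {
    // Execution 1: load OpenBLAS from the standard library path ("none" = no custom dir).
    NativeHelper.initialize("none", "openblas");
    // Execution 2: native BLAS disabled; the loaded library is kept but the
    // state flips to SUCCESSFULLY_LOADED_NATIVE_BLAS_AND_NOT_IN_USE.
    NativeHelper.initialize("none", "none");
    // Execution 3: re-enabling the same BLAS flips the state back to ..._IN_USE.
    NativeHelper.initialize("none", "openblas");
    try {
      // Requesting a different BLAS after one is loaded fails, because the
      // JVM cannot unload an already loaded native library.
      NativeHelper.initialize("none", "mkl");
    } catch (DMLRuntimeException e) {
      System.err.println(e.getMessage()); // Cannot replace previously loaded blas ...
    }
  }
}
```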
49,736
16.11.2017 14:43:14
28,800
f7bc20266a42929913565d4d80dd07f3a5dbe7de
[MINOR] Added max_pool_backward tests in nn library. These tests compare the output of CP operators with that of GPU operators (i.e. CuDNN).
[ { "change_type": "ADD", "old_path": null, "new_path": "scripts/nn/test/compare_backends/gen_maxpool_bwd.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+Hout = floor(($H + 2 * $pad - $pool) / $stride) + 1\n+Wout = floor(($W + 2 * $pad - $pool) / $stride) + 1\n+\n+X = rand(rows=$N, cols=$C*$H*$W, sparsity=$sp, min=-0.5, max=1)\n+dout = rand(rows=$N, cols=$C*Hout*Wout, sparsity=$sp, min=-0.5, max=1)\n+write(X, \"input.mtx\", format=\"binary\")\n+write(dout, \"dout.mtx\", format=\"binary\")\n" }, { "change_type": "MODIFY", "old_path": "scripts/nn/test/compare_backends/run_tests.sh", "new_path": "scripts/nn/test/compare_backends/run_tests.sh", "diff": "#-------------------------------------------------------------\n# Additional tests to compare the accuracy of different convolution related operators with CuDNN\n-./test_conv2d_bwd_filter.sh\n-./test_conv2d_bwd_data.sh\n-./test_conv2d.sh\n+#./test_conv2d_bwd_filter.sh\n+#./test_conv2d_bwd_data.sh\n+#./test_conv2d.sh\n./test_maxpool.sh\n+./test_maxpool_bwd.sh\n" }, { "change_type": "MODIFY", "old_path": "scripts/nn/test/compare_backends/test_maxpool.sh", "new_path": "scripts/nn/test/compare_backends/test_maxpool.sh", "diff": "@@ -40,7 +40,7 @@ do\n# Running a test in GPU mode\n$SPARK_HOME/bin/spark-submit --jars $jars SystemML.jar -f test_maxpool.dml -stats -gpu force -nvargs stride=$stride pad=$pad out=out_gpu.csv N=$N C=$C H=$H W=$W pool=3\n# Comparing the CPU vs GPU results to make sure they are the same\n- $SPARK_HOME/bin/spark-submit SystemML.jar -f compare.dml -args out_cp.csv out_gpu.csv \"maxpool:stride=\"$stride\",pad=\"$pad\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f compare.dml -args out_cp.csv out_gpu.csv \"maxpool:sparsity=\"$sparsity\",stride=\"$stride\",pad=\"$pad\nrm -rf out_cp.csv out_gpu.csv out_cp.csv.mtd out_gpu.csv.mtd\ndone\ndone\n" }, { "change_type": "ADD", "old_path": null, "new_path": "scripts/nn/test/compare_backends/test_maxpool_bwd.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read(\"input.mtx\")\n+X = max(X, 0)\n+dout = read(\"dout.mtx\")\n+out = max_pool_backward(X, dout, input_shape=[$N,$C,$H,$W], pool_size=[$pool,$pool], stride=[$stride,$stride], padding=[$pad,$pad])\n+write(out, $out, format=\"csv\")\n" }, { "change_type": "ADD", "old_path": null, "new_path": "scripts/nn/test/compare_backends/test_maxpool_bwd.sh", "diff": "+#!/usr/bin/bash\n+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+jars='systemml-*-extra.jar'\n+\n+# N = Number of images, C = number of channels, H = height, W = width\n+N=5\n+C=3\n+H=28\n+W=28\n+for sparsity in 0.1 0.2 0.5 0.6 0.9\n+do\n+ # Generating the data\n+\n+ for stride in 1 2 3\n+ do\n+ for pad in 0 1 2\n+ do\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f gen_maxpool_bwd.dml -nvargs sp=$sparsity N=$N C=$C H=$H W=$W pool=3 stride=$stride pad=$pad\n+ # Running a test in CPU mode\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f test_maxpool_bwd.dml -nvargs stride=$stride pad=$pad out=out_cp.csv N=$N C=$C H=$H W=$W pool=3\n+ # Running a test in GPU mode\n+ $SPARK_HOME/bin/spark-submit --jars $jars SystemML.jar -f test_maxpool_bwd.dml -stats -gpu force -nvargs stride=$stride pad=$pad out=out_gpu.csv N=$N C=$C H=$H W=$W pool=3\n+ # Comparing the CPU vs GPU results to make sure they are the same\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f compare.dml -args out_cp.csv out_gpu.csv \"maxpool_bwd:sparsity=\"$sparsity\",stride=\"$stride\",pad=\"$pad\n+ rm -rf out_cp.csv out_gpu.csv out_cp.csv.mtd out_gpu.csv.mtd\n+ done\n+ done\n+ rm -rf input.mtx input.mtx.mtd\n+done\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Added max_pool_backward tests in nn library - These tests compare the output of CP operators with that of GPU operators (i.e. CuDNN).
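The shell scripts above delegate the actual result check to compare.dml, whose metric and tolerance are not shown in this diff; the sketch below assumes a simple element-wise absolute-difference check with an illustrative epsilon:

```java
public class BackendCompareSketch {
  // True if every cell of the CP and GPU results agrees within eps.
  static boolean allClose(double[][] cp, double[][] gpu, double eps) {
    for (int i = 0; i < cp.length; i++)
      for (int j = 0; j < cp[i].length; j++)
        if (Math.abs(cp[i][j] - gpu[i][j]) > eps)
          return false;
    return true;
  }

  public static void main(String[] args) {
    double[][] cp = {{0.5, 1.25}}, gpu = {{0.5, 1.25 + 1e-12}};
    System.out.println(allClose(cp, gpu, 1e-9)); // true: backends agree within tolerance
  }
}
```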
49,736
16.11.2017 15:53:11
28,800
2ae07e9ce0eba63606a07aabb7af204c61860b97
Fixed a bug in CP relu_maxpool* operations
[ { "change_type": "MODIFY", "old_path": "scripts/nn/test/compare_backends/run_tests.sh", "new_path": "scripts/nn/test/compare_backends/run_tests.sh", "diff": "#-------------------------------------------------------------\n# Additional tests to compare the accuracy of different convolution related operators with CuDNN\n-#./test_conv2d_bwd_filter.sh\n-#./test_conv2d_bwd_data.sh\n-#./test_conv2d.sh\n+./test_conv2d_bwd_filter.sh\n+./test_conv2d_bwd_data.sh\n+./test_conv2d.sh\n./test_maxpool.sh\n./test_maxpool_bwd.sh\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java", "diff": "@@ -416,6 +416,8 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\n}\nelse {\noutputBlock = new MatrixBlock(N, C*P*Q, false).allocateBlock();\n+ if(instOpcode.equalsIgnoreCase(\"relu_maxpooling\"))\n+ params.minValForMaxPoolOperations = 0;\nLibMatrixDNN.maxpooling(matBlock, outputBlock, params);\n}\n}\n@@ -426,6 +428,8 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction {\n}\nelse {\noutputBlock = new MatrixBlock(N, C*H*W, false).allocateBlock();\n+ if(instOpcode.equalsIgnoreCase(\"relu_maxpooling_backward\"))\n+ params.minValForMaxPoolOperations = 0;\nLibMatrixDNN.maxpoolingBackward(matBlock, dout, outputBlock, params,\n!instOpcode.equalsIgnoreCase(\"maxpooling_backward\"));\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/ConvolutionParameters.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/ConvolutionParameters.java", "diff": "@@ -45,6 +45,8 @@ public class ConvolutionParameters implements Serializable\npublic MatrixBlock bias;\npublic int [] start_indexes_h, end_indexes_h, start_indexes_w, end_indexes_w;\n+ public double minValForMaxPoolOperations = -Double.MAX_VALUE;\n+\npublic ConvolutionParameters(long N, long C, long H, long W,\nlong K, long R, long S, long stride_h, long stride_w,\nlong pad_h, long pad_w, int numThreads) throws DMLRuntimeException {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNPoolingHelper.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNPoolingHelper.java", "diff": "@@ -49,9 +49,11 @@ public class LibMatrixDNNPoolingHelper {\ndouble[] in = _params.input1.getDenseBlock();\ndouble[] out = _params.output.getDenseBlock();\n+ double minValForMaxPoolOperations = _params.minValForMaxPoolOperations;\n+\n//thread-local initialization of output block\nif( !(_params.isStride1Pad0() && _params.isAllOnes(P, Q, W)) )\n- Arrays.fill(out, _rl*CPQ, _ru*CPQ, -Double.MAX_VALUE);\n+ Arrays.fill(out, _rl*CPQ, _ru*CPQ, minValForMaxPoolOperations);\nif( _params.isStride1Pad0() && _params.isAllOnes(P, Q, W) ) {\n//quick-path w/o materialized index arrays and\n@@ -59,7 +61,7 @@ public class LibMatrixDNNPoolingHelper {\nint lenh = Math.min(R,H);\nfor(int i = _rl, oix=_rl*C; i < _ru; i++, oix+=C)\nfor (int c = 0, off=i*CHW; c < C; c++, off+=H)\n- out[oix+c] = max(-Double.MAX_VALUE, in, off, lenh);\n+ out[oix+c] = max(minValForMaxPoolOperations, in, off, lenh);\n}\nelse if( _params.isStride1Pad0() ) {\n//quick-path w/o materialized index arrays\n@@ -109,7 +111,7 @@ public class LibMatrixDNNPoolingHelper {\n@Override\npublic Long call() throws Exception {\n//thread-local initialization of output block\n- Arrays.fill(outputArray, _rl *CPQ, 
_ru*CPQ, -Double.MAX_VALUE);\n+ Arrays.fill(outputArray, _rl *CPQ, _ru*CPQ, _params.minValForMaxPoolOperations);\nfor(int n = _rl; n < _ru; n++) {\nif( !_params.input1.sparseBlock.isEmpty(n) ) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-540] Fixed a bug in CP relu_maxpool* operations
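The essence of the fix is the seed of the running maximum: because relu is monotone, max_i relu(x_i) = max(0, max_i x_i), so seeding the pooling scan with 0 instead of -Double.MAX_VALUE realizes the fused relu_maxpooling, and the old seed was wrong whenever a pooling window contained only negative values. A self-contained sketch of that argument:

```java
public class ReluMaxPoolFusion {
  // Plain max pooling over one window, parameterized by the seed value.
  static double maxPool(double[] window, double seed) {
    double max = seed;
    for (double v : window)
      max = Math.max(max, v);
    return max;
  }

  public static void main(String[] args) {
    double[] window = {-3.0, -1.5, -0.2}; // all-negative pooling window
    System.out.println(maxPool(window, -Double.MAX_VALUE)); // -0.2: plain maxpool
    System.out.println(maxPool(window, 0.0));               // 0.0: fused relu+maxpool
  }
}
```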
49,738
17.11.2017 13:21:49
28,800
5e96d2a6a00a34f91e6d9902294f5205e2b037e9
[HOTFIX][SYSTEMML-2015] Fix ctable rehashing of collided entries
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/util/LongLongDoubleHashMap.java", "new_path": "src/main/java/org/apache/sysml/runtime/util/LongLongDoubleHashMap.java", "diff": "@@ -103,7 +103,7 @@ public class LongLongDoubleHashMap\nwhile( e.next!=null ) {\nADoubleEntry tmp = e;\ne = e.next; //tmp.next overwritten on append\n- appendEntry(e.getKey1(), e.getKey2(), tmp);\n+ appendEntry(tmp.getKey1(), tmp.getKey2(), tmp);\n}\nappendEntry(e.getKey1(), e.getKey2(), e);\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX][SYSTEMML-2015] Fix ctable rehashing of collided entries
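The one-token fix above is easy to miss, so here is a self-contained sketch of the failure mode, using a hypothetical single-key node type (the real map chains on two keys): when a collision chain is redistributed during a resize, each node must be re-appended under its own key, but the broken variant used the key of the node's successor, silently merging distinct entries:

```java
import java.util.HashMap;
import java.util.Map;

public class RehashSketch {
  static class Node { final long key; Node next; Node(long k) { key = k; } }

  // Redistribute a collision chain into a resized table.
  static void rehashChain(Node e, Map<Long, Node> newTable) {
    while (e.next != null) {
      Node tmp = e;
      e = e.next;                 // tmp.next gets overwritten on insert
      newTable.put(tmp.key, tmp); // fixed: the broken variant used e.key here
    }
    newTable.put(e.key, e);
  }

  public static void main(String[] args) {
    Node a = new Node(1), b = new Node(2), c = new Node(3);
    a.next = b; b.next = c; // chain of three colliding entries
    Map<Long, Node> table = new HashMap<>();
    rehashChain(a, table);
    // All three keys survive; the broken variant kept only keys 2 and 3,
    // because node 1 was filed under key 2 and node 2 under key 3 (overwritten).
    System.out.println(table.keySet());
  }
}
```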
49,738
17.11.2017 16:49:45
28,800
db3d54c1ce6651f2abf43d3ba919527b943aa8ba
Avoid sparse-dense conversion in codegen outer tpls. This patch generalizes the codegen outer product template to consume, similar to the cell and row templates, generic SideInputs instead of dense matrices as side inputs, which avoids unnecessary conversions from sparse to dense matrices and thus improves performance.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeOuterProduct.java", "new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeOuterProduct.java", "diff": "@@ -31,6 +31,7 @@ public class CNodeOuterProduct extends CNodeTpl\nprivate static final String TEMPLATE =\n\"package codegen;\\n\"\n+ \"import org.apache.sysml.runtime.codegen.LibSpoofPrimitives;\\n\"\n+ + \"import org.apache.sysml.runtime.codegen.SpoofOperator.SideInput;\\n\"\n+ \"import org.apache.sysml.runtime.codegen.SpoofOuterProduct;\\n\"\n+ \"import org.apache.sysml.runtime.codegen.SpoofOuterProduct.OutProdType;\\n\"\n+ \"import org.apache.commons.math3.util.FastMath;\\n\"\n@@ -39,10 +40,10 @@ public class CNodeOuterProduct extends CNodeTpl\n+ \" public %TMP%() {\\n\"\n+ \" super(OutProdType.%TYPE%);\\n\"\n+ \" }\\n\"\n- + \" protected void genexecDense(double a, double[] a1, int a1i, double[] a2, int a2i, double[][] b, double[] scalars, double[] c, int ci, int m, int n, int len, int rix, int cix) { \\n\"\n+ + \" protected void genexecDense(double a, double[] a1, int a1i, double[] a2, int a2i, SideInput[] b, double[] scalars, double[] c, int ci, int m, int n, int len, int rix, int cix) { \\n\"\n+ \"%BODY_dense%\"\n+ \" }\\n\"\n- + \" protected double genexecCellwise(double a, double[] a1, int a1i, double[] a2, int a2i, double[][] b, double[] scalars, int m, int n, int len, int rix, int cix) { \\n\"\n+ + \" protected double genexecCellwise(double a, double[] a1, int a1i, double[] a2, int a2i, SideInput[] b, double[] scalars, int m, int n, int len, int rix, int cix) { \\n\"\n+ \"%BODY_cellwise%\"\n+ \" return %OUT_cellwise%;\\n\"\n+ \" }\\n\"\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOuterProduct.java", "new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOuterProduct.java", "diff": "@@ -82,7 +82,7 @@ public abstract class SpoofOuterProduct extends SpoofOperator\n//input preparation\ndouble[][] ab = getDenseMatrices(prepInputMatrices(inputs, 1, 2, true, false));\n- double[][] b = getDenseMatrices(prepInputMatrices(inputs, 3, true));\n+ SideInput[] b = prepInputMatrices(inputs, 3, false);\ndouble[] scalars = prepInputScalars(scalarObjects);\n//core sequential execute\n@@ -118,7 +118,7 @@ public abstract class SpoofOuterProduct extends SpoofOperator\n//input preparation\ndouble[][] ab = getDenseMatrices(prepInputMatrices(inputs, 1, 2, true, false));\n- double[][] b = getDenseMatrices(prepInputMatrices(inputs, 3, true));\n+ SideInput[] b = prepInputMatrices(inputs, 3, false);\ndouble[] scalars = prepInputScalars(scalarObjects);\n//core sequential execute\n@@ -186,7 +186,7 @@ public abstract class SpoofOuterProduct extends SpoofOperator\n//input preparation\ndouble[][] ab = getDenseMatrices(prepInputMatrices(inputs, 1, 2, true, false));\n- double[][] b = getDenseMatrices(prepInputMatrices(inputs, 3, true));\n+ SideInput[] b = prepInputMatrices(inputs, 3, false);\ndouble[] scalars = prepInputScalars(scalarObjects);\n//core sequential execute\n@@ -268,7 +268,7 @@ public abstract class SpoofOuterProduct extends SpoofOperator\n//input preparation\ndouble[][] ab = getDenseMatrices(prepInputMatrices(inputs, 1, 2, true, false));\n- double[][] b = getDenseMatrices(prepInputMatrices(inputs, 3, true));\n+ SideInput[] b = prepInputMatrices(inputs, 3, false);\ndouble[] scalars = prepInputScalars(scalarObjects);\n//core sequential execute\n@@ -338,13 +338,14 @@ public abstract class SpoofOuterProduct extends 
SpoofOperator\nreturn UtilFunctions.roundToNext(base, k);\n}\n- private void executeDense(double[] a, double[] u, double[] v, double[][] b, double[] scalars,\n+ private void executeDense(double[] a, double[] u, double[] v, SideInput[] b, double[] scalars,\ndouble[] c, int m, int n, int k, OutProdType type, int rl, int ru, int cl, int cu )\n{\n//approach: iterate over non-zeros of w, selective mm computation\n//cache-conscious blocking: due to blocksize constraint (default 1000),\n//a blocksize of 16 allows to fit blocks of UV into L2 cache (256KB)\n+ SideInput[] lb = createSparseSideInputs(b);\nfinal int blocksizeIJ = 16; //u/v block (max at typical L2 size)\nint cix = 0;\n//blocked execution\n@@ -358,18 +359,19 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nfor( int j=bj, vix=bj*k; j<bjmin; j++, vix+=k)\nif( a[ix+j] != 0 ) {\ncix = (type == OutProdType.LEFT_OUTER_PRODUCT) ? vix : uix;\n- genexecDense( a[ix+j], u, uix, v, vix, b, scalars, c, cix, m, n, k, i, j);\n+ genexecDense( a[ix+j], u, uix, v, vix, lb, scalars, c, cix, m, n, k, i, j);\n}\n}\n}\n- private void executeCellwiseDense(double[] a, double[] u, double[] v, double[][] b, double[] scalars,\n+ private void executeCellwiseDense(double[] a, double[] u, double[] v, SideInput[] b, double[] scalars,\ndouble[] c, int m, int n, int k, OutProdType type, int rl, int ru, int cl, int cu )\n{\n//approach: iterate over non-zeros of w, selective mm computation\n//cache-conscious blocking: due to blocksize constraint (default 1000),\n//a blocksize of 16 allows to fit blocks of UV into L2 cache (256KB)\n+ SideInput[] lb = createSparseSideInputs(b);\nfinal int blocksizeIJ = 16; //u/v block (max at typical L2 size)\n//blocked execution\ndouble sum = 0;\n@@ -383,18 +385,19 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nfor( int j=bj, vix=bj*k; j<bjmin; j++, vix+=k)\nif( a[ix+j] != 0 ) {\nif(type == OutProdType.CELLWISE_OUTER_PRODUCT)\n- c[ix+j] = genexecCellwise( a[ix+j], u, uix, v, vix, b, scalars, m, n, k, i, j );\n+ c[ix+j] = genexecCellwise( a[ix+j], u, uix, v, vix, lb, scalars, m, n, k, i, j );\nelse\n- sum += genexecCellwise( a[ix+j], u, uix, v, vix, b, scalars, m, n, k, i, j);\n+ sum += genexecCellwise( a[ix+j], u, uix, v, vix, lb, scalars, m, n, k, i, j);\n}\n}\nif( type != OutProdType.CELLWISE_OUTER_PRODUCT )\nc[0] = sum;\n}\n- private void executeSparse(SparseBlock sblock, double[] u, double[] v, double[][] b, double[] scalars,\n+ private void executeSparse(SparseBlock sblock, double[] u, double[] v, SideInput[] b, double[] scalars,\ndouble[] c, int m, int n, int k, long nnz, OutProdType type, int rl, int ru, int cl, int cu)\n{\n+ SideInput[] lb = createSparseSideInputs(b);\nboolean left = (_outerProductType== OutProdType.LEFT_OUTER_PRODUCT);\n//approach: iterate over non-zeros of w, selective mm computation\n@@ -420,7 +423,7 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nint index = (cl==0||sblock.isEmpty(i)) ? 0 : sblock.posFIndexGTE(i,cl);\nindex = wpos + ((index>=0) ? index : n);\nfor( ; index<wpos+wlen && wix[index]<cu; index++ ) {\n- genexecDense(wval[index], u, uix, v, wix[index]*k, b, scalars, c,\n+ genexecDense(wval[index], u, uix, v, wix[index]*k, lb, scalars, c,\n(left ? 
wix[index]*k : uix), m, n, k, i, wix[index]);\n}\n}\n@@ -454,7 +457,7 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nint index = wpos + curk[i-bi];\nfor( ; index<wpos+wlen && wix[index]<bjmin; index++ ) {\n- genexecDense(wval[index], u, uix, v, wix[index]*k, b, scalars, c,\n+ genexecDense(wval[index], u, uix, v, wix[index]*k, lb, scalars, c,\n(left ? wix[index]*k : uix), m, n, k, i, wix[index]);\n}\ncurk[i-bi] = index - wpos;\n@@ -464,9 +467,10 @@ public abstract class SpoofOuterProduct extends SpoofOperator\n}\n}\n- private void executeCellwiseSparse(SparseBlock sblock, double[] u, double[] v, double[][] b, double[] scalars,\n+ private void executeCellwiseSparse(SparseBlock sblock, double[] u, double[] v, SideInput[] b, double[] scalars,\nMatrixBlock out, int m, int n, int k, long nnz, OutProdType type, int rl, int ru, int cl, int cu )\n{\n+ SideInput[] lb = createSparseSideInputs(b);\nfinal int blocksizeIJ = (int) (8L*m*n/nnz);\nint[] curk = new int[Math.min(blocksizeIJ, ru-rl)];\n@@ -491,11 +495,11 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nif( type == OutProdType.CELLWISE_OUTER_PRODUCT )\nfor( ; index<wpos+wlen && wix[index]<bjmin; index++ )\nc[wix[index]] = genexecCellwise( wval[index],\n- u, uix, v, wix[index]*k, b, scalars, m, n, k, i, wix[index] );\n+ u, uix, v, wix[index]*k, lb, scalars, m, n, k, i, wix[index] );\nelse\nfor( ; index<wpos+wlen && wix[index]<bjmin; index++ )\ntmp += genexecCellwise( wval[index],\n- u, uix, v, wix[index]*k, b, scalars, m, n, k, i, wix[index]);\n+ u, uix, v, wix[index]*k, lb, scalars, m, n, k, i, wix[index]);\ncurk[i-bi] = index - wpos;\n}\n}\n@@ -522,7 +526,7 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nint index = wpos + curk[i-bi];\nfor( ; index<wpos+wlen && wix[index]<bjmin; index++ ) {\nc.append(i, wix[index], genexecCellwise( wval[index], u, uix, v,\n- wix[index]*k, b, scalars, m, n, k, i, wix[index] ));\n+ wix[index]*k, lb, scalars, m, n, k, i, wix[index] ));\n}\ncurk[i-bi] = index - wpos;\n}\n@@ -531,9 +535,10 @@ public abstract class SpoofOuterProduct extends SpoofOperator\n}\n}\n- private void executeCompressed(CompressedMatrixBlock a, double[] u, double[] v, double[][] b, double[] scalars,\n+ private void executeCompressed(CompressedMatrixBlock a, double[] u, double[] v, SideInput[] b, double[] scalars,\ndouble[] c, int m, int n, int k, OutProdType type, int rl, int ru, int cl, int cu)\n{\n+ SideInput[] lb = createSparseSideInputs(b);\nboolean left = (_outerProductType==OutProdType.LEFT_OUTER_PRODUCT);\nIterator<IJV> iter = !left ? a.getIterator(rl, ru, false) :\n@@ -542,14 +547,15 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nIJV cell = iter.next();\nint uix = cell.getI() * k;\nint vix = cell.getJ() * k;\n- genexecDense(cell.getV(), u, uix, v, vix, b, scalars, c,\n+ genexecDense(cell.getV(), u, uix, v, vix, lb, scalars, c,\nleft ? 
vix : uix, m, n, k, cell.getI(), cell.getJ());\n}\n}\n- private void executeCellwiseCompressed(CompressedMatrixBlock a, double[] u, double[] v, double[][] b, double[] scalars,\n+ private void executeCellwiseCompressed(CompressedMatrixBlock a, double[] u, double[] v, SideInput[] b, double[] scalars,\nMatrixBlock out, int m, int n, int k, OutProdType type, int rl, int ru, int cl, int cu )\n{\n+ SideInput[] lb = createSparseSideInputs(b);\ndouble[] c = out.getDenseBlock();\nSparseBlock csblock = out.getSparseBlock();\n@@ -562,23 +568,23 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nif( out.isInSparseFormat() ) {\ncsblock.allocate(cell.getI());\ncsblock.append(cell.getI(), cell.getJ(),\n- genexecCellwise(cell.getV(), u, uix, v, vix, b, scalars, m, n, k, cell.getI(), cell.getJ()));\n+ genexecCellwise(cell.getV(), u, uix, v, vix, lb, scalars, m, n, k, cell.getI(), cell.getJ()));\n}\nelse {\nc[cell.getI()*n+cell.getJ()] =\n- genexecCellwise(cell.getV(), u, uix, v, vix, b, scalars, m, n, k, cell.getI(), cell.getJ());\n+ genexecCellwise(cell.getV(), u, uix, v, vix, lb, scalars, m, n, k, cell.getI(), cell.getJ());\n}\n}\nelse {\n- c[0] += genexecCellwise(cell.getV(), u, uix, v, vix, b, scalars, m, n, k, cell.getI(), cell.getJ());\n+ c[0] += genexecCellwise(cell.getV(), u, uix, v, vix, lb, scalars, m, n, k, cell.getI(), cell.getJ());\n}\n}\n}\n- protected abstract void genexecDense( double a, double[] u, int ui, double[] v, int vi, double[][] b,\n+ protected abstract void genexecDense( double a, double[] u, int ui, double[] v, int vi, SideInput[] b,\ndouble[] scalars, double[] c, int ci, int m, int n, int k, int rowIndex, int colIndex);\n- protected abstract double genexecCellwise( double a, double[] u, int ui, double[] v, int vi, double[][] b,\n+ protected abstract double genexecCellwise( double a, double[] u, int ui, double[] v, int vi, SideInput[] b,\ndouble[] scalars, int m, int n, int k, int rowIndex, int colIndex);\nprivate class ParExecTask implements Callable<Long>\n@@ -586,7 +592,7 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nprivate final MatrixBlock _a;\nprivate final double[] _u;\nprivate final double[] _v;\n- private final double[][] _b;\n+ private final SideInput[] _b;\nprivate final double[] _scalars;\nprivate final MatrixBlock _c;\nprivate final int _clen;\n@@ -598,7 +604,7 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nprivate final int _cl;\nprivate final int _cu;\n- protected ParExecTask( MatrixBlock a, double[] u, double[] v, double[][] b, double[] scalars , MatrixBlock c, int m, int n, int k, OutProdType type, int rl, int ru, int cl, int cu ) {\n+ protected ParExecTask( MatrixBlock a, double[] u, double[] v, SideInput[] b, double[] scalars , MatrixBlock c, int m, int n, int k, OutProdType type, int rl, int ru, int cl, int cu ) {\n_a = a;\n_u = u;\n_v = v;\n@@ -653,7 +659,7 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nprivate final MatrixBlock _a;\nprivate final double[] _u;\nprivate final double[] _v;\n- private final double[][] _b;\n+ private final SideInput[] _b;\nprivate final double[] _scalars;\nprivate final int _rlen;\nprivate final int _clen;\n@@ -664,7 +670,7 @@ public abstract class SpoofOuterProduct extends SpoofOperator\nprivate final int _cl;\nprivate final int _cu;\n- protected ParOuterProdAggTask( MatrixBlock a, double[] u, double[] v, double[][] b, double[] scalars, int m, int n, int k, OutProdType type, int rl, int ru, int cl, int cu ) {\n+ protected ParOuterProdAggTask( MatrixBlock 
a, double[] u, double[] v, SideInput[] b, double[] scalars, int m, int n, int k, OutProdType type, int rl, int ru, int cl, int cu ) {\n_a = a;\n_u = u;\n_v = v;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2020] Avoid sparse-dense conversion in codegen outer tpls This patch generalizes the codegen outer product template to consume - similar to the cell and row templates - generic SideInputs instead of dense matrices as side input, which avoids unnecessary conversions from sparse to dense matrices and thus improves performance.
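The point of the SideInput indirection is that generated operators read side-input cells through an abstraction rather than a dense double[][], so sparse inputs never have to be densified up front. The sketch below only illustrates the idea; the CellSource interface is hypothetical and is not SystemML's actual SideInput API:

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for the side-input abstraction: callers ask for a cell
// and each implementation answers from its own representation.
interface CellSource {
  double get(int r, int c);
}

class DenseSource implements CellSource {
  private final double[] data; private final int cols;
  DenseSource(double[] data, int cols) { this.data = data; this.cols = cols; }
  public double get(int r, int c) { return data[r * cols + c]; }
}

class SparseSource implements CellSource {
  private final Map<Long, Double> cells = new HashMap<>();
  private final int cols;
  SparseSource(int cols) { this.cols = cols; }
  void set(int r, int c, double v) { cells.put((long) r * cols + c, v); }
  // Zero cells are never materialized, which is the benefit over densifying.
  public double get(int r, int c) { return cells.getOrDefault((long) r * cols + c, 0d); }
}

public class SideInputSketch {
  public static void main(String[] args) {
    CellSource dense = new DenseSource(new double[] {1, 2, 3, 4}, 2);
    SparseSource sparse = new SparseSource(1000);
    sparse.set(0, 999, 7.0);
    System.out.println(dense.get(1, 1) + " " + sparse.get(0, 999) + " " + sparse.get(0, 0));
  }
}
```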
49,738
20.11.2017 12:46:51
28,800
c3d565b881d16b6716a9dc36ac484139049a04cd
[HOTFIX][SYSTEMML-2020] Fix case-sensitive names of new test scripts
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/SparseSideInputTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/SparseSideInputTest.java", "diff": "@@ -35,7 +35,7 @@ import org.apache.sysml.test.utils.TestUtils;\npublic class SparseSideInputTest extends AutomatedTestBase\n{\n- private static final String TEST_NAME = \"sparseSideInput\";\n+ private static final String TEST_NAME = \"SparseSideInput\";\nprivate static final String TEST_NAME1 = TEST_NAME+\"1\"; //row sum(X/rowSums(X)+Y)\nprivate static final String TEST_NAME2 = TEST_NAME+\"2\"; //cell sum(abs(X^2)+Y)\nprivate static final String TEST_NAME3 = TEST_NAME+\"3\"; //magg sum(X^2), sum(X+Y)\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX][SYSTEMML-2020] Fix case-sensitive names of new test scripts
49,768
22.11.2017 12:25:06
28,800
6bd2d880813f6dd82e45a7f24fcd14f692a16eb1
[SYSTEMML][MINOR] Fix classpath in release script
[ { "change_type": "MODIFY", "old_path": "dev/release/release-verify.sh", "new_path": "dev/release/release-verify.sh", "diff": "@@ -158,7 +158,7 @@ cd $EXEC_DIR/src/test/java\nif [[ \"$COMPILE_CODE\" == \"true\" ]]; then\necho \"`date +%Y-%m-%dT%H:%M:%S`: INFO: Compiling release verify utility...\"\n- javac -Xlint:unchecked -classpath ../../../../..//target/lib/commons-compress-1.4.1.jar:../../../../..//target/lib/commons-io-2.4.jar:. org/apache/sysml/validation/*.java\n+ javac -Xlint:unchecked -classpath ../../../target/release/systemml/target/lib/commons-compress-1.4.1.jar:../../../target/release/systemml/target/lib/commons-io-2.4.jar:. org/apache/sysml/validation/*.java\ncd \"$ORIG_DIR\" # Return to directory from it was called.\nexit 0\n@@ -182,7 +182,7 @@ fi\nif [[ \"$LIC_NOTICE_VERIFY\" == \"true\" ]]; then\necho \"`date +%Y-%m-%dT%H:%M:%S`: INFO: Verifying license and notices from zip/tgz/tar.gz files...\"\n- java -classpath ../../../../..//target/lib/commons-compress-1.4.1.jar:../../../../..//target/lib/commons-io-2.4.jar:. org/apache/sysml/validation/ValidateLicAndNotice $DIST_DIR\n+ java -classpath ../../../target/release/systemml/target/lib/commons-compress-1.4.1.jar:../../../target/release/systemml/target/lib/commons-io-2.4.jar:. org/apache/sysml/validation/ValidateLicAndNotice $DIST_DIR\nRET_CODE=$?\nif [[ $RET_CODE == 0 ]]; then\necho \"`date +%Y-%m-%dT%H:%M:%S`: INFO: Verification of license and notices completed successfully.\"\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML][MINOR] Fix classpath in release script
49,738
22.11.2017 15:16:42
28,800
38162d232e22aa189003428d0afbcc04712b608d
[MINOR] Fix test resource leaks to address intermittent test failures
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/JMLCInputStreamReadTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/JMLCInputStreamReadTest.java", "diff": "@@ -142,6 +142,7 @@ public class JMLCInputStreamReadTest extends AutomatedTestBase\n//read matrix from input stream\nFileInputStream fis = new FileInputStream(output(\"X\"));\ndouble[][] data2 = conn.convertToDoubleMatrix(fis, rows, cols, format);\n+ fis.close();\n//compare matrix result\nTestUtils.compareMatrices(data, data2, rows, cols, 0);\n@@ -164,6 +165,7 @@ public class JMLCInputStreamReadTest extends AutomatedTestBase\n//read frame from input stream\nFileInputStream fis = new FileInputStream(output(\"X\"));\nString[][] fdata2 = conn.convertToStringFrame(fis, rows, cols, format);\n+ fis.close();\n//compare frame result\nTestUtils.compareFrames(fdata, fdata2, rows, cols);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextTest.java", "diff": "@@ -33,7 +33,7 @@ import static org.junit.Assert.assertTrue;\nimport java.io.File;\nimport java.io.FileInputStream;\n-import java.io.FileNotFoundException;\n+import java.io.IOException;\nimport java.io.InputStream;\nimport java.net.MalformedURLException;\nimport java.net.URL;\n@@ -123,24 +123,26 @@ public class MLContextTest extends MLContextTestBase {\n}\n@Test\n- public void testCreateDMLScriptBasedOnInputStreamAndExecute() throws FileNotFoundException {\n+ public void testCreateDMLScriptBasedOnInputStreamAndExecute() throws IOException {\nSystem.out.println(\"MLContextTest - create DML script based on InputStream and execute\");\nsetExpectedStdOut(\"hello world\");\nFile file = new File(baseDirectory + File.separator + \"hello-world.dml\");\n- InputStream is = new FileInputStream(file);\n+ try( InputStream is = new FileInputStream(file) ) {\nScript script = dmlFromInputStream(is);\nml.execute(script);\n}\n+ }\n@Test\n- public void testCreatePYDMLScriptBasedOnInputStreamAndExecute() throws FileNotFoundException {\n+ public void testCreatePYDMLScriptBasedOnInputStreamAndExecute() throws IOException {\nSystem.out.println(\"MLContextTest - create PYDML script based on InputStream and execute\");\nsetExpectedStdOut(\"hello world\");\nFile file = new File(baseDirectory + File.separator + \"hello-world.pydml\");\n- InputStream is = new FileInputStream(file);\n+ try( InputStream is = new FileInputStream(file) ) {\nScript script = pydmlFromInputStream(is);\nml.execute(script);\n}\n+ }\n@Test\npublic void testCreateDMLScriptBasedOnLocalFileAndExecute() {\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/utils/TestUtils.java", "new_path": "src/test/java/org/apache/sysml/test/utils/TestUtils.java", "diff": "@@ -570,12 +570,8 @@ public class TestUtils\n}\nelse if ( count > 1 ) {\nFile tmp = new File(csvFile+\"_temp.csv\");\n- OutputStreamWriter out = null;\n-\n- try {\n- out = new OutputStreamWriter(new FileOutputStream(tmp),\n- \"UTF-8\");\n+ try( OutputStreamWriter out = new OutputStreamWriter(new FileOutputStream(tmp), \"UTF-8\") ) {\n// Directory listing may contain .crc files or may be in the\n// wrong order. 
Sanitize the list of names.\nArrayList<String> partNames = new ArrayList<String>();\n@@ -594,8 +590,6 @@ public class TestUtils\n\"UTF-8\");\nout.append(fileContents);\n}\n- } finally {\n- IOUtilFunctions.closeSilently(out);\n}\ncsvFile = tmp.getCanonicalPath();\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix test resource leaks to address intermittent test failures
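The leak fixes above all apply the same Java 7 try-with-resources idiom. A minimal runnable sketch of that pattern (the file path and reader logic are illustrative assumptions, not taken from the patch):

```java
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

public class ResourceSketch {
    // Reads the first line of a file; the reader is closed on every
    // exit path (normal return, early return, or exception).
    public static String firstLine(String path) throws IOException {
        try (BufferedReader in = new BufferedReader(new FileReader(path))) {
            return in.readLine();
        } // in.close() runs automatically here
    }

    public static void main(String[] args) throws IOException {
        System.out.println(firstLine("pom.xml")); // any readable file
    }
}
```

The close happens on every exit path, including exceptions, which the unclosed FileInputStream calls in the old test code did not guarantee.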
49,736
29.11.2017 10:23:20
28,800
b9831f31a977d4e18c9195fd0faa579e069a8c72
[MINOR] Turn on the parallel flush of jvm stdout by default.
[ { "change_type": "MODIFY", "old_path": "src/main/python/systemml/classloader.py", "new_path": "src/main/python/systemml/classloader.py", "diff": "@@ -57,8 +57,8 @@ def get_spark_context():\n_in_jvm_stdout = False\ndefault_jvm_stdout = True\n-default_jvm_stdout_parallel_flush = False\n-def set_default_jvm_stdout(enable, parallel_flush=False):\n+default_jvm_stdout_parallel_flush = True\n+def set_default_jvm_stdout(enable, parallel_flush=True):\n\"\"\"\nThis is useful utility method to get the output of the driver JVM from within a Jupyter notebook\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Turn on the parallel flush of jvm stdout by default.
49,736
29.11.2017 15:08:18
28,800
4416b5e51d4cabf419e4a989737a4bf086cb9703
[MINOR] [DOC] Updated the deep learning documentation. Scale the input features for training LeNet using Keras. Added a table to highlight differences between the three APIs.
[ { "change_type": "MODIFY", "old_path": "docs/deep-learning.md", "new_path": "docs/deep-learning.md", "diff": "@@ -27,11 +27,67 @@ limitations under the License.\n<br/>\n+# Introduction\n+\nThere are three different ways to implement a Deep Learning model in SystemML:\n1. Using the [DML-bodied NN library](https://github.com/apache/systemml/tree/master/scripts/nn): This library allows the user to exploit full flexibility of [DML language](http://apache.github.io/systemml/dml-language-reference) to implement your neural network.\n2. Using the experimental [Caffe2DML API](http://apache.github.io/systemml/beginners-guide-caffe2dml.html): This API allows a model expressed in Caffe's proto format to be imported into SystemML. This API **doesnot** require Caffe to be installed on your SystemML.\n-3. Using the experimental [Keras2DML API](http://apache.github.io/systemml/beginners-guide-keras2dml.html): This API allows a model expressed in Keras to be imported into SystemML. However, this API requires Keras to be installed on your driver.\n+3. Using the experimental [Keras2DML API](http://apache.github.io/systemml/beginners-guide-keras2dml.html): This API allows a model expressed in Keras's functional API to be imported into SystemML. However, this API requires Keras to be installed on your driver.\n+\n+| | NN library | Caffe2DML | Keras2DML |\n+|------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------|\n+| External dependency | None | None | Keras |\n+| Ability to add custom layers | Yes | No | No |\n+| The user needs to know | [DML](http://apache.github.io/systemml/dml-language-reference) | [Caffe's proto API](http://apache.github.io/systemml/reference-guide-caffe2dml#layers-supported-in-caffe2dml) | [Keras' functional API](https://keras.io/getting-started/functional-api-guide/) |\n+| Can be invoked using pyspark | Yes. Please see [Python MLContext API](http://apache.github.io/systemml/spark-mlcontext-programming-guide) | Yes. | Yes. |\n+| Can be invoked using spark-shell | Yes. Please see [Scala MLContext API](http://apache.github.io/systemml/spark-mlcontext-programming-guide) | Limited support | No |\n+| Can be invoked via command-line or JMLC API | Yes | No | No |\n+| GPU and [native BLAS](http://apache.github.io/systemml/native-backend.html) support | Yes | Yes | Yes |\n+| Part of SystemML's [mllearn](http://apache.github.io/systemml/python-reference.html#mllearn-api) API | No | Yes | Yes |\n+\n+## mllearn API\n+\n+Before we go any further, let us briefly discuss the training and prediction functions in the mllearn API (i.e. 
Caffe2DML and Keras2DML).\n+### Training functions\n+\n+<div class=\"codetabs\">\n+<div data-lang=\"sklearn way\" markdown=\"1\">\n+{% highlight python %}\n+# Input: Two Python objects (X_train, y_train) of type numpy, pandas or scipy.\n+model.fit(X_train, y_train)\n+{% endhighlight %}\n+</div>\n+<div data-lang=\"mllib way\" markdown=\"1\">\n+{% highlight python %}\n+# Input: One LabeledPoint DataFrame with atleast two columns: features (of type Vector) and labels.\n+model.fit(X_df)\n+{% endhighlight %}\n+</div>\n+</div>\n+\n+### Prediction functions\n+\n+<div class=\"codetabs\">\n+<div data-lang=\"sklearn way\" markdown=\"1\">\n+{% highlight python %}\n+# Input: One Python object (X_test) of type numpy, pandas or scipy.\n+model.predict(X_test)\n+# OR model.score(X_test, y_test)\n+{% endhighlight %}\n+</div>\n+<div data-lang=\"mllib way\" markdown=\"1\">\n+{% highlight python %}\n+# Input: One LabeledPoint DataFrame (df_test) with atleast one column: features (of type Vector).\n+model.transform(df_test)\n+{% endhighlight %}\n+</div>\n+</div>\n+\n+Please note that when training using mllearn API (i.e. `model.fit(X_df)`), SystemML\n+expects that labels have been converted to 1-based value.\n+This avoids unnecessary decoding overhead for large dataset if the label columns has already been decoded.\n+For scikit-learn API, there is no such requirement.\n# Training Lenet on the MNIST dataset\n@@ -144,6 +200,11 @@ x = Dense(10, activation='softmax')(x)\nkeras_model = Model(input_img, x)\nkeras_model.summary()\n+# Scale the input features\n+scale = 0.00390625\n+X_train = X_train*scale\n+X_test = X_test*scale\n+\nfrom systemml.mllearn import Keras2DML\nsysml_model = Keras2DML(spark, keras_model, input_shape=(1,28,28), weights='weights_dir')\n# sysml_model.setConfigProperty(\"sysml.native.blas\", \"auto\")\n@@ -183,7 +244,7 @@ from keras.applications.resnet50 import preprocess_input, decode_predictions, Re\nmodel = ResNet50(weights='imagenet',include_top=True,pooling='None',input_shape=(224,224,3))\nmodel.compile(optimizer='sgd', loss= 'categorical_crossentropy')\n-resnet = Keras2DML(spark,model,input_shape=(3,224,224), weights='tmp', labels='https://raw.githubusercontent.com/apache/systemml/master/scripts/nn/examples/caffe2dml/models/imagenet/labels.txt')\n+resnet = Keras2DML(spark,model,input_shape=(3,224,224), weights='weights_dir', labels='https://raw.githubusercontent.com/apache/systemml/master/scripts/nn/examples/caffe2dml/models/imagenet/labels.txt')\nresnet.summary()\nurllib.urlretrieve('https://upload.wikimedia.org/wikipedia/commons/f/f4/Cougar_sitting.jpg', 'test.jpg')\nimg_shape = (3, 224, 224)\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] [DOC] Updated the deep learning documentation - Scale the input features for training lenet using Keras. - Added table to highlight differences between three APIs.
49,738
29.11.2017 20:28:31
28,800
78586a13114b9fed585060e2ab8976ba6f9b50bd
Fix robustness of compression w/ incompressible data. This patch improves the robustness of matrix compression by returning the uncompressed block if all columns are incompressible or the compression ratio is below 1. This avoids size and computation overhead from an otherwise unnecessary indirection to a single uncompressed column group.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/CompressedMatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/CompressedMatrixBlock.java", "diff": "@@ -214,21 +214,23 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\n* +per column sparsity\n*\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n+ * @return compressed matrix block or original block if incompressible\n*/\n- public void compress()\n+ public MatrixBlock compress()\nthrows DMLRuntimeException\n{\n//default sequential execution\n- compress(1);\n+ return compress(1);\n}\n/**\n* Compress block.\n*\n* @param k number of threads\n+ * @return compressed matrix block or original block if incompressible\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\n- public void compress(int k)\n+ public MatrixBlock compress(int k)\nthrows DMLRuntimeException\n{\n//check for redundant compression\n@@ -305,6 +307,12 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\nLOG.debug(\"--compression phase 1: \"+_stats.timePhase1);\n}\n+ if( colsC.isEmpty() ) {\n+ if( LOG.isDebugEnabled() )\n+ LOG.debug(\"Abort block compression because all columns are incompressible.\");\n+ return new MatrixBlock().copyShallow(this);\n+ }\n+\n// PHASE 2: Grouping columns\n// Divide the bitmap columns into column groups.\nList<int[]> bitmapColGrps = PlanningCoCoder.findCocodesByPartitioning(\n@@ -366,6 +374,12 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\n_stats.size = estimateCompressedSizeInMemory();\n_stats.ratio= estimateSizeInMemory() / _stats.size;\n+ if( _stats.ratio < 1 ) {\n+ if( LOG.isDebugEnabled() )\n+ LOG.debug(\"Abort block compression because compression ratio is less than 1.\");\n+ return new MatrixBlock().copyShallow(this);\n+ }\n+\n//final cleanup (discard uncompressed block)\nrawblock.cleanupBlock(true, true);\nthis.cleanupBlock(true, true);\n@@ -382,6 +396,8 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\nLOG.debug(\"--compressed size: \"+_stats.size);\nLOG.debug(\"--compression ratio: \"+_stats.ratio);\n}\n+\n+ return this;\n}\npublic CompressionStatistics getCompressionStatistics() {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/CompressionCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/CompressionCPInstruction.java", "diff": "@@ -53,11 +53,11 @@ public class CompressionCPInstruction extends UnaryCPInstruction {\nMatrixBlock in = ec.getMatrixInput(input1.getName(), getExtendedOpcode());\n//compress the matrix block\n- CompressedMatrixBlock cmb = new CompressedMatrixBlock(in);\n- cmb.compress(OptimizerUtils.getConstrainedNumThreads(-1));\n+ MatrixBlock out = new CompressedMatrixBlock(in)\n+ .compress(OptimizerUtils.getConstrainedNumThreads(-1));\n//set output and release input\nec.releaseMatrixInput(input1.getName(), getExtendedOpcode());\n- ec.setMatrixOutput(output.getName(), cmb, getExtendedOpcode());\n+ ec.setMatrixOutput(output.getName(), out, getExtendedOpcode());\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/CompressionSPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/CompressionSPInstruction.java", "diff": "@@ -35,7 +35,7 @@ public class CompressionSPInstruction extends UnarySPInstruction {\nprivate 
CompressionSPInstruction(Operator op, CPOperand in, CPOperand out, String opcode, String istr) {\nsuper(op, in, out, opcode, istr);\n- _sptype = SPINSTRUCTION_TYPE.Reorg;\n+ _sptype = SPINSTRUCTION_TYPE.Compression;\n}\npublic static CompressionSPInstruction parseInstruction ( String str )\n@@ -43,12 +43,8 @@ public class CompressionSPInstruction extends UnarySPInstruction {\n{\nInstructionUtils.checkNumFields(str, 2);\nString[] parts = InstructionUtils.getInstructionPartsWithValueType(str);\n-\n- String opcode = parts[0];\n- CPOperand in = new CPOperand(parts[1]);\n- CPOperand out = new CPOperand(parts[2]);\n-\n- return new CompressionSPInstruction(null, in, out, opcode, str);\n+ return new CompressionSPInstruction(null,\n+ new CPOperand(parts[1]), new CPOperand(parts[2]), parts[0], str);\n}\n@Override\n@@ -70,18 +66,11 @@ public class CompressionSPInstruction extends UnarySPInstruction {\nsec.addLineageRDD(input1.getName(), output.getName());\n}\n- public static class CompressionFunction implements Function<MatrixBlock,MatrixBlock>\n- {\n+ public static class CompressionFunction implements Function<MatrixBlock,MatrixBlock> {\nprivate static final long serialVersionUID = -6528833083609423922L;\n-\n@Override\n- public MatrixBlock call(MatrixBlock arg0)\n- throws Exception\n- {\n- CompressedMatrixBlock cmb = new CompressedMatrixBlock(arg0);\n- cmb.compress();\n-\n- return cmb;\n+ public MatrixBlock call(MatrixBlock arg0) throws Exception {\n+ return new CompressedMatrixBlock(arg0).compress();\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -1349,7 +1349,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\ncopyDenseToDense(that);\n}\n- public void copyShallow(MatrixBlock that) {\n+ public MatrixBlock copyShallow(MatrixBlock that) {\nrlen = that.rlen;\nclen = that.clen;\nnonZeros = that.nonZeros;\n@@ -1358,6 +1358,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\ndenseBlock = that.denseBlock;\nelse\nsparseBlock = that.sparseBlock;\n+ return this;\n}\nprivate void copySparseToSparse(MatrixBlock that)\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2031] Fix robustness of compression w/ incompressible data. This patch improves the robustness of matrix compression by returning the uncompressed block if all columns are incompressible or the compression ratio is below 1. This avoids size and computation overhead from an otherwise unnecessary indirection to a single uncompressed column group.
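The essence of the patch is a pay-off check before committing to the compressed representation. A self-contained sketch of that decision logic, with illustrative class and field names rather than SystemML's actual API:

```java
// Stand-in for a matrix block with known size estimates; the caller
// always receives a usable block: either a compressed one, or the
// original when compression would not pay off.
public class CompressGuardSketch {
    long uncompressedSize;        // bytes of the plain representation
    long compressedSizeEstimate;  // estimated bytes after compression
    boolean anyCompressibleColumns;

    CompressGuardSketch compressOrSelf() {
        if (!anyCompressibleColumns)
            return this;          // abort: all columns incompressible
        double ratio = (double) uncompressedSize / compressedSizeEstimate;
        if (ratio < 1.0)
            return this;          // abort: compression ratio below 1
        return buildCompressed();
    }

    CompressGuardSketch buildCompressed() {
        // placeholder for the actual column-group compression
        return new CompressGuardSketch();
    }

    public static void main(String[] args) {
        CompressGuardSketch b = new CompressGuardSketch();
        b.uncompressedSize = 800; b.compressedSizeEstimate = 1000;
        b.anyCompressibleColumns = true;
        System.out.println(b.compressOrSelf() == b); // true: fell back
        b.uncompressedSize = 2000; // now compression pays off
        System.out.println(b.compressOrSelf() == b); // false: compressed copy
    }
}
```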
49,738
29.11.2017 22:30:30
28,800
d90073d804322f6b601b0f06d03979dae6322c25
New cleanup rewrite for removal of empty basic blocks. This new rewrite removes empty basic blocks, i.e., last-level blocks without hops, which can originate from the original program or a sequence of rewrites.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/ProgramRewriter.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/ProgramRewriter.java", "diff": "@@ -137,6 +137,7 @@ public class ProgramRewriter\n_dagRuleSet.add( new RewriteRemoveUnnecessaryCasts() );\nif( OptimizerUtils.ALLOW_COMMON_SUBEXPRESSION_ELIMINATION )\n_dagRuleSet.add( new RewriteCommonSubexpressionElimination(true) );\n+ _sbRuleSet.add( new RewriteRemoveEmptyBasicBlocks() );\n}\n/**\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteRemoveEmptyBasicBlocks.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.hops.rewrite;\n+\n+import java.util.ArrayList;\n+import java.util.List;\n+\n+import org.apache.sysml.hops.HopsException;\n+import org.apache.sysml.parser.StatementBlock;\n+\n+/**\n+ * Rule: Simplify program structure by removing empty last-level blocks,\n+ * which may originate from the original program or due to a sequence of\n+ * rewrites (e.g., checkpoint injection and subsequent IPA).\n+ */\n+public class RewriteRemoveEmptyBasicBlocks extends StatementBlockRewriteRule\n+{\n+ @Override\n+ public List<StatementBlock> rewriteStatementBlock(StatementBlock sb, ProgramRewriteStatus state)\n+ throws HopsException\n+ {\n+ ArrayList<StatementBlock> ret = new ArrayList<>();\n+\n+ //prune last level blocks with empty hops\n+ if( HopRewriteUtils.isLastLevelStatementBlock(sb)\n+ && (sb.getHops() == null || sb.getHops().isEmpty()) ) {\n+ if( LOG.isDebugEnabled() )\n+ LOG.debug(\"Applied removeEmptyBasicBlocks (lines \"+sb.getBeginLine()+\"-\"+sb.getEndLine()+\").\");\n+ }\n+ else //keep original sb\n+ ret.add( sb );\n+\n+ return ret;\n+ }\n+\n+ @Override\n+ public List<StatementBlock> rewriteStatementBlocks(List<StatementBlock> sbs, ProgramRewriteStatus sate)\n+ throws HopsException {\n+ return sbs;\n+ }\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1990] New cleanup rewrite for removal of empty basic blocks. This new rewrite removes empty basic blocks, i.e., last-level blocks without hops, which can originate from the original program or a sequence of rewrites.
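As a structural sketch of what the rewrite does, consider a toy program made of statement blocks, where a last-level block with an empty hop list is simply dropped (class and field names are hypothetical, not the SystemML classes):

```java
import java.util.ArrayList;
import java.util.List;

// Minimal model of the rewrite: a last-level block with no operator
// DAG contributes nothing and can be removed from the program.
public class EmptyBlockPruneSketch {
    static class Sb {                          // hypothetical statement block
        List<String> hops = new ArrayList<>(); // operator DAG roots
    }

    static List<Sb> rewrite(List<Sb> blocks) {
        List<Sb> ret = new ArrayList<>();
        for (Sb sb : blocks)
            if (sb.hops != null && !sb.hops.isEmpty())
                ret.add(sb);                   // keep non-empty blocks only
        return ret;
    }

    public static void main(String[] args) {
        List<Sb> prog = new ArrayList<>();
        prog.add(new Sb());                    // empty -> pruned
        Sb s = new Sb(); s.hops.add("write"); prog.add(s);
        System.out.println(rewrite(prog).size()); // prints 1
    }
}
```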
49,738
30.11.2017 15:30:06
28,800
78cc94a1e91102309d3e8834409f880cc0900f58
Fix matrix value types after as.matrix casts. This patch fixes a minor issue of value type inference for as.matrix casts. Specifically, matrices should always be assigned the value type double, independent of the input value type.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java", "new_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java", "diff": "@@ -502,7 +502,7 @@ public class BuiltinFunctionExpression extends DataIdentifier\nif( getFirstExpr().getOutput().getDataType()==DataType.SCALAR )\noutput.setDimensions(1, 1); //correction scalars\noutput.setBlockDimensions(id.getRowsInBlock(), id.getColumnsInBlock());\n- output.setValueType(id.getValueType());\n+ output.setValueType(ValueType.DOUBLE); //matrices always in double\nbreak;\ncase CAST_AS_FRAME:\ncheckNumParameters(1);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1980] Fix matrix value types after as.matrix casts. This patch fixes a minor issue of value type inference for as.matrix casts. Specifically, matrices should always be assigned the value type double, independent of the input value type.
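A toy sketch of the inference rule the patch enforces: regardless of the input's value type, a cast to matrix yields value type double (the enum and method names here are illustrative):

```java
// Toy value-type inference for a cast: whatever the input's value
// type, a matrix result always carries doubles, matching the fix.
public class CastTypeSketch {
    enum ValueType { INT, BOOLEAN, DOUBLE, STRING }

    static ValueType outputTypeOfAsMatrix(ValueType inputType) {
        // before the fix: return inputType; (wrong for INT/BOOLEAN inputs)
        return ValueType.DOUBLE;
    }

    public static void main(String[] args) {
        System.out.println(outputTypeOfAsMatrix(ValueType.INT)); // DOUBLE
    }
}
```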
49,738
30.11.2017 19:41:47
28,800
d47414ed0728700776c19585533b5dfc0eb835e1
Fix rewrite fuse-datagen-binary-op (pdf awareness). There was a missing check for uniform pdf functions in the rewrite for fusing rand with min 0, max 1 and scalar variable multiplications. This patch fixes the rewrite and adds related negative tests.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "diff": "@@ -371,10 +371,11 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nHop min = inputGen.getInput(DataExpression.RAND_MIN);\nHop max = inputGen.getInput(DataExpression.RAND_MAX);\ndouble sval = ((LiteralOp)right).getDoubleValue();\n+ boolean pdfUniform = pdf instanceof LiteralOp\n+ && DataExpression.RAND_PDF_UNIFORM.equals(((LiteralOp)pdf).getStringValue());\nif( HopRewriteUtils.isBinary(bop, OpOp2.MULT, OpOp2.PLUS, OpOp2.MINUS, OpOp2.DIV)\n- && min instanceof LiteralOp && max instanceof LiteralOp && pdf instanceof LiteralOp\n- && DataExpression.RAND_PDF_UNIFORM.equals(((LiteralOp)pdf).getStringValue()) )\n+ && min instanceof LiteralOp && max instanceof LiteralOp && pdfUniform )\n{\n//create fused data gen operator\nDataGenOp gen = null;\n@@ -392,7 +393,8 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nHopRewriteUtils.replaceChildReference(p, bop, gen);\nhi = gen;\n- LOG.debug(\"Applied fuseDatagenAndBinaryOperation1 (line \"+bop.getBeginLine()+\").\");\n+ LOG.debug(\"Applied fuseDatagenAndBinaryOperation1 \"\n+ + \"(\"+bop.getFilename()+\", line \"+bop.getBeginLine()+\").\");\n}\n}\n//right input rand and hence output matrix double, left scalar literal\n@@ -404,10 +406,11 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nHop min = inputGen.getInput(DataExpression.RAND_MIN);\nHop max = inputGen.getInput(DataExpression.RAND_MAX);\ndouble sval = ((LiteralOp)left).getDoubleValue();\n+ boolean pdfUniform = pdf instanceof LiteralOp\n+ && DataExpression.RAND_PDF_UNIFORM.equals(((LiteralOp)pdf).getStringValue());\nif( (bop.getOp()==OpOp2.MULT || bop.getOp()==OpOp2.PLUS)\n- && min instanceof LiteralOp && max instanceof LiteralOp && pdf instanceof LiteralOp\n- && DataExpression.RAND_PDF_UNIFORM.equals(((LiteralOp)pdf).getStringValue()) )\n+ && min instanceof LiteralOp && max instanceof LiteralOp && pdfUniform )\n{\n//create fused data gen operator\nDataGenOp gen = null;\n@@ -423,7 +426,8 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nHopRewriteUtils.replaceChildReference(p, bop, gen);\nhi = gen;\n- LOG.debug(\"Applied fuseDatagenAndBinaryOperation2 (line \"+bop.getBeginLine()+\").\");\n+ LOG.debug(\"Applied fuseDatagenAndBinaryOperation2 \"\n+ + \"(\"+bop.getFilename()+\", line \"+bop.getBeginLine()+\").\");\n}\n}\n//left input rand and hence output matrix double, right scalar variable\n@@ -433,6 +437,10 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nDataGenOp gen = (DataGenOp)left;\nHop min = gen.getInput(DataExpression.RAND_MIN);\nHop max = gen.getInput(DataExpression.RAND_MAX);\n+ Hop pdf = gen.getInput(DataExpression.RAND_PDF);\n+ boolean pdfUniform = pdf instanceof LiteralOp\n+ && DataExpression.RAND_PDF_UNIFORM.equals(((LiteralOp)pdf).getStringValue());\n+\nif( HopRewriteUtils.isBinary(bop, OpOp2.PLUS)\n&& HopRewriteUtils.isLiteralOfValue(min, 0)\n@@ -445,10 +453,11 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nfor( Hop p : parents )\nHopRewriteUtils.replaceChildReference(p, bop, gen);\nhi = gen;\n- LOG.debug(\"Applied fuseDatagenAndBinaryOperation3a (line \"+bop.getBeginLine()+\").\");\n+ LOG.debug(\"Applied fuseDatagenAndBinaryOperation3a \"\n+ + \"(\"+bop.getFilename()+\", line 
\"+bop.getBeginLine()+\").\");\n}\nelse if( HopRewriteUtils.isBinary(bop, OpOp2.MULT)\n- && (HopRewriteUtils.isLiteralOfValue(min, 0)\n+ && ((HopRewriteUtils.isLiteralOfValue(min, 0) && pdfUniform)\n|| HopRewriteUtils.isLiteralOfValue(min, 1))\n&& HopRewriteUtils.isLiteralOfValue(max, 1) )\n{\n@@ -460,10 +469,10 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nfor( Hop p : parents )\nHopRewriteUtils.replaceChildReference(p, bop, gen);\nhi = gen;\n- LOG.debug(\"Applied fuseDatagenAndBinaryOperation3b (line \"+bop.getBeginLine()+\").\");\n+ LOG.debug(\"Applied fuseDatagenAndBinaryOperation3b \"\n+ + \"(\"+bop.getFilename()+\", line \"+bop.getBeginLine()+\").\");\n}\n}\n-\n}\nreturn hi;\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteFusedRandTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/misc/RewriteFusedRandTest.java", "diff": "@@ -32,6 +32,7 @@ public class RewriteFusedRandTest extends AutomatedTestBase\nprivate static final String TEST_NAME1 = \"RewriteFusedRandLit\";\nprivate static final String TEST_NAME2 = \"RewriteFusedRandVar1\";\nprivate static final String TEST_NAME3 = \"RewriteFusedRandVar2\";\n+ private static final String TEST_NAME4 = \"RewriteFusedRandVar3\";\nprivate static final String TEST_DIR = \"functions/misc/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + RewriteFusedRandTest.class.getSimpleName() + \"/\";\n@@ -46,6 +47,7 @@ public class RewriteFusedRandTest extends AutomatedTestBase\naddTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"R\" }) );\naddTestConfiguration( TEST_NAME3, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME3, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME4, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME4, new String[] { \"R\" }) );\n}\n@Test\n@@ -79,15 +81,25 @@ public class RewriteFusedRandTest extends AutomatedTestBase\n}\n@Test\n- public void testRewriteFusedZerosPlusVar() {\n+ public void testRewriteFusedZerosPlusVarUniform() {\ntestRewriteFusedRand( TEST_NAME2, \"uniform\", true );\n}\n@Test\n- public void testRewriteFusedOnesMultVar() {\n+ public void testRewriteFusedOnesMultVarUniform() {\ntestRewriteFusedRand( TEST_NAME3, \"uniform\", true );\n}\n+ @Test\n+ public void testRewriteFusedOnesMult2VarUniform() {\n+ testRewriteFusedRand( TEST_NAME4, \"uniform\", true );\n+ }\n+\n+ @Test\n+ public void testRewriteFusedOnesMult2VarNormal() {\n+ testRewriteFusedRand( TEST_NAME4, \"normal\", true );\n+ }\n+\nprivate void testRewriteFusedRand( String testname, String pdf, boolean rewrites )\n{\nboolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n@@ -115,9 +127,10 @@ public class RewriteFusedRandTest extends AutomatedTestBase\nAssert.assertEquals(\"Wrong result\", new Double(Math.pow(rows*cols, 2)), ret);\n//check for applied rewrites\n- if( rewrites && pdf.equals(\"uniform\") ) {\n- Assert.assertTrue(!heavyHittersContainsString(\"+\")\n- && !heavyHittersContainsString(\"*\"));\n+ if( rewrites ) {\n+ boolean expected = testname.equals(TEST_NAME2) || pdf.equals(\"uniform\");\n+ Assert.assertTrue(expected == (!heavyHittersContainsString(\"+\")\n+ && !heavyHittersContainsString(\"*\")));\n}\n}\nfinally {\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/misc/RewriteFusedRandVar3.dml", 
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(1, $1, $2)\n+while(FALSE){}\n+Y = rand(rows=$1, cols=$2, min=0, max=1, pdf=$3) * sum(X);\n+while(FALSE){}\n+R = as.matrix(sum(Y))\n+\n+write(R, $5);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2032] Fix rewrite fuse-datagen-binary-op (pdf awareness). There was a missing check for uniform pdf functions in the rewrite for fusing rand with min 0, max 1 and scalar variable multiplications. This patch fixes the rewrite and adds related negative tests.
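A minimal model of the corrected guard: the fusion is only sound when the pdf operand is a literal equal to "uniform"; a normal pdf or a non-literal pdf must block the rewrite. The String-based check below is an illustrative stand-in for the LiteralOp test:

```java
// Sketch of the pdf-awareness guard added by the patch.
public class PdfGuardSketch {
    static boolean canFuse(Object pdfOperand) {
        return (pdfOperand instanceof String)  // literal, not a variable
            && "uniform".equals(pdfOperand);   // and uniform specifically
    }

    public static void main(String[] args) {
        System.out.println(canFuse("uniform")); // true  -> rewrite applies
        System.out.println(canFuse("normal"));  // false -> negative test case
        System.out.println(canFuse(null));      // false -> non-literal pdf
    }
}
```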
49,738
30.11.2017 21:53:56
28,800
54cf8f0b8d84df878e014eed3e99878b8c45bf13
Fix corrupted GPU matrixmult transpose flags
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/AggBinaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/AggBinaryOp.java", "diff": "@@ -669,7 +669,7 @@ public class AggBinaryOp extends Hop implements MultiThreadedHop\nHop h1 = getInput().get(0);\nHop h2 = getInput().get(1);\nboolean leftTrans = HopRewriteUtils.isTransposeOperation(h1);\n- boolean rightTrans = HopRewriteUtils.isTransposeOperation(h1);\n+ boolean rightTrans = HopRewriteUtils.isTransposeOperation(h2);\nLop left = !leftTrans ? h1.constructLops() :\nh1.getInput().get(0).constructLops();\nLop right = !rightTrans ? h2.constructLops() :\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2030] Fix corrupted GPU matrixmult transpose flags
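The underlying defect is a classic copy-paste error: both transpose flags were derived from the left input h1. A tiny sketch of the corrected shape, with a hypothetical isTransposed helper standing in for the pattern check performed on each input:

```java
// Each flag must be derived from its own input; the bug computed
// rightTrans from the left operand.
public class TransposeFlagSketch {
    static boolean isTransposed(String op) { return op.startsWith("t("); }

    public static void main(String[] args) {
        String h1 = "t(X)", h2 = "Y";
        boolean leftTrans  = isTransposed(h1);
        boolean rightTrans = isTransposed(h2); // bug was: isTransposed(h1)
        System.out.println(leftTrans + " " + rightTrans); // true false
    }
}
```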
49,738
30.11.2017 22:11:04
28,800
f53fcc8af4c4080a02c85a72645946ca508dd642
Fix MLContext handling of compiler configurations. This patch fixes the handling of compiler configurations passed and modified via dml configuration properties. So far, MLContext never set the optimization level, so any custom configuration was ignored.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java", "new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java", "diff": "@@ -31,6 +31,7 @@ import org.apache.sysml.api.ScriptExecutorUtils;\nimport org.apache.sysml.api.jmlc.JMLCUtils;\nimport org.apache.sysml.api.mlcontext.MLContext.ExecutionType;\nimport org.apache.sysml.api.mlcontext.MLContext.ExplainLevel;\n+import org.apache.sysml.conf.CompilerConfig;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.conf.DMLConfig;\nimport org.apache.sysml.hops.HopsException;\n@@ -250,8 +251,18 @@ public class ScriptExecutor {\nDMLScript.USE_ACCELERATOR = gpu;\nDMLScript.STATISTICS_COUNT = statisticsMaxHeavyHitters;\n- // Sets the GPUs to use for this process (a range, all GPUs, comma separated list or a specific GPU)\n- GPUContextPool.AVAILABLE_GPUS = ConfigurationManager.getDMLConfig().getTextValue(DMLConfig.AVAILABLE_GPUS);\n+ // set the global compiler configuration\n+ try {\n+ OptimizerUtils.resetStaticCompilerFlags();\n+ CompilerConfig cconf = OptimizerUtils.constructCompilerConfig(\n+ ConfigurationManager.getCompilerConfig(), config);\n+ ConfigurationManager.setGlobalConfig(cconf);\n+ } catch(DMLRuntimeException ex) {\n+ throw new RuntimeException(ex);\n+ }\n+\n+ // set the GPUs to use for this process (a range, all GPUs, comma separated list or a specific GPU)\n+ GPUContextPool.AVAILABLE_GPUS = config.getTextValue(DMLConfig.AVAILABLE_GPUS);\n}\n/**\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java", "diff": "@@ -263,12 +263,13 @@ public class OptimizerUtils\nreturn (getOptLevel() == level);\n}\n- public static CompilerConfig constructCompilerConfig( DMLConfig dmlconf )\n+ public static CompilerConfig constructCompilerConfig( DMLConfig dmlconf ) throws DMLRuntimeException {\n+ return constructCompilerConfig(new CompilerConfig(), dmlconf);\n+ }\n+\n+ public static CompilerConfig constructCompilerConfig( CompilerConfig cconf, DMLConfig dmlconf )\nthrows DMLRuntimeException\n{\n- //create default compiler configuration\n- CompilerConfig cconf = new CompilerConfig();\n-\n//each script sets its own block size, opt level etc\ncconf.set(ConfigType.BLOCK_SIZE, dmlconf.getIntValue( DMLConfig.DEFAULT_BLOCK_SIZE ));\n@@ -379,6 +380,25 @@ public class OptimizerUtils\nreturn cconf;\n}\n+ public static void resetStaticCompilerFlags() {\n+ //TODO this is a workaround for MLContext to avoid a major refactoring before the release; this method\n+ //should be removed as soon all modified static variables are properly handled in the compiler config\n+ ALLOW_ALGEBRAIC_SIMPLIFICATION = true;\n+ ALLOW_AUTO_VECTORIZATION = true;\n+ ALLOW_BRANCH_REMOVAL = true;\n+ ALLOW_CONSTANT_FOLDING = true;\n+ ALLOW_COMMON_SUBEXPRESSION_ELIMINATION = true;\n+ ALLOW_INTER_PROCEDURAL_ANALYSIS = true;\n+ ALLOW_LOOP_UPDATE_IN_PLACE = true;\n+ ALLOW_OPERATOR_FUSION = true;\n+ ALLOW_RAND_JOB_RECOMPILE = true;\n+ ALLOW_SIZE_EXPRESSION_EVALUATION = true;\n+ ALLOW_SPLIT_HOP_DAGS = true;\n+ ALLOW_SUM_PRODUCT_REWRITES = true;\n+ ALLOW_WORSTCASE_SIZE_EXPRESSION_EVALUATION = true;\n+ IPA_NUM_REPETITIONS = 3;\n+ }\n+\npublic static long getDefaultSize() {\n//we need to set default_size larger than any execution context\n//memory budget, however, it should not produce overflows on sum\n" }, { "change_type": "ADD", "old_path": null, "new_path": 
"src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextOptLevelTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.mlcontext;\n+\n+import static org.apache.sysml.api.mlcontext.ScriptFactory.dml;\n+\n+import org.apache.sysml.api.mlcontext.MLContext.ExplainLevel;\n+import org.apache.sysml.api.mlcontext.MLResults;\n+import org.apache.sysml.api.mlcontext.Script;\n+import org.apache.sysml.conf.DMLConfig;\n+import org.apache.sysml.test.utils.TestUtils;\n+import org.junit.Assert;\n+import org.junit.Test;\n+\n+public class MLContextOptLevelTest extends MLContextTestBase\n+{\n+ private final static int rows = 200;\n+ private final static int cols = 100;\n+\n+ @Test\n+ public void testOptLevel1() {\n+ runMLContextOptLevelTest(1);\n+ }\n+\n+ @Test\n+ public void testOptLevel2() {\n+ runMLContextOptLevelTest(2);\n+ }\n+\n+ private void runMLContextOptLevelTest(int optLevel)\n+ {\n+ try\n+ {\n+ String s = \"R = sum(matrix(0,\"+rows+\",\"+cols+\") + 7);\";\n+ ml.setExplain(true);\n+ ml.setExplainLevel(ExplainLevel.RUNTIME);\n+ ml.setStatistics(true);\n+ ml.setConfigProperty(DMLConfig.OPTIMIZATION_LEVEL, String.valueOf(optLevel));\n+\n+ Script script = dml(s).out(\"R\");\n+ MLResults results = ml.execute(script);\n+\n+ //check result correctness\n+ TestUtils.compareScalars(results.getDouble(\"R\"), rows*cols*7, 0.000001);\n+\n+ //check correct opt level\n+ Assert.assertTrue(heavyHittersContainsString(\"+\") == (optLevel==1));\n+ }\n+ catch(Exception ex) {\n+ throw new RuntimeException(ex);\n+ }\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/mlcontext/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/mlcontext/ZPackageSuite.java", "diff": "@@ -29,6 +29,7 @@ import org.junit.runners.Suite;\[email protected]({\nMLContextFrameTest.class,\nMLContextMultipleScriptsTest.class,\n+ MLContextOptLevelTest.class,\nMLContextOutputBlocksizeTest.class,\nMLContextParforDatasetTest.class,\nMLContextScratchCleanupTest.class,\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2033] Fix MLContext handling of compiler configurations. This patch fixes the handling of compiler configurations passed and modified via dml configuration properties. So far, MLContext never set the optimization level, so any custom configuration was ignored.
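Conceptually, the fix merges script-level properties into a fresh compiler configuration and installs it globally before compilation. A toy sketch of that flow; the key names and defaults are assumptions for illustration, not SystemML's config schema:

```java
import java.util.HashMap;
import java.util.Map;

// Per-script properties are merged over defaults into a fresh config,
// which is then installed globally before the script is compiled.
public class CompilerConfigSketch {
    static final Map<String, String> GLOBAL = new HashMap<>();

    static void configure(Map<String, String> scriptProps) {
        Map<String, String> cconf = new HashMap<>();
        cconf.put("optlevel", "2");   // assumed default
        cconf.putAll(scriptProps);    // script-level overrides win
        GLOBAL.clear();
        GLOBAL.putAll(cconf);         // install for the compiler
    }

    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("optlevel", "1");   // e.g., disable rewrites
        configure(props);
        System.out.println(GLOBAL.get("optlevel")); // 1, no longer ignored
    }
}
```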
49,738
01.12.2017 18:35:43
28,800
4d760657456cc06450ce9fc3d01ccf1e63b124eb
Fix result correctness of sparse maxpooling_backward. So far, all maxpooling backward tests ran exclusively against dense data, which hid result correctness issues of sparse-dense maxpooling backward operations and incorrect R comparison results in the presence of duplicate values. This patch addresses these correctness issues by explicitly correcting for unseen zero values.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNHelper.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNHelper.java", "diff": "@@ -20,6 +20,7 @@ package org.apache.sysml.runtime.matrix.data;\nimport java.util.ArrayList;\nimport java.util.Arrays;\n+import java.util.BitSet;\nimport java.util.concurrent.Callable;\nimport org.apache.sysml.hops.OptimizerUtils;\n@@ -421,10 +422,12 @@ public class LibMatrixDNNHelper {\nint [] tensorIndexes = new int[3];\n- int start_index_h = params.start_indexes_h[p];\n- int end_index_h = params.end_indexes_h[p];\n- int start_index_w = params.start_indexes_w[q];\n- int end_index_w = params.end_indexes_w[q];\n+ int start_h = params.start_indexes_h[p];\n+ int end_h = params.end_indexes_h[p];\n+ int start_w = params.start_indexes_w[q];\n+ int end_w = params.end_indexes_w[q];\n+ int numVals = (end_h-start_h)*(end_w-start_w);\n+ BitSet bmap = new BitSet(numVals);\nint maxIndex = -1;\ndouble maxVal = -Double.MAX_VALUE;\n@@ -446,17 +449,24 @@ public class LibMatrixDNNHelper {\ncontinue;\nint h = tensorIndexes[1];\nint w = tensorIndexes[2];\n- if(h >= start_index_h && h < end_index_h && w >= start_index_w && w < end_index_w) {\n+ if(h >= start_h && h < end_h && w >= start_w && w < end_w) {\ndouble val = performReluBackward && avals[j] < 0 ? 0 : avals[j];\nif(maxVal < val) {\nmaxIndex = inputOffset + h*params.W + w;\nmaxVal = val;\n}\n+ bmap.set((h-start_h)*(end_w-start_w) + (w-start_w));\n}\n}\n}\n- else {\n- maxIndex = inputOffset;\n+\n+ //correction for 0 maximum and subset of evaluated entries\n+ int firstMiss = bmap.nextClearBit(0);\n+ if( firstMiss < numVals && maxVal<0 ) {\n+ int lh = firstMiss/(end_w-start_w)+start_h;\n+ int lw = firstMiss%(end_w-start_w)+start_w;\n+ maxIndex = inputOffset + lh * params.W + lw;\n+ maxVal = 0;\n}\nreturn maxIndex;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/tensor/PoolBackwardTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/tensor/PoolBackwardTest.java", "diff": "@@ -32,7 +32,6 @@ import org.junit.Test;\npublic class PoolBackwardTest extends AutomatedTestBase\n{\n-\nprivate final static String TEST_NAME = \"PoolBackwardTest\";\nprivate final static String TEST_DIR = \"functions/tensor/\";\nprivate final static String TEST_CLASS_DIR = TEST_DIR + PoolBackwardTest.class.getSimpleName() + \"/\";\n@@ -40,157 +39,133 @@ public class PoolBackwardTest extends AutomatedTestBase\n@Override\npublic void setUp() {\n- addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME,\n- new String[] {\"B\"}));\n+ addTestConfiguration(TEST_NAME, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME, new String[] {\"B\"}));\n}\n@Test\n- public void testMaxPool2DBackwardDense1()\n- {\n+ public void testMaxPool2DBackwardDense1() {\nint numImg = 1; int imgSize = 4; int numChannels = 1; int stride = 2; int pad = 0; int poolSize1 = 2; int poolSize2 = 2;\nrunPoolTest(ExecType.CP, imgSize, numImg, numChannels, stride, pad, poolSize1, poolSize2, \"max\", false, false);\n}\n@Test\n- public void testMaxPool2DBackwardDense2()\n- {\n+ public void testMaxPool2DBackwardDense2() {\nint numImg = 3; int imgSize = 6; int numChannels = 3; int stride = 1; int pad = 0; int poolSize1 = 2; int poolSize2 = 2;\nrunPoolTest(ExecType.CP, imgSize, numImg, numChannels, stride, pad, poolSize1, poolSize2, \"max\", false, false);\n}\n@Test\n- public void 
testMaxPool2DBackwardDense3()\n- {\n+ public void testMaxPool2DBackwardDense3() {\nint numImg = 2; int imgSize = 7; int numChannels = 2; int stride = 2; int pad = 0; int poolSize1 = 3; int poolSize2 = 3;\nrunPoolTest(ExecType.CP, imgSize, numImg, numChannels, stride, pad, poolSize1, poolSize2, \"max\", false, false);\n}\n@Test\n- public void testMaxPool2DBackwardSparse1()\n- {\n+ public void testMaxPool2DBackwardSparse1() {\nint numImg = 1; int imgSize = 4; int numChannels = 1; int stride = 2; int pad = 0; int poolSize1 = 2; int poolSize2 = 2;\nrunPoolTest(ExecType.CP, imgSize, numImg, numChannels, stride, pad, poolSize1, poolSize2, \"max\", true, false);\n}\n@Test\n- public void testMaxPool2DBackwardSparse2()\n- {\n+ public void testMaxPool2DBackwardSparse2() {\nint numImg = 3; int imgSize = 6; int numChannels = 3; int stride = 1; int pad = 0; int poolSize1 = 2; int poolSize2 = 2;\nrunPoolTest(ExecType.CP, imgSize, numImg, numChannels, stride, pad, poolSize1, poolSize2, \"max\", true, false);\n}\n@Test\n- public void testMaxPool2DBackwardSparse3()\n- {\n+ public void testMaxPool2DBackwardSparse3() {\nint numImg = 2; int imgSize = 7; int numChannels = 2; int stride = 2; int pad = 0; int poolSize1 = 3; int poolSize2 = 3;\nrunPoolTest(ExecType.CP, imgSize, numImg, numChannels, stride, pad, poolSize1, poolSize2, \"max\", true, false);\n}\n@Test\n- public void testMaxPool2DBackwardSparse4()\n- {\n+ public void testMaxPool2DBackwardSparse4() {\nint numImg = 1; int imgSize = 4; int numChannels = 1; int stride = 2; int pad = 0; int poolSize1 = 2; int poolSize2 = 2;\nrunPoolTest(ExecType.CP, imgSize, numImg, numChannels, stride, pad, poolSize1, poolSize2, \"max\", true, true);\n}\n@Test\n- public void testMaxPool2DBackwardSparse5()\n- {\n+ public void testMaxPool2DBackwardSparse5() {\nint numImg = 3; int imgSize = 6; int numChannels = 3; int stride = 1; int pad = 0; int poolSize1 = 2; int poolSize2 = 2;\nrunPoolTest(ExecType.CP, imgSize, numImg, numChannels, stride, pad, poolSize1, poolSize2, \"max\", true, true);\n}\n@Test\n- public void testMaxPool2DBackwardSparse6()\n- {\n+ public void testMaxPool2DBackwardSparse6() {\nint numImg = 2; int imgSize = 7; int numChannels = 2; int stride = 2; int pad = 0; int poolSize1 = 3; int poolSize2 = 3;\nrunPoolTest(ExecType.CP, imgSize, numImg, numChannels, stride, pad, poolSize1, poolSize2, \"max\", true, true);\n}\n@Test\n- public void testMaxPool2DBackwardSparse7()\n- {\n+ public void testMaxPool2DBackwardSparse7() {\nint numImg = 1; int imgSize = 4; int numChannels = 1; int stride = 2; int pad = 0; int poolSize1 = 2; int poolSize2 = 2;\nrunPoolTest(ExecType.CP, imgSize, numImg, numChannels, stride, pad, poolSize1, poolSize2, \"max\", false, true);\n}\n@Test\n- public void testMaxPool2DBackwardSparse8()\n- {\n+ public void testMaxPool2DBackwardSparse8() {\nint numImg = 3; int imgSize = 6; int numChannels = 3; int stride = 1; int pad = 0; int poolSize1 = 2; int poolSize2 = 2;\nrunPoolTest(ExecType.CP, imgSize, numImg, numChannels, stride, pad, poolSize1, poolSize2, \"max\", false, true);\n}\n@Test\n- public void testMaxPool2DBackwardSparse9()\n- {\n+ public void testMaxPool2DBackwardSparse9() {\nint numImg = 2; int imgSize = 7; int numChannels = 2; int stride = 2; int pad = 0; int poolSize1 = 3; int poolSize2 = 3;\nrunPoolTest(ExecType.CP, imgSize, numImg, numChannels, stride, pad, poolSize1, poolSize2, \"max\", false, true);\n}\n- /**\n- *\n- * @param et\n- * @param sparse\n- */\n+ @Test\n+ public void testMaxPool2DBackwardSparse10() {\n+ int numImg = 10; 
int imgSize = 28; int numChannels = 1; int stride = 1; int pad = 0; int poolSize1 = 5; int poolSize2 = 5;\n+ runPoolTest(ExecType.CP, imgSize, numImg, numChannels, stride, pad, poolSize1, poolSize2, \"max\", true, false);\n+ }\n+\npublic void runPoolTest( ExecType et, int imgSize, int numImg, int numChannels, int stride,\nint pad, int poolSize1, int poolSize2, String poolMode, boolean sparse1, boolean sparse2)\n{\n- RUNTIME_PLATFORM oldRTP = rtplatform;\n-\n+ RUNTIME_PLATFORM platformOld = rtplatform;\n+ switch( et ){\n+ case MR: rtplatform = RUNTIME_PLATFORM.HADOOP; break;\n+ case SPARK: rtplatform = RUNTIME_PLATFORM.SPARK; break;\n+ default: rtplatform = RUNTIME_PLATFORM.HYBRID; break;\n+ }\nboolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK )\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\ntry\n{\n- String sparseVal1 = (\"\"+sparse1).toUpperCase();\n- String sparseVal2 = (\"\"+sparse2).toUpperCase();\n+ String sparseVal1 = String.valueOf(sparse1).toUpperCase();\n+ String sparseVal2 = String.valueOf(sparse2).toUpperCase();\nTestConfiguration config = getTestConfiguration(TEST_NAME);\n- if(et == ExecType.SPARK) {\n- rtplatform = RUNTIME_PLATFORM.SPARK;\n- }\n- else {\n- rtplatform = (et==ExecType.MR)? RUNTIME_PLATFORM.HADOOP : RUNTIME_PLATFORM.SINGLE_NODE;\n- }\n- if( rtplatform == RUNTIME_PLATFORM.SPARK )\n- DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n-\nloadTestConfiguration(config);\n- /* This is for running the junit test the new way, i.e., construct the arguments directly */\n- String RI_HOME = SCRIPT_DIR + TEST_DIR;\n- fullDMLScriptName = RI_HOME + TEST_NAME + \".dml\";\n-\nlong P = ConvolutionUtils.getP(imgSize, poolSize1, stride, pad);\n- programArgs = new String[]{\"-explain\", \"-args\", \"\" + imgSize, \"\" + numImg,\n- \"\" + numChannels, \"\" + poolSize1, \"\" + poolSize2,\n- \"\" + stride, \"\" + pad, poolMode,\n- \"\" + P, \"\" + P,\n- output(\"B\"), sparseVal1, sparseVal2};\n- boolean exceptionExpected = false;\n- int expectedNumberOfJobs = -1;\n- runTest(true, exceptionExpected, null, expectedNumberOfJobs);\n+ String RI_HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = RI_HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[]{\"-explain\", \"-args\", String.valueOf(imgSize), String.valueOf(numImg),\n+ String.valueOf(numChannels), String.valueOf(poolSize1), String.valueOf(poolSize2),\n+ String.valueOf(stride), String.valueOf(pad), String.valueOf(poolMode),\n+ String.valueOf(P), String.valueOf(P), output(\"B\"), sparseVal1, sparseVal2};\nfullRScriptName = RI_HOME + TEST_NAME + \".R\";\n- rCmd = \"Rscript\" + \" \" + fullRScriptName + \" \" + imgSize + \" \" + numImg +\n- \" \" + numChannels + \" \" + poolSize1 +\n- \" \" + poolSize2 + \" \" + stride + \" \" + pad + \" \" + P + \" \" + P + \" \" + expectedDir() +\n- \" \" + sparseVal1 + \" \" + sparseVal2;\n+ rCmd = \"Rscript\" + \" \" + fullRScriptName + \" \" + imgSize + \" \" + numImg + \" \" + numChannels +\n+ \" \" + poolSize1 + \" \" + poolSize2 + \" \" + stride + \" \" + pad + \" \" + P + \" \" + P +\n+ \" \" + expectedDir() + \" \" + sparseVal1 + \" \" + sparseVal2;\n- // Run comparison R script\n+ // run dml and r scripts\n+ runTest(true, false, null, -1);\nrunRScript(true);\n- HashMap<CellIndex, Double> bHM = readRMatrixFromFS(\"B\");\n+ // compare results\n+ HashMap<CellIndex, Double> bHM = readRMatrixFromFS(\"B\");\nHashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(\"B\");\n- TestUtils.compareMatrices(dmlfile, bHM, epsilon, \"B-DML\", 
\"NumPy\");\n-\n+ TestUtils.compareMatrices(dmlfile, bHM, epsilon, \"B-DML\", \"R\");\n}\n- finally\n- {\n- rtplatform = oldRTP;\n+ finally {\n+ rtplatform = platformOld;\nDMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n}\n-\n}\n-\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/tensor/PoolBackwardTest.R", "new_path": "src/test/scripts/functions/tensor/PoolBackwardTest.R", "diff": "@@ -35,15 +35,17 @@ Q=as.integer(args[9])\nx=matrix(seq(1, numImg*numChannels*imgSize*imgSize), numImg, numChannels*imgSize*imgSize, byrow=TRUE)\ndout=matrix(seq(1, numImg*numChannels*P*Q), numImg, numChannels*P*Q, byrow=TRUE)\nif(as.logical(args[11])) {\n- # zero_mask = (x - mean(x)) > 0\n- # x = x * zero_mask\n+ zero_mask = (x - mean(x)*1.5) > 0\n+ x = x * zero_mask\n+} else {\n+ x = x - mean(x)\n}\nif(as.logical(args[12])) {\n- # zero_mask = (dout - mean(dout)) > 0\n- # dout = dout * zero_mask\n-}\n-x = x - mean(x)\n+ zero_mask = (dout - mean(dout)*1.5) > 0\n+ dout = dout * zero_mask\n+} else {\ndout = dout - mean(dout)\n+}\nmax_pool_backward <- function(dout, Hout, Wout, X, C,\nHin, Win, Hf, Wf, strideh, stridew)\n{\n@@ -65,7 +67,8 @@ max_pool_backward <- function(dout, Hout, Wout, X, C,\nwin = (wout-1) * stridew + 1\nimg_slice_patch = img_slice[hin:(hin+Hf-1), win:(win+Wf-1)]\nmax_val = max(img_slice_patch)\n- max_val_ind = (img_slice_patch == max_val) # max value indicator\n+ max_val_ind = matrix(0, nrow(img_slice_patch), ncol(img_slice_patch))\n+ max_val_ind[which.max(img_slice_patch)] = 1 # first max value indicator\n# gradient passes through only for the max value in this patch\ndimg_slice_patch = max_val_ind * dout[n, (c-1)*Hout*Wout + (hout-1)*Wout + wout]\ndimg_slice[hin:(hin+Hf-1), win:(win+Wf-1)] =\n@@ -82,4 +85,3 @@ max_pool_backward <- function(dout, Hout, Wout, X, C,\noutput = max_pool_backward(dout, P, Q, x, numChannels, imgSize, imgSize, poolSize1, poolSize2, stride, stride)\nwriteMM(as(output,\"CsparseMatrix\"), paste(args[10], \"B\", sep=\"\"))\n-\n" }, { "change_type": "MODIFY", "old_path": "src/test/scripts/functions/tensor/PoolBackwardTest.dml", "new_path": "src/test/scripts/functions/tensor/PoolBackwardTest.dml", "diff": "@@ -34,15 +34,19 @@ Q = $10\nx=matrix(seq(1, numImg*numChannels*imgSize*imgSize), rows=numImg, cols=numChannels*imgSize*imgSize)\ndout=matrix(seq(1, numImg*numChannels*P*Q), rows=numImg, cols=numChannels*P*Q)\nif($12) {\n- # zero_mask = (x - mean(x)) > 0\n- # x = x * zero_mask\n+ zero_mask = (x - mean(x)*1.5) > 0\n+ x = x * zero_mask\n+}\n+else {\n+ x = x - mean(x)\n}\nif($13) {\n- # zero_mask = (dout - mean(dout)) > 0\n- # dout = dout * zero_mask\n+ zero_mask = (dout - mean(dout)*1.5) > 0\n+ dout = dout * zero_mask\n}\n-x = x - mean(x)\n+else {\ndout = dout - mean(dout)\n+}\nif(poolMode == \"max\") {\noutput = max_pool_backward(x, dout, stride=[stride, stride], padding=[pad, pad], input_shape=[numImg, numChannels, imgSize, imgSize], pool_size=[poolSize1, poolSize2])\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2035] Fix result correctness of sparse maxpooling_backward. So far, all maxpooling backward tests ran exclusively against dense data, which hid result correctness issues of sparse-dense maxpooling backward operations and incorrect R comparison results in the presence of duplicate values. This patch addresses these correctness issues by explicitly correcting for unseen zero values.
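The correction works because sparse iteration only visits nonzeros: if every visited value in a pooling window is negative but some cell was never visited, that unseen cell is an implicit zero and is the true maximum. A sketch of this logic using the same BitSet.nextClearBit idea as the patch (the flat array stands in for one pooling window):

```java
import java.util.BitSet;

// Sparse-style argmax over one pooling window: zeros are skipped
// during iteration, then corrected for afterwards.
public class SparseMaxSketch {
    // values[i] == 0 marks an entry the sparse iterator never sees
    static int argmax(double[] values) {
        BitSet seen = new BitSet(values.length);
        int maxIdx = -1;
        double maxVal = -Double.MAX_VALUE;
        for (int i = 0; i < values.length; i++) {
            if (values[i] == 0) continue;   // sparse: zeros are skipped
            seen.set(i);
            if (values[i] > maxVal) { maxVal = values[i]; maxIdx = i; }
        }
        // correction: an unseen zero beats an all-negative window
        int firstMiss = seen.nextClearBit(0);
        if (firstMiss < values.length && maxVal < 0)
            maxIdx = firstMiss;
        return maxIdx;
    }

    public static void main(String[] args) {
        System.out.println(argmax(new double[]{-3, 0, -1, -2})); // 1 (the zero)
        System.out.println(argmax(new double[]{-3, 5, -1, 0}));  // 1 (value 5)
    }
}
```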
49,736
04.12.2017 16:33:29
28,800
696fe3f6d0b57228f98e0959dc4de8b52ea0d6ed
[HOTFIX] Fix the loading logic of SystemML jars by associating the right jar files with the matching classloader
[ { "change_type": "MODIFY", "old_path": "src/main/python/systemml/classloader.py", "new_path": "src/main/python/systemml/classloader.py", "diff": "@@ -140,14 +140,15 @@ def _createJavaObject(sc, obj_type):\nelse:\nraise ValueError('Incorrect usage: supported values: mlcontext or dummy')\n-def _getJarFileName(sc, suffix):\n+def _getJarFileNames(sc):\nimport imp, fnmatch\njar_file_name = '_ignore.jar'\njava_dir = os.path.join(imp.find_module(\"systemml\")[1], \"systemml-java\")\n+ jar_file_names = []\nfor file in os.listdir(java_dir):\n- if fnmatch.fnmatch(file, 'systemml-*-SNAPSHOT' + suffix + '.jar') or fnmatch.fnmatch(file, 'systemml-*' + suffix + '.jar'):\n- jar_file_name = os.path.join(java_dir, file)\n- return jar_file_name\n+ if fnmatch.fnmatch(file, 'systemml-*-SNAPSHOT.jar') or fnmatch.fnmatch(file, 'systemml-*.jar'):\n+ jar_file_names = jar_file_names + [ os.path.join(java_dir, file) ]\n+ return jar_file_names\ndef _getLoaderInstance(sc, jar_file_name, className, hint):\nerr_msg = 'Unable to load systemml-*.jar into current pyspark session.'\n@@ -179,16 +180,18 @@ def createJavaObject(sc, obj_type):\nret = None\nerr_msg = 'Unable to load systemml-*.jar into current pyspark session.'\nhint = 'Provide the following argument to pyspark: --driver-class-path '\n- # First load SystemML\n- jar_file_name = _getJarFileName(sc, '')\n- x = _getLoaderInstance(sc, jar_file_name, 'org.apache.sysml.utils.SystemMLLoaderUtils', hint + 'SystemML.jar')\n+ jar_file_names = _getJarFileNames(sc)\n+ if len(jar_file_names) != 2:\n+ raise ImportError('Expected only systemml and systemml-extra jars, but found ' + str(jar_file_names))\n+ for jar_file_name in jar_file_names:\n+ if 'extra' in jar_file_name:\n+ x = _getLoaderInstance(sc, jar_file_name, 'org.apache.sysml.api.dl.Caffe2DMLLoader', hint + 'systemml-*-extra.jar')\n+ x.loadCaffe2DML(jar_file_name)\n+ else:\n+ x = _getLoaderInstance(sc, jar_file_name, 'org.apache.sysml.utils.SystemMLLoaderUtils', hint + 'systemml-*.jar')\nx.loadSystemML(jar_file_name)\ntry:\nret = _createJavaObject(sc, obj_type)\nexcept (py4j.protocol.Py4JError, TypeError):\nraise ImportError(err_msg + ' Hint: ' + hint + jar_file_name)\n- # Now load caffe2dml\n- jar_file_name = _getJarFileName(sc, '-extra')\n- x = _getLoaderInstance(sc, jar_file_name, 'org.apache.sysml.api.dl.Caffe2DMLLoader', hint + 'systemml-*-extra.jar')\n- x.loadCaffe2DML(jar_file_name)\nreturn ret\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Fix the loading logic of SystemML jars by associating the right jar files with the matching classloader
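The Python bootstrap above pairs each jar with the loader class it actually contains. As a conceptual Java analogue (the real fix lives in classloader.py; the jar path and class name below are illustrative), one can load a class through a URLClassLoader built for exactly that jar:

```java
import java.io.File;
import java.net.URL;
import java.net.URLClassLoader;

// Each jar gets a classloader built for exactly that jar, so the
// loader for systemml-*.jar is never asked to resolve classes that
// only exist in systemml-*-extra.jar.
public class JarLoaderSketch {
    static Class<?> loadFrom(String jarPath, String className) throws Exception {
        URL[] urls = { new File(jarPath).toURI().toURL() };
        URLClassLoader loader = new URLClassLoader(urls);
        return Class.forName(className, true, loader);
    }

    public static void main(String[] args) throws Exception {
        // hypothetical jar path and entry-point class pairing
        Class<?> c = loadFrom("systemml.jar",
            "org.apache.sysml.utils.SystemMLLoaderUtils");
        System.out.println(c.getName());
    }
}
```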
49,736
04.12.2017 19:08:37
28,800
3fa8d379308ec02971e63f686089d132ad8113d0
[MINOR] Bugfix in mllearn's SVM class. The PR introduced a bug in the mllearn SVM class, as the Scala wrappers were not updated after adding the command-line parameter scoring_only.
[ { "change_type": "MODIFY", "old_path": "src/main/scala/org/apache/sysml/api/ml/SVM.scala", "new_path": "src/main/scala/org/apache/sysml/api/ml/SVM.scala", "diff": "@@ -105,6 +105,7 @@ class SVMModel(override val uid: String)(estimator: SVM, val sc: SparkContext, v\nval script = dml(ScriptsUtils.getDMLScript(if (isMultiClass) SVMModel.predictionScriptPathMulticlass else SVMModel.predictionScriptPathBinary))\n.in(\"$X\", \" \")\n.in(\"$model\", \" \")\n+ .in(\"$scoring_only\", \"TRUE\")\n.out(\"scores\")\nval w = estimator.mloutput.getMatrix(\"w\")\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Bugfix in mllearn's SVM class - The PR #687 introduced a bug in the mllearn SVM class, as the Scala wrappers were not updated after adding the command-line parameter scoring_only.
49,736
04.12.2017 20:01:38
28,800
e4c2880ac9b90111592fa1206245a82ecde96460
Added support for sequential model in Keras2DML Also cleaned up variable names in the documentation.
[ { "change_type": "MODIFY", "old_path": "docs/deep-learning.md", "new_path": "docs/deep-learning.md", "diff": "@@ -32,13 +32,13 @@ limitations under the License.\nThere are three different ways to implement a Deep Learning model in SystemML:\n1. Using the [DML-bodied NN library](https://github.com/apache/systemml/tree/master/scripts/nn): This library allows the user to exploit full flexibility of [DML language](http://apache.github.io/systemml/dml-language-reference) to implement your neural network.\n2. Using the experimental [Caffe2DML API](http://apache.github.io/systemml/beginners-guide-caffe2dml.html): This API allows a model expressed in Caffe's proto format to be imported into SystemML. This API **doesnot** require Caffe to be installed on your SystemML.\n-3. Using the experimental [Keras2DML API](http://apache.github.io/systemml/beginners-guide-keras2dml.html): This API allows a model expressed in Keras's functional API to be imported into SystemML. However, this API requires Keras to be installed on your driver.\n+3. Using the experimental [Keras2DML API](http://apache.github.io/systemml/beginners-guide-keras2dml.html): This API allows a model expressed in Keras's API to be imported into SystemML. However, this API requires Keras to be installed on your driver.\n| | NN library | Caffe2DML | Keras2DML |\n|------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------|\n| External dependency | None | None | Keras |\n| Ability to add custom layers | Yes | No | No |\n-| The user needs to know | [DML](http://apache.github.io/systemml/dml-language-reference) | [Caffe's proto API](http://apache.github.io/systemml/reference-guide-caffe2dml#layers-supported-in-caffe2dml) | [Keras' functional API](https://keras.io/getting-started/functional-api-guide/) |\n+| The user needs to know | [DML](http://apache.github.io/systemml/dml-language-reference) | [Caffe's proto API](http://apache.github.io/systemml/reference-guide-caffe2dml#layers-supported-in-caffe2dml) | [Keras' API](https://keras.io/models/about-keras-models/) |\n| Can be invoked using pyspark | Yes. Please see [Python MLContext API](http://apache.github.io/systemml/spark-mlcontext-programming-guide) | Yes. | Yes. |\n| Can be invoked using spark-shell | Yes. 
Please see [Scala MLContext API](http://apache.github.io/systemml/spark-mlcontext-programming-guide) | Limited support | No |\n| Can be invoked via command-line or JMLC API | Yes | No | No |\n@@ -184,20 +184,20 @@ lenet.score(X_test, y_test)\n<div data-lang=\"Keras2DML\" markdown=\"1\">\n{% highlight python %}\n+from keras.models import Sequential\nfrom keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout,Flatten\nfrom keras import backend as K\nfrom keras.models import Model\ninput_shape = (1,28,28) if K.image_data_format() == 'channels_first' else (28,28, 1)\n-input_img = Input(shape=(input_shape))\n-x = Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=input_shape, padding='same')(input_img)\n-x = MaxPooling2D(pool_size=(2, 2))(x)\n-x = Conv2D(64, (5, 5), activation='relu', padding='same')(x)\n-x = MaxPooling2D(pool_size=(2, 2))(x)\n-x = Flatten()(x)\n-x = Dense(512, activation='relu')(x)\n-x = Dropout(0.5)(x)\n-x = Dense(10, activation='softmax')(x)\n-keras_model = Model(input_img, x)\n+keras_model = Sequential()\n+keras_model.add(Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=input_shape, padding='same'))\n+keras_model.add(MaxPooling2D(pool_size=(2, 2)))\n+keras_model.add(Conv2D(64, (5, 5), activation='relu', padding='same'))\n+keras_model.add(MaxPooling2D(pool_size=(2, 2)))\n+keras_model.add(Flatten())\n+keras_model.add(Dense(512, activation='relu'))\n+keras_model.add(Dropout(0.5))\n+keras_model.add(Dense(10, activation='softmax'))\nkeras_model.summary()\n# Scale the input features\n@@ -241,15 +241,15 @@ import keras, urllib\nfrom PIL import Image\nfrom keras.applications.resnet50 import preprocess_input, decode_predictions, ResNet50\n-model = ResNet50(weights='imagenet',include_top=True,pooling='None',input_shape=(224,224,3))\n-model.compile(optimizer='sgd', loss= 'categorical_crossentropy')\n+keras_model = ResNet50(weights='imagenet',include_top=True,pooling='None',input_shape=(224,224,3))\n+keras_model.compile(optimizer='sgd', loss= 'categorical_crossentropy')\n-resnet = Keras2DML(spark,model,input_shape=(3,224,224), weights='weights_dir', labels='https://raw.githubusercontent.com/apache/systemml/master/scripts/nn/examples/caffe2dml/models/imagenet/labels.txt')\n-resnet.summary()\n+sysml_model = Keras2DML(spark,keras_model,input_shape=(3,224,224), weights='weights_dir', labels='https://raw.githubusercontent.com/apache/systemml/master/scripts/nn/examples/caffe2dml/models/imagenet/labels.txt')\n+sysml_model.summary()\nurllib.urlretrieve('https://upload.wikimedia.org/wikipedia/commons/f/f4/Cougar_sitting.jpg', 'test.jpg')\nimg_shape = (3, 224, 224)\ninput_image = sml.convertImageToNumPyArr(Image.open('test.jpg'), img_shape=img_shape)\n-resnet.predict(input_image)\n+sysml_model.predict(input_image)\n{% endhighlight %}\n</div>\n" }, { "change_type": "MODIFY", "old_path": "src/main/python/systemml/mllearn/estimators.py", "new_path": "src/main/python/systemml/mllearn/estimators.py", "diff": "@@ -891,13 +891,19 @@ class Keras2DML(Caffe2DML):\nParameters\n----------\nsparkSession: PySpark SparkSession\n- model: keras hdf5 model file path\n+ keras_model: keras model\ninput_shape: 3-element list (number of channels, input height, input width)\ntransferUsingDF: whether to pass the input dataset via PySpark DataFrame (default: False)\nweights: directory whether learned weights are stored (default: None)\n+ labels: file containing mapping between index and string labels (default: None)\n\"\"\"\nfrom .keras2caffe import *\nimport tempfile\n+ if 
type(keras_model) == keras.models.Sequential:\n+ # Convert the sequential model to functional model\n+ if keras_model.model is None:\n+ keras_model.build()\n+ keras_model = keras_model.model\nself.name = keras_model.name\ncreateJavaObject(sparkSession._sc, 'dummy')\nconvertKerasToCaffeNetwork(keras_model, self.name + \".proto\")\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-540] Added support for sequential model in Keras2DML - Also cleaned up variable names in the documentation.
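The patch in the record above normalizes Keras `Sequential` models to the functional `Model` before conversion. A minimal sketch of that normalization in Python, assuming the Keras-2.0-era API the diff targets, where a built `Sequential` exposes its functional graph through the internal `model` attribute:

```python
import keras

def to_functional(keras_model):
    """Return the functional Model behind a Sequential model (pass-through otherwise)."""
    if isinstance(keras_model, keras.models.Sequential):
        # Older Keras versions build the wrapped functional graph lazily.
        if keras_model.model is None:
            keras_model.build()
        keras_model = keras_model.model
    return keras_model
```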
49,736
05.12.2017 11:11:35
28,800
f63b8c6faf074dae2a33be7898858a78174bf76b
Support softmax function on GPU via CuDNN. This API only supports the dense softmax function using CuDNN's cudnnSoftmaxForward kernel. Closes
[ { "change_type": "ADD", "old_path": null, "new_path": "scripts/nn/test/compare_backends/gen_softmax.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = rand(rows=$rows, cols=$cols, sparsity=$sp, min=-0.5, max=1)\n+write(X, \"input.mtx\", format=\"binary\")\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "scripts/nn/test/compare_backends/run_tests.sh", "new_path": "scripts/nn/test/compare_backends/run_tests.sh", "diff": "./test_conv2d.sh\n./test_maxpool.sh\n./test_maxpool_bwd.sh\n+./test_softmax.sh\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "scripts/nn/test/compare_backends/test_softmax.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+source(\"nn/layers/softmax.dml\") as softmax\n+fmt = ifdef($fmt, 'csv')\n+X = read(\"input.mtx\")\n+out = softmax::forward(X)\n+write(out, $out, format=fmt)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "scripts/nn/test/compare_backends/test_softmax.sh", "diff": "+#!/usr/bin/bash\n+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+jars='systemml-*-extra.jar'\n+\n+for rows in 1 300\n+do\n+ for cols in 1 300\n+ do\n+ for sparsity in 0.1 0.2 0.6 0.9\n+ do\n+ # Generating the data\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f gen_softmax.dml -nvargs sp=$sparsity rows=$rows cols=$cols\n+ # Running a test in CPU mode\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f test_softmax.dml -nvargs out=out_cp.csv\n+ # Running a test in GPU mode\n+ $SPARK_HOME/bin/spark-submit --jars $jars SystemML.jar -f test_softmax.dml -stats -gpu force -nvargs out=out_gpu.csv\n+ # Comparing the CPU vs GPU results to make sure they are the same\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f compare.dml -args out_cp.csv out_gpu.csv \"softmax:rows=\"$rows\",cols=\"$cols\",sparsity=\"$sparsity\n+ rm -rf out_cp.csv out_gpu.csv out_cp.csv.mtd out_gpu.csv.mtd\n+ rm -rf input.mtx input.mtx.mtd\n+ done\n+ done\n+done\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java", "diff": "@@ -638,7 +638,19 @@ public class BinaryOp extends Hop\n{\n// Both operands are Matrixes\nExecType et = optFindExecType();\n- if ( et == ExecType.CP || et == ExecType.GPU )\n+ boolean isGPUSoftmax = et == ExecType.GPU && op == Hop.OpOp2.DIV &&\n+ getInput().get(0) instanceof UnaryOp && getInput().get(1) instanceof AggUnaryOp &&\n+ ((UnaryOp)getInput().get(0)).getOp() == OpOp1.EXP && ((AggUnaryOp)getInput().get(1)).getOp() == AggOp.SUM &&\n+ ((AggUnaryOp)getInput().get(1)).getDirection() == Direction.Row &&\n+ getInput().get(0) == getInput().get(1).getInput().get(0);\n+ if(isGPUSoftmax) {\n+ UnaryCP softmax = new UnaryCP(getInput().get(0).getInput().get(0).constructLops(), UnaryCP.OperationTypes.SOFTMAX,\n+ getDataType(), getValueType(), et);\n+ setOutputDimensions(softmax);\n+ setLineNumbers(softmax);\n+ setLops(softmax);\n+ }\n+ else if ( et == ExecType.CP || et == ExecType.GPU )\n{\nLop binary = null;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/UnaryCP.java", "new_path": "src/main/java/org/apache/sysml/lops/UnaryCP.java", "diff": "@@ -36,7 +36,7 @@ public class UnaryCP extends Lop\npublic enum OperationTypes {\nNOT, ABS, SIN, COS, TAN, ASIN, ACOS, ATAN, SQRT, LOG, EXP, SINH, COSH, TANH,\nCAST_AS_SCALAR, CAST_AS_MATRIX, CAST_AS_FRAME, CAST_AS_DOUBLE, CAST_AS_INT, CAST_AS_BOOLEAN,\n- PRINT, NROW, NCOL, LENGTH, ROUND, STOP, CEIL, FLOOR, CUMSUM\n+ PRINT, NROW, NCOL, LENGTH, ROUND, STOP, CEIL, FLOOR, CUMSUM, SOFTMAX\n}\npublic static final String CAST_AS_SCALAR_OPCODE = \"castdts\";\n@@ -57,8 +57,9 @@ public class UnaryCP extends Lop\n* @param op operation type\n* @param dt data type\n* @param vt value type\n+ * @param et exec type\n*/\n- public UnaryCP(Lop input, OperationTypes op, DataType dt, ValueType vt) {\n+ public UnaryCP(Lop input, OperationTypes op, DataType dt, ValueType vt, ExecType et) {\nsuper(Lop.Type.UnaryCP, dt, vt);\noperation = op;\nthis.addInput(input);\n@@ -70,7 +71,11 @@ public class UnaryCP 
extends Lop\nboolean aligner = false;\nboolean definesMRJob = false;\nlps.addCompatibility(JobType.INVALID);\n- this.lps.setProperties(inputs, ExecType.CP, ExecLocation.ControlProgram, breaksAlignment, aligner, definesMRJob);\n+ this.lps.setProperties(inputs, et, ExecLocation.ControlProgram, breaksAlignment, aligner, definesMRJob);\n+ }\n+\n+ public UnaryCP(Lop input, OperationTypes op, DataType dt, ValueType vt) {\n+ this(input, op, dt, vt, ExecType.CP);\n}\n@Override\n@@ -171,6 +176,9 @@ public class UnaryCP extends Lop\ncase LENGTH:\nreturn \"length\";\n+ case SOFTMAX:\n+ return \"softmax\";\n+\ndefault:\nthrow new LopsException(this.printErrorLocation() + \"Unknown operation: \" + operation);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "diff": "@@ -100,6 +100,7 @@ public class GPUInstructionParser extends InstructionParser\nString2GPUInstructionType.put( \"atan\", GPUINSTRUCTION_TYPE.BuiltinUnary);\nString2GPUInstructionType.put( \"sign\", GPUINSTRUCTION_TYPE.BuiltinUnary);\nString2GPUInstructionType.put( \"sigmoid\", GPUINSTRUCTION_TYPE.BuiltinUnary);\n+ String2GPUInstructionType.put( \"softmax\", GPUINSTRUCTION_TYPE.BuiltinUnary);\n// Binary Builtin functions\nString2GPUInstructionType.put( \"solve\", GPUINSTRUCTION_TYPE.BuiltinBinary);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/BuiltinUnaryGPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/BuiltinUnaryGPUInstruction.java", "diff": "@@ -78,13 +78,14 @@ public abstract class BuiltinUnaryGPUInstruction extends GPUInstruction {\nopcode = parts[0];\nin.split(parts[1]);\nout.split(parts[2]);\n- func = Builtin.getBuiltinFnObject(opcode);\n+ // func = Builtin.getBuiltinFnObject(opcode);\n+ // new UnaryOperator(func)\nif(in.getDataType() == DataType.SCALAR)\nthrow new DMLRuntimeException(\"The instruction is not supported on GPU:\" + str);\n// return new ScalarBuiltinCPInstruction(new SimpleOperator(func), in, out, opcode, str);\nelse if(in.getDataType() == DataType.MATRIX)\n- return new MatrixBuiltinGPUInstruction(new UnaryOperator(func), in, out, opcode, str);\n+ return new MatrixBuiltinGPUInstruction(null, in, out, opcode, str);\n}\nreturn null;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/MatrixBuiltinGPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/MatrixBuiltinGPUInstruction.java", "diff": "@@ -82,6 +82,8 @@ public class MatrixBuiltinGPUInstruction extends BuiltinUnaryGPUInstruction {\nLibMatrixCUDA.sign(ec, ec.getGPUContext(0), getExtendedOpcode(), mat, _output.getName()); break;\ncase \"sigmoid\":\nLibMatrixCUDA.sigmoid(ec, ec.getGPUContext(0), getExtendedOpcode(), mat, _output.getName()); break;\n+ case \"softmax\":\n+ LibMatrixCuDNN.softmax(ec, ec.getGPUContext(0), getExtendedOpcode(), mat, _output.getName()); break;\ndefault:\nthrow new DMLRuntimeException(\"Unsupported GPU operator:\" + opcode);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java", "diff": "@@ -35,6 +35,7 @@ import static jcuda.jcudnn.cudnnTensorFormat.CUDNN_TENSOR_NCHW;\nimport static jcuda.runtime.JCuda.cudaMemset;\nimport 
jcuda.CudaException;\nimport jcuda.Pointer;\n+import jcuda.jcudnn.JCudnn;\nimport jcuda.jcudnn.cudnnActivationDescriptor;\nimport jcuda.jcudnn.cudnnConvolutionFwdPreference;\nimport jcuda.jcudnn.cudnnHandle;\n@@ -54,6 +55,9 @@ import org.apache.sysml.runtime.instructions.gpu.context.GPUContext;\nimport org.apache.sysml.utils.GPUStatistics;\nimport org.apache.sysml.utils.Statistics;\n+import static jcuda.jcudnn.cudnnSoftmaxAlgorithm.CUDNN_SOFTMAX_ACCURATE;\n+import static jcuda.jcudnn.cudnnSoftmaxMode.CUDNN_SOFTMAX_MODE_CHANNEL;\n+\n/**\n* This class contains method that invoke CuDNN operations.\n*/\n@@ -165,6 +169,47 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\n}\n}\n+ /**\n+ * Performs an \"softmax\" operation on a matrix on the GPU\n+ * @param ec execution context\n+ * @param gCtx a valid {@link GPUContext}\n+ * @param instName the invoking instruction's name for record {@link Statistics}.\n+ * @param in1 input matrix\n+ * @param outputName output matrix name\n+ * @throws DMLRuntimeException if DMLRuntimeException occurs\n+ */\n+ public static void softmax(ExecutionContext ec, GPUContext gCtx, String instName, MatrixObject in1, String outputName) throws DMLRuntimeException {\n+ if(LOG.isTraceEnabled()) {\n+ LOG.trace(\"GPU : softmax\" + \", GPUContext=\" + gCtx);\n+ }\n+ cudnnTensorDescriptor tensorDesc = allocateTensorDescriptor(toInt(in1.getNumRows()), toInt(in1.getNumColumns()), 1, 1);\n+ Pointer srcPointer = getDensePointerForCuDNN(gCtx, in1, instName);\n+ MatrixObject out = ec.getMatrixObject(outputName);\n+ ec.allocateGPUMatrixObject(outputName, in1.getNumRows(), in1.getNumColumns());\n+ out.getGPUObject(gCtx).allocateAndFillDense(0);\n+ Pointer dstPointer = getDensePointerForCuDNN(gCtx, out, instName);\n+ JCudnn.cudnnSoftmaxForward(gCtx.getCudnnHandle(), CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,\n+ one(), tensorDesc, srcPointer,\n+ zero(), tensorDesc, dstPointer);\n+ cudnnDestroyTensorDescriptor(tensorDesc);\n+ }\n+\n+ /**\n+ * Convenience method to get tensor descriptor\n+ * @param N number of images\n+ * @param C number of channels\n+ * @param H height\n+ * @param W width\n+ * @return cudnn tensor descriptor\n+ * @throws DMLRuntimeException if the input descriptor and matrix dimensions don't match\n+ */\n+ private static cudnnTensorDescriptor allocateTensorDescriptor(int N, int C, int H, int W) throws DMLRuntimeException {\n+ cudnnTensorDescriptor tensorDescriptor = new cudnnTensorDescriptor();\n+ cudnnCreateTensorDescriptor(tensorDescriptor);\n+ cudnnSetTensor4dDescriptor(tensorDescriptor, CUDNN_TENSOR_NCHW, LibMatrixCUDA.CUDNN_DATA_TYPE, N, C, H, W);\n+ return tensorDescriptor;\n+ }\n+\n/**\n* Throw an user-friendly error that shows limitation of invoking a cuDNN kernel\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-540] Support softmax function on GPU via CuDNN - This API only supports dense softmax function using CuDNN's cudnnSoftmaxForward kernel. Closes #703.
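The BinaryOp change above pattern-matches exp(X) divided by its row sums and lowers it to a single GPU softmax lop. A numpy sketch of those row-wise softmax semantics, mirroring the value range used by gen_softmax.dml (the helper name is illustrative):

```python
import numpy as np

def rowwise_softmax(X):
    # Matches the DML pattern exp(X) / rowSums(exp(X)) detected by the rewrite.
    E = np.exp(X)
    return E / E.sum(axis=1, keepdims=True)

X = np.random.uniform(-0.5, 1.0, size=(300, 300))  # same range as gen_softmax.dml
P = rowwise_softmax(X)
assert np.allclose(P.sum(axis=1), 1.0)  # each row is a probability distribution
```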
49,736
07.12.2017 07:43:04
28,800
8c11b5d828c1ec7aa6cb4d668ec793f571bff7cb
[HOTFIX] Disable transposed matrix multiplication optimization due to cusparseDcsrmm2 failures. Also added minor fixes for GPU tests.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/AggBinaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/AggBinaryOp.java", "diff": "@@ -157,7 +157,7 @@ public class AggBinaryOp extends Hop implements MultiThreadedHop\ninput2.getDim1(), input2.getDim2(), mmtsj, chain, _hasLeftPMInput );\nswitch( _method ){\ncase TSMM:\n- return true;\n+ return false; // TODO: Disabling any fused transa optimization in 1.0 release.\ncase MAPMM_CHAIN:\nreturn false;\ncase PMM:\n@@ -668,8 +668,12 @@ public class AggBinaryOp extends Hop implements MultiThreadedHop\nif (et == ExecType.GPU) {\nHop h1 = getInput().get(0);\nHop h2 = getInput().get(1);\n- boolean leftTrans = HopRewriteUtils.isTransposeOperation(h1);\n- boolean rightTrans = HopRewriteUtils.isTransposeOperation(h2);\n+ // Since GPU backend is in experimental mode, rewrite optimization can be skipped.\n+ // CuSPARSE's cusparsecsrmm2 fails with only following parameters, but passes for all other settings:\n+ // transa=1 transb=1 m=300 n=300 k=300 ldb=300 ldc=300\n+ // Hence, we disable hope rewrite optimization.\n+ boolean leftTrans = false; // HopRewriteUtils.isTransposeOperation(h1);\n+ boolean rightTrans = false; // HopRewriteUtils.isTransposeOperation(h2);\nLop left = !leftTrans ? h1.constructLops() :\nh1.getInput().get(0).constructLops();\nLop right = !rightTrans ? h2.constructLops() :\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "diff": "@@ -480,6 +480,7 @@ public class LibMatrixCUDA {\n}\nif (ec.getGPUContext(0) != gCtx)\nthrow new DMLRuntimeException(\"GPU : Invalid internal state, the GPUContext set with the ExecutionContext is not the same used to run this LibMatrixCUDA function\");\n+\nif(isInSparseFormat(gCtx, left)) {\n// For sparse TSMM, invoke matmult (TODO: possible performance improvement)\nLibMatrixCuMatMult.matmult(ec, gCtx, instName, left, left, outputName, isLeftTransposed, !isLeftTransposed);\n" }, { "change_type": "MODIFY", "old_path": "src/main/scala/org/apache/sysml/api/ml/ScriptsUtils.scala", "new_path": "src/main/scala/org/apache/sysml/api/ml/ScriptsUtils.scala", "diff": "@@ -25,7 +25,7 @@ import java.io.InputStreamReader\nimport org.apache.sysml.runtime.DMLRuntimeException\nobject ScriptsUtils {\n- var systemmlHome = System.getenv(\"SYSTEMML_HOME\")\n+ var systemmlHome:String = null // System.getenv(\"SYSTEMML_HOME\")\ntype SparkDataType = org.apache.spark.sql.Dataset[_] // org.apache.spark.sql.DataFrame for Spark 1.x\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/AggregateUnaryOpTests.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/AggregateUnaryOpTests.java", "diff": "@@ -31,6 +31,7 @@ public class AggregateUnaryOpTests extends UnaryOpTestsBase {\n@Override\npublic void setUp() {\n+ super.setUp();\nTestUtils.clearAssertionInformation();\naddTestConfiguration(TEST_DIR, TEST_NAME);\ngetAndLoadTestConfiguration(TEST_NAME);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/AppendTest.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/AppendTest.java", "diff": "@@ -41,6 +41,7 @@ public class AppendTest extends GPUTests {\n@Override\npublic void setUp() {\n+ super.setUp();\nTestUtils.clearAssertionInformation();\naddTestConfiguration(TEST_DIR, TEST_NAME);\ngetAndLoadTestConfiguration(TEST_NAME);\n" }, { "change_type": "MODIFY", "old_path": 
"src/test/java/org/apache/sysml/test/gpu/BinaryOpTests.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/BinaryOpTests.java", "diff": "@@ -37,6 +37,7 @@ public class BinaryOpTests extends GPUTests {\n@Override\npublic void setUp() {\n+ super.setUp();\nTestUtils.clearAssertionInformation();\naddTestConfiguration(TEST_DIR, TEST_NAME);\ngetAndLoadTestConfiguration(TEST_NAME);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/GPUTests.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/GPUTests.java", "diff": "@@ -24,6 +24,8 @@ import java.util.Formatter;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Set;\n+import java.util.concurrent.locks.Lock;\n+import java.util.concurrent.locks.ReentrantLock;\nimport org.apache.spark.sql.SparkSession;\nimport org.apache.sysml.api.mlcontext.MLContext;\n@@ -80,8 +82,16 @@ public abstract class GPUTests extends AutomatedTestBase {\nelse throw new RuntimeException(\"Unsupported precision:\" + FLOATING_POINT_PRECISION);\n}\n+ // force junit to run only 1 GPU test at a time to avoid cascading failures.\n+ Lock sequential = new ReentrantLock();\n+ @Override\n+ public void setUp() {\n+ sequential.lock();\n+\n+ }\n@After\npublic void tearDown() {\n+ sequential.unlock();\nclearGPUMemory();\nsuper.tearDown();\n}\n@@ -242,6 +252,16 @@ public abstract class GPUTests extends AutomatedTestBase {\n*/\nprivate void assertEqualMatrices(Matrix expected, Matrix actual) {\ntry {\n+ // Faster way to compare two matrices\n+ MLContext cpuMLC = new MLContext(spark);\n+ String scriptStr = \"num_mismatch = sum((abs(X - Y) / X) > \" + getTHRESHOLD() + \");\";\n+ Script script = ScriptFactory.dmlFromString(scriptStr).in(\"X\", expected).in(\"Y\", actual).out(\"num_mismatch\");\n+ long num_mismatch = cpuMLC.execute(script).getLong(\"num_mismatch\");\n+ cpuMLC.close();\n+ if(num_mismatch == 0)\n+ return;\n+\n+ // If error, print the actual incorrect values\nMatrixBlock expectedMB = expected.toMatrixObject().acquireRead();\nMatrixBlock actualMB = actual.toMatrixObject().acquireRead();\n@@ -323,8 +343,13 @@ public abstract class GPUTests extends AutomatedTestBase {\n*/\nprotected List<Object> runOnGPU(SparkSession spark, String scriptStr, Map<String, Object> inputs,\nList<String> outStrs) {\n+ // Ensure that only one instance of ml.execute runs at a time to avoid incorrect memory estimates\n+ // and other side effects.\n+ synchronized(GPUTests.class) {\nMLContext gpuMLC = new MLContext(spark);\ngpuMLC.setConfigProperty(\"sysml.floating.point.precision\", FLOATING_POINT_PRECISION);\n+ if(IGNORE_CLEAR_MEMORY_BUG)\n+ gpuMLC.setConfigProperty(\"sysml.gpu.eager.cudaFree\", \"true\");\ngpuMLC.setGPU(true);\ngpuMLC.setForceGPU(true);\ngpuMLC.setStatistics(true);\n@@ -337,6 +362,7 @@ public abstract class GPUTests extends AutomatedTestBase {\ngpuMLC.close();\nreturn outputs;\n}\n+ }\n/**\n* Assert that the two objects are equal. 
Supported types are Boolean, Integer, String, Double and Matrix\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/MatrixMatrixElementWiseOpTests.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/MatrixMatrixElementWiseOpTests.java", "diff": "@@ -42,6 +42,7 @@ public class MatrixMatrixElementWiseOpTests extends GPUTests {\n@Override\npublic void setUp() {\n+ super.setUp();\nTestUtils.clearAssertionInformation();\naddTestConfiguration(TEST_DIR, TEST_NAME);\ngetAndLoadTestConfiguration(TEST_NAME);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/MatrixMultiplicationOpTest.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/MatrixMultiplicationOpTest.java", "diff": "@@ -36,6 +36,7 @@ public class MatrixMultiplicationOpTest extends GPUTests {\n@Override\npublic void setUp() {\n+ super.setUp();\nTestUtils.clearAssertionInformation();\naddTestConfiguration(TEST_DIR, TEST_NAME);\ngetAndLoadTestConfiguration(TEST_NAME);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/NeuralNetworkOpTests.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/NeuralNetworkOpTests.java", "diff": "@@ -92,6 +92,7 @@ public class NeuralNetworkOpTests extends GPUTests {\n@Override\npublic void setUp() {\n+ super.setUp();\nTestUtils.clearAssertionInformation();\naddTestConfiguration(TEST_DIR, TEST_NAME);\ngetAndLoadTestConfiguration(TEST_NAME);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/ReorgOpTests.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/ReorgOpTests.java", "diff": "@@ -40,6 +40,7 @@ public class ReorgOpTests extends GPUTests {\n@Override\npublic void setUp() {\n+ super.setUp();\nTestUtils.clearAssertionInformation();\naddTestConfiguration(TEST_DIR, TEST_NAME);\ngetAndLoadTestConfiguration(TEST_NAME);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/RightIndexingTests.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/RightIndexingTests.java", "diff": "@@ -39,6 +39,7 @@ public class RightIndexingTests extends GPUTests {\n@Override\npublic void setUp() {\n+ super.setUp();\nTestUtils.clearAssertionInformation();\naddTestConfiguration(TEST_DIR, TEST_NAME);\ngetAndLoadTestConfiguration(TEST_NAME);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/ScalarMatrixElementwiseOpTests.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/ScalarMatrixElementwiseOpTests.java", "diff": "@@ -42,6 +42,7 @@ public class ScalarMatrixElementwiseOpTests extends GPUTests {\n@Override\npublic void setUp() {\n+ super.setUp();\nTestUtils.clearAssertionInformation();\naddTestConfiguration(TEST_DIR, TEST_NAME);\ngetAndLoadTestConfiguration(TEST_NAME);\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/gpu/UnaryOpTests.java", "new_path": "src/test/java/org/apache/sysml/test/gpu/UnaryOpTests.java", "diff": "@@ -31,6 +31,7 @@ public class UnaryOpTests extends UnaryOpTestsBase {\n@Override\npublic void setUp() {\n+ super.setUp();\nTestUtils.clearAssertionInformation();\naddTestConfiguration(TEST_DIR, TEST_NAME);\ngetAndLoadTestConfiguration(TEST_NAME);\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Disable transposed matrix multiplication optimization as cusparseDcsrmm2 failures. - Also, added minor fixes for GPU tests.
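The GPUTests change above speeds up result validation with the DML check num_mismatch = sum((abs(X - Y) / X) > threshold). A numpy sketch of the same relative-error count; note that, like the DML expression, it divides by the expected values, so exact zeros in X would need separate handling (the helper name is illustrative):

```python
import numpy as np

def count_mismatches(expected, actual, threshold=1e-9):
    # Mirrors sum((abs(X - Y) / X) > threshold) from the test harness.
    rel_err = np.abs(expected - actual) / expected
    return int((rel_err > threshold).sum())

X = np.random.rand(4, 4) + 1.0  # keep entries away from zero (the check divides by X)
Y = X + 1e-12                   # GPU result with tiny floating-point drift
assert count_mismatches(X, Y) == 0
```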
49,738
06.12.2017 18:26:38
28,800
c1860ce5630cc00cfe2312d1cb7f8fd90a513280
Fix removeEmpty nnz maintenance with selection vector. This patch fixes the maintenance of nnz metadata in removeEmpty. So far, this operation propagated the nnz from the input, which is only correct without a selection vector, or if the selection vector selects exactly the rows with non-zeros.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java", "diff": "@@ -1781,7 +1781,8 @@ public class LibMatrixReorg\n}\n//check sparsity\n- ret.nonZeros = in.nonZeros;\n+ ret.nonZeros = (select==null) ?\n+ in.nonZeros : ret.recomputeNonZeros();\nret.examSparsity();\nreturn ret;\n@@ -1901,7 +1902,8 @@ public class LibMatrixReorg\n}\n//check sparsity\n- ret.nonZeros = in.nonZeros;\n+ ret.nonZeros = (select==null) ?\n+ in.nonZeros : ret.recomputeNonZeros();\nret.examSparsity();\nreturn ret;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2036] Fix removeEmpty nnz maintenance with selection vector This patch fixes the maintenance of nnz meta data in removeEmpty. So far, this operation propagated the nnz from the input, which is only correct without selection vector or if the selection vector indicates all rows with non-zeros.
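Why propagating the input nnz is incorrect here: with a selection vector, non-empty rows can be dropped (and kept rows can still contain zeros), so the output nnz must be recomputed. An illustrative numpy analogue of removeEmpty over rows with a selection vector:

```python
import numpy as np

X = np.array([[1.0, 0.0],
              [5.0, 0.0],   # non-empty row, but deselected below
              [3.0, 4.0]])
select = np.array([True, False, True])  # rows to keep

out = X[select]                    # removeEmpty(target=X, margin="rows", select=...)
assert np.count_nonzero(X) == 4    # nnz of the input
assert np.count_nonzero(out) == 3  # propagating 4 would corrupt the metadata
```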
49,738
06.12.2017 19:36:54
28,800
f13f697e08a2a87cff61ca5dfd62f73f10555d3e
[MINOR] Improved primitives for memory profiling and sparse checks
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/LocalVariableMap.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/LocalVariableMap.java", "diff": "@@ -27,6 +27,7 @@ import java.util.Set;\nimport java.util.StringTokenizer;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.caching.CacheableData;\nimport org.apache.sysml.runtime.controlprogram.parfor.ProgramConverter;\nimport org.apache.sysml.runtime.controlprogram.parfor.util.IDSequence;\nimport org.apache.sysml.runtime.instructions.cp.Data;\n@@ -122,6 +123,14 @@ public class LocalVariableMap implements Cloneable\nreturn outputs;\n}\n+ public double getPinnedDataSize() {\n+ //note: this method returns the total size of pinned data objects\n+ //that are not subject to automatic eviction.\n+ return localMap.values().stream()\n+ .filter(d -> (d instanceof CacheableData))\n+ .mapToDouble(d -> ((CacheableData<?>)d).getDataSize()).sum();\n+ }\n+\npublic String serialize() throws DMLRuntimeException {\nStringBuilder sb = new StringBuilder();\nint count = 0;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ProgramBlock.java", "diff": "@@ -371,7 +371,7 @@ public class ProgramBlock implements ParseInfo\nif( nnz1 != nnz2 )\nthrow new DMLRuntimeException(\"Matrix nnz meta data was incorrect: (\"+varname+\", actual=\"+nnz1+\", expected=\"+nnz2+\", inst=\"+lastInst+\")\");\n- if( sparse1 != sparse2 )\n+ if( sparse1 != sparse2 && mb.isAllocated() )\nthrow new DMLRuntimeException(\"Matrix was in wrong data representation: (\"+varname+\", actual=\"+sparse1+\", expected=\"+sparse2 +\n\", nrow=\"+mb.getNumRows()+\", ncol=\"+mb.getNumColumns()+\", nnz=\"+nnz1+\", inst=\"+lastInst+\")\");\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/CacheableData.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/CacheableData.java", "diff": "@@ -956,6 +956,10 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\nreturn (_data.getInMemorySize() <= CACHING_THRESHOLD);\n}\n+ public long getDataSize() {\n+ return (_data != null) ?_data.getInMemorySize() : 0;\n+ }\n+\nprotected ValueType[] getSchema() {\nreturn null;\n}\n@@ -1262,7 +1266,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\nsizePinned.set( Math.max(size,0) );\n}\n- protected long getPinnedSize() {\n+ protected static long getPinnedSize() {\nreturn sizePinned.get();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -1312,9 +1312,13 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nint apos = sparseBlock.pos(i);\nint alen = sparseBlock.size(i);\nint[] aix = sparseBlock.indexes(i);\n+ double[] avals = sparseBlock.values(i);\nfor( int k=apos+1; k<apos+alen; k++ )\nif( aix[k-1] >= aix[k] )\nthrow new RuntimeException(\"Wrong sparse row ordering: \"+k+\" \"+aix[k-1]+\" \"+aix[k]);\n+ for( int k=apos; k<apos+alen; k++ )\n+ if( avals[k] == 0 )\n+ throw new RuntimeException(\"Wrong sparse row: zero at \"+k);\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Improved primitives for memory profiling and sparse checks
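The MatrixBlock change above tightens the sparse-validity check: within each row, column indexes must be strictly increasing and no explicit zeros may be stored. A Python sketch of the same invariant over raw CSR arrays, where indptr/indices/data play the roles of pos/size, indexes, and values in the Java code:

```python
import numpy as np

def check_csr_rows(indptr, indices, data):
    for r in range(len(indptr) - 1):
        apos, aend = indptr[r], indptr[r + 1]
        for k in range(apos + 1, aend):
            assert indices[k - 1] < indices[k], f"wrong sparse row ordering in row {r}"
        for k in range(apos, aend):
            assert data[k] != 0, f"explicit zero stored in row {r}"

# A valid 2x3 CSR block for [[0, 7, 0], [1, 0, 2]]:
check_csr_rows(np.array([0, 1, 3]), np.array([1, 0, 2]), np.array([7.0, 1.0, 2.0]))
```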
49,738
07.12.2017 02:05:58
28,800
fdc24bb7d46858f2300efd7bee302c3ce6a6365d
Fix sparse tsmm, rowIndexMax, cumsum over CSR inputs. The recent change to use the memory-efficient CSR format for all dense-sparse conversions revealed a couple of hidden correctness issues with sparse tsmm (transpose-self matrix multiply), rowIndexMax, and cumsum block operations over CSR blocks.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/BuiltinUnaryGPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/BuiltinUnaryGPUInstruction.java", "diff": "@@ -23,12 +23,9 @@ package org.apache.sysml.runtime.instructions.gpu;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n-import org.apache.sysml.runtime.functionobjects.Builtin;\n-import org.apache.sysml.runtime.functionobjects.ValueFunction;\nimport org.apache.sysml.runtime.instructions.InstructionUtils;\nimport org.apache.sysml.runtime.instructions.cp.CPOperand;\nimport org.apache.sysml.runtime.matrix.operators.Operator;\n-import org.apache.sysml.runtime.matrix.operators.UnaryOperator;\npublic abstract class BuiltinUnaryGPUInstruction extends GPUInstruction {\nint _arity;\n@@ -56,21 +53,10 @@ public abstract class BuiltinUnaryGPUInstruction extends GPUInstruction {\nString[] parts = InstructionUtils.getInstructionPartsWithValueType(str);\nString opcode = null;\n- ValueFunction func = null;\n//print or stop or cumulative aggregates\n- if( parts.length==4 )\n- {\n- opcode = parts[0];\n- in.split(parts[1]);\n- out.split(parts[2]);\n- func = Builtin.getBuiltinFnObject(opcode);\n-\n+ if( parts.length==4 ) {\nthrow new DMLRuntimeException(\"The instruction is not supported on GPU:\" + str);\n-// if( Arrays.asList(new String[]{\"ucumk+\",\"ucum*\",\"ucummin\",\"ucummax\"}).contains(opcode) )\n-// return new MatrixBuiltinCPInstruction(new UnaryOperator(func,Integer.parseInt(parts[3])), in, out, opcode, str);\n-// else\n-// return new ScalarBuiltinCPInstruction(new SimpleOperator(func), in, out, opcode, str);\n}\nelse //2+1, general case\n{\n@@ -78,12 +64,9 @@ public abstract class BuiltinUnaryGPUInstruction extends GPUInstruction {\nopcode = parts[0];\nin.split(parts[1]);\nout.split(parts[2]);\n- // func = Builtin.getBuiltinFnObject(opcode);\n- // new UnaryOperator(func)\nif(in.getDataType() == DataType.SCALAR)\nthrow new DMLRuntimeException(\"The instruction is not supported on GPU:\" + str);\n-// return new ScalarBuiltinCPInstruction(new SimpleOperator(func), in, out, opcode, str);\nelse if(in.getDataType() == DataType.MATRIX)\nreturn new MatrixBuiltinGPUInstruction(null, in, out, opcode, str);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixAgg.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixAgg.java", "diff": "@@ -2634,7 +2634,7 @@ public class LibMatrixAgg\nif(alen < n && (builtin.execute2( 0, c[cix+1] ) == 1))\n{\nint ix = n-1; //find last 0 value\n- for( int j=alen-1; j>=0; j--, ix-- )\n+ for( int j=apos+alen-1; j>=apos; j--, ix-- )\nif( aix[j]!=ix )\nbreak;\nc[cix+0] = ix + 1; //max index (last)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java", "diff": "@@ -1820,22 +1820,20 @@ public class LibMatrixMult\nif( LOW_LEVEL_OPTIMIZATION )\n{\nint arlen = a.numRows();\n- for( int r=0; r<arlen; r++ )\n- if( !a.isEmpty(r) )\n- {\n+ for( int r=0; r<arlen; r++ ) {\n+ if( a.isEmpty(r) ) continue;\nint apos = a.pos(r);\nint alen = a.size(r);\nint[] aix = a.indexes(r);\ndouble[] avals = a.values(r);\nint rlix = (rl==0) ? 0 : a.posFIndexGTE(r, rl);\nrlix = (rlix>=0) ? 
apos+rlix : apos+alen;\n-\n- for(int i = rlix; i < apos+alen && aix[i]<ru; i++)\n- {\n+ int len = apos + alen;\n+ for(int i = rlix; i < len && aix[i]<ru; i++) {\ndouble val = avals[i];\nif( val != 0 ) {\nint ix2 = aix[i]*n;\n- vectMultiplyAdd(val, avals, c, aix, i, ix2, alen-i);\n+ vectMultiplyAdd(val, avals, c, aix, i, ix2, len-i);\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -29,6 +29,7 @@ import java.io.ObjectOutput;\nimport java.io.ObjectOutputStream;\nimport java.util.ArrayList;\nimport java.util.Arrays;\n+import java.util.Collections;\nimport java.util.Iterator;\nimport java.util.stream.LongStream;\n@@ -553,7 +554,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n//check for existing sparse block: return empty list\nif( sparseBlock==null )\n- return new ArrayList<IJV>().iterator();\n+ return Collections.EMPTY_LIST.iterator();\n//get iterator over sparse block\nreturn sparseBlock.getIterator(rl, ru);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SparseBlockCSR.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SparseBlockCSR.java", "diff": "@@ -463,7 +463,7 @@ public class SparseBlockCSR extends SparseBlock\n//delete existing values if necessary\nif( len > 0 ) //incl size update\n- deleteIndexRange(r, aix[0], aix[alen-1]+1);\n+ deleteIndexRange(r, aix[pos], aix[pos+len-1]+1);\n//prepare free space (allocate and shift)\nint lsize = _size+alen;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2038] Fix sparse tsmm, rowIndexMax, cumsum over CSR inputs The recent change to use the memory-efficient CSR format for all dense-sparse conversions revealed a couple of hidden correctness issues with sparse tsmm (transpose-self-matrix multiply), rowIndexMax, and cumsum block operations over CSR blocks.
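All three fixes above are missing-offset bugs of the same shape: CSR stores every row in shared arrays, so row r's entries live at pos(r) .. pos(r)+size(r), and any loop or lookup that starts at index 0 or ends at alen silently reads a different row's data. A short Python illustration of the offset discipline the patch enforces (the values are arbitrary):

```python
import numpy as np

indptr  = np.array([0, 2, 5])           # row pointers: pos(r) = indptr[r]
indices = np.array([1, 3, 0, 2, 4])     # shared column-index array for all rows
values  = np.array([1., 2., 3., 4., 5.])

r = 1
apos = indptr[r]
alen = indptr[r + 1] - indptr[r]        # size(r)
# Wrong: indices[0] / indices[alen - 1] ignore the row offset and read row 0's data.
# Right: always add apos, exactly as the patched code does.
first_col, last_col = indices[apos], indices[apos + alen - 1]
assert (first_col, last_col) == (0, 4)
```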
49,719
07.12.2017 16:16:07
28,800
0ef6b924612951ccd003e8466fc9a911b098297f
[Minor]: minor additions to notebooks.
[ { "change_type": "DELETE", "old_path": "samples/jupyter-notebooks/Linear_Regression_Algorithms_Demo.ipynb", "new_path": null, "diff": "-{\n- \"cells\": [\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"# Linear Regression Algorithms using Apache SystemML\\n\",\n- \"\\n\",\n- \"This notebook shows:\\n\",\n- \"- Install SystemML Python package and jar file\\n\",\n- \" - pip\\n\",\n- \" - SystemML 'Hello World'\\n\",\n- \"- Example 1: Matrix Multiplication\\n\",\n- \" - SystemML script to generate a random matrix, perform matrix multiplication, and compute the sum of the output\\n\",\n- \" - Examine execution plans, and increase data size to observe changed execution plans\\n\",\n- \"- Load diabetes dataset from scikit-learn\\n\",\n- \"- Example 2: Implement three different algorithms to train linear regression model\\n\",\n- \" - Algorithm 1: Linear Regression - Direct Solve (no regularization)\\n\",\n- \" - Algorithm 2: Linear Regression - Batch Gradient Descent (no regularization)\\n\",\n- \" - Algorithm 3: Linear Regression - Conjugate Gradient (no regularization)\\n\",\n- \"- Example 3: Invoke existing SystemML algorithm script LinearRegDS.dml using MLContext API\\n\",\n- \"- Example 4: Invoke existing SystemML algorithm using scikit-learn/SparkML pipeline like API\\n\",\n- \"- Uninstall/Clean up SystemML Python package and jar file\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"### This notebook is supported with SystemML 0.14.0 and above.\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"scrolled\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"!pip show systemml\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"### Import SystemML API \"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {},\n- \"outputs\": [],\n- \"source\": [\n- \"from systemml import MLContext, dml, dmlFromResource\\n\",\n- \"\\n\",\n- \"ml = MLContext(sc)\\n\",\n- \"\\n\",\n- \"print (\\\"Spark Version:\\\" + sc.version)\\n\",\n- \"print (\\\"SystemML Version:\\\" + ml.version())\\n\",\n- \"print (\\\"SystemML Built-Time:\\\"+ ml.buildTime())\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {},\n- \"outputs\": [],\n- \"source\": [\n- \"ml.execute(dml(\\\"\\\"\\\"s = 'Hello World!'\\\"\\\"\\\").output(\\\"s\\\")).get(\\\"s\\\")\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"### Import numpy, sklearn, and define some helper functions\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"import sys, os, glob, subprocess\\n\",\n- \"import matplotlib.pyplot as plt\\n\",\n- \"import numpy as np\\n\",\n- \"from sklearn import datasets\\n\",\n- \"plt.switch_backend('agg')\\n\",\n- \" \\n\",\n- \"def printLastLogLines(n):\\n\",\n- \" fname = max(glob.iglob(os.sep.join([os.environ[\\\"HOME\\\"],'/logs/notebook/kernel-pyspark-*.log'])), key=os.path.getctime)\\n\",\n- \" print(subprocess.check_output(['tail', '-' + str(n), fname]))\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"# Example 1: Matrix Multiplication\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"### SystemML script to generate a random matrix, perform matrix 
multiplication, and compute the sum of the output\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true,\n- \"slideshow\": {\n- \"slide_type\": \"-\"\n- }\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"script = \\\"\\\"\\\"\\n\",\n- \" X = rand(rows=$nr, cols=1000, sparsity=0.5)\\n\",\n- \" A = t(X) %*% X\\n\",\n- \" s = sum(A)\\n\",\n- \"\\\"\\\"\\\"\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {},\n- \"outputs\": [],\n- \"source\": [\n- \"prog = dml(script).input('$nr', 1e5).output('s')\\n\",\n- \"s = ml.execute(prog).get('s')\\n\",\n- \"print (s)\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"### Examine execution plans, and increase data size to observe changed execution plans\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true,\n- \"scrolled\": false\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"ml = MLContext(sc)\\n\",\n- \"ml = ml.setStatistics(True)\\n\",\n- \"# re-execute ML program\\n\",\n- \"# printLastLogLines(22)\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {},\n- \"outputs\": [],\n- \"source\": [\n- \"prog = dml(script).input('$nr', 1e6).output('s')\\n\",\n- \"out = ml.execute(prog).get('s')\\n\",\n- \"print (out)\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"ml = MLContext(sc)\\n\",\n- \"ml = ml.setStatistics(False)\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"# Load diabetes dataset from scikit-learn \"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"%matplotlib inline\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {},\n- \"outputs\": [],\n- \"source\": [\n- \"diabetes = datasets.load_diabetes()\\n\",\n- \"diabetes_X = diabetes.data[:, np.newaxis, 2]\\n\",\n- \"diabetes_X_train = diabetes_X[:-20]\\n\",\n- \"diabetes_X_test = diabetes_X[-20:]\\n\",\n- \"diabetes_y_train = diabetes.target[:-20].reshape(-1,1)\\n\",\n- \"diabetes_y_test = diabetes.target[-20:].reshape(-1,1)\\n\",\n- \"\\n\",\n- \"plt.scatter(diabetes_X_train, diabetes_y_train, color='black')\\n\",\n- \"plt.scatter(diabetes_X_test, diabetes_y_test, color='red')\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {},\n- \"outputs\": [],\n- \"source\": [\n- \"diabetes.data.shape\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"# Example 2: Implement three different algorithms to train linear regression model\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"source\": [\n- \"## Algorithm 1: Linear Regression - Direct Solve (no regularization) \"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"#### Least squares formulation\\n\",\n- \"w* = argminw ||Xw-y||2 = argminw (y - Xw)'(y - Xw) = argminw (w'(X'X)w - w'(X'y))/2\\n\",\n- \"\\n\",\n- \"#### Setting the gradient\\n\",\n- \"dw = (X'X)w - (X'y) to 0, w = (X'X)-1(X' y) = solve(X'X, X'y)\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- 
\"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"script = \\\"\\\"\\\"\\n\",\n- \" # add constant feature to X to model intercept\\n\",\n- \" X = cbind(X, matrix(1, rows=nrow(X), cols=1))\\n\",\n- \" A = t(X) %*% X\\n\",\n- \" b = t(X) %*% y\\n\",\n- \" w = solve(A, b)\\n\",\n- \" bias = as.scalar(w[nrow(w),1])\\n\",\n- \" w = w[1:nrow(w)-1,]\\n\",\n- \"\\\"\\\"\\\"\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true,\n- \"scrolled\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"prog = dml(script).input(X=diabetes_X_train, y=diabetes_y_train).output('w', 'bias')\\n\",\n- \"w, bias = ml.execute(prog).get('w','bias')\\n\",\n- \"w = w.toNumPy()\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"scrolled\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"plt.scatter(diabetes_X_train, diabetes_y_train, color='black')\\n\",\n- \"plt.scatter(diabetes_X_test, diabetes_y_test, color='red')\\n\",\n- \"\\n\",\n- \"plt.plot(diabetes_X_test, (w*diabetes_X_test)+bias, color='blue', linestyle ='dotted')\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"source\": [\n- \"## Algorithm 2: Linear Regression - Batch Gradient Descent (no regularization)\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"#### Algorithm\\n\",\n- \"`Step 1: Start with an initial point \\n\",\n- \"while(not converged) { \\n\",\n- \" Step 2: Compute gradient dw. \\n\",\n- \" Step 3: Compute stepsize alpha. \\n\",\n- \" Step 4: Update: wnew = wold + alpha*dw \\n\",\n- \"}`\\n\",\n- \"\\n\",\n- \"#### Gradient formula\\n\",\n- \"`dw = r = (X'X)w - (X'y)`\\n\",\n- \"\\n\",\n- \"#### Step size formula\\n\",\n- \"`Find number alpha to minimize f(w + alpha*r) \\n\",\n- \"alpha = -(r'r)/(r'X'Xr)`\\n\",\n- \"\\n\",\n- \"![Gradient Descent](http://blog.datumbox.com/wp-content/uploads/2013/10/gradient-descent.png)\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"script = \\\"\\\"\\\"\\n\",\n- \" # add constant feature to X to model intercepts\\n\",\n- \" X = cbind(X, matrix(1, rows=nrow(X), cols=1))\\n\",\n- \" max_iter = 100\\n\",\n- \" w = matrix(0, rows=ncol(X), cols=1)\\n\",\n- \" for(i in 1:max_iter){\\n\",\n- \" XtX = t(X) %*% X\\n\",\n- \" dw = XtX %*%w - t(X) %*% y\\n\",\n- \" alpha = -(t(dw) %*% dw) / (t(dw) %*% XtX %*% dw)\\n\",\n- \" w = w + dw*alpha\\n\",\n- \" }\\n\",\n- \" bias = as.scalar(w[nrow(w),1])\\n\",\n- \" w = w[1:nrow(w)-1,] \\n\",\n- \"\\\"\\\"\\\"\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"prog = dml(script).input(X=diabetes_X_train, y=diabetes_y_train).output('w').output('bias')\\n\",\n- \"w, bias = ml.execute(prog).get('w', 'bias')\\n\",\n- \"w = w.toNumPy()\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"scrolled\": false\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"plt.scatter(diabetes_X_train, diabetes_y_train, color='black')\\n\",\n- \"plt.scatter(diabetes_X_test, diabetes_y_test, color='red')\\n\",\n- \"\\n\",\n- \"plt.plot(diabetes_X_test, (w*diabetes_X_test)+bias, color='red', linestyle ='dashed')\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": 
[\n- \"# Algorithm 3: Linear Regression - Conjugate Gradient (no regularization)\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"Problem with gradient descent: Takes very similar directions many times\\n\",\n- \"\\n\",\n- \"Solution: Enforce conjugacy\\n\",\n- \"\\n\",\n- \"`Step 1: Start with an initial point \\n\",\n- \"while(not converged) {\\n\",\n- \" Step 2: Compute gradient dw.\\n\",\n- \" Step 3: Compute stepsize alpha.\\n\",\n- \" Step 4: Compute next direction p by enforcing conjugacy with previous direction.\\n\",\n- \" Step 5: Update: w_new = w_old + alpha*p\\n\",\n- \"}`\\n\",\n- \"\\n\",\n- \"![Gradient Descent vs Conjugate Gradient](http://i.stack.imgur.com/zh1HH.png)\\n\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"script = \\\"\\\"\\\"\\n\",\n- \" # add constant feature to X to model intercepts\\n\",\n- \" X = cbind(X, matrix(1, rows=nrow(X), cols=1))\\n\",\n- \" m = ncol(X); i = 1; \\n\",\n- \" max_iter = 20;\\n\",\n- \" w = matrix (0, rows = m, cols = 1); # initialize weights to 0\\n\",\n- \" dw = - t(X) %*% y; p = - dw; # dw = (X'X)w - (X'y)\\n\",\n- \" norm_r2 = sum (dw ^ 2); \\n\",\n- \" for(i in 1:max_iter) {\\n\",\n- \" q = t(X) %*% (X %*% p)\\n\",\n- \" alpha = norm_r2 / sum (p * q); # Minimizes f(w - alpha*r)\\n\",\n- \" w = w + alpha * p; # update weights\\n\",\n- \" dw = dw + alpha * q; \\n\",\n- \" old_norm_r2 = norm_r2; norm_r2 = sum (dw ^ 2);\\n\",\n- \" p = -dw + (norm_r2 / old_norm_r2) * p; # next direction - conjugacy to previous direction\\n\",\n- \" i = i + 1;\\n\",\n- \" }\\n\",\n- \" bias = as.scalar(w[nrow(w),1])\\n\",\n- \" w = w[1:nrow(w)-1,] \\n\",\n- \"\\\"\\\"\\\"\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"prog = dml(script).input(X=diabetes_X_train, y=diabetes_y_train).output('w').output('bias')\\n\",\n- \"w, bias = ml.execute(prog).get('w','bias')\\n\",\n- \"w = w.toNumPy()\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"scrolled\": false\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"plt.scatter(diabetes_X_train, diabetes_y_train, color='black')\\n\",\n- \"plt.scatter(diabetes_X_test, diabetes_y_test, color='red')\\n\",\n- \"\\n\",\n- \"plt.plot(diabetes_X_test, (w*diabetes_X_test)+bias, color='red', linestyle ='dashed')\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"# Example 3: Invoke existing SystemML algorithm script LinearRegDS.dml using MLContext API\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {},\n- \"outputs\": [],\n- \"source\": [\n- \"import os\\n\",\n- \"from subprocess import call\\n\",\n- \"\\n\",\n- \"dirName = os.path.dirname(os.path.realpath(\\\"~\\\")) + \\\"/scripts\\\"\\n\",\n- \"call([\\\"mkdir\\\", \\\"-p\\\", dirName])\\n\",\n- \"call([\\\"wget\\\", \\\"-N\\\", \\\"-q\\\", \\\"-P\\\", dirName, \\\"https://raw.githubusercontent.com/apache/systemml/master/scripts/algorithms/LinearRegDS.dml\\\"])\\n\",\n- \"\\n\",\n- \"scriptName = dirName + \\\"/LinearRegDS.dml\\\"\\n\",\n- \"dml_script = dmlFromResource(scriptName)\\n\",\n- \"\\n\",\n- \"prog = dml_script.input(X=diabetes_X_train, y=diabetes_y_train).input('$icpt',1.0).output('beta_out')\\n\",\n- \"w = ml.execute(prog).get('beta_out')\\n\",\n- 
\"w = w.toNumPy()\\n\",\n- \"bias=w[1]\\n\",\n- \"print (bias)\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {},\n- \"outputs\": [],\n- \"source\": [\n- \"plt.scatter(diabetes_X_train, diabetes_y_train, color='black')\\n\",\n- \"plt.scatter(diabetes_X_test, diabetes_y_test, color='red')\\n\",\n- \"\\n\",\n- \"plt.plot(diabetes_X_test, (w[0]*diabetes_X_test)+bias, color='red', linestyle ='dashed')\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"# Example 4: Invoke existing SystemML algorithm using scikit-learn/SparkML pipeline like API\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"*mllearn* API allows a Python programmer to invoke SystemML's algorithms using scikit-learn like API as well as Spark's MLPipeline API.\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"from pyspark.sql import SQLContext\\n\",\n- \"from systemml.mllearn import LinearRegression\\n\",\n- \"sqlCtx = SQLContext(sc)\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {},\n- \"outputs\": [],\n- \"source\": [\n- \"regr = LinearRegression(sqlCtx)\\n\",\n- \"# Train the model using the training sets\\n\",\n- \"regr.fit(diabetes_X_train, diabetes_y_train)\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"predictions = regr.predict(diabetes_X_test)\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {},\n- \"outputs\": [],\n- \"source\": [\n- \"# Use the trained model to perform prediction\\n\",\n- \"%matplotlib inline\\n\",\n- \"plt.scatter(diabetes_X_train, diabetes_y_train, color='black')\\n\",\n- \"plt.scatter(diabetes_X_test, diabetes_y_test, color='red')\\n\",\n- \"\\n\",\n- \"plt.plot(diabetes_X_test, predictions, color='black')\"\n- ]\n- }\n- ],\n- \"metadata\": {\n- \"kernelspec\": {\n- \"display_name\": \"Python 2\",\n- \"language\": \"python\",\n- \"name\": \"python2\"\n- },\n- \"language_info\": {\n- \"codemirror_mode\": {\n- \"name\": \"ipython\",\n- \"version\": 2\n- },\n- \"file_extension\": \".py\",\n- \"mimetype\": \"text/x-python\",\n- \"name\": \"python\",\n- \"nbconvert_exporter\": \"python\",\n- \"pygments_lexer\": \"ipython2\",\n- \"version\": \"2.7.13\"\n- }\n- },\n- \"nbformat\": 4,\n- \"nbformat_minor\": 1\n-}\n" } ]
Java
Apache License 2.0
apache/systemds
[Minor]: minor additions to notebooks.
49,736
12.12.2017 11:45:16
28,800
70c7952b92fe88541aeaf11a2ce3a7aea71c24b3
Add support for RNN and LSTM in Caffe2DML and Keras2DML. Closes
[ { "change_type": "MODIFY", "old_path": "docs/reference-guide-caffe2dml.md", "new_path": "docs/reference-guide-caffe2dml.md", "diff": "@@ -168,6 +168,61 @@ layer {\n}\n```\n+## Recurrent Layers\n+\n+### RNN Layer\n+\n+In a simple RNN, the output of the previous timestep is fed back in as an additional input at the current timestep.\n+\n+Invokes [nn/layers/rnn.dml](https://github.com/apache/systemml/blob/master/scripts/nn/layers/rnn.dml) layer.\n+\n+**Required Parameters:**\n+\n+- num_output: number of output\n+- return_sequences: Whether to return output at all timesteps, or just for the final timestep.\n+\n+**Sample Usage:**\n+```\n+layer {\n+ top: \"rnn_1\"\n+ recurrent_param {\n+ return_sequences: false\n+ num_output: 32\n+ }\n+ type: \"RNN\"\n+ name: \"rnn_1\"\n+ bottom: \"rnn_1_input\"\n+}\n+```\n+\n+### LSTM Layer\n+\n+In an LSTM, an internal cell state is maintained, additive\n+interactions operate over the cell state at each timestep, and\n+some amount of this cell state is exposed as output at each\n+timestep. Additionally, the output of the previous timestep is fed\n+back in as an additional input at the current timestep.\n+\n+Invokes [nn/layers/lstm.dml](https://github.com/apache/systemml/blob/master/scripts/nn/layers/lstm.dml) layer.\n+\n+**Required Parameters:**\n+\n+- num_output: number of output\n+- return_sequences: Whether to return output at all timesteps, or just for the final timestep.\n+\n+**Sample Usage:**\n+```\n+layer {\n+ top: \"lstm_1\"\n+ recurrent_param {\n+ return_sequences: false\n+ num_output: 32\n+ }\n+ type: \"LSTM\"\n+ name: \"lstm_1\"\n+ bottom: \"lstm_1_input\"\n+}\n+```\n## Common Layers\n" }, { "change_type": "MODIFY", "old_path": "docs/release-process.md", "new_path": "docs/release-process.md", "diff": "@@ -382,6 +382,17 @@ file and remove all the `@Ignore` annotations from all the tests. 
Then run the N\nmvn -Dit.test=org.apache.sysml.test.gpu.NeuralNetworkOpTests verify -PgpuTests\n+# Run other GPU Unit Tests\n+\n+ rm result.txt\n+ for t in AggregateUnaryOpTests BinaryOpTests MatrixMatrixElementWiseOpTests RightIndexingTests AppendTest MatrixMultiplicationOpTest ReorgOpTests ScalarMatrixElementwiseOpTests UnaryOpTests\n+ do\n+ mvn -Dit.test=\"org.apache.sysml.test.gpu.\"$t verify -PgpuTests &> tmp.txt\n+ SUCCESS=`grep \"BUILD SUCCESS\" tmp.txt`\n+ echo $t\" => \"$SUCCESS >> result.txt\n+ rm tmp.txt\n+ done\n+\n# Voting\nFollowing a successful release candidate vote by SystemML PMC members on the SystemML mailing list, the release candidate\n" }, { "change_type": "MODIFY", "old_path": "scripts/nn/layers/lstm.dml", "new_path": "scripts/nn/layers/lstm.dml", "diff": "@@ -70,6 +70,12 @@ forward = function(matrix[double] X, matrix[double] W, matrix[double] b, int T,\n*/\nN = nrow(X)\nM = as.integer(ncol(W)/4)\n+ N1 = nrow(out0)\n+ if(N < N1) {\n+ # Allow for smaller out0 for last batch\n+ out0 = out0[1:N,]\n+ c0 = c0[1:N,]\n+ }\nout_prev = out0\nc_prev = c0\nc = c_prev\n@@ -161,6 +167,12 @@ backward = function(matrix[double] dout, matrix[double] dc,\n*/\nN = nrow(X)\nM = as.integer(ncol(W)/4)\n+ N1 = nrow(out0)\n+ if(N < N1) {\n+ # Allow for smaller out0 for last batch\n+ out0 = out0[1:N,]\n+ c0 = c0[1:N,]\n+ }\ndX = matrix(0, rows=N, cols=T*D)\ndW = matrix(0, rows=D+M, cols=4*M)\ndb = matrix(0, rows=1, cols=4*M)\n" }, { "change_type": "MODIFY", "old_path": "scripts/nn/layers/rnn.dml", "new_path": "scripts/nn/layers/rnn.dml", "diff": "@@ -54,6 +54,11 @@ forward = function(matrix[double] X, matrix[double] W, matrix[double] b, int T,\n*/\nN = nrow(X)\nM = ncol(W)\n+ N1 = nrow(out0)\n+ if(N < N1) {\n+ # Allow for smaller out0 for last batch\n+ out0 = out0[1:N,]\n+ }\nout_prev = out0\nif (return_sequences) {\nout = matrix(0, rows=N, cols=T*M)\n@@ -113,6 +118,12 @@ backward = function(matrix[double] dout, matrix[double] X, matrix[double] W, mat\n*/\nN = nrow(X)\nM = ncol(W)\n+ N1 = nrow(out0)\n+ if(N < N1) {\n+ # Allow for smaller out0 for last batch\n+ out0 = out0[1:N,]\n+ cache_out = cache_out[,1:(N*M)]\n+ }\ndX = matrix(0, rows=N, cols=T*D)\ndW = matrix(0, rows=D+M, cols=M)\ndb = matrix(0, rows=1, cols=M)\n" }, { "change_type": "MODIFY", "old_path": "src/main/proto/caffe/caffe.proto", "new_path": "src/main/proto/caffe/caffe.proto", "diff": "@@ -959,6 +959,12 @@ message RecurrentParameter {\n// blobs. 
The number of additional bottom/top blobs required depends on the\n// recurrent architecture -- e.g., 1 for RNNs, 2 for LSTMs.\noptional bool expose_hidden = 5 [default = false];\n+\n+ // ========================================================================\n+ // SystemML extension:\n+ // Whether to return `out` at all timesteps, or just for the final timestep.\n+ optional bool return_sequences = 6 [default = false];\n+ // ========================================================================\n}\n// Message that stores parameters used by ReductionLayer\n" }, { "change_type": "MODIFY", "old_path": "src/main/python/systemml/mllearn/keras2caffe.py", "new_path": "src/main/python/systemml/mllearn/keras2caffe.py", "diff": "@@ -60,6 +60,8 @@ supportedLayers = {\nkeras.layers.Conv2D: 'Convolution',\nkeras.layers.MaxPooling2D: 'Pooling',\nkeras.layers.AveragePooling2D: 'Pooling',\n+ keras.layers.SimpleRNN: 'RNN',\n+ keras.layers.LSTM: 'LSTM',\nkeras.layers.Flatten: 'None',\nkeras.layers.BatchNormalization: 'None',\nkeras.layers.Activation: 'None'\n@@ -122,6 +124,9 @@ def _parseActivation(layer, customLayerName=None):\nreturn { 'layer':{'name':layer.name, 'type':supportedCaffeActivations[kerasActivation], 'top':layer.name, 'bottom':_getBottomLayers(layer) }}\n+def _shouldParseActivation(layer):\n+ ignore_activation = [ keras.layers.SimpleRNN , keras.layers.LSTM ]\n+ return hasattr(layer, 'activation') and (type(layer) not in ignore_activation) and keras.activations.serialize(layer.activation) != 'linear'\ndef _parseKerasLayer(layer):\nlayerType = type(layer)\n@@ -135,7 +140,7 @@ def _parseKerasLayer(layer):\nret = { 'layer': { 'name':layer.name, 'type':'Data', 'top':layer.name, paramName:param[paramName] } }\nelse:\nret = { 'layer': { 'name':layer.name, 'type':supportedLayers[layerType], 'bottom':_getBottomLayers(layer), 'top':layer.name, paramName:param[paramName] } }\n- return [ ret, _parseActivation(layer, layer.name + '_activation') ] if hasattr(layer, 'activation') and keras.activations.serialize(layer.activation) != 'linear' else [ ret ]\n+ return [ ret, _parseActivation(layer, layer.name + '_activation') ] if _shouldParseActivation(layer) else [ ret ]\ndef _parseBatchNorm(layer):\n@@ -164,6 +169,15 @@ def getPoolingParam(layer, pool='MAX'):\npadding = [layer.pool_size[0] / 2, layer.pool_size[1] / 2] if layer.padding == 'same' else [0, 0]\nreturn {'pool':pool, 'kernel_h':layer.pool_size[0], 'kernel_w':layer.pool_size[1], 'stride_h':stride[0],'stride_w':stride[1],'pad_h':padding[0], 'pad_w':padding[1]}\n+def getRecurrentParam(layer):\n+ if(not layer.use_bias):\n+ raise Exception('Only use_bias=True supported for recurrent layers')\n+ if(keras.activations.serialize(layer.activation) != 'tanh'):\n+ raise Exception('Only tanh activation supported for recurrent layers')\n+ if(layer.dropout != 0 or layer.recurrent_dropout != 0):\n+ raise Exception('Only dropout not supported for recurrent layers')\n+ return {'num_output': layer.units, 'return_sequences': str(layer.return_sequences).lower() }\n+\n# TODO: Update AveragePooling2D when we add maxpooling support\nlayerParamMapping = {\nkeras.layers.InputLayer: lambda l: \\\n@@ -184,6 +198,10 @@ layerParamMapping = {\n{'pooling_param': getPoolingParam(l, 'MAX')},\nkeras.layers.AveragePooling2D: lambda l: \\\n{'pooling_param': getPoolingParam(l, 'MAX')},\n+ keras.layers.SimpleRNN: lambda l: \\\n+ {'recurrent_param': getRecurrentParam(l)},\n+ keras.layers.LSTM: lambda l: \\\n+ {'recurrent_param': getRecurrentParam(l)},\n}\ndef _checkIfValid(myList, fn, 
errorMessage):\n@@ -235,6 +253,13 @@ def convertKerasToCaffeSolver(kerasModel, caffeNetworkFilePath, outCaffeSolverFi\nf.write(defaultSolver)\n+\n+def getInputMatrices(layer):\n+ if type(layer) == keras.layers.LSTM or type(layer) == keras.layers.SimpleRNN:\n+ weights = layer.get_weights()\n+ return [np.vstack((weights[0], weights[1])), np.matrix(weights[2]) ]\n+ else:\n+ return [ getNumPyMatrixFromKerasWeight(param) for param in layer.get_weights() ]\n+\ndef convertKerasToSystemMLModel(spark, kerasModel, outDirectory):\n_checkIfValid(kerasModel.layers, lambda layer: False if len(layer.get_weights()) <= 4 or len(layer.get_weights()) != 3 else True, 'Unsupported number of weights:')\nlayers = [layer for layer in kerasModel.layers if len(layer.get_weights()) > 0]\n@@ -243,7 +268,7 @@ def convertKerasToSystemMLModel(spark, kerasModel, outDirectory):\ndmlLines = []\nscript_java = sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dml('')\nfor layer in layers:\n- inputMatrices = [ getNumPyMatrixFromKerasWeight(param) for param in layer.get_weights() ]\n+ inputMatrices = getInputMatrices(layer)\npotentialVar = [ layer.name + '_weight', layer.name + '_bias', layer.name + '_1_weight', layer.name + '_1_bias' ]\nfor i in range(len(inputMatrices)):\ndmlLines = dmlLines + [ 'write(' + potentialVar[i] + ', \"' + outDirectory + '/' + potentialVar[i] + '.mtx\", format=\"binary\");\\n' ]\n" }, { "change_type": "MODIFY", "old_path": "src/main/python/tests/test_nn_numpy.py", "new_path": "src/main/python/tests/test_nn_numpy.py", "diff": "@@ -38,7 +38,7 @@ import unittest\nimport numpy as np\nfrom keras.models import Sequential\n-from keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout,Flatten\n+from keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, Flatten, LSTM, SimpleRNN\nfrom keras import backend as K\nfrom keras.models import Model\nfrom systemml.mllearn import Keras2DML\n@@ -82,5 +82,56 @@ class TestNNLibrary(unittest.TestCase):\nkeras_model.add(Dense(10, activation='softmax'))\nself.failUnless(are_predictions_all_close(keras_model))\n+ def test_simplernn_predictions1(self):\n+ data_dim = 16\n+ timesteps = 8\n+ num_classes = 10\n+ batch_size = 64\n+ model = Sequential()\n+ model.add(SimpleRNN(32, return_sequences=False, input_shape=(timesteps, data_dim)))\n+ model.add(Dense(10, activation='softmax'))\n+ x_train = np.random.random((batch_size, timesteps, data_dim))\n+ y_train = np.random.random((batch_size, num_classes))\n+ from systemml.mllearn import Keras2DML\n+ sysml_model = Keras2DML(spark, model, input_shape=(timesteps,data_dim,1), weights='weights_dir').set(debug=True)\n+ keras_preds = model.predict(x_train).flatten()\n+ sysml_preds = sysml_model.predict_proba(x_train.reshape((batch_size, -1))).flatten()\n+ self.failUnless(np.allclose(sysml_preds, keras_preds))\n+\n+ def test_simplernn_predictions2(self):\n+ data_dim = 16\n+ timesteps = 8\n+ num_classes = 10\n+ batch_size = 100\n+ model = Sequential()\n+ model.add(SimpleRNN(32, return_sequences=False, input_shape=(timesteps, data_dim)))\n+ model.add(Dense(10, activation='softmax'))\n+ x_train = np.random.random((batch_size, timesteps, data_dim))\n+ y_train = np.random.random((batch_size, num_classes))\n+ from systemml.mllearn import Keras2DML\n+ sysml_model = Keras2DML(spark, model, input_shape=(timesteps,data_dim,1), weights='weights_dir').set(debug=True)\n+ keras_preds = model.predict(x_train).flatten()\n+ sysml_preds = sysml_model.predict_proba(x_train.reshape((batch_size, -1))).flatten()\n+ 
self.failUnless(np.allclose(sysml_preds, keras_preds))\n+\n+ def test_lstm_predictions1(self):\n+ data_dim = 32\n+ timesteps = 8\n+ num_classes = 10\n+ batch_size = 64\n+ w1 = np.random.random((data_dim, 4*data_dim))\n+ w2 = np.random.random((data_dim, 4*data_dim))\n+ b = np.zeros(128)\n+ model = Sequential()\n+ model.add(LSTM(32, return_sequences=False, recurrent_activation='sigmoid', input_shape=(timesteps, data_dim), weights=[w1, w2, b]))\n+ model.add(Dense(10, activation='softmax'))\n+ x_train = np.random.random((batch_size, timesteps, data_dim))\n+ y_train = np.random.random((batch_size, num_classes))\n+ from systemml.mllearn import Keras2DML\n+ sysml_model = Keras2DML(spark, model, input_shape=(timesteps,data_dim,1), weights='weights_dir').set(debug=True)\n+ keras_preds = model.predict(x_train)\n+ sysml_preds = sysml_model.predict_proba(x_train.reshape((batch_size, -1)))\n+ self.failUnless(np.allclose(sysml_preds, keras_preds))\n+\nif __name__ == '__main__':\nunittest.main()\n" }, { "change_type": "MODIFY", "old_path": "src/main/scala/org/apache/sysml/api/dl/CaffeLayer.scala", "new_path": "src/main/scala/org/apache/sysml/api/dl/CaffeLayer.scala", "diff": "@@ -882,6 +882,80 @@ class InnerProduct(val param: LayerParameter, val id: Int, val net: CaffeNetwork\noverride def biasShape(): Array[Int] = Array(1, numNeurons.toInt)\n}\n+\n+class RNN(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer with HasWeight with HasBias {\n+ val return_sequences = param.getRecurrentParam.getReturnSequences\n+\n+ // ---------------------------------------------------------\n+ // Note: since Caffe does not have return_sequences, the number of outputs is the same as the number of neurons\n+ def M():String = param.getRecurrentParam.getNumOutput.toString\n+ // ---------------------------------------------------------\n+\n+ def timesteps():String = bottomLayerOutputShape._1\n+ def input_features():String = bottomLayerOutputShape._2\n+ def output_features():Int = param.getRecurrentParam.getNumOutput\n+ override def sourceFileName = \"rnn\"\n+ override def outputShape = if(return_sequences) (timesteps, output_features.toString, \"1\") else (output_features.toString, \"1\", \"1\")\n+ override def biasShape(): Array[Int] = Array(1, M.toInt)\n+ override def weightShape(): Array[Int] = Array(input_features.toInt + M.toInt, M.toInt)\n+\n+ override def init(dmlScript: StringBuilder) = {\n+ invokeInit(dmlScript, List[String](weight, bias, out0), Caffe2DML.batchSize, input_features, M)\n+ }\n+\n+ override def forward(dmlScript: StringBuilder, isPrediction: Boolean) = {\n+ invokeForward(dmlScript, List[String](out, cache_out), X, weight, bias, timesteps, input_features, return_sequences.toString.toUpperCase, out0)\n+ }\n+\n+ override def backward(dmlScript: StringBuilder, outSuffix: String) = {\n+ invokeBackward(dmlScript, outSuffix, List[String](\"dOut\" + id, dWeight, dBias, dout0), dout, X, weight, bias,\n+ timesteps, input_features, return_sequences.toString.toUpperCase, out0, cache_out)\n+ }\n+\n+ val cache_out = \"cache_out_\" + id\n+ val out0 = \"out0_\" + id\n+ val dout0 = \"dout0_\" + id\n+}\n+\n+class LSTM(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer with HasWeight with HasBias {\n+ val return_sequences = param.getRecurrentParam.getReturnSequences\n+\n+ // ---------------------------------------------------------\n+ // Note: since Caffe does not have return_sequences, the number of outputs is the same as the number of neurons\n+ def M():String = 
param.getRecurrentParam.getNumOutput.toString\n+ // ---------------------------------------------------------\n+\n+ def timesteps():String = bottomLayerOutputShape._1\n+ def input_features():String = bottomLayerOutputShape._2\n+ def output_features():Int = param.getRecurrentParam.getNumOutput\n+ override def sourceFileName = \"lstm\"\n+ override def outputShape = if(return_sequences) (timesteps, output_features.toString, \"1\") else (output_features.toString, \"1\", \"1\")\n+ override def biasShape(): Array[Int] = Array(1, 4*M.toInt)\n+ override def weightShape(): Array[Int] = Array(input_features.toInt + M.toInt, 4*M.toInt)\n+\n+ override def init(dmlScript: StringBuilder) = {\n+ invokeInit(dmlScript, List[String](weight, bias, out0, c0), Caffe2DML.batchSize, input_features, M)\n+ }\n+\n+ override def forward(dmlScript: StringBuilder, isPrediction: Boolean) = {\n+ invokeForward(dmlScript, List[String](out, c, cache_out, cache_c, cache_ifog), X, weight, bias, timesteps, input_features, return_sequences.toString.toUpperCase, out0, c0)\n+ }\n+\n+ override def backward(dmlScript: StringBuilder, outSuffix: String) = {\n+ invokeBackward(dmlScript, outSuffix, List[String](\"dOut\" + id, dWeight, dBias, dout0, dc0), dout, dc0, X, weight, bias,\n+ timesteps, input_features, return_sequences.toString.toUpperCase, out0, c0, cache_out, cache_c, cache_ifog)\n+ }\n+\n+ val cache_out = \"cache_out_\" + id\n+ val out0 = \"out0_\" + id\n+ val dout0 = \"dout0_\" + id\n+ val c0 = \"cellState0_\" + id\n+ val dc0 = \"dcellState0_\" + id\n+ val c = \"cellState_\" + id\n+ val cache_c = \"cache_c_\" + id\n+ val cache_ifog = \"cache_ifog_\" + id\n+}\n+\nclass MaxPooling(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer {\n// -------------------------------------------------\noverride def sourceFileName = \"max_pool2d_builtin\"\n" }, { "change_type": "MODIFY", "old_path": "src/main/scala/org/apache/sysml/api/dl/CaffeNetwork.scala", "new_path": "src/main/scala/org/apache/sysml/api/dl/CaffeNetwork.scala", "diff": "@@ -246,6 +246,8 @@ class CaffeNetwork(netFilePath: String, val currentPhase: Phase, var numChannels\ncase \"deconvolution\" => new DeConvolution(param, id, this)\ncase \"threshold\" => new Threshold(param, id, this)\ncase \"softmax\" => new Softmax(param, id, this)\n+ case \"rnn\" => new RNN(param, id, this)\n+ case \"lstm\" => new LSTM(param, id, this)\ncase _ => throw new LanguageException(\"Layer of type \" + param.getType + \" is not supported\")\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-540] Add support for RNN and LSTM in Caffe2DML and Keras2DML Closes #707.
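A note on the new proto field in this record: `return_sequences` is read on the Scala side via `param.getRecurrentParam.getReturnSequences`, so it can also be set programmatically through the protobuf-generated Java builders. The sketch below is illustrative only; the `caffe.Caffe` outer-class and package naming is an assumption about how protoc typically generates these classes, not something confirmed by the record above.

    import caffe.Caffe.LayerParameter;     // assumed generated class names
    import caffe.Caffe.RecurrentParameter;

    // Build an LSTM layer config that keeps only the final timestep, matching
    // the Keras default return_sequences=False translated by keras2caffe.py.
    LayerParameter lstm = LayerParameter.newBuilder()
        .setName("lstm1")
        .setType("LSTM") // dispatched to the new LSTM CaffeLayer by CaffeNetwork
        .setRecurrentParam(RecurrentParameter.newBuilder()
            .setNumOutput(32)            // M: number of hidden neurons
            .setReturnSequences(false))  // emit out only for the last timestep
        .build();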
49,738
08.12.2017 20:27:58
28,800
ead46f728a0f94237ecb63f95e02a757d7c216d1
New abstraction for large dense row blocks This patch introduces a new abstraction for dense row blocks, which is the basis for supporting both small and large (>16GB) dense blocks, while enabling memory-efficient linearized representations and efficient operations for entire dense blocks.
[ { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlock.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+\n+package org.apache.sysml.runtime.matrix.data;\n+\n+import java.io.Serializable;\n+\n+/**\n+ * This DenseBlock is an abstraction for different dense, row-major\n+ * matrix formats. For efficient dense operations, this API does not\n+ * expose a row but a row-block abstraction, where a block can contain\n+ * one or many contiguous rows.\n+ *\n+ */\n+public abstract class DenseBlock implements Serializable\n+{\n+ private static final long serialVersionUID = 7517220490270237832L;\n+\n+ public enum Type {\n+ DRB, //dense row block\n+ LDBR, //large dense row block\n+ }\n+\n+ /**\n+ * Resets the dense block by deleting non-zero values. After this\n+ * call all countNonZeros() calls are guaranteed to return 0.\n+ */\n+ public abstract void reset();\n+\n+ /**\n+ * Resets the dense block by deleting non-zero values. After this\n+ * call all countNonZeros() calls are guaranteed to return 0. 
If\n+ * the new dimensions exceed the current capacity, the underlying\n+ * storage is extended accordingly.\n+ *\n+ * @param rlen number of rows\n+ * @param clen number of columns\n+ */\n+ public abstract void reset(int rlen, int clen);\n+\n+ /**\n+ * Get the number of rows.\n+ *\n+ * @return number of rows\n+ */\n+ public abstract int numRows();\n+\n+ /**\n+ * Get the number of allocated blocks.\n+ *\n+ * @return number of blocks\n+ */\n+ public abstract int numBlocks();\n+\n+ /**\n+ * Get the length of the dense block as the product\n+ * of row and column dimensions.\n+ *\n+ * @return length\n+ */\n+ public abstract long size();\n+\n+ /**\n+ * Get the total length of allocated blocks.\n+ *\n+ * @return capacity\n+ */\n+ public abstract long capacity();\n+\n+ /**\n+ * Compute the number of non-zero values, which potentially\n+ * makes a full pass over the underlying blocks.\n+ *\n+ * @return number of non-zeros\n+ */\n+ public abstract long countNonZeros();\n+\n+ /**\n+ * Get the allocated blocks.\n+ *\n+ * @return blocks\n+ */\n+ public abstract double[][] values();\n+\n+\n+ /**\n+ * Get an allocated block.\n+ *\n+ * @param bix block index\n+ * @return block\n+ */\n+ public abstract double[] values(int bix);\n+\n+ /**\n+ * Get the block index for a given row.\n+ *\n+ * @param r row index\n+ * @return block index\n+ */\n+ public abstract int index(int r);\n+\n+ /**\n+ * Get the position for a given row within\n+ * its associated block.\n+ *\n+ * @param r row index\n+ * @return block position\n+ */\n+ public abstract int pos(int r);\n+\n+ /**\n+ * Get the position for a given row and column\n+ * within the associated block.\n+ *\n+ * @param r row index\n+ * @param c column index\n+ * @return block position\n+ */\n+ public abstract int pos(int r, int c);\n+\n+ /**\n+ * Set the given value for a given row and column.\n+ *\n+ * @param r row index\n+ * @param c column index\n+ * @param v value\n+ */\n+ public abstract void set(int r, int c, double v);\n+\n+ /**\n+ * Get the value for a given row and column.\n+ *\n+ * @param r row index\n+ * @param c column index\n+ * @return value\n+ */\n+ public abstract double get(int r, int c);\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2043] New abstraction for large dense row blocks This patch introduces a new abstraction for dense row blocks, which is the basis for supporting both small and large (>16GB) dense blocks, while enabling memory-efficient linearized representations and efficient operations for entire dense blocks.
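To make the row-block contract concrete, here is a small illustrative kernel (not from the commit) written purely against the abstract methods declared above. Because it only touches numBlocks() and values(bix), the identical code serves both a single contiguous array and a partitioned >16GB layout; it assumes block arrays are sized exactly to their logical length, as the concrete implementations in the follow-up commit allocate them.

    // Scale all cells of a dense block in place, one contiguous block at a time.
    public static void scale(DenseBlock db, double alpha) {
        for( int bi = 0; bi < db.numBlocks(); bi++ ) {
            double[] vals = db.values(bi); // one contiguous row block
            for( int i = 0; i < vals.length; i++ )
                vals[i] *= alpha;
        }
    }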
49,738
08.12.2017 21:17:57
28,800
9526f7d943b78cc5c15b032a0f2e73a4f7e501d4
[SYSTEMML-2044,2045] New dense row block implementations (DRB, LDRB) This patch adds small and large dense row block implementations as instantiations of the new dense block abstraction. Furthermore, this also includes a basic factory for constructing these blocks according to the given row and column dimensions.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlock.java", "diff": "@@ -35,7 +35,7 @@ public abstract class DenseBlock implements Serializable\npublic enum Type {\nDRB, //dense row block\n- LDBR, //large dense row block\n+ LDRB, //large dense row block\n}\n/**\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockDRB.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+\n+package org.apache.sysml.runtime.matrix.data;\n+\n+import java.util.Arrays;\n+\n+public class DenseBlockDRB extends DenseBlock\n+{\n+ private static final long serialVersionUID = 8546723684649816489L;\n+\n+ private double[] data;\n+ private int rlen;\n+ private int clen;\n+\n+ public DenseBlockDRB(int rlen, int clen) {\n+ reset(rlen, clen);\n+ }\n+\n+ @Override\n+ public void reset() {\n+ reset(rlen, clen);\n+ }\n+\n+ @Override\n+ public void reset(int rlen, int clen) {\n+ int len = rlen * clen;\n+ if( len < capacity() )\n+ Arrays.fill(data, 0, len, 0);\n+ else\n+ data = new double[len];\n+ this.rlen = rlen;\n+ this.clen = clen;\n+ }\n+\n+ @Override\n+ public int numRows() {\n+ return rlen;\n+ }\n+\n+ @Override\n+ public int numBlocks() {\n+ return 1;\n+ }\n+\n+ @Override\n+ public long size() {\n+ return rlen * clen;\n+ }\n+\n+ @Override\n+ public long capacity() {\n+ return (data!=null) ? data.length : -1;\n+ }\n+\n+ @Override\n+ public long countNonZeros() {\n+ final int len = rlen * clen;\n+ int nnz = 0;\n+ for(int i=0; i<len; i++)\n+ nnz += (data[i]!=0) ? 1 : 0;\n+ return nnz;\n+ }\n+\n+ @Override\n+ public double[][] values() {\n+ return new double[][]{data};\n+ }\n+\n+ @Override\n+ public double[] values(int bix) {\n+ return data;\n+ }\n+\n+ @Override\n+ public int index(int r) {\n+ return 0;\n+ }\n+\n+ @Override\n+ public int pos(int r) {\n+ return r * clen;\n+ }\n+\n+ @Override\n+ public int pos(int r, int c) {\n+ return r * clen + c;\n+ }\n+\n+ @Override\n+ public void set(int r, int c, double v) {\n+ data[pos(r, c)] = v;\n+ }\n+\n+ @Override\n+ public double get(int r, int c) {\n+ return data[pos(r, c)];\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockFactory.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. 
The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+\n+package org.apache.sysml.runtime.matrix.data;\n+\n+public abstract class DenseBlockFactory\n+{\n+ public static DenseBlock createDenseBlock(int rlen, int clen) {\n+ DenseBlock.Type type = ((long)rlen*clen < Integer.MAX_VALUE) ?\n+ DenseBlock.Type.DRB : DenseBlock.Type.LDRB;\n+ return createDenseBlock(type, rlen, clen);\n+ }\n+\n+ public static DenseBlock createDenseBlock(DenseBlock.Type type, int rlen, int clen) {\n+ switch( type ) {\n+ case DRB: return new DenseBlockDRB(rlen, clen);\n+ case LDRB: return new DenseBlockLDRB(rlen, clen);\n+ default:\n+ throw new RuntimeException(\"Unexpected dense block type: \"+type.name());\n+ }\n+ }\n+\n+ public static boolean isDenseBlockType(DenseBlock sblock, DenseBlock.Type type) {\n+ return (getDenseBlockType(sblock) == type);\n+ }\n+\n+ public static DenseBlock.Type getDenseBlockType(DenseBlock dblock) {\n+ return (dblock instanceof DenseBlockDRB) ? DenseBlock.Type.DRB :\n+ (dblock instanceof DenseBlockLDRB) ? DenseBlock.Type.LDRB : null;\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+\n+package org.apache.sysml.runtime.matrix.data;\n+\n+import java.util.Arrays;\n+\n+public class DenseBlockLDRB extends DenseBlock\n+{\n+ private static final long serialVersionUID = -7285459683402612969L;\n+\n+ private double[][] data;\n+ private int rlen;\n+ private int clen;\n+ private int blen;\n+\n+ public DenseBlockLDRB(int rlen, int clen) {\n+ this(rlen, clen, blocksize(rlen, clen));\n+ }\n+\n+ public DenseBlockLDRB(int rlen, int clen, int blen) {\n+ reset(rlen, clen, blen);\n+ }\n+\n+ @Override\n+ public void reset() {\n+ reset(rlen, clen, blen);\n+ }\n+\n+ @Override\n+ public void reset(int rlen, int clen) {\n+ reset(rlen, clen, blen);\n+ }\n+\n+ private void reset(int rlen, int clen, int blen) {\n+ long llen = (long) rlen * clen;\n+ int numPart = (int)Math.ceil((double)rlen / blen);\n+ if( this.blen == blen && llen < capacity() ) {\n+ for(int i=0; i<numPart; i++) {\n+ int len = (Math.min((i+1)*blen,rlen)-i*blen) * clen;\n+ Arrays.fill(data[i], 0, len, 0);\n+ }\n+ }\n+ else {\n+ data = new double[numPart][];\n+ for(int i=0; i<numPart; i++) {\n+ int len = (Math.min((i+1)*blen,rlen)-i*blen) * clen;\n+ data[i] = new double[len];\n+ }\n+ }\n+\n+ this.rlen = rlen;\n+ this.clen = clen;\n+ this.blen = blen;\n+ }\n+\n+ @Override\n+ public int numRows() {\n+ return rlen;\n+ }\n+\n+ @Override\n+ public int numBlocks() {\n+ return data.length;\n+ }\n+\n+ @Override\n+ public long size() {\n+ return (long)rlen * clen;\n+ }\n+\n+ @Override\n+ public long capacity() {\n+ long len = 0;\n+ for(int i=0; i<numBlocks(); i++)\n+ len += data[i].length;\n+ return len;\n+ }\n+\n+ @Override\n+ public long countNonZeros() {\n+ long nnz = 0;\n+ for(int i=0; i<numBlocks(); i++ ) {\n+ double[] a = values(i);\n+ for(int j=0; j<a.length; j++)\n+ nnz += (a[j]!=0) ? 1 : 0;\n+ }\n+ return nnz;\n+ }\n+\n+ @Override\n+ public double[][] values() {\n+ return data;\n+ }\n+\n+ @Override\n+ public double[] values(int bix) {\n+ return data[bix];\n+ }\n+\n+ @Override\n+ public int index(int r) {\n+ return r / blen;\n+ }\n+\n+ @Override\n+ public int pos(int r) {\n+ return (r % blen) * clen;\n+ }\n+\n+ @Override\n+ public int pos(int r, int c) {\n+ return (r % blen) * clen + c;\n+ }\n+\n+ @Override\n+ public void set(int r, int c, double v) {\n+ data[index(r)][pos(r, c)] = v;\n+ }\n+\n+ @Override\n+ public double get(int r, int c) {\n+ return data[index(r)][pos(r, c)];\n+ }\n+\n+ private static int blocksize(int rlen, int clen) {\n+ return Math.min(rlen, Integer.MAX_VALUE / clen);\n+ }\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2044,2045] New dense row block implementations (DRB, LDRB) This patch adds small and large dense row block implementations as instantiations of the new dense block abstraction. Furthermore, this also includes a basic factory for constructing these blocks according to the given row and column dimensions.
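Usage of the factory is then a one-liner, with the DRB/LDRB split falling out of the 2^31-1 cell boundary in createDenseBlock. The dimensions below are illustrative only; note the second call would eagerly allocate roughly 24GB of block arrays.

    import org.apache.sysml.runtime.matrix.data.DenseBlock;
    import org.apache.sysml.runtime.matrix.data.DenseBlockFactory;

    DenseBlock small = DenseBlockFactory.createDenseBlock(1000, 1000);    // 1e6 cells
    DenseBlock large = DenseBlockFactory.createDenseBlock(300000, 10000); // 3e9 cells
    assert DenseBlockFactory.isDenseBlockType(small, DenseBlock.Type.DRB);
    assert DenseBlockFactory.isDenseBlockType(large, DenseBlock.Type.LDRB);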
49,738
12.12.2017 16:16:04
28,800
47e50af3f48ed6b15574278dfb974640022e5619
Fix print instruction scheduling (sort by line numbers) This patch improves the new two-level instruction scheduling by additionally sorting the partition of root nodes by line numbers, which ensures that all prints appear in the order of their specification.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/compile/Dag.java", "new_path": "src/main/java/org/apache/sysml/lops/compile/Dag.java", "diff": "@@ -3625,10 +3625,11 @@ public class Dag<N extends Lop>\nprivate ArrayList<Lop> doTopologicalSortTwoLevelOrder(ArrayList<Lop> v) {\n//partition nodes into leaf/inner nodes and dag root nodes,\n- //sort leaf/inner nodes by ID to force depth-first scheduling\n+ //+ sort leaf/inner nodes by ID to force depth-first scheduling\n+ //+ sort root nodes by line numbers to force ordering of prints\nLop[] nodearray = Stream.concat(\nv.stream().filter(l -> !l.getOutputs().isEmpty()).sorted(Comparator.comparing(l -> l.getID())),\n- v.stream().filter(l -> l.getOutputs().isEmpty()))\n+ v.stream().filter(l -> l.getOutputs().isEmpty()).sorted(Comparator.comparing(l -> l.getBeginLine())))\n.toArray(Lop[]::new);\nreturn createIDMapping(nodearray);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2050] Fix print instruction scheduling (sort by line numbers) This patch improves the new two-level instruction scheduling by additionally sorting the partition of root nodes by line numbers, which ensures that all prints appear in the order of their specification.
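The two-level scheme is easy to reproduce in isolation. The sketch below uses a hypothetical Node class standing in for Lop, reduced to the three fields the ordering relies on: non-roots sort by creation ID to force depth-first scheduling, roots sort by begin line so prints fire in specification order.

    import java.util.Comparator;
    import java.util.List;
    import java.util.stream.Stream;

    class Node {
        final long id; final int beginLine; final boolean root;
        Node(long id, int beginLine, boolean root) {
            this.id = id; this.beginLine = beginLine; this.root = root;
        }
    }

    static Node[] twoLevelOrder(List<Node> v) {
        return Stream.concat(
            v.stream().filter(n -> !n.root).sorted(Comparator.comparingLong(n -> n.id)),
            v.stream().filter(n -> n.root).sorted(Comparator.comparingInt(n -> n.beginLine)))
            .toArray(Node[]::new);
    }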
49,698
17.12.2017 11:36:53
28,800
c382386a0f8aa17cc8107ee0d0f51306c2710cd9
New xor builtin functions over scalars
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/Hop.java", "new_path": "src/main/java/org/apache/sysml/hops/Hop.java", "diff": "@@ -1077,7 +1077,7 @@ public abstract class Hop implements ParseInfo\n// Operations that require two operands\npublic enum OpOp2 {\nPLUS, MINUS, MULT, DIV, MODULUS, INTDIV, LESS, LESSEQUAL, GREATER, GREATEREQUAL, EQUAL, NOTEQUAL,\n- MIN, MAX, AND, OR, LOG, POW, PRINT, CONCAT, QUANTILE, INTERQUANTILE, IQM,\n+ MIN, MAX, AND, OR, XOR, LOG, POW, PRINT, CONCAT, QUANTILE, INTERQUANTILE, IQM,\nCENTRALMOMENT, COVARIANCE, CBIND, RBIND, SOLVE, MEDIAN, INVALID,\n//fused ML-specific operators for performance\nMINUS_NZ, //sparse-safe minus: X-(mean*ppred(X,0,!=))\n@@ -1219,6 +1219,7 @@ public abstract class Hop implements ParseInfo\nHopsOpOp2LopsB.put(OpOp2.MIN, Binary.OperationTypes.MIN);\nHopsOpOp2LopsB.put(OpOp2.MAX, Binary.OperationTypes.MAX);\nHopsOpOp2LopsB.put(OpOp2.AND, Binary.OperationTypes.OR);\n+ HopsOpOp2LopsB.put(OpOp2.XOR, Binary.OperationTypes.XOR);\nHopsOpOp2LopsB.put(OpOp2.OR, Binary.OperationTypes.AND);\nHopsOpOp2LopsB.put(OpOp2.SOLVE, Binary.OperationTypes.SOLVE);\nHopsOpOp2LopsB.put(OpOp2.POW, Binary.OperationTypes.POW);\n@@ -1244,6 +1245,7 @@ public abstract class Hop implements ParseInfo\nHopsOpOp2LopsBS.put(OpOp2.MAX, BinaryScalar.OperationTypes.MAX);\nHopsOpOp2LopsBS.put(OpOp2.AND, BinaryScalar.OperationTypes.AND);\nHopsOpOp2LopsBS.put(OpOp2.OR, BinaryScalar.OperationTypes.OR);\n+ HopsOpOp2LopsBS.put(OpOp2.XOR, BinaryScalar.OperationTypes.XOR);\nHopsOpOp2LopsBS.put(OpOp2.LOG, BinaryScalar.OperationTypes.LOG);\nHopsOpOp2LopsBS.put(OpOp2.POW, BinaryScalar.OperationTypes.POW);\nHopsOpOp2LopsBS.put(OpOp2.PRINT, BinaryScalar.OperationTypes.PRINT);\n@@ -1442,6 +1444,7 @@ public abstract class Hop implements ParseInfo\nHopsOpOp2String.put(OpOp2.CBIND, \"cbind\");\nHopsOpOp2String.put(OpOp2.RBIND, \"rbind\");\nHopsOpOp2String.put(OpOp2.SOLVE, \"solve\");\n+ HopsOpOp2String.put(OpOp2.XOR, \"xor\");\n}\npublic static String getBinaryOpCode(OpOp2 op) {\n@@ -1528,6 +1531,7 @@ public abstract class Hop implements ParseInfo\nelse if( \"==\".equals(op) ) return OpOp2.EQUAL;\nelse if( \"!=\".equals(op) ) return OpOp2.NOTEQUAL;\nelse if( \"|\".equals(op) ) return OpOp2.OR;\n+ else if( \"xor\".equals(op) ) return OpOp2.XOR;\nelse if( \"&\".equals(op) ) return OpOp2.AND;\nelse if( \"log\".equals(op) ) return OpOp2.LOG;\nelse if( \"^\".equals(op) ) return OpOp2.POW;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/Binary.java", "new_path": "src/main/java/org/apache/sysml/lops/Binary.java", "diff": "@@ -35,7 +35,7 @@ public class Binary extends Lop\npublic enum OperationTypes {\nADD, SUBTRACT, MULTIPLY, DIVIDE, MINUS1_MULTIPLY, MODULUS, INTDIV, MATMULT,\nLESS_THAN, LESS_THAN_OR_EQUALS, GREATER_THAN, GREATER_THAN_OR_EQUALS, EQUALS, NOT_EQUALS,\n- AND, OR,\n+ AND, OR, XOR,\nMAX, MIN, POW, SOLVE, NOTSUPPORTED\n}\n@@ -157,6 +157,10 @@ public class Binary extends Lop\ncase OR:\nreturn \"||\";\n+ /* Binary Builtin Function */\n+ case XOR:\n+ return \"xor\";\n+\n/* Builtin Functions */\ncase MIN:\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/lops/BinaryScalar.java", "new_path": "src/main/java/org/apache/sysml/lops/BinaryScalar.java", "diff": "@@ -35,7 +35,7 @@ public class BinaryScalar extends Lop\npublic enum OperationTypes {\nADD, SUBTRACT, MULTIPLY, DIVIDE, MODULUS, INTDIV,\nLESS_THAN, LESS_THAN_OR_EQUALS, GREATER_THAN, GREATER_THAN_OR_EQUALS, EQUALS, NOT_EQUALS,\n- AND, OR,\n+ AND, 
OR, XOR,\nLOG,POW,MAX,MIN,PRINT,\nIQSIZE,\n}\n@@ -146,6 +146,10 @@ public class BinaryScalar extends Lop\ncase OR:\nreturn \"||\";\n+ /* Boolean built in binary function */\n+ case XOR:\n+ return \"xor\";\n+\n/* Builtin Functions */\ncase LOG:\nreturn \"log\";\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java", "new_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java", "diff": "@@ -446,6 +446,7 @@ public class BuiltinFunctionExpression extends DataIdentifier\noutput.setValueType(id.getValueType());\nbreak;\n+ case XOR:\ncase MIN:\ncase MAX:\n//min(X), min(X,s), min(s,X), min(s,r), min(X,Y)\n@@ -1342,6 +1343,7 @@ public class BuiltinFunctionExpression extends DataIdentifier\ncase CEIL:\ncase FLOOR:\ncase MEDIAN:\n+ case XOR:\nreturn true;\ndefault:\nreturn false;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "new_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "diff": "@@ -683,7 +683,7 @@ public class DMLTranslator\nl.addToDag(dag);\n}\n- // Instructions for Lobs DAGs\n+ // Instructions for Lops DAGs\ninstruct = dag.getJobs(sb, config);\nrtpb.addInstructions(instruct);\n}\n@@ -2661,6 +2661,12 @@ public class DMLTranslator\ncase CAST_AS_BOOLEAN:\ncurrBuiltinOp = new UnaryOp(target.getName(), target.getDataType(), ValueType.BOOLEAN, Hop.OpOp1.CAST_AS_BOOLEAN, expr);\nbreak;\n+\n+ case XOR:\n+ currBuiltinOp = new BinaryOp(target.getName(), target.getDataType(),\n+ ValueType.BOOLEAN, Hop.OpOp2.XOR, expr, expr2);\n+ break;\n+\ncase ABS:\ncase SIN:\ncase COS:\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/Expression.java", "new_path": "src/main/java/org/apache/sysml/parser/Expression.java", "diff": "@@ -135,7 +135,8 @@ public abstract class Expression implements ParseInfo\nTANH,\nTRACE,\nTRANS,\n- VAR\n+ VAR,\n+ XOR\n}\n/**\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/functionobjects/Builtin.java", "new_path": "src/main/java/org/apache/sysml/runtime/functionobjects/Builtin.java", "diff": "@@ -49,7 +49,9 @@ public class Builtin extends ValueFunction\nprivate static final long serialVersionUID = 3836744687789840574L;\n- public enum BuiltinCode { SIN, COS, TAN, SINH, COSH, TANH, ASIN, ACOS, ATAN, LOG, LOG_NZ, MIN, MAX, ABS, SIGN, SQRT, EXP, PLOGP, PRINT, PRINTF, NROW, NCOL, LENGTH, ROUND, MAXINDEX, MININDEX, STOP, CEIL, FLOOR, CUMSUM, CUMPROD, CUMMIN, CUMMAX, INVERSE, SPROP, SIGMOID, SELP }\n+ public enum BuiltinCode { SIN, COS, TAN, SINH, COSH, TANH, ASIN, ACOS, ATAN, LOG, LOG_NZ, MIN,\n+ MAX, ABS, SIGN, SQRT, EXP, PLOGP, PRINT, PRINTF, NROW, NCOL, LENGTH, ROUND, MAXINDEX, MININDEX,\n+ STOP, CEIL, FLOOR, CUMSUM, CUMPROD, CUMMIN, CUMMAX, INVERSE, SPROP, SIGMOID, SELP }\npublic BuiltinCode bFunc;\nprivate static final boolean FASTMATH = true;\n@@ -399,8 +401,6 @@ public class Builtin extends ValueFunction\ncase LOG_NZ:\n//faster in Math\nreturn (in1==0) ? 0 : (Math.log(in1)/Math.log(in2));\n-\n-\ndefault:\nthrow new DMLRuntimeException(\"Builtin.execute(): Unknown operation: \" + bFunc);\n}\n@@ -428,7 +428,6 @@ public class Builtin extends ValueFunction\nreturn (in1 >= in2) ? 1 : 0;\ncase MININDEX:\nreturn (in1 <= in2) ? 1 : 0;\n-\ndefault:\n// For performance reasons, avoid throwing an exception\nreturn -1;\n@@ -455,8 +454,6 @@ public class Builtin extends ValueFunction\n//faster in Math\nreturn (in1==0) ? 
0 : Math.log(in1)/Math.log(in2);\n-\n-\ndefault:\nthrow new DMLRuntimeException(\"Builtin.execute(): Unknown operation: \" + bFunc);\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/runtime/functionobjects/Xor.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.runtime.functionobjects;\n+\n+import java.io.Serializable;\n+\n+public class Xor extends ValueFunction implements Serializable\n+{\n+ private static final long serialVersionUID = -2847467729680510910L;\n+\n+ private static Xor singleObj = null;\n+\n+ private Xor() {\n+ // nothing to do here\n+ }\n+\n+ public static Xor getXorFnObject() {\n+ if ( singleObj == null )\n+ singleObj = new Xor();\n+ return singleObj;\n+ }\n+\n+ @Override\n+ public boolean execute(boolean in1, boolean in2) {\n+ return in1 != in2;\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/CPInstructionParser.java", "diff": "@@ -133,6 +133,7 @@ public class CPInstructionParser extends InstructionParser\n// Boolean Instruction Opcodes\nString2CPInstructionType.put( \"&&\" , CPType.BooleanBinary);\nString2CPInstructionType.put( \"||\" , CPType.BooleanBinary);\n+ String2CPInstructionType.put( \"xor\" , CPType.BooleanBinary);\nString2CPInstructionType.put( \"!\" , CPType.BooleanUnary);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/InstructionUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/InstructionUtils.java", "diff": "@@ -72,6 +72,7 @@ import org.apache.sysml.runtime.functionobjects.ReduceAll;\nimport org.apache.sysml.runtime.functionobjects.ReduceCol;\nimport org.apache.sysml.runtime.functionobjects.ReduceDiag;\nimport org.apache.sysml.runtime.functionobjects.ReduceRow;\n+import org.apache.sysml.runtime.functionobjects.Xor;\nimport org.apache.sysml.runtime.instructions.cp.CPInstruction.CPType;\nimport org.apache.sysml.runtime.instructions.gpu.GPUInstruction.GPUINSTRUCTION_TYPE;\nimport org.apache.sysml.runtime.instructions.mr.MRInstruction.MRINSTRUCTION_TYPE;\n@@ -499,6 +500,8 @@ public class InstructionUtils\nreturn new BinaryOperator(And.getAndFnObject());\nelse if(opcode.equalsIgnoreCase(\"||\"))\nreturn new BinaryOperator(Or.getOrFnObject());\n+ else if(opcode.equalsIgnoreCase(\"xor\"))\n+ return new BinaryOperator(Xor.getXorFnObject());\nelse if(opcode.equalsIgnoreCase(\"+\"))\nreturn new BinaryOperator(Plus.getPlusFnObject());\nelse if(opcode.equalsIgnoreCase(\"-\"))\n" }, { "change_type": "MODIFY", "old_path": 
"src/main/java/org/apache/sysml/runtime/matrix/operators/BinaryOperator.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/operators/BinaryOperator.java", "diff": "@@ -43,6 +43,7 @@ import org.apache.sysml.runtime.functionobjects.Plus;\nimport org.apache.sysml.runtime.functionobjects.PlusMultiply;\nimport org.apache.sysml.runtime.functionobjects.Power;\nimport org.apache.sysml.runtime.functionobjects.ValueFunction;\n+import org.apache.sysml.runtime.functionobjects.Xor;\nimport org.apache.sysml.runtime.functionobjects.Builtin.BuiltinCode;\npublic class BinaryOperator extends Operator implements Serializable\n@@ -53,8 +54,8 @@ public class BinaryOperator extends Operator implements Serializable\npublic BinaryOperator(ValueFunction p) {\n//binaryop is sparse-safe iff (0 op 0) == 0\n- super (p instanceof Plus || p instanceof Multiply\n- || p instanceof Minus || p instanceof And || p instanceof Or\n+ super (p instanceof Plus || p instanceof Multiply || p instanceof Minus\n+ || p instanceof And || p instanceof Or\n|| p instanceof PlusMultiply || p instanceof MinusMultiply);\nfn = p;\n}\n@@ -82,6 +83,7 @@ public class BinaryOperator extends Operator implements Serializable\nelse if( fn instanceof NotEquals ) return OpOp2.NOTEQUAL;\nelse if( fn instanceof And ) return OpOp2.AND;\nelse if( fn instanceof Or ) return OpOp2.OR;\n+ else if( fn instanceof Xor ) return OpOp2.XOR;\nelse if( fn instanceof Power ) return OpOp2.POW;\nelse if( fn instanceof MinusNz ) return OpOp2.MINUS_NZ;\nelse if( fn instanceof Builtin ) {\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/binary/scalar/XorTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+\n+package org.apache.sysml.test.integration.functions.binary.scalar;\n+\n+import java.io.IOException;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+\n+import org.apache.sysml.runtime.util.MapReduceTool;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+\n+/**\n+ * The main purpose of this test is to verify all combinations of\n+ * `xor` operands.\n+ *\n+ */\n+public class XorTest extends AutomatedTestBase\n+{\n+ private final static String TEST_NAME1 = \"XorTest\";\n+ private final static String TEST_DIR = \"functions/binary/scalar/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + XorTest.class.getSimpleName() + \"/\";\n+\n+ @Override\n+ public void setUp() {\n+ addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"B\" }) );\n+ }\n+\n+ @Test\n+ public void testXor1() {\n+ runXor(\"TRUE\", \"FALSE\", true);\n+ }\n+\n+ @Test\n+ public void testXor2() {\n+ runXor(\"TRUE\", \"TRUE\", false);\n+ }\n+\n+ @Test\n+ public void testXor3() {\n+ runXor(\"FALSE\", \"FALSE\", false);\n+ }\n+\n+ @Test\n+ public void testXor4() {\n+ runXor(\"FALSE\", \"TRUE\", true);\n+ }\n+\n+ private void runXor( String op1, String op2, boolean trueCondition )\n+ {\n+ String TEST_NAME = TEST_NAME1;\n+ getAndLoadTestConfiguration(TEST_NAME);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[]{\"-args\", op1, op2, output(\"B\") };\n+\n+ //run tests\n+ runTest(true, false, null, -1);\n+\n+ //compare result\n+ try {\n+ boolean retCondition = MapReduceTool.readBooleanFromHDFSFile(output(\"B\"));\n+ Assert.assertEquals(trueCondition, retCondition);\n+ }\n+ catch (IOException e) {\n+ Assert.fail(e.getMessage());\n+ }\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/binary/scalar/XorTest.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+B = xor($1, $2);\n+write(B, $3);\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/binary/scalar/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/binary/scalar/ZPackageSuite.java", "diff": "@@ -40,7 +40,8 @@ import org.junit.runners.Suite;\nMultiplicationTest.class,\nOrTest.class,\nPowerTest.class,\n- SubtractionTest.class\n+ SubtractionTest.class,\n+ XorTest.class,\n})\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1883] New xor builtin functions over scalars Closes #708.
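In DML the new builtin is simply B = xor($1, $2), as exercised by XorTest.dml above. On the runtime side, the scalar semantics are boolean inequality, and the singleton function object plugs into the existing operator machinery via the BinaryOperator constructor shown in this record:

    import org.apache.sysml.runtime.functionobjects.Xor;
    import org.apache.sysml.runtime.matrix.operators.BinaryOperator;

    Xor xor = Xor.getXorFnObject();        // singleton, like And/Or
    boolean b1 = xor.execute(true, false); // true
    boolean b2 = xor.execute(true, true);  // false

    // Note: 0 xor 0 == 0, yet the BinaryOperator constructor above does not
    // list Xor among the sparse-safe functions, so it is treated as unsafe.
    BinaryOperator op = new BinaryOperator(xor);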
49,738
16.12.2017 18:33:23
-3,600
63ad279fd8a1db216ba6a99b684eaf5d703db934
Large dense matrix blocks in unary/binary operations This patch extends all unary, binary, and scalar operations to support large dense matrix blocks >16GB. Additionally, this also includes minor extensions of the new dense block abstraction and a cleanup of the matrix value abstraction.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/CompressedMatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/CompressedMatrixBlock.java", "diff": "@@ -1925,14 +1925,6 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\nreturn tmp.unaryOperations(op, result);\n}\n- @Override\n- public void unaryOperationsInPlace(UnaryOperator op)\n- throws DMLRuntimeException {\n- printDecompressWarning(\"unaryOperationsInPlace\");\n- MatrixBlock tmp = isCompressed() ? decompress() : this;\n- tmp.unaryOperationsInPlace(op);\n- }\n-\n@Override\npublic MatrixValue binaryOperations(BinaryOperator op, MatrixValue thatValue, MatrixValue result)\nthrows DMLRuntimeException {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/CM_N_COVCell.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/CM_N_COVCell.java", "diff": "@@ -171,12 +171,6 @@ public class CM_N_COVCell extends MatrixValue implements WritableComparable\nthrow new DMLRuntimeException(\"operation not supported fro WeightedCell\");\n}\n- @Override\n- public void unaryOperationsInPlace(UnaryOperator op)\n- throws DMLRuntimeException {\n- throw new DMLRuntimeException(\"operation not supported fro WeightedCell\");\n- }\n-\n@Override\npublic void readFields(DataInput in) throws IOException {\ncm.w=in.readDouble();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlock.java", "diff": "@@ -79,6 +79,21 @@ public abstract class DenseBlock implements Serializable\n*/\npublic abstract int numBlocks();\n+ /**\n+ * Get the number of rows per block, except last one.\n+ *\n+ * @return number of rows in block\n+ */\n+ public abstract int blockSize();\n+\n+ /**\n+ * Get the number of rows of the given block.\n+ *\n+ * @param bix block index\n+ * @return number of rows in block\n+ */\n+ public abstract int blockSize(int bix);\n+\n/**\n* Get the length of the dense block as the product\n* of row and column dimensions.\n@@ -87,6 +102,15 @@ public abstract class DenseBlock implements Serializable\n*/\npublic abstract long size();\n+ /**\n+ * Get the length of the given block.\n+ *\n+ * @param bix block index\n+ * @return length\n+ */\n+ public abstract int size(int bix);\n+\n+\n/**\n* Get the total length of allocated blocks.\n*\n@@ -195,6 +219,14 @@ public abstract class DenseBlock implements Serializable\n*/\npublic abstract void set(int r, double[] v);\n+ /**\n+ * Copy the given dense block.\n+ *\n+ * @param db dense block\n+ */\n+ public abstract void set(DenseBlock db);\n+\n+\n/**\n* Get the value for a given row and column.\n*\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockDRB.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockDRB.java", "diff": "@@ -75,11 +75,26 @@ public class DenseBlockDRB extends DenseBlock\nreturn 1;\n}\n+ @Override\n+ public int blockSize() {\n+ return rlen;\n+ }\n+\n+ @Override\n+ public int blockSize(int bix) {\n+ return rlen;\n+ }\n+\n@Override\npublic long size() {\nreturn rlen * clen;\n}\n+ @Override\n+ public int size(int bix) {\n+ return rlen * clen;\n+ }\n+\n@Override\npublic long capacity() {\nreturn (data!=null) ? 
data.length : -1;\n@@ -151,6 +166,11 @@ public class DenseBlockDRB extends DenseBlock\ndata[pos(r, c)] = v;\n}\n+ @Override\n+ public void set(DenseBlock db) {\n+ System.arraycopy(db.values(0), 0, data, 0, rlen*clen);\n+ }\n+\n@Override\npublic void set(int r, double[] v) {\nSystem.arraycopy(v, 0, data, pos(r), clen);\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "diff": "@@ -87,11 +87,26 @@ public class DenseBlockLDRB extends DenseBlock\nreturn data.length;\n}\n+ @Override\n+ public int blockSize() {\n+ return blen;\n+ }\n+\n+ @Override\n+ public int blockSize(int bix) {\n+ return Math.min(blen, rlen-bix*blen);\n+ }\n+\n@Override\npublic long size() {\nreturn (long)rlen * clen;\n}\n+ @Override\n+ public int size(int bix) {\n+ return blockSize(bix) * clen;\n+ }\n+\n@Override\npublic long capacity() {\nint len = 0;\n@@ -116,7 +131,8 @@ public class DenseBlockLDRB extends DenseBlock\nlong nnz = 0;\nfor(int bi=index(rl); bi<index(ru); bi++) {\ndouble[] a = data[bi];\n- for(int i=pos(bi), ix=pos(bi)*clen; i<blen(bi); i++, ix+=clen)\n+ int blen = blockSize(bi);\n+ for(int i=pos(bi), ix=pos(bi)*clen; i<blen; i++, ix+=clen)\nfor( int j=cl; j<cu; j++ )\nnnz += (a[ix+j]!=0) ? 1 : 0;\n}\n@@ -171,7 +187,13 @@ public class DenseBlockLDRB extends DenseBlock\n@Override\npublic void set(int r, double[] v) {\n- System.arraycopy(v, 0, data[index(r)], pos(r), v.length);\n+ System.arraycopy(v, 0, data[index(r)], pos(r), clen);\n+ }\n+\n+ @Override\n+ public void set(DenseBlock db) {\n+ for(int bi=0; bi<numBlocks(); bi++)\n+ System.arraycopy(db.values(bi), 0, data[bi], 0, size(bi));\n}\n@Override\n@@ -194,10 +216,6 @@ public class DenseBlockLDRB extends DenseBlock\nreturn sb.toString();\n}\n- private int blen(int bix) {\n- return Math.min(blen, rlen-bix*blen);\n- }\n-\nprivate static int blocksize(int rlen, int clen) {\nreturn Math.min(rlen, Integer.MAX_VALUE / clen);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixBincell.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixBincell.java", "diff": "@@ -265,17 +265,23 @@ public class LibMatrixBincell\nreturn; // skip entire empty block\nret.allocateDenseBlock();\n- double[] a = (m1.denseBlock != null) ? m1.getDenseBlockValues() : null;\n- double[] b = (m2.denseBlock != null) ? m2.getDenseBlockValues() : null;\n- double[] c = ret.getDenseBlockValues();\n- int nnz = 0;\n+ DenseBlock da = m1.getDenseBlock();\n+ DenseBlock dc = ret.getDenseBlock();\n+ long nnz = 0;\nif( atype == BinaryAccessType.MATRIX_COL_VECTOR )\n{\n- for( int i=0, ix=0; i<rlen; i++, ix+=clen )\n+ double[] b = m2.getDenseBlockValues(); // always single block\n+\n+ for( int bi=0; bi<dc.numBlocks(); bi++ ) {\n+ double[] a = da.values(bi);\n+ double[] c = dc.values(bi);\n+ int len = dc.blockSize(bi);\n+ int off = bi * dc.blockSize();\n+ for( int i=0, ix=0; i<len; i++, ix+=clen )\n{\n//replicate vector value\n- double v2 = (b==null) ? 0 : b[i];\n+ double v2 = (b==null) ? 
0 : b[off+i];\nif( skipEmpty && v2 == 0 ) //skip empty rows\ncontinue;\n@@ -298,33 +304,42 @@ public class LibMatrixBincell\n}\n}\n}\n+ }\nelse if( atype == BinaryAccessType.MATRIX_ROW_VECTOR )\n{\n- if( a==null && b==null ) { //both empty\n+ double[] b = m2.getDenseBlockValues(); // always single block\n+\n+ if( da==null && b==null ) { //both empty\ndouble v = op.fn.execute( 0, 0 );\n- Arrays.fill(c, 0, rlen*clen, v);\n- nnz += (v != 0) ? rlen*clen : 0;\n+ dc.set(v);\n+ nnz += (v != 0) ? (long)rlen*clen : 0;\n}\n- else if( a==null ) //left empty\n+ else if( da==null ) //left empty\n{\n//compute first row\n+ double[] c = dc.values(0);\nfor( int j=0; j<clen; j++ ) {\nc[j] = op.fn.execute( 0, b[j] );\nnnz += (c[j] != 0) ? rlen : 0;\n}\n//copy first to all other rows\n- for( int i=1, ix=clen; i<rlen; i++, ix+=clen )\n- System.arraycopy(c, 0, c, ix, clen);\n+ for( int i=1; i<rlen; i++ )\n+ dc.set(i, c);\n}\nelse //default case (incl right empty)\n{\n- for( int i=0, ix=0; i<rlen; i++, ix+=clen )\n+ for( int bi=0; bi<dc.numBlocks(); bi++ ) {\n+ double[] a = da.values(bi);\n+ double[] c = dc.values(bi);\n+ int len = dc.blockSize(bi);\n+ for( int i=0, ix=0; i<len; i++, ix+=clen )\nfor( int j=0; j<clen; j++ ) {\nc[ix+j] = op.fn.execute( a[ix+j], ((b!=null) ? b[j] : 0) );\nnnz += (c[ix+j] != 0) ? 1 : 0;\n}\n}\n}\n+ }\nret.nonZeros = nnz;\n}\n@@ -466,16 +481,14 @@ public class LibMatrixBincell\nif(isMultiply && v2 == 1) //ROW COPY\n{\n- for( int j=0; j<clen; j++ )\n- {\n+ for( int j=0; j<clen; j++ ) {\ndouble v1 = m1.quickGetValue(i, j);\nret.appendValue(i, j, v1);\n}\n}\nelse //GENERAL CASE\n{\n- for( int j=0; j<clen; j++ )\n- {\n+ for( int j=0; j<clen; j++ ) {\ndouble v1 = m1.quickGetValue(i, j);\ndouble v = op.fn.execute( v1, v2 );\nret.appendValue(i, j, v);\n@@ -492,8 +505,7 @@ public class LibMatrixBincell\n{\n//note: sparse block guaranteed to be allocated (otherwise early about)\nSparseBlock b = m2.sparseBlock;\n- if( !b.isEmpty(0) )\n- {\n+ if( b.isEmpty(0) ) return;\nint blen = b.size(0); //always pos 0\nint[] bix = b.indexes(0);\ndouble[] bvals = b.values(0);\n@@ -506,12 +518,10 @@ public class LibMatrixBincell\n}\n}\n}\n- }\nelse //GENERAL CASE\n{\nfor( int i=0; i<rlen; i++ )\n- for( int j=0; j<clen; j++ )\n- {\n+ for( int j=0; j<clen; j++ ) {\ndouble v1 = m1.quickGetValue(i, j);\ndouble v2 = m2.quickGetValue(0, j); //replicated vector value\ndouble v = op.fn.execute( v1, v2 );\n@@ -540,8 +550,7 @@ public class LibMatrixBincell\nelse {\nfor(int r=0; r<rlen; r++) {\ndouble v1 = m1.quickGetValue(r, 0);\n- for(int c=0; c<clen; c++)\n- {\n+ for(int c=0; c<clen; c++) {\ndouble v2 = m2.quickGetValue(0, c);\ndouble v = op.fn.execute( v1, v2 );\nret.appendValue(r, c, v);\n@@ -629,72 +638,79 @@ public class LibMatrixBincell\n{\n//specific case in order to prevent binary search on sparse inputs (see quickget and quickset)\nret.allocateDenseBlock();\n- final int m = ret.rlen;\nfinal int n = ret.clen;\n- double[] c = ret.getDenseBlockValues();\n+ DenseBlock dc = ret.getDenseBlock();\n//1) process left input: assignment\n- if( m1.sparse ) //SPARSE left\n- {\n- if( m1.sparseBlock != null )\n+ if( m1.sparse && m1.sparseBlock != null ) //SPARSE left\n{\nSparseBlock a = m1.sparseBlock;\n-\n- for( int i=0, ix=0; i<m; i++, ix+=n ) {\n- if( !a.isEmpty(i) )\n- {\n- int apos = a.pos(i);\n- int alen = a.size(i);\n- int[] aix = a.indexes(i);\n- double[] avals = a.values(i);\n+ for( int bi=0; bi<dc.numBlocks(); bi++ ) {\n+ double[] c = dc.values(bi);\n+ int blen = dc.blockSize(bi);\n+ int off = bi * 
dc.blockSize();\n+ for( int i=0, ix=0; i<blen; i++, ix+=n ) {\n+ int ai = off + i;\n+ if( a.isEmpty(ai) ) continue;\n+ int apos = a.pos(ai);\n+ int alen = a.size(ai);\n+ int[] aix = a.indexes(ai);\n+ double[] avals = a.values(ai);\nfor(int k = apos; k < apos+alen; k++)\nc[ix+aix[k]] = avals[k];\n}\n}\n}\n- }\n- else //DENSE left\n+ else if( !m1.sparse ) //DENSE left\n{\nif( !m1.isEmptyBlock(false) )\n- System.arraycopy(m1.getDenseBlockValues(), 0, c, 0, m*n);\n+ dc.set(m1.getDenseBlock());\nelse\n- ret.denseBlock.set(0);\n+ dc.set(0);\n}\n//2) process right input: op.fn (+,-,*), * only if dense\nlong lnnz = 0;\n- if( m2.sparse ) //SPARSE right\n- {\n- if(m2.sparseBlock!=null)\n+ if( m2.sparse && m2.sparseBlock!=null ) //SPARSE right\n{\nSparseBlock a = m2.sparseBlock;\n-\n- for( int i=0, ix=0; i<m; i++, ix+=n ) {\n- if( !a.isEmpty(i) ) {\n- int apos = a.pos(i);\n- int alen = a.size(i);\n- int[] aix = a.indexes(i);\n- double[] avals = a.values(i);\n+ for( int bi=0; bi<dc.numBlocks(); bi++ ) {\n+ double[] c = dc.values(bi);\n+ int blen = dc.blockSize(bi);\n+ int off = bi * dc.blockSize();\n+ for( int i=0, ix=0; i<blen; i++, ix+=n ) {\n+ int ai = off + i;\n+ if( !a.isEmpty(ai) ) {\n+ int apos = a.pos(ai);\n+ int alen = a.size(ai);\n+ int[] aix = a.indexes(ai);\n+ double[] avals = a.values(ai);\nfor(int k = apos; k < apos+alen; k++)\nc[ix+aix[k]] = op.fn.execute(c[ix+aix[k]], avals[k]);\n}\n//exploit temporal locality of rows\n- lnnz += ret.recomputeNonZeros(i, i, 0, n-1);\n+ lnnz += ret.recomputeNonZeros(ai, ai, 0, n-1);\n}\n}\n}\n- else //DENSE right\n+ else if( !m2.sparse ) //DENSE right\n{\nif( !m2.isEmptyBlock(false) ) {\n- double[] a = m2.getDenseBlockValues();\n- for( int i=0; i<m*n; i++ ) {\n+ for( int bi=0; bi<dc.numBlocks(); bi++ ) {\n+ double[] a = m2.getDenseBlock().values(bi);\n+ double[] c = dc.values(bi);\n+ int len = dc.size(bi);\n+ for( int i=0; i<len; i++ ) {\nc[i] = op.fn.execute(c[i], a[i]);\nlnnz += (c[i]!=0) ? 1 : 0;\n}\n}\n+ }\nelse if(op.fn instanceof Multiply)\nret.denseBlock.set(0);\n+ else\n+ lnnz = m1.nonZeros;\n}\n//3) recompute nnz\n@@ -705,19 +721,23 @@ public class LibMatrixBincell\nthrows DMLRuntimeException\n{\nret.allocateDenseBlock();\n- final int m = ret.rlen;\n- final int n = ret.clen;\n- double[] a = m1.getDenseBlockValues();\n- double[] b = m2.getDenseBlockValues();\n- double[] c = ret.getDenseBlockValues();\n+ DenseBlock da = m1.getDenseBlock();\n+ DenseBlock db = m2.getDenseBlock();\n+ DenseBlock dc = ret.getDenseBlock();\nValueFunction fn = op.fn;\n//compute dense-dense binary, maintain nnz on-the-fly\n- int lnnz = 0;\n- for( int i=0; i<m*n; i++ ) {\n+ long lnnz = 0;\n+ for( int bi=0; bi<da.numBlocks(); bi++ ) {\n+ double[] a = da.values(bi);\n+ double[] b = db.values(bi);\n+ double[] c = dc.values(bi);\n+ int len = da.size(bi);\n+ for( int i=0; i<len; i++ ) {\nc[i] = fn.execute(a[i], b[i]);\nlnnz += (c[i]!=0)? 
1 : 0;\n}\n+ }\nret.setNonZeros(lnnz);\n}\n@@ -774,15 +794,15 @@ public class LibMatrixBincell\n* @param bOp binary operator\n*\n*/\n- private static void performBinOuterOperation(MatrixBlock mbLeft, MatrixBlock mbRight, MatrixBlock mbOut, BinaryOperator bOp)\n+ private static void performBinOuterOperation(MatrixBlock m1, MatrixBlock m2, MatrixBlock ret, BinaryOperator bOp)\nthrows DMLRuntimeException\n{\n- int rlen = mbLeft.rlen;\n- int clen = mbOut.clen;\n- double b[] = DataConverter.convertToDoubleVector(mbRight);\n- if(!mbOut.isAllocated())\n- mbOut.allocateDenseBlock();\n- double c[] = mbOut.getDenseBlockValues();\n+ int rlen = m1.rlen;\n+ int clen = ret.clen;\n+ double b[] = DataConverter.convertToDoubleVector(m2);\n+ if(!ret.isAllocated())\n+ ret.allocateDenseBlock();\n+ DenseBlock dc = ret.getDenseBlock();\n//pre-materialize various types used in inner loop\nboolean scanType1 = (bOp.fn instanceof LessThan || bOp.fn instanceof Equals\n@@ -794,8 +814,10 @@ public class LibMatrixBincell\nboolean eqNeq = (bOp.fn instanceof Equals || bOp.fn instanceof NotEquals);\nlong lnnz = 0;\n- for( int r=0, off=0; r<rlen; r++, off+=clen ) {\n- double value = mbLeft.quickGetValue(r, 0);\n+ for( int bi=0; bi<dc.numBlocks(); bi++ ) {\n+ double[] c = dc.values(bi);\n+ for( int r=bi*dc.blockSize(), off=0; r<rlen; r++, off+=clen ) {\n+ double value = m1.quickGetValue(r, 0);\nint ixPos1 = Arrays.binarySearch(b, value);\nint ixPos2 = ixPos1;\nif( ixPos1 >= 0 ) { //match, scan to next val\n@@ -817,8 +839,9 @@ public class LibMatrixBincell\nlnnz += (end-start);\n}\n}\n- mbOut.setNonZeros(lnnz);\n- mbOut.examSparsity();\n+ }\n+ ret.setNonZeros(lnnz);\n+ ret.examSparsity();\n}\nprivate static void unsafeBinary(MatrixBlock m1, MatrixBlock m2, MatrixBlock ret, BinaryOperator op)\n@@ -870,7 +893,7 @@ public class LibMatrixBincell\n}\nelse // MATRIX - MATRIX\n{\n- //dense non-empty vectors\n+ //dense non-empty vectors (always single block)\nif( m1.clen==1 && !m1.sparse && !m1.isEmptyBlock(false)\n&& !m2.sparse && !m2.isEmptyBlock(false) )\n{\n@@ -991,18 +1014,20 @@ public class LibMatrixBincell\nret.allocateDenseBlock();\nSparseBlock a = m1.sparseBlock;\n- double[] c = ret.getDenseBlockValues();\n+ DenseBlock dc = ret.getDenseBlock();\nint m = m1.rlen;\nint n = m1.clen;\n//init dense result with unsafe 0-value\n- double cval0 = op.executeScalar(0);\n- Arrays.fill(c, cval0);\n+ dc.set(op.executeScalar(0));\n//compute non-zero input values\n- int nnz = m*n;\n- for(int i=0, cix=0; i<m; i++, cix+=n) {\n- if( !a.isEmpty(i) ) {\n+ long nnz = m * n;\n+ for(int bi=0; bi<dc.numBlocks(); bi++) {\n+ int blen = dc.blockSize(bi);\n+ double[] c = dc.values(bi);\n+ for(int i=bi*dc.blockSize(), cix=i*n; i<blen && i<m; i++, cix+=n) {\n+ if( a.isEmpty(i) ) continue;\nint apos = a.pos(i);\nint alen = a.size(i);\nint[] aix = a.indexes(i);\n@@ -1027,16 +1052,20 @@ public class LibMatrixBincell\n//allocate dense block (if necessary), incl clear nnz\nret.allocateDenseBlock(true);\n- double[] a = m1.getDenseBlockValues();\n- double[] c = ret.getDenseBlockValues();\n+ DenseBlock da = m1.getDenseBlock();\n+ DenseBlock dc = ret.getDenseBlock();\n//compute scalar operation, incl nnz maintenance\n- int limit = m1.rlen*m1.clen;\n- int nnz = 0;\n+ long nnz = 0;\n+ for( int bi=0; bi<da.numBlocks(); bi++) {\n+ double[] a = da.values(bi);\n+ double[] c = dc.values(bi);\n+ int limit = da.size(bi);\nfor( int i=0; i<limit; i++ ) {\nc[i] = op.executeScalar( a[i] );\nnnz += (c[i] != 0) ? 
1 : 0;\n}\n+ }\nret.nonZeros = nnz;\n}\n@@ -1109,9 +1138,7 @@ public class LibMatrixBincell\nelse if(m1ret.sparseBlock==null)\n{\nm1ret.sparseBlock = SparseBlockFactory.createSparseBlock(rlen);\n-\n- for(int r=0; r<rlen; r++)\n- {\n+ for(int r=0; r<rlen; r++) {\nif( !b.isEmpty(r) ) {\nSparseRow tmp = new SparseRowVector( b.size(r), clen );\nappendRightForSparseBinary(op, b.values(r), b.indexes(r), b.pos(r), b.size(r), 0, r, m1ret);\n@@ -1143,8 +1170,7 @@ public class LibMatrixBincell\nelse //one side dense\n{\nfor(int r=0; r<rlen; r++)\n- for(int c=0; c<clen; c++)\n- {\n+ for(int c=0; c<clen; c++) {\ndouble thisvalue = m1ret.quickGetValue(r, c);\ndouble thatvalue = m2.quickGetValue(r, c);\ndouble resultvalue = op.fn.execute(thisvalue, thatvalue);\n@@ -1161,13 +1187,10 @@ public class LibMatrixBincell\nif( atype == BinaryAccessType.MATRIX_COL_VECTOR ) //MATRIX - COL_VECTOR\n{\n- for(int r=0; r<rlen; r++)\n- {\n+ for(int r=0; r<rlen; r++) {\n//replicated value\ndouble v2 = m2.quickGetValue(r, 0);\n-\n- for(int c=0; c<clen; c++)\n- {\n+ for(int c=0; c<clen; c++) {\ndouble v1 = m1ret.quickGetValue(r, c);\ndouble v = op.fn.execute( v1, v2 );\nm1ret.quickSetValue(r, c, v);\n@@ -1177,8 +1200,7 @@ public class LibMatrixBincell\nelse if( atype == BinaryAccessType.MATRIX_ROW_VECTOR ) //MATRIX - ROW_VECTOR\n{\nfor(int r=0; r<rlen; r++)\n- for(int c=0; c<clen; c++)\n- {\n+ for(int c=0; c<clen; c++) {\ndouble v1 = m1ret.quickGetValue(r, c);\ndouble v2 = m2.quickGetValue(0, c); //replicated value\ndouble v = op.fn.execute( v1, v2 );\n@@ -1188,8 +1210,7 @@ public class LibMatrixBincell\nelse // MATRIX - MATRIX\n{\nfor(int r=0; r<rlen; r++)\n- for(int c=0; c<clen; c++)\n- {\n+ for(int c=0; c<clen; c++) {\ndouble v1 = m1ret.quickGetValue(r, c);\ndouble v2 = m2.quickGetValue(r, c);\ndouble v = op.fn.execute( v1, v2 );\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -1094,10 +1094,10 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nfinal int n = clen;\n//allocate target in memory-efficient CSR format\n- int nnz = (int) nonZeros;\n+ int lnnz = (int) nonZeros;\nint[] rptr = new int[m+1];\n- int[] indexes = new int[nnz];\n- double[] values = new double[nnz];\n+ int[] indexes = new int[lnnz];\n+ double[] values = new double[lnnz];\nfor( int i=0, pos=0, aix=0; i<m; i++ ) {\nfor(int j=0; j<n; j++, aix++) {\ndouble aval = a[aix];\n@@ -1110,10 +1110,9 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nrptr[i+1]=pos;\n}\nsparseBlock = new SparseBlockCSR(\n- rptr, indexes, values, nnz);\n+ rptr, indexes, values, lnnz);\n//update nnz and cleanup dense block\n- nonZeros = nnz;\ndenseBlock = null;\n}\n@@ -2663,7 +2662,6 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nreturn;\nfinal int m = rlen;\n- final int n = clen;\nif( sparse && ret.sparse ) //SPARSE <- SPARSE\n{\n@@ -2712,16 +2710,20 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n{\n//allocate dense output block\nret.allocateDenseBlock();\n- double[] a = getDenseBlockValues();\n- double[] c = ret.getDenseBlockValues();\n- int len = m * n;\n+ DenseBlock da = getDenseBlock();\n+ DenseBlock dc = ret.getDenseBlock();\n//unary op, incl nnz maintenance\n- int nnz = 0;\n+ long nnz = 0;\n+ for( int bi=0; bi<da.numBlocks(); bi++ ) {\n+ double[] a = da.values(bi);\n+ 
double[] c = dc.values(bi);\n+ int len = da.size(bi);\nfor( int i=0; i<len; i++ ) {\nc[i] = op.fn.execute(a[i]);\nnnz += (c[i] != 0) ? 1 : 0;\n}\n+ }\nret.nonZeros = nnz;\n}\n}\n@@ -2749,113 +2751,6 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nsparseUnaryOperations(op, ret);\n}\n- @Override\n- public void unaryOperationsInPlace(UnaryOperator op)\n- throws DMLRuntimeException\n- {\n- if(op.sparseSafe)\n- sparseUnaryOperationsInPlace(op);\n- else\n- denseUnaryOperationsInPlace(op);\n- }\n-\n- /**\n- * only apply to non zero cells\n- *\n- * @param op unary operator\n- * @throws DMLRuntimeException if DMLRuntimeException occurs\n- */\n- private void sparseUnaryOperationsInPlace(UnaryOperator op)\n- throws DMLRuntimeException\n- {\n- //early abort possible since sparse-safe\n- if( isEmptyBlock(false) )\n- return;\n-\n- if(sparse)\n- {\n- nonZeros=0;\n- for(int r=0; r<Math.min(rlen, sparseBlock.numRows()); r++)\n- {\n- if(sparseBlock.isEmpty(r))\n- continue;\n-\n- int apos = sparseBlock.pos(r);\n- int alen = sparseBlock.size(r);\n- int[] aix = sparseBlock.indexes(r);\n- double[] avals = sparseBlock.values(r);\n-\n- int pos=0;\n- for(int i=apos; i<apos+alen; i++)\n- {\n- double v=op.fn.execute(avals[i]);\n- if(v!=0) {\n- avals[pos]=v;\n- aix[pos]=aix[i];\n- pos++;\n- nonZeros++;\n- }\n- }\n- //TODO perf sparse block: truncate replaced by deleteIndexrange\n- sparseBlock.deleteIndexRange(r, pos, clen);\n- }\n-\n- }\n- else\n- {\n- double[] a = getDenseBlockValues();\n- int limit = rlen*clen;\n- long nnz = 0;\n- for(int i=0; i<limit; i++) {\n- a[i] = op.fn.execute(a[i]);\n- if(a[i] != 0)\n- nnz++;\n- }\n- nonZeros = nnz;\n- }\n- }\n-\n- private void denseUnaryOperationsInPlace(UnaryOperator op)\n- throws DMLRuntimeException\n- {\n- if( sparse ) //SPARSE MATRIX\n- {\n- double v;\n- for(int r=0; r<rlen; r++)\n- for(int c=0; c<clen; c++)\n- {\n- v=op.fn.execute(quickGetValue(r, c));\n- quickSetValue(r, c, v);\n- }\n- }\n- else//DENSE MATRIX\n- {\n- //early abort not possible because not sparsesafe\n- if(denseBlock==null)\n- allocateDenseBlock();\n-\n- //compute values in-place and update nnz\n- double[] a = getDenseBlockValues();\n- final int limit = rlen*clen;\n- int lnnz = 0;\n- for( int i=0; i<limit; i++ ) {\n- a[i] = op.fn.execute(a[i]);\n- if( a[i]!=0 )\n- lnnz++;\n- }\n- nonZeros = lnnz;\n-\n- //IBM JVM bug (JDK6) causes crash for certain inputs (w/ infinities)\n- //nonZeros = 0;\n- //for(int i=0; i<limit; i++)\n- //{\n- // denseBlock[i]=op.fn.execute(denseBlock[i]);\n- // if(denseBlock[i]!=0)\n- // nonZeros++;\n- //}\n- }\n- }\n-\n@Override\npublic MatrixValue binaryOperations(BinaryOperator op, MatrixValue thatValue, MatrixValue result)\nthrows DMLRuntimeException\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixCell.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixCell.java", "diff": "@@ -266,12 +266,6 @@ public class MatrixCell extends MatrixValue implements WritableComparable, Seria\nreturn c3;\n}\n- @Override\n- public void unaryOperationsInPlace(UnaryOperator op)\n- throws DMLRuntimeException {\n- value=op.fn.execute(value);\n- }\n-\npublic int compareTo(MatrixCell o) {\nreturn Double.compare(this.value, o.value);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixValue.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixValue.java", "diff": "@@ -41,7 +41,6 @@ import 
org.apache.sysml.runtime.util.UtilFunctions;\n@SuppressWarnings(\"rawtypes\")\npublic abstract class MatrixValue implements WritableComparable\n{\n-\nstatic public class CellIndex {\npublic int row;\npublic int column;\n@@ -153,8 +152,6 @@ public abstract class MatrixValue implements WritableComparable\npublic abstract MatrixValue unaryOperations(UnaryOperator op, MatrixValue result)\nthrows DMLRuntimeException;\n- public abstract void unaryOperationsInPlace(UnaryOperator op) throws DMLRuntimeException;\n-\npublic abstract void incrementalAggregate(AggregateOperator aggOp, MatrixValue correction,\nMatrixValue newWithCorrection) throws DMLRuntimeException;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/WeightedCell.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/WeightedCell.java", "diff": "@@ -174,10 +174,4 @@ public class WeightedCell extends MatrixCell\nc3.setWeight(weight);\nreturn c3;\n}\n-\n- @Override\n- public void unaryOperationsInPlace(UnaryOperator op)\n- throws DMLRuntimeException {\n- value=op.fn.execute(value);\n- }\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2046] Large dense matrix blocks in unary/binary operations This patch extends all unary, binary, and scalar operations to support large dense matrix blocks >16GB. Additionally, this also includes minor extensions of the new dense block abstraction and a cleanup of the matrix value abstraction.
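A minimal sketch of the block-wise iteration pattern this patch applies across the unary/binary kernels; the DenseBlock accessors (numBlocks/values/size) are the ones used in the diffs above, while the enclosing method is illustrative only, not the actual implementation.

// Hedged sketch: apply a unary value function over a dense block that may be
// backed by multiple arrays (>16GB in total), maintaining nnz on the fly.
static long unaryOp(DenseBlock da, DenseBlock dc, ValueFunction fn) {
    long lnnz = 0;
    for( int bi=0; bi<da.numBlocks(); bi++ ) {
        double[] a = da.values(bi); // input array of physical block bi
        double[] c = dc.values(bi); // output array of physical block bi
        int len = da.size(bi);      // logical number of cells in block bi
        for( int i=0; i<len; i++ ) {
            c[i] = fn.execute(a[i]);
            lnnz += (c[i] != 0) ? 1 : 0;
        }
    }
    return lnnz; // caller maintains ret.setNonZeros(lnnz)
}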
49,738
18.12.2017 00:37:17
-3,600
ce8ee96dca2dfe65f4f91e39ab2988a86e2af3a9
[HOTFIX][SYSTEMML-1883] Fix parsing new xor builtin function
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java", "new_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java", "diff": "@@ -1735,6 +1735,8 @@ public class BuiltinFunctionExpression extends DataIdentifier\nbifop = Expression.BuiltinFunctionOp.SAMPLE;\nelse if ( functionName.equals(\"outer\") )\nbifop = Expression.BuiltinFunctionOp.OUTER;\n+ else if ( functionName.equals(\"xor\") )\n+ bifop = Expression.BuiltinFunctionOp.XOR;\nelse\nreturn null;\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX][SYSTEMML-1883] Fix parsing new xor builtin function
49,737
18.12.2017 11:48:38
-3,600
660ba763074083cf63b876c42d4d00fe52620636
strip for new line Closes 706
[ { "change_type": "MODIFY", "old_path": "scripts/perftest/python/utils_exec.py", "new_path": "scripts/perftest/python/utils_exec.py", "diff": "@@ -101,7 +101,7 @@ def parse_hdfs_base(std_outs):\nhdfs_uri = None\nfor line in std_outs:\nif line.startswith('hdfs://'):\n- hdfs_uri = line\n+ hdfs_uri = line.strip()\nif hdfs_uri is None:\nsys.exit('HDFS URI not found')\nreturn hdfs_uri\n@@ -160,7 +160,7 @@ def parse_hdfs_paths(std_outs):\nif 'No such file or directory' in i:\nbreak\nelif 'hdfs' in i:\n- current_dir = i.split(' ')[-1]\n+ current_dir = i.split(' ')[-1].strip()\nhdfs_dir.append(current_dir)\nreturn hdfs_dir\n" } ]
Java
Apache License 2.0
apache/systemds
strip for new line Closes 706
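The fix itself is Python, but the pitfall is generic; a hedged Java analog, with a hypothetical stdOuts list of captured shell output lines:

// Lines captured from process stdout keep their trailing newline, so a parsed
// HDFS URI must be trimmed before it is reused to build paths.
String hdfsUri = null;
for( String line : stdOuts )
    if( line.startsWith("hdfs://") )
        hdfsUri = line.trim(); // Java equivalent of Python's line.strip()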
49,738
24.12.2017 12:06:22
-3,600
8d80041211919b60417d773a53580cc82dcc7f70
Fix ultra-sparse matrix block merge COO format This patch fixes the matrix block merge primitive - as used for matrix reblock - for ultra-sparse blocks in COO format. Furthermore, this also includes a set of tests for all combinations of sparse formats and different sparsity configurations.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -534,6 +534,10 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nreturn sparseBlock;\n}\n+ public void setSparseBlock(SparseBlock sblock) {\n+ sparseBlock = sblock;\n+ }\n+\npublic Iterator<IJV> getSparseBlockIterator() {\n//check for valid format, should have been checked from outside\nif( !sparse )\n@@ -1722,17 +1726,19 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nprivate void mergeIntoSparse(MatrixBlock that, boolean appendOnly)\n{\n+ SparseBlock a = sparseBlock;\n+ final boolean COO = (a instanceof SparseBlockCOO);\n+ final int m = rlen;\n+ final int n = clen;\n+\nif( that.sparse ) //SPARSE <- SPARSE\n{\n- SparseBlock a = sparseBlock;\nSparseBlock b = that.sparseBlock;\n- int m = rlen;\nfor( int i=0; i<m; i++ )\n{\n- if( !b.isEmpty(i) )\n- {\n- if( a.isEmpty(i) ) {\n+ if( b.isEmpty(i) ) continue;\n+ if( !COO && a.isEmpty(i) ) {\n//copy entire sparse row (no sort required)\na.set(i, b.get(i), true);\n}\n@@ -1755,16 +1761,11 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n}\n}\n}\n- }\nelse //SPARSE <- DENSE\n{\n- SparseBlock a = sparseBlock;\ndouble[] b = that.getDenseBlockValues();\n- int m = rlen;\n- int n = clen;\n- for( int i=0, bix=0; i<m; i++, bix+=n )\n- {\n+ for( int i=0, bix=0; i<m; i++, bix+=n ) {\nboolean appended = false;\nfor( int j=0; j<n; j++ ) {\nif( b[bix+j] != 0 ) {\n@@ -1773,10 +1774,14 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n}\n}\n//only sort if value appended\n- if( !appendOnly && appended )\n+ if( !COO && !appendOnly && appended )\na.sort(i);\n}\n}\n+\n+ //full sort of coordinate blocks\n+ if( COO && !appendOnly )\n+ a.sort();\n}\n////////\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SparseBlockCOO.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SparseBlockCOO.java", "diff": "@@ -319,12 +319,16 @@ public class SparseBlockCOO extends SparseBlock\nint alen = row.size();\nint[] aix = row.indexes();\ndouble[] avals = row.values();\n+ //delete existing values in range if necessary\ndeleteIndexRange(r, aix[0], aix[alen-1]+1);\n+ //prepare free space (allocate and shift)\n+ int lsize = _size+alen;\n+ if( _values.length < lsize )\n+ resize(lsize);\nshiftRightByN(pos, alen);\nArrays.fill(_rindexes, pos, pos+alen, r);\nSystem.arraycopy(aix, 0, _cindexes, pos, alen);\nSystem.arraycopy(avals, 0, _values, pos, alen);\n- _size+=alen;\n}\n@Override\n@@ -393,7 +397,8 @@ public class SparseBlockCOO extends SparseBlock\nint r = _rindexes[index];\nint len = 0;\nwhile( r == _rindexes[index] ) {\n- len ++; index ++;\n+ len ++;\n+ index ++;\n}\nSortUtils.sortByIndex(index-len, index, _cindexes, _values);\n}\n@@ -530,7 +535,7 @@ public class SparseBlockCOO extends SparseBlock\nprivate void resize() {\n//compute new size\n- double tmpCap = _values.length * RESIZE_FACTOR1;\n+ double tmpCap = Math.ceil(_values.length * RESIZE_FACTOR1);\nint newCap = (int)Math.min(tmpCap, Integer.MAX_VALUE);\nresize(newCap);\n@@ -545,7 +550,7 @@ public class SparseBlockCOO extends SparseBlock\nprivate void resizeAndInsert(int ix, int r, int c, double v) {\n//compute new size\n- double tmpCap = _values.length * RESIZE_FACTOR1;\n+ double tmpCap = Math.ceil(_values.length * 
RESIZE_FACTOR1);\nint newCap = (int)Math.min(tmpCap, Integer.MAX_VALUE);\nint[] oldrindexes = _rindexes;\n@@ -588,8 +593,7 @@ public class SparseBlockCOO extends SparseBlock\n_size--;\n}\n- private void shiftRightByN(int ix, int n)\n- {\n+ private void shiftRightByN(int ix, int n) {\n//overlapping array copy (shift rhs values right by 1)\nSystem.arraycopy(_rindexes, ix, _rindexes, ix+n, _size-ix);\nSystem.arraycopy(_cindexes, ix, _cindexes, ix+n, _size-ix);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/sparse/SparseBlockMerge.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.sparse;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.matrix.data.SparseBlock;\n+import org.apache.sysml.runtime.matrix.data.SparseBlockFactory;\n+import org.apache.sysml.runtime.util.DataConverter;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+public class SparseBlockMerge extends AutomatedTestBase\n+{\n+ private final static int rows = 1000;\n+ private final static int cols = 1000;\n+ private final static double sparsity1 = 0.001;\n+ private final static double sparsity2 = 0.01;\n+ private final static double sparsity3 = 0.1;\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ }\n+\n+ @Test\n+ public void testMergeMCSR_MCSR_1() {\n+ runSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.MCSR, sparsity1);\n+ }\n+\n+ @Test\n+ public void testMergeMCSR_MCSR_2() {\n+ runSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.MCSR, sparsity2);\n+ }\n+\n+ @Test\n+ public void testMergeMCSR_MCSR_3() {\n+ runSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.MCSR, sparsity3);\n+ }\n+\n+ @Test\n+ public void testMergeMCSR_CSR_1() {\n+ runSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.CSR, sparsity1);\n+ }\n+\n+ @Test\n+ public void testMergeMCSR_CSR_2() {\n+ runSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.CSR, sparsity2);\n+ }\n+\n+ @Test\n+ public void testMergeMCSR_CSR_3() {\n+ runSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.CSR, sparsity3);\n+ }\n+\n+ @Test\n+ public void testMergeMCSR_COO_1() {\n+ runSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.COO, sparsity1);\n+ }\n+\n+ @Test\n+ public void testMergeMCSR_COO_2() {\n+ runSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.COO, sparsity2);\n+ }\n+\n+ @Test\n+ public void testMergeMCSR_COO_3() {\n+ runSparseBlockMergeTest(SparseBlock.Type.MCSR, 
SparseBlock.Type.COO, sparsity3);\n+ }\n+\n+ @Test\n+ public void testMergeCSR_CSR_1() {\n+ runSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.CSR, sparsity1);\n+ }\n+\n+ @Test\n+ public void testMergeCSR_CSR_2() {\n+ runSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.CSR, sparsity2);\n+ }\n+\n+ @Test\n+ public void testMergeCSR_CSR_3() {\n+ runSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.CSR, sparsity3);\n+ }\n+\n+ @Test\n+ public void testMergeCSR_MCSR_1() {\n+ runSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.MCSR, sparsity1);\n+ }\n+\n+ @Test\n+ public void testMergeCSR_MCSR_2() {\n+ runSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.MCSR, sparsity2);\n+ }\n+\n+ @Test\n+ public void testMergeCSR_MCSR_3() {\n+ runSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.MCSR, sparsity3);\n+ }\n+\n+ @Test\n+ public void testMergeCSR_COO_1() {\n+ runSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.COO, sparsity1);\n+ }\n+\n+ @Test\n+ public void testMergeCSR_COO_2() {\n+ runSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.COO, sparsity2);\n+ }\n+\n+ @Test\n+ public void testMergeCSR_COO_3() {\n+ runSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.COO, sparsity3);\n+ }\n+\n+ @Test\n+ public void testMergeCOO_COO_1() {\n+ runSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.COO, sparsity1);\n+ }\n+\n+ @Test\n+ public void testMergeCOO_COO_2() {\n+ runSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.COO, sparsity2);\n+ }\n+\n+ @Test\n+ public void testMergeCOO_COO_3() {\n+ runSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.COO, sparsity3);\n+ }\n+\n+ @Test\n+ public void testMergeCOO_MCSR_1() {\n+ runSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.MCSR, sparsity1);\n+ }\n+\n+ @Test\n+ public void testMergeCOO_MCSR_2() {\n+ runSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.MCSR, sparsity2);\n+ }\n+\n+ @Test\n+ public void testMergeCOO_MCSR_3() {\n+ runSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.MCSR, sparsity3);\n+ }\n+\n+ @Test\n+ public void testMergeCOO_CSR_1() {\n+ runSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.CSR, sparsity1);\n+ }\n+\n+ @Test\n+ public void testMergeCOO_CSR_2() {\n+ runSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.CSR, sparsity2);\n+ }\n+\n+ @Test\n+ public void testMergeCOO_CSR_3() {\n+ runSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.CSR, sparsity3);\n+ }\n+\n+ private void runSparseBlockMergeTest( SparseBlock.Type btype1, SparseBlock.Type btype2, double sparsity)\n+ {\n+ try\n+ {\n+ //data generation\n+ double[][] A = getRandomMatrix(rows, cols, -10, 10, sparsity, 1234);\n+ double[][] B1 = new double[A.length][];\n+ double[][] B2 = new double[A.length][];\n+ for(int i=0; i<A.length; i++) {\n+ B1[i] = new double[A[i].length];\n+ B2[i] = new double[A[2].length];\n+ for(int j=0; j<A[i].length; j++) {\n+ if( j%2 == 0 )\n+ B1[i][j] = A[i][j];\n+ else\n+ B2[i][j] = A[i][j];\n+ }\n+ }\n+\n+ //init sparse block\n+ MatrixBlock mb1 = DataConverter.convertToMatrixBlock(B1);\n+ MatrixBlock mb2 = DataConverter.convertToMatrixBlock(B2);\n+ long nnz = mb1.getNonZeros() + mb2.getNonZeros();\n+ mb1.setSparseBlock(SparseBlockFactory.copySparseBlock(btype1, mb1.getSparseBlock(), false));\n+ mb2.setSparseBlock(SparseBlockFactory.copySparseBlock(btype2, mb2.getSparseBlock(), false));\n+\n+ //execute merge\n+ mb1.merge(mb2, false);\n+\n+ //check for correct number of 
non-zeros\n+ if( nnz != mb1.getNonZeros() )\n+ Assert.fail(\"Wrong number of non-zeros: \"+mb1.getNonZeros()+\", expected: \"+nnz);\n+\n+ //check correct values\n+ long count = 0;\n+ SparseBlock sblock = mb1.getSparseBlock();\n+ for( int i=0; i<rows; i++) {\n+ if( sblock.isEmpty(i) ) continue;\n+ int alen = sblock.size(i);\n+ int apos = sblock.pos(i);\n+ int[] aix = sblock.indexes(i);\n+ double[] avals = sblock.values(i);\n+ for( int j=0; j<alen; j++ ) {\n+ if( avals[apos+j] != A[i][aix[apos+j]] )\n+ Assert.fail(\"Wrong value returned by scan: \"+avals[apos+j]+\", expected: \"+A[i][apos+aix[j]]);\n+ count++;\n+ }\n+ }\n+ if( count != nnz )\n+ Assert.fail(\"Wrong number of values returned by merge: \"+count+\", expected: \"+nnz);\n+ }\n+ catch(Exception ex) {\n+ ex.printStackTrace();\n+ throw new RuntimeException(ex);\n+ }\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/sparse/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/sparse/ZPackageSuite.java", "diff": "@@ -34,6 +34,7 @@ import org.junit.runners.Suite;\nSparseBlockIndexRange.class,\nSparseBlockIterator.class,\nSparseBlockMemEstimate.class,\n+ SparseBlockMerge.class,\nSparseBlockScan.class,\nSparseBlockSize.class,\n})\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2051] Fix ultra-sparse matrix block merge COO format This patch fixes the matrix block merge primitive - as used for matrix reblock - for ultra-sparse blocks in COO format. Furthermore, this also includes a set of tests for all combinations of sparse formats and different sparsity configurations.
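A minimal sketch of the merge strategy the patch introduces; a and b stand for the sparse target and source blocks, and appendRowValues is a hypothetical helper for the per-row copy logic of the actual primitive.

// Hedged sketch: COO stores global (row,col,value) triples, so per-row
// sorting is replaced by appending all values first and one full final sort.
boolean coo = (a instanceof SparseBlockCOO);
for( int i=0; i<m; i++ ) {
    if( b.isEmpty(i) ) continue;
    appendRowValues(a, b, i); // hypothetical helper: append row i of b into a
    if( !coo )
        a.sort(i); // row-major formats: sort only the touched row
}
if( coo )
    a.sort(); // COO: single full sort over all triples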
49,698
24.12.2017 06:13:53
28,800
581077712cb1749db67859d4c8a9332ce95c5384
[MINOR] New tests for cholesky builtin function, incl. related fixes Closes 710
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ipa/IPAPassRemoveUnnecessaryCheckpoints.java", "new_path": "src/main/java/org/apache/sysml/hops/ipa/IPAPassRemoveUnnecessaryCheckpoints.java", "diff": "@@ -215,10 +215,9 @@ public class IPAPassRemoveUnnecessaryCheckpoints extends IPAPass\n{\nList<StatementBlock> sbs = dmlp.getStatementBlocks();\n- if( sbs.size()==1 & !(sbs.get(0) instanceof IfStatementBlock\n+ if (sbs.size() == 1 && !(sbs.get(0) instanceof IfStatementBlock\n|| sbs.get(0) instanceof WhileStatementBlock\n- || sbs.get(0) instanceof ForStatementBlock) )\n- {\n+ || sbs.get(0) instanceof ForStatementBlock)) {\n//recursively process all dag roots\nif (sbs.get(0).getHops() != null) {\nHop.resetVisitStatus(sbs.get(0).getHops());\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/BuiltinUnaryCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/BuiltinUnaryCPInstruction.java", "diff": "@@ -27,14 +27,14 @@ import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.functionobjects.Builtin;\nimport org.apache.sysml.runtime.functionobjects.ValueFunction;\nimport org.apache.sysml.runtime.instructions.InstructionUtils;\n+import org.apache.sysml.runtime.matrix.data.LibCommonsMath;\nimport org.apache.sysml.runtime.matrix.operators.Operator;\nimport org.apache.sysml.runtime.matrix.operators.SimpleOperator;\nimport org.apache.sysml.runtime.matrix.operators.UnaryOperator;\npublic abstract class BuiltinUnaryCPInstruction extends UnaryCPInstruction {\n- protected BuiltinUnaryCPInstruction(Operator op, CPOperand in, CPOperand out, String opcode,\n- String istr) {\n+ protected BuiltinUnaryCPInstruction(Operator op, CPOperand in, CPOperand out, String opcode, String istr) {\nsuper(CPType.BuiltinUnary, op, in, out, opcode, istr);\n}\n@@ -49,8 +49,7 @@ public abstract class BuiltinUnaryCPInstruction extends UnaryCPInstruction {\nValueFunction func = null;\n//print or stop or cumulative aggregates\n- if( parts.length==4 )\n- {\n+ if( parts.length==4 ) {\nopcode = parts[0];\nin.split(parts[1]);\nout.split(parts[2]);\n@@ -61,15 +60,15 @@ public abstract class BuiltinUnaryCPInstruction extends UnaryCPInstruction {\nelse\nreturn new ScalarBuiltinCPInstruction(new SimpleOperator(func), in, out, opcode, str);\n}\n- else //2+1, general case\n- {\n+ else { //2+1, general case\nopcode = parseUnaryInstruction(str, in, out);\nfunc = Builtin.getBuiltinFnObject(opcode);\nif(in.getDataType() == DataType.SCALAR)\nreturn new ScalarBuiltinCPInstruction(new SimpleOperator(func), in, out, opcode, str);\nelse if(in.getDataType() == DataType.MATRIX)\n- return new MatrixBuiltinCPInstruction(new UnaryOperator(func), in, out, opcode, str);\n+ return new MatrixBuiltinCPInstruction(LibCommonsMath.isSupportedUnaryOperation(opcode) ?\n+ null : new UnaryOperator(func), in, out, opcode, str);\n}\nreturn null;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/MatrixBuiltinCPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/MatrixBuiltinCPInstruction.java", "diff": "@@ -35,7 +35,6 @@ public class MatrixBuiltinCPInstruction extends BuiltinUnaryCPInstruction {\npublic void processInstruction(ExecutionContext ec)\nthrows DMLRuntimeException\n{\n- UnaryOperator u_op = (UnaryOperator) _optr;\nString output_name = output.getName();\nString opcode = getOpcode();\n@@ -44,6 +43,7 @@ public class 
MatrixBuiltinCPInstruction extends BuiltinUnaryCPInstruction {\nec.setMatrixOutput(output_name, retBlock, getExtendedOpcode());\n}\nelse {\n+ UnaryOperator u_op = (UnaryOperator) _optr;\nMatrixBlock inBlock = ec.getMatrixInput(input1.getName(), getExtendedOpcode());\nMatrixBlock retBlock = (MatrixBlock) (inBlock.unaryOperations(u_op, new MatrixBlock()));\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibCommonsMath.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibCommonsMath.java", "diff": "@@ -281,7 +281,8 @@ public class LibCommonsMath\nif ( !in.isSquare() )\nthrow new DMLRuntimeException(\"Input to cholesky() must be square matrix -- given: a \" + in.getRowDimension() + \"x\" + in.getColumnDimension() + \" matrix.\");\n- CholeskyDecomposition cholesky = new CholeskyDecomposition(in);\n+ CholeskyDecomposition cholesky = new CholeskyDecomposition(in, 1e-14,\n+ CholeskyDecomposition.DEFAULT_ABSOLUTE_POSITIVITY_THRESHOLD);\nRealMatrix rmL = cholesky.getL();\nreturn DataConverter.convertToMatrixBlock(rmL.getData());\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/unary/matrix/CholeskyTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.unary.matrix;\n+\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n+import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.junit.Assert;\n+import org.junit.Test;\n+\n+/**\n+ * Tests for the Cholesky matrix factorization\n+ * Note:\n+ * 1. Only tested for dense configuration of matrices (small, & large)\n+ * 2. 
The remaining tests for matrix dim check, positive definiteness\n+ * already tested at commons-math.\n+ */\n+\n+public class CholeskyTest extends AutomatedTestBase\n+{\n+ private final static String TEST_NAME = \"cholesky\";\n+ private final static String TEST_DIR = \"functions/unary/matrix/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + CholeskyTest.class.getSimpleName() + \"/\";\n+\n+ private final static int rows1 = 500;\n+ private final static int rows2 = 2500;\n+ private final static int cols1 = 500;\n+ private final static int cols2 = 2500;\n+ private final static double sparsity = 0.9;\n+\n+ @Override\n+ public void setUp() {\n+ addTestConfiguration(\n+ TEST_NAME,\n+ new TestConfiguration(TEST_CLASS_DIR, TEST_NAME,\n+ new String[] { \"D\" }) );\n+ }\n+\n+ @Test\n+ public void testCholeskyDenseCP() {\n+ runTestCholesky( rows1, cols1, DMLScript.RUNTIME_PLATFORM.SINGLE_NODE );\n+ }\n+\n+ @Test\n+ public void testCholeskyDenseSP() {\n+ runTestCholesky( rows1, cols1, RUNTIME_PLATFORM.SPARK );\n+ }\n+\n+ @Test\n+ public void testCholeskyDenseMR() {\n+ runTestCholesky( rows1, cols1, RUNTIME_PLATFORM.HADOOP );\n+ }\n+\n+ @Test\n+ public void testCholeskyDenseHybrid() {\n+ runTestCholesky( rows1, cols1, RUNTIME_PLATFORM.HYBRID );\n+ }\n+\n+ @Test\n+ public void testLargeCholeskyDenseCP() {\n+ runTestCholesky( rows2, cols2, RUNTIME_PLATFORM.SINGLE_NODE );\n+ }\n+\n+ @Test\n+ public void testLargeCholeskyDenseSP() {\n+ runTestCholesky( rows2, cols2, RUNTIME_PLATFORM.SPARK );\n+ }\n+\n+ @Test\n+ public void testLargeCholeskyDenseMR() {\n+ runTestCholesky( rows2, cols2, RUNTIME_PLATFORM.HADOOP );\n+ }\n+\n+ @Test\n+ public void testLargeCholeskyDenseHybrid() {\n+ runTestCholesky( rows2, cols2, RUNTIME_PLATFORM.HYBRID );\n+ }\n+\n+ private void runTestCholesky( int rows, int cols, RUNTIME_PLATFORM rt) {\n+ RUNTIME_PLATFORM rtold = rtplatform;\n+ rtplatform = rt;\n+\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK )\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n+\n+ try {\n+ getAndLoadTestConfiguration(TEST_NAME);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME+ TEST_NAME + \".dml\";\n+ programArgs = new String[]{\"-args\", input(\"A\"), output(\"D\") };\n+\n+ double[][] A = getRandomMatrix(rows, cols, 0, 1, sparsity, 10);\n+ MatrixCharacteristics mc = new MatrixCharacteristics(rows, cols, -1, -1, -1);\n+ writeInputMatrixWithMTD(\"A\", A, false, mc);\n+\n+ //run tests and compare results\n+ runTest(true, false, null, -1);\n+ Assert.assertEquals(0, readDMLMatrixFromHDFS(\"D\")\n+ .get(new CellIndex(1,1)), 1e-5);\n+ }\n+ finally {\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ rtplatform = rtold;\n+ }\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/unary/matrix/cholesky.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+/*\n+ * DML script to test Cholesky Factorization (cholesky), as a Builtin function.\n+ *\n+ * Reference:\n+ * Theorem 4.2.7, page No. 163\n+ * \"Matrix Computations\" by Golub & Van Loan (4th Edition)\n+ *\n+ * Cholesky Factorization:\n+ * If A belongs to R(nxn), is symmetric positive definite,\n+ * then there exists a unique lower triangular G belongs to R(nxn), with positive\n+ * diagonal entries such that.\n+ * A = G %*% t(G);\n+ *\n+ */\n+\n+\n+X = read($1);\n+\n+#1. generate a symmetric positive definite matrix\n+A = X %*% t(X);\n+\n+#2. obtain unique lower triangular matrix, G\n+G = cholesky(A);\n+\n+#3. Recompute the positive definite matrix\n+B = G %*% t(G);\n+D = as.matrix(sum(A-B));\n+\n+write(D, $2);\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/unary/matrix/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/unary/matrix/ZPackageSuite.java", "diff": "@@ -32,6 +32,7 @@ import org.junit.runners.Suite;\nATanTest.class,\nCastAsScalarTest.class,\nCosTest.class,\n+ CholeskyTest.class,\nDiagTest.class,\nEigenFactorizeTest.class,\nFullCummaxTest.class,\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] New tests for cholesky builtin function, incl. related fixes Closes #710.
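A self-contained sketch of the commons-math call the related fix configures, using the relaxed relative symmetry threshold (1e-14) from the patch; the input matrix here is illustrative only.

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.CholeskyDecomposition;
import org.apache.commons.math3.linear.RealMatrix;

public class CholeskySketch {
    public static void main(String[] args) {
        RealMatrix a = new Array2DRowRealMatrix(new double[][]{{4,2},{2,3}});
        CholeskyDecomposition chol = new CholeskyDecomposition(a, 1e-14,
            CholeskyDecomposition.DEFAULT_ABSOLUTE_POSITIVITY_THRESHOLD);
        RealMatrix l = chol.getL(); // lower triangular L with a = L * L^T
        System.out.println(l);
    }
}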
49,738
25.12.2017 10:14:33
-3,600
38332acde74cb235cf22397981cd9aa6f5598c9b
Fix sort of sparse blocks in COO format There was an issue with sorting sparse blocks in COO format if the underlying allocated arrays exactly match the logical size. This patch fixes this issue and makes minor cleanups of the matrix block sparse merge primitive, which applies to all sparse block formats.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -1756,7 +1756,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n}\n}\n//only sort if value appended\n- if( !appendOnly && appended )\n+ if( !COO && !appendOnly && appended )\na.sort(i);\n}\n}\n@@ -1769,7 +1769,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nboolean appended = false;\nfor( int j=0; j<n; j++ ) {\nif( b[bix+j] != 0 ) {\n- appendValue(i, j, b[bix+j]);\n+ appendValue(i, j, b[bix+j]); //incl alloc\nappended = true;\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SparseBlockCOO.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SparseBlockCOO.java", "diff": "@@ -396,7 +396,7 @@ public class SparseBlockCOO extends SparseBlock\nwhile( index < _size ) {\nint r = _rindexes[index];\nint len = 0;\n- while( r == _rindexes[index] ) {\n+ while( index < _size && r == _rindexes[index] ) {\nlen ++;\nindex ++;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/sparse/SparseBlockMerge.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/sparse/SparseBlockMerge.java", "diff": "@@ -32,6 +32,7 @@ public class SparseBlockMerge extends AutomatedTestBase\n{\nprivate final static int rows = 1000;\nprivate final static int cols = 1000;\n+ private final static double sparsity0 = 0.000005;\nprivate final static double sparsity1 = 0.001;\nprivate final static double sparsity2 = 0.01;\nprivate final static double sparsity3 = 0.1;\n@@ -41,6 +42,11 @@ public class SparseBlockMerge extends AutomatedTestBase\nTestUtils.clearAssertionInformation();\n}\n+ @Test\n+ public void testMergeMCSR_MCSR_0() {\n+ runSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.MCSR, sparsity0);\n+ }\n+\n@Test\npublic void testMergeMCSR_MCSR_1() {\nrunSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.MCSR, sparsity1);\n@@ -56,6 +62,11 @@ public class SparseBlockMerge extends AutomatedTestBase\nrunSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.MCSR, sparsity3);\n}\n+ @Test\n+ public void testMergeMCSR_CSR_0() {\n+ runSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.CSR, sparsity0);\n+ }\n+\n@Test\npublic void testMergeMCSR_CSR_1() {\nrunSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.CSR, sparsity1);\n@@ -71,6 +82,11 @@ public class SparseBlockMerge extends AutomatedTestBase\nrunSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.CSR, sparsity3);\n}\n+ @Test\n+ public void testMergeMCSR_COO_0() {\n+ runSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.COO, sparsity0);\n+ }\n+\n@Test\npublic void testMergeMCSR_COO_1() {\nrunSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.COO, sparsity1);\n@@ -86,6 +102,11 @@ public class SparseBlockMerge extends AutomatedTestBase\nrunSparseBlockMergeTest(SparseBlock.Type.MCSR, SparseBlock.Type.COO, sparsity3);\n}\n+ @Test\n+ public void testMergeCSR_CSR_0() {\n+ runSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.CSR, sparsity0);\n+ }\n+\n@Test\npublic void testMergeCSR_CSR_1() {\nrunSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.CSR, sparsity1);\n@@ -101,6 +122,11 @@ public class SparseBlockMerge extends 
AutomatedTestBase\nrunSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.CSR, sparsity3);\n}\n+ @Test\n+ public void testMergeCSR_MCSR_0() {\n+ runSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.MCSR, sparsity0);\n+ }\n+\n@Test\npublic void testMergeCSR_MCSR_1() {\nrunSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.MCSR, sparsity1);\n@@ -116,6 +142,11 @@ public class SparseBlockMerge extends AutomatedTestBase\nrunSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.MCSR, sparsity3);\n}\n+ @Test\n+ public void testMergeCSR_COO_0() {\n+ runSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.COO, sparsity0);\n+ }\n+\n@Test\npublic void testMergeCSR_COO_1() {\nrunSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.COO, sparsity1);\n@@ -131,6 +162,11 @@ public class SparseBlockMerge extends AutomatedTestBase\nrunSparseBlockMergeTest(SparseBlock.Type.CSR, SparseBlock.Type.COO, sparsity3);\n}\n+ @Test\n+ public void testMergeCOO_COO_0() {\n+ runSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.COO, sparsity0);\n+ }\n+\n@Test\npublic void testMergeCOO_COO_1() {\nrunSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.COO, sparsity1);\n@@ -146,6 +182,11 @@ public class SparseBlockMerge extends AutomatedTestBase\nrunSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.COO, sparsity3);\n}\n+ @Test\n+ public void testMergeCOO_MCSR_0() {\n+ runSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.MCSR, sparsity0);\n+ }\n+\n@Test\npublic void testMergeCOO_MCSR_1() {\nrunSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.MCSR, sparsity1);\n@@ -161,6 +202,11 @@ public class SparseBlockMerge extends AutomatedTestBase\nrunSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.MCSR, sparsity3);\n}\n+ @Test\n+ public void testMergeCOO_CSR_0() {\n+ runSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.CSR, sparsity0);\n+ }\n+\n@Test\npublic void testMergeCOO_CSR_1() {\nrunSparseBlockMergeTest(SparseBlock.Type.COO, SparseBlock.Type.CSR, sparsity1);\n@@ -212,6 +258,7 @@ public class SparseBlockMerge extends AutomatedTestBase\n//check correct values\nlong count = 0;\nSparseBlock sblock = mb1.getSparseBlock();\n+ if( sblock != null ) {\nfor( int i=0; i<rows; i++) {\nif( sblock.isEmpty(i) ) continue;\nint alen = sblock.size(i);\n@@ -224,6 +271,7 @@ public class SparseBlockMerge extends AutomatedTestBase\ncount++;\n}\n}\n+ }\nif( count != nnz )\nAssert.fail(\"Wrong number of values returned by merge: \"+count+\", expected: \"+nnz);\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2051] Fix sort of sparse blocks in COO format There was an issue with sorting sparse blocks in COO format if the underlying allocated arrays exactly match the logical size. This patch fixes this issue and makes minor cleanups of the matrix block sparse merge primitive, which applies to all sparse block formats.
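A minimal sketch of the fixed row-run scan; size, rindexes, cindexes, and values stand for the COO block's logical size and triple arrays, as in the diff.

// Hedged sketch: the run-length loop must check index < size before reading
// rindexes[index]; without the guard it reads one element past the end
// whenever the arrays have exactly the logical size (no spare capacity).
int index = 0;
while( index < size ) {
    int r = rindexes[index];
    int len = 0;
    while( index < size && r == rindexes[index] ) { // bounds check first
        len++;
        index++;
    }
    SortUtils.sortByIndex(index-len, index, cindexes, values); // sort row r by column
}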
49,738
30.12.2017 12:06:59
28,800
b152d73b378055cc5e52ac954b0edd98f1254be8
Fix right indexing / recompute nnz large dense blocks This patch fixes two special cases related to large dense blocks, specifically column right indexing and the recomputation of non-zeros.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockDRB.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockDRB.java", "diff": "@@ -22,6 +22,8 @@ package org.apache.sysml.runtime.matrix.data;\nimport java.util.Arrays;\n+import org.apache.sysml.runtime.util.UtilFunctions;\n+\npublic class DenseBlockDRB extends DenseBlock\n{\nprivate static final long serialVersionUID = 8546723684649816489L;\n@@ -114,13 +116,11 @@ public class DenseBlockDRB extends DenseBlock\npublic long countNonZeros(int rl, int ru, int cl, int cu) {\nlong nnz = 0;\nif( cl == 0 && cu == clen ) { //specific case: all cols\n- for( int i=rl*clen; i<ru*clen; i++ )\n- nnz += (data[i]!=0) ? 1 : 0;\n+ nnz += UtilFunctions.computeNnz(data, rl*clen, (ru-rl)*clen);\n}\nelse {\nfor( int i=rl, ix=rl*clen; i<ru; i++, ix+=clen )\n- for( int j=cl; j<cu; j++ )\n- nnz += (data[ix+j]!=0) ? 1 : 0;\n+ nnz += UtilFunctions.computeNnz(data, ix+cl, cu-cl);\n}\nreturn nnz;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "diff": "@@ -22,6 +22,8 @@ package org.apache.sysml.runtime.matrix.data;\nimport java.util.Arrays;\n+import org.apache.sysml.runtime.util.UtilFunctions;\n+\npublic class DenseBlockLDRB extends DenseBlock\n{\nprivate static final long serialVersionUID = -7285459683402612969L;\n@@ -129,12 +131,17 @@ public class DenseBlockLDRB extends DenseBlock\n@Override\npublic long countNonZeros(int rl, int ru, int cl, int cu) {\nlong nnz = 0;\n- for(int bi=index(rl); bi<index(ru); bi++) {\n- double[] a = data[bi];\n- int blen = blockSize(bi);\n- for(int i=pos(bi), ix=pos(bi)*clen; i<blen; i++, ix+=clen)\n- for( int j=cl; j<cu; j++ )\n- nnz += (a[ix+j]!=0) ? 1 : 0;\n+ boolean rowBlock = (cl == 0 && cu == clen);\n+ final int bil = index(rl);\n+ final int biu = index(ru-1);\n+ for(int bi=bil; bi<=biu; bi++) {\n+ int lpos = (bi==bil) ? pos(rl) : 0;\n+ int len = (bi==biu) ? pos(ru-1)-lpos+clen : blockSize(bi)*clen;\n+ if( rowBlock )\n+ nnz += UtilFunctions.computeNnz(data[bi], lpos, len);\n+ else\n+ for(int i=lpos; i<lpos+len; i+=clen)\n+ nnz += UtilFunctions.computeNnz(data[i], i+cl, cu-cl);\n}\nreturn nnz;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -3755,7 +3755,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nDenseBlock a = getDenseBlock();\ndouble[] c = dest.getDenseBlockValues();\nfor( int i=rl; i<=ru; i++ )\n- c[i] = a.get(i, cl);\n+ c[i-rl] = a.get(i, cl);\n}\n}\nelse { // GENERAL RANGE INDEXING\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2046] Fix right indexing / recompute nnz large dense blocks This patch fixes two special cases related to large dense blocks, specifically column right indexing and the recomputation of non-zeros.
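The column right-indexing fix in one line of context; a is the source dense block, c the 0-based destination vector, and [rl,ru]/cl the requested row range and column, as in the diff.

// Hedged sketch: source row i in [rl,ru] must land at destination position
// i-rl; the old write to c[i] shifted results and overran c whenever rl > 0.
for( int i=rl; i<=ru; i++ )
    c[i-rl] = a.get(i, cl);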
49,738
01.01.2018 19:28:49
28,800
6c4cc17006864bfb7dc3d4d4df8325bbee314735
[MINOR] Cleanup spark codegen instructions (number of partitions)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/SpoofSPInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/SpoofSPInstruction.java", "diff": "@@ -144,13 +144,10 @@ public class SpoofSPInstruction extends SPInstruction {\nif( (op.getCellType()==CellType.ROW_AGG && mcIn.getCols() > mcIn.getColsPerBlock())\n|| (op.getCellType()==CellType.COL_AGG && mcIn.getRows() > mcIn.getRowsPerBlock())) {\n- //TODO investigate if some other side effect of correct blocks\nlong numBlocks = (op.getCellType()==CellType.ROW_AGG ) ?\nmcIn.getNumRowBlocks() : mcIn.getNumColBlocks();\n- if( out.partitions().size() > numBlocks )\n- out = RDDAggregateUtils.aggByKeyStable(out, aggop, (int)numBlocks, false);\n- else\n- out = RDDAggregateUtils.aggByKeyStable(out, aggop, false);\n+ out = RDDAggregateUtils.aggByKeyStable(out, aggop,\n+ (int)Math.min(out.getNumPartitions(), numBlocks), false);\n}\nsec.setRDDHandleForVariable(_out.getName(), out);\n@@ -189,11 +186,9 @@ public class SpoofSPInstruction extends SPInstruction {\nout = in.mapPartitionsToPair(new OuterProductFunction(\n_class.getName(), _classBytes, bcVect2, bcMatrices, scalars), true);\nif(type == OutProdType.LEFT_OUTER_PRODUCT || type == OutProdType.RIGHT_OUTER_PRODUCT ) {\n- //TODO investigate if some other side effect of correct blocks\n- if( in.partitions().size() > mcOut.getNumRowBlocks()*mcOut.getNumColBlocks() )\n- out = RDDAggregateUtils.sumByKeyStable(out, (int)(mcOut.getNumRowBlocks()*mcOut.getNumColBlocks()), false);\n- else\n- out = RDDAggregateUtils.sumByKeyStable(out, false);\n+ long numBlocks = mcOut.getNumRowBlocks() * mcOut.getNumColBlocks();\n+ out = RDDAggregateUtils.sumByKeyStable(out,\n+ (int)Math.min(out.getNumPartitions(), numBlocks), false);\n}\nsec.setRDDHandleForVariable(_out.getName(), out);\n@@ -231,13 +226,9 @@ public class SpoofSPInstruction extends SPInstruction {\nelse //row-agg or no-agg\n{\nif( op.getRowType()==RowType.ROW_AGG && mcIn.getCols() > mcIn.getColsPerBlock() ) {\n- //TODO investigate if some other side effect of correct blocks\n- if( out.partitions().size() > mcIn.getNumRowBlocks() )\n- out = RDDAggregateUtils.sumByKeyStable(out, (int)mcIn.getNumRowBlocks(), false);\n- else\n- out = RDDAggregateUtils.sumByKeyStable(out, false);\n+ out = RDDAggregateUtils.sumByKeyStable(out,\n+ (int)Math.min(out.getNumPartitions(), mcIn.getNumRowBlocks()), false);\n}\n-\nsec.setRDDHandleForVariable(_out.getName(), out);\n//maintain lineage info and output characteristics\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Cleanup spark codegen instructions (number of partitions)
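A hedged sketch of the consolidated shape, reusing names from the diff: the key aggregation never needs more output partitions than there are result blocks, so the former if/else on partition counts collapses into one clamped call.

long numBlocks = mcOut.getNumRowBlocks() * mcOut.getNumColBlocks();
out = RDDAggregateUtils.sumByKeyStable(out,
    (int) Math.min(out.getNumPartitions(), numBlocks), false); // clamp, don't branch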
49,738
03.01.2018 19:07:15
-3,600
5f9266a4db4cdd76219b78470f941312c6ee1d03
[HOTFIX] Fix spark/mr reverse operations (correct left indexing)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java", "diff": "@@ -277,7 +277,8 @@ public class LibMatrixReorg\nMatrixIndexes outix1 = new MatrixIndexes(blkix1, inix.getColumnIndex());\nMatrixBlock outblk1 = new MatrixBlock(blklen1, inblk.getNumColumns(), inblk.isInSparseFormat());\nMatrixBlock tmp1 = tmpblk.sliceOperations(0, iposCut, 0, tmpblk.getNumColumns()-1, new MatrixBlock());\n- outblk1.leftIndexingOperations(tmp1, ipos1, outblk1.getNumRows()-1, 0, tmpblk.getNumColumns()-1, outblk1, UpdateType.INPLACE_PINNED);\n+ outblk1.leftIndexingOperations(tmp1, ipos1, ipos1+tmp1.getNumRows()-1,\n+ 0, tmpblk.getNumColumns()-1, outblk1, UpdateType.INPLACE_PINNED);\nout.add(new IndexedMatrixValue(outix1, outblk1));\n//slice second block (if necessary)\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Fix spark/mr reverse operations (correct left indexing)
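A sketch of the corrected row range, with names from the diff: the inserted slice spans exactly tmp1.getNumRows() rows starting at ipos1, whereas the old upper bound outblk1.getNumRows()-1 made the target range larger than the source slice.

outblk1.leftIndexingOperations(tmp1, ipos1, ipos1 + tmp1.getNumRows() - 1,
    0, tmpblk.getNumColumns() - 1, outblk1, UpdateType.INPLACE_PINNED);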
49,738
03.01.2018 20:41:10
28,800
cf200e1ddaec050195cf9fb6f1f4c63fdc5973f8
Fix integer overflow in large dense blocks This patch fixes an integer overflow in obtaining the current capacity of a large dense block, which had particularly ugly consequences because each check for allocated blocks led to a reallocation of the underlying arrays (due to the incorrectly computed size).
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "diff": "@@ -111,7 +111,7 @@ public class DenseBlockLDRB extends DenseBlock\n@Override\npublic long capacity() {\n- int len = 0;\n+ long len = 0;\nfor(int i=0; i<numBlocks(); i++)\nlen += data[i].length;\nreturn len;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2045] Fix integer overflow in large dense blocks This patch fixes an integer overflow in obtaining the current capacity of a large dense block, which had particularly ugly consequences because each check for allocated blocks led to a reallocation of the underlying arrays (due to the incorrectly computed size).
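The overflow in one sketch; data is the array-of-arrays backing and numBlocks() its partition count, as in the diff.

// Hedged sketch: summing the backing array lengths into an int wraps once the
// block exceeds 2^31-1 cells (16GB of doubles); every capacity check then saw
// a bogus small/negative value and triggered a fresh reallocation.
long len = 0; // was: int len = 0;
for( int i=0; i<numBlocks(); i++ )
    len += data[i].length;
return len;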
49,738
03.01.2018 21:57:26
28,800
d596d2214b6e745ff61ab41178b5e5ff8cfacb54
Parallel array allocation for large dense blocks This patch modifies the new large dense block to allocate and initialize its underlying arrays in a multi-threaded manner. In a scenario of reading a 4M x 1k (32GB) binary block matrix from HDFS, this patch improved the end-to-end read time from 23.1s to 20.8s.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "diff": "package org.apache.sysml.runtime.matrix.data;\nimport java.util.Arrays;\n+import java.util.stream.IntStream;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n@@ -28,6 +29,8 @@ public class DenseBlockLDRB extends DenseBlock\n{\nprivate static final long serialVersionUID = -7285459683402612969L;\n+ private static final boolean PARALLEL_ALLOC = true;\n+\nprivate double[][] data;\nprivate int rlen;\nprivate int clen;\n@@ -56,6 +59,7 @@ public class DenseBlockLDRB extends DenseBlock\nreset(rlen, clen, blen, v);\n}\n+ @SuppressWarnings(\"resource\")\nprivate void reset(int rlen, int clen, int blen, double v) {\nlong llen = (long) rlen * clen;\nint numPart = (int)Math.ceil((double)rlen / blen);\n@@ -67,18 +71,25 @@ public class DenseBlockLDRB extends DenseBlock\n}\nelse {\ndata = new double[numPart][];\n- for(int i=0; i<numPart; i++) {\n- int lrlen = (int)(Math.min((i+1)*blen,rlen)-i*blen);\n- data[i] = new double[lrlen*clen];\n- if( v != 0 )\n- Arrays.fill(data[i], v);\n- }\n+ IntStream range = PARALLEL_ALLOC ?\n+ IntStream.range(0, numPart).parallel() :\n+ IntStream.range(0, numPart);\n+ range.forEach(i ->\n+ data[i] = allocArray(i, rlen, clen, blen, v));\n}\nthis.rlen = rlen;\nthis.clen = clen;\nthis.blen = blen;\n}\n+ private static double[] allocArray(int i, int rlen, int clen, int blen, double v) {\n+ int lrlen = (int)(Math.min((i+1)*blen,rlen)-i*blen);\n+ double[] ret = new double[lrlen*clen];\n+ if( v != 0 )\n+ Arrays.fill(ret, v);\n+ return ret;\n+ }\n+\n@Override\npublic int numRows() {\nreturn rlen;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2047] Parallel array allocation for large dense blocks This patch modifies the new large dense block to allocate and initialize its underlying arrays in a multi-threaded manner. In a scenario of reading a 4M x 1k (32GB) binary block matrix from HDFS, this patch improved the end-to-end read time from 23.1s to 20.8s.
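A hedged sketch of the parallel allocation pattern; numPart, blen, rlen, clen, and the fill value v follow the diff, while the enclosing method is hypothetical.

import java.util.Arrays;
import java.util.stream.IntStream;

// Allocate and value-initialize the per-partition arrays of a large dense
// block in parallel; allocation order is irrelevant, so a parallel range
// over partition indices is safe.
static double[][] allocLargeDense(int numPart, int blen, int rlen, int clen, double v) {
    double[][] data = new double[numPart][];
    IntStream.range(0, numPart).parallel().forEach(i -> {
        int lrlen = (int)(Math.min((long)(i+1)*blen, rlen) - (long)i*blen);
        data[i] = new double[lrlen * clen];
        if( v != 0 )
            Arrays.fill(data[i], v); // new double[] is already zero for v==0
    });
    return data;
}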
49,738
04.01.2018 23:12:19
28,800
2a6fa063edafc38e323a3b18903bb029791fd445
[HOTFIX] Fix solve handling in consolidated binary instructions
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/InstructionUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/InstructionUtils.java", "diff": "@@ -79,6 +79,7 @@ import org.apache.sysml.runtime.instructions.cp.CPOperand;\nimport org.apache.sysml.runtime.instructions.gpu.GPUInstruction.GPUINSTRUCTION_TYPE;\nimport org.apache.sysml.runtime.instructions.mr.MRInstruction.MRType;\nimport org.apache.sysml.runtime.instructions.spark.SPInstruction.SPType;\n+import org.apache.sysml.runtime.matrix.data.LibCommonsMath;\nimport org.apache.sysml.runtime.matrix.operators.AggregateOperator;\nimport org.apache.sysml.runtime.matrix.operators.AggregateTernaryOperator;\nimport org.apache.sysml.runtime.matrix.operators.AggregateUnaryOperator;\n@@ -493,6 +494,8 @@ public class InstructionUtils\npublic static Operator parseBinaryOrBuiltinOperator(String opcode, CPOperand in1, CPOperand in2)\nthrows DMLRuntimeException\n{\n+ if( LibCommonsMath.isSupportedMatrixMatrixOperation(opcode) )\n+ return null;\nboolean matrixScalar = (in1.getDataType() != in2.getDataType());\nreturn Builtin.isBuiltinFnObject(opcode) ?\n(matrixScalar ? new RightScalarOperator( Builtin.getBuiltinFnObject(opcode), 0) :\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Fix solve handling in consolidated binary instructions
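The reasoning behind the guard in one hedged sketch; LibCommonsMath and the method name are those of the diff.

// Opcodes backed by commons-math (e.g., "solve") have no generic binary
// operator; the early null return routes them to the dedicated matrix-matrix
// builtin path instead of constructing a wrong operator.
if( LibCommonsMath.isSupportedMatrixMatrixOperation(opcode) )
    return null;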
49,738
05.01.2018 11:02:57
28,800
1cc9a5e9a5b8d06369461361d66a78f76e548d95
[HOTFIX] Fix max/min spark binary builtin instructions parsing
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/SPInstructionParser.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/SPInstructionParser.java", "diff": "@@ -198,6 +198,12 @@ public class SPInstructionParser extends InstructionParser\nString2SPInstructionType.put( \"map||\" , SPType.Binary);\nString2SPInstructionType.put( \"mapxor\" , SPType.Binary);\n+ // Builtin Instruction Opcodes\n+ String2SPInstructionType.put( \"max\" , SPType.Binary);\n+ String2SPInstructionType.put( \"min\" , SPType.Binary);\n+ String2SPInstructionType.put( \"mapmax\" , SPType.Binary);\n+ String2SPInstructionType.put( \"mapmin\" , SPType.Binary);\n+\n// REBLOCK Instruction Opcodes\nString2SPInstructionType.put( \"rblk\" , SPType.Reblock);\nString2SPInstructionType.put( \"csvrblk\", SPType.CSVReblock);\n@@ -210,12 +216,6 @@ public class SPInstructionParser extends InstructionParser\nString2SPInstructionType.put( \"log\" , SPType.Builtin);\nString2SPInstructionType.put( \"log_nz\" , SPType.Builtin);\n- // Boolean Binary builtin\n- String2SPInstructionType.put( \"max\" , SPType.Builtin);\n- String2SPInstructionType.put( \"min\" , SPType.Builtin);\n- String2SPInstructionType.put( \"mapmax\" , SPType.Builtin);\n- String2SPInstructionType.put( \"mapmin\" , SPType.Builtin);\n-\nString2SPInstructionType.put( \"exp\" , SPType.Unary);\nString2SPInstructionType.put( \"abs\" , SPType.Unary);\nString2SPInstructionType.put( \"sin\" , SPType.Unary);\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Fix max/min spark binary builtin instructions parsing
49,738
05.01.2018 16:28:58
28,800
7cb956676d6e5f1b0ef5c3095c241149df18c1d7
Improved parfor optimization and initialization overhead This patch reduces the parfor optimization and initialization overhead by (1) removing unnecessary inefficiencies from the creation of parfor optimizer trees, and (2) creating local parfor workers in parallel, which also creates deep copies for update-in-place variables.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "diff": "@@ -29,6 +29,7 @@ import java.util.Collection;\nimport java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.List;\n+import java.util.stream.IntStream;\nimport org.apache.hadoop.fs.FileSystem;\nimport org.apache.hadoop.fs.Path;\n@@ -750,17 +751,16 @@ public class ParForProgramBlock extends ForProgramBlock\ntry\n{\n- // Step 1) init parallel workers, task queue and threads\n+ // Step 1) create task queue and init workers in parallel\n+ // (including preparation of update-in-place variables)\nLocalTaskQueue<Task> queue = new LocalTaskQueue<>();\nThread[] threads = new Thread[_numThreads];\nLocalParWorker[] workers = new LocalParWorker[_numThreads];\n- for( int i=0; i<_numThreads; i++ ) {\n- //create parallel workers as (lazy) deep copies\n- //including preparation of update-in-place variables\n+ IntStream.range(0, _numThreads).parallel().forEach(i -> {\nworkers[i] = createParallelWorker( _pwIDs[i], queue, ec, i);\nthreads[i] = new Thread( workers[i] );\nthreads[i].setPriority(Thread.MAX_PRIORITY);\n- }\n+ });\n// start threads (from now on waiting for tasks)\nfor( Thread thread : threads )\n@@ -1346,10 +1346,8 @@ public class ParForProgramBlock extends ForProgramBlock\n* @param ec execution context\n* @param index the index of the worker\n* @return local parworker\n- * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\nprivate LocalParWorker createParallelWorker(long pwID, LocalTaskQueue<Task> queue, ExecutionContext ec, int index)\n- throws DMLRuntimeException\n{\nLocalParWorker pw = null;\n@@ -1393,7 +1391,7 @@ public class ParForProgramBlock extends ForProgramBlock\npw.setFunctionNames(fnNames);\n}\ncatch(Exception ex) {\n- throw new DMLRuntimeException(ex);\n+ throw new RuntimeException(ex);\n}\nreturn pw;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptNode.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptNode.java", "diff": "@@ -22,6 +22,7 @@ package org.apache.sysml.runtime.controlprogram.parfor.opt;\nimport java.util.ArrayList;\nimport java.util.Collection;\nimport java.util.HashMap;\n+import java.util.Iterator;\nimport java.util.Set;\nimport org.apache.commons.lang.ArrayUtils;\n@@ -92,9 +93,6 @@ public class OptNode\nprivate int _k = -1;\nprivate HashMap<ParamType,String> _params = null;\n- //node statistics (only present for physical plans and leaf nodes)\n- private OptNodeStatistics _stats = null;\n-\n//line numbers (for explain)\nprivate int _beginLine = -1;\nprivate int _endLine = -1;\n@@ -204,14 +202,6 @@ public class OptNode\n_k = k;\n}\n- public OptNodeStatistics getStatistics() {\n- return _stats;\n- }\n-\n- public void setStatistics(OptNodeStatistics stats) {\n- _stats = stats;\n- }\n-\npublic boolean exchangeChild(OptNode oldNode, OptNode newNode) {\nif( isLeaf() )\nreturn false;\n@@ -404,13 +394,13 @@ public class OptNode\npublic void checkAndCleanupLeafNodes() {\nif( isLeaf() )\nreturn;\n- for( int i=0; i<_childs.size(); i++ ) {\n- OptNode n = _childs.get(i);\n+ Iterator<OptNode> iter = _childs.iterator();\n+ while( iter.hasNext() ) {\n+ OptNode n = iter.next();\nn.checkAndCleanupLeafNodes();\nif( n.isLeaf() && n._ntype != NodeType.HOP && n._ntype != NodeType.INST\n&& n._ntype != NodeType.FUNCCALL ) {\n- _childs.remove(i);\n- 
i--;\n+ iter.remove();\n}\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptTreeConverter.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptTreeConverter.java", "diff": "@@ -286,10 +286,6 @@ public class OptTreeConverter\nthrow new DMLRuntimeException(\"Unsupported instruction type.\");\n}\n- //create statistics\n- OptNodeStatistics stats = analyzeStatistics(inst, node, vars);\n- node.setStatistics(stats);\n-\nreturn node;\n}\n@@ -465,9 +461,10 @@ public class OptTreeConverter\n//TODO remove this workaround once this information can be obtained from hops/lops compiler\nif( node.isCPOnly() ) {\n- if( containsMRJobInstruction(pb, false, false) )\n+ boolean isSparkExec = OptimizerUtils.isSparkExecutionMode();\n+ if( !isSparkExec && containsMRJobInstruction(pb, false, false) )\nnode.setExecType(ExecType.MR);\n- else if(containsMRJobInstruction(pb, false, true))\n+ else if( isSparkExec && containsMRJobInstruction(pb, false, true))\nnode.setExecType(ExecType.SPARK);\n}\n}\n@@ -630,53 +627,25 @@ public class OptTreeConverter\nelse\n{\nret = containsMRJobInstruction(pb, true, true)\n- | (inclFunctions && containsFunctionCallInstruction(pb));\n+ || (inclFunctions && containsFunctionCallInstruction(pb));\n}\nreturn ret;\n}\n- public static boolean containsMRJobInstruction( ProgramBlock pb, boolean inclCPFile, boolean inclSpark )\n- {\n+ public static boolean containsMRJobInstruction( ProgramBlock pb, boolean inclCPFile, boolean inclSpark ) {\nreturn containsMRJobInstruction(pb.getInstructions(), inclCPFile, inclSpark);\n}\n- public static boolean containsMRJobInstruction( ArrayList<Instruction> instSet, boolean inclCPFile, boolean inclSpark )\n- {\n- boolean ret = false;\n- if( instSet!=null )\n- for( Instruction inst : instSet )\n- if( inst instanceof MRJobInstruction\n+ public static boolean containsMRJobInstruction( ArrayList<Instruction> instSet, boolean inclCPFile, boolean inclSpark ) {\n+ return instSet.stream().anyMatch(inst -> inst instanceof MRJobInstruction\n|| (inclSpark && inst instanceof SPInstruction)\n- || (inclCPFile && (inst instanceof MatrixIndexingCPFileInstruction || inst instanceof ParameterizedBuiltinCPFileInstruction)))\n- {\n- ret = true;\n- break;\n+ || (inclCPFile && (inst instanceof MatrixIndexingCPFileInstruction || inst instanceof ParameterizedBuiltinCPFileInstruction)));\n}\n- return ret;\n- }\n-\n- public static boolean containsFunctionCallInstruction( ProgramBlock pb )\n- {\n- boolean ret = false;\n- for( Instruction inst : pb.getInstructions() )\n- if( inst instanceof FunctionCallCPInstruction )\n- {\n- ret = true;\n- break;\n- }\n-\n- return ret;\n- }\n-\n- private static OptNodeStatistics analyzeStatistics(Instruction inst, OptNode on, LocalVariableMap vars)\n- throws DMLRuntimeException\n- {\n- //since the performance test tool for offline profiling has been removed,\n- //we return default values\n-\n- return new OptNodeStatistics(); //default values\n+ public static boolean containsFunctionCallInstruction( ProgramBlock pb ) {\n+ return pb.getInstructions().stream()\n+ .anyMatch(inst -> inst instanceof FunctionCallCPInstruction);\n}\npublic static void replaceProgramBlock(OptNode parent, OptNode n, ProgramBlock pbOld, ProgramBlock pbNew, boolean rtMap)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/InstructionParser.java", "new_path": 
"src/main/java/org/apache/sysml/runtime/instructions/InstructionParser.java", "diff": "@@ -35,36 +35,34 @@ public class InstructionParser\nif ( str == null || str.isEmpty() )\nreturn null;\n- String execType = str.split(Instruction.OPERAND_DELIM)[0];\n- if ( execType.equalsIgnoreCase(ExecType.CP.toString())\n- || execType.equalsIgnoreCase(ExecType.CP_FILE.toString()) )\n- {\n+ ExecType et = InstructionUtils.getExecType(str);\n+ switch( et ) {\n+ case CP:\n+ case CP_FILE: {\nCPType cptype = InstructionUtils.getCPType(str);\nif( cptype == null )\nthrow new DMLRuntimeException(\"Unknown CP instruction: \" + str);\nreturn CPInstructionParser.parseSingleInstruction (cptype, str);\n}\n- else if ( execType.equalsIgnoreCase(ExecType.SPARK.toString()) )\n- {\n+ case SPARK: {\nSPType sptype = InstructionUtils.getSPType(str);\nif( sptype == null )\nthrow new DMLRuntimeException(\"Unknown SPARK instruction: \" + str);\nreturn SPInstructionParser.parseSingleInstruction (sptype, str);\n}\n- else if ( execType.equalsIgnoreCase(ExecType.GPU.toString()) )\n- {\n+ case GPU: {\nGPUINSTRUCTION_TYPE gputype = InstructionUtils.getGPUType(str);\nif( gputype == null )\nthrow new DMLRuntimeException(\"Unknown GPU instruction: \" + str);\nreturn GPUInstructionParser.parseSingleInstruction (gputype, str);\n}\n- else if ( execType.equalsIgnoreCase(\"MR\") ) {\n+ case MR: {\nMRType mrtype = InstructionUtils.getMRType(str);\nif( mrtype == null )\nthrow new DMLRuntimeException(\"Unknown MR instruction: \" + str);\nreturn MRInstructionParser.parseSingleInstruction (mrtype, str);\n}\n- else {\n+ default:\nthrow new DMLRuntimeException(\"Unknown execution type in instruction: \" + str);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/InstructionUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/InstructionUtils.java", "diff": "@@ -24,6 +24,7 @@ import java.util.StringTokenizer;\nimport org.apache.sysml.lops.AppendM;\nimport org.apache.sysml.lops.BinaryM;\nimport org.apache.sysml.lops.GroupedAggregateM;\n+import org.apache.sysml.lops.LopProperties.ExecType;\nimport org.apache.sysml.lops.MapMult;\nimport org.apache.sysml.lops.MapMultChain;\nimport org.apache.sysml.lops.PMMJ;\n@@ -192,8 +193,12 @@ public class InstructionUtils\nreturn ret;\n}\n- public static String getOpCode( String str )\n- {\n+ public static ExecType getExecType( String str ) {\n+ int ix = str.indexOf(Instruction.OPERAND_DELIM);\n+ return ExecType.valueOf(str.substring(0, ix));\n+ }\n+\n+ public static String getOpCode( String str ) {\nint ix1 = str.indexOf(Instruction.OPERAND_DELIM);\nint ix2 = str.indexOf(Instruction.OPERAND_DELIM, ix1+1);\nreturn str.substring(ix1+1, ix2);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2059] Improved parfor optimization and initialization overhead This patch reduces the parfor optimization and initialization overhead by (1) removing unnecessary inefficiencies from the creation of parfor optimizer trees, and (2) creating local parfor workers in parallel, which also covers the deep copies for update-in-place variables.
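The second point above replaces a sequential worker-setup loop with a parallel stream over thread indices, as the ParForProgramBlock diff shows. A minimal standalone sketch of the pattern (names are illustrative, not the SystemML API): each array slot is written by exactly one stream task, and the terminal forEach completes before the threads are started, so no extra synchronization is needed.

```java
import java.util.stream.IntStream;

public class ParallelInit {
    public static void main(String[] args) throws InterruptedException {
        int numThreads = 4;
        Runnable[] workers = new Runnable[numThreads];
        Thread[] threads = new Thread[numThreads];
        // initialize workers in parallel; each index is written by exactly
        // one stream task, so plain array stores are safe without locking
        IntStream.range(0, numThreads).parallel().forEach(i -> {
            workers[i] = () -> System.out.println("worker " + i + " running");
            threads[i] = new Thread(workers[i]);
            threads[i].setPriority(Thread.MAX_PRIORITY);
        });
        // start threads only after all of them are fully constructed
        for (Thread t : threads)
            t.start();
        for (Thread t : threads)
            t.join();
    }
}
```

This pays off when per-worker setup is expensive (here, lazy deep copies of update-in-place variables), since that cost is then spread across cores instead of serialized on the driver thread.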
49,738
05.01.2018 19:56:05
28,800
127cc06d96557edb77767422f66c6446d2b7beee
Serialization/deserialization for large dense blocks This patch completes the handling of large dense blocks in the serialization and deserialization code paths. Furthermore, it includes a minor extension of the dense block abstraction as well as the handling of dense blocks in block appends and initialization.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlock.java", "diff": "@@ -128,9 +128,18 @@ public abstract class DenseBlock implements Serializable\n*/\npublic abstract long countNonZeros();\n+ /**\n+ * Compute the number of non-zero values for the given row,\n+ * which potentially makes a full pass over the underlying row.\n+ *\n+ * @param r row index\n+ * @return number of non-zeros\n+ */\n+ public abstract int countNonZeros(int r);\n+\n/**\n* Compute the number of non-zero values, which potentially\n- * makes a full pass over the underlying blocks.\n+ * makes a full pass over the underlying blocks in the row range.\n*\n* @param rl row lower index\n* @param ru row upper index (exclusive)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockDRB.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockDRB.java", "diff": "@@ -112,6 +112,11 @@ public class DenseBlockDRB extends DenseBlock\nreturn nnz;\n}\n+ @Override\n+ public int countNonZeros(int r) {\n+ return UtilFunctions.computeNnz(data, r*clen, clen);\n+ }\n+\n@Override\npublic long countNonZeros(int rl, int ru, int cl, int cu) {\nlong nnz = 0;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "diff": "@@ -136,6 +136,11 @@ public class DenseBlockLDRB extends DenseBlock\nreturn nnz;\n}\n+ @Override\n+ public int countNonZeros(int r) {\n+ return UtilFunctions.computeNnz(values(r), pos(r), clen);\n+ }\n+\n@Override\npublic long countNonZeros(int rl, int ru, int cl, int cu) {\nlong nnz = 0;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -287,9 +287,9 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nallocateDenseBlock();\n//copy and compute nnz\n- double[] data = getDenseBlockValues();\n- for(int i=0, ix=0; i < r; i++, ix+=clen)\n- System.arraycopy(arr[i], 0, data, ix, arr[i].length);\n+ DenseBlock db = getDenseBlock();\n+ for(int i=0; i < r; i++)\n+ System.arraycopy(arr[i], 0, db.values(i), db.pos(i), arr[i].length);\nrecomputeNonZeros();\n}\n@@ -313,7 +313,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n//allocate or resize dense block\nallocateDenseBlock();\n- //copy and compute nnz\n+ //copy and compute nnz (guaranteed single block)\nSystem.arraycopy(arr, 0, getDenseBlockValues(), 0, arr.length);\nrecomputeNonZeros();\n}\n@@ -716,45 +716,51 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nif( that.sparse ) //SPARSE <- SPARSE\n{\n- SparseBlock b = that.sparseBlock;\n+ SparseBlock a = that.sparseBlock;\n+ SparseBlock c = sparseBlock;\nfor( int i=0; i<that.rlen; i++ )\n{\n- if( b.isEmpty(i) ) continue;\n+ if( a.isEmpty(i) ) continue;\nint aix = rowoffset+i;\n//single block append (avoid re-allocations)\n- if( !sparseBlock.isAllocated(aix) && coloffset==0 ) {\n+ if( !c.isAllocated(aix) && coloffset==0 ) {\n//note: the deep copy flag is only relevant for MCSR due to\n//shallow references of b.get(i); other block formats do not\n//require a redundant copy because b.get(i) created a new row.\n- boolean ldeep = 
(deep && b instanceof SparseBlockMCSR);\n- sparseBlock.set(aix, b.get(i), ldeep);\n+ boolean ldeep = (deep && a instanceof SparseBlockMCSR);\n+ c.set(aix, a.get(i), ldeep);\n}\nelse { //general case\n- int pos = b.pos(i);\n- int len = b.size(i);\n- int[] ix = b.indexes(i);\n- double[] val = b.values(i);\n+ int pos = a.pos(i);\n+ int len = a.size(i);\n+ int[] ix = a.indexes(i);\n+ double[] val = a.values(i);\nif( estimatedNNzsPerRow > 0 )\n- sparseBlock.allocate(aix, Math.max(estimatedNNzsPerRow, sparseBlock.size(aix)+len), clen);\n+ c.allocate(aix, Math.max(estimatedNNzsPerRow, c.size(aix)+len), clen);\nelse\n- sparseBlock.allocate(aix, sparseBlock.size(aix)+len);\n+ c.allocate(aix, c.size(aix)+len);\nfor( int j=pos; j<pos+len; j++ )\n- sparseBlock.append(aix, coloffset+ix[j], val[j]);\n+ c.append(aix, coloffset+ix[j], val[j]);\n}\n}\n}\nelse //SPARSE <- DENSE\n{\n- double[] b = that.getDenseBlockValues();\n- final int bm = that.rlen;\n- final int bn = that.clen;\n- for( int i=0, aix=rowoffset, bix=0; i<bm; i++, aix++, bix+=bn )\n- for( int j=0; j<bn; j++ ) {\n- final double bval = b[bix+j];\n+ DenseBlock a = that.getDenseBlock();\n+ SparseBlock c = getSparseBlock();\n+ final int m2 = that.rlen;\n+ final int n2 = that.clen;\n+ for( int i=0; i<m2; i++ ) {\n+ double[] avals = a.values(i);\n+ int aix = a.pos(i);\n+ int cix = rowoffset + i;\n+ for( int j=0; j<n2; j++ ) {\n+ double bval = avals[aix+j];\nif( bval != 0 ) {\n- sparseBlock.allocate(aix, estimatedNNzsPerRow, clen);\n- sparseBlock.append(aix, coloffset+j, bval);\n+ c.allocate(cix, estimatedNNzsPerRow, clen);\n+ c.append(cix, coloffset+j, bval);\n+ }\n}\n}\n}\n@@ -1813,32 +1819,30 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nif( !allocateDenseBlock(false) ) //allocate block\ndenseBlock.reset(rlen, clen);\n- int limit = rlen*clen;\n- double[] a = getDenseBlockValues();\n- if( in instanceof MatrixBlockDataInput ) //fast deserialize\n- {\n+ DenseBlock a = getDenseBlock();\n+ long nnz = 0;\n+ if( in instanceof MatrixBlockDataInput ) { //fast deserialize\nMatrixBlockDataInput mbin = (MatrixBlockDataInput)in;\n- nonZeros = mbin.readDoubleArray(limit, a);\n+ for( int i=0; i<a.numBlocks(); i++ )\n+ nnz += mbin.readDoubleArray(a.size(i), a.valuesAt(i));\n}\n- else if( in instanceof DataInputBuffer && MRJobConfiguration.USE_BINARYBLOCK_SERIALIZATION )\n- {\n+ else if( in instanceof DataInputBuffer && MRJobConfiguration.USE_BINARYBLOCK_SERIALIZATION ) {\n//workaround because sequencefile.reader.next(key, value) does not yet support serialization framework\nDataInputBuffer din = (DataInputBuffer)in;\n- FastBufferedDataInputStream mbin = null;\n- try {\n- mbin = new FastBufferedDataInputStream(din);\n- nonZeros = mbin.readDoubleArray(limit, a);\n- }\n- finally {\n- IOUtilFunctions.closeSilently(mbin);\n+ try(FastBufferedDataInputStream mbin = new FastBufferedDataInputStream(din)) {\n+ for( int i=0; i<a.numBlocks(); i++ )\n+ nnz += mbin.readDoubleArray(a.size(i), a.valuesAt(i));\n}\n}\nelse { //default deserialize\n- long nnz = 0;\n- for( int i=0; i<limit; i++ )\n- nnz += ((a[i] = in.readDouble()) != 0) ? 1 : 0;\n- nonZeros = nnz;\n+ for( int i=0; i<rlen; i++ ) {\n+ double[] avals = a.values(i);\n+ int aix = a.pos(i);\n+ for( int j=0; j<clen; j++ )\n+ nnz += ((avals[aix+j] = in.readDouble()) != 0) ? 
1 : 0;\n+ }\n}\n+ nonZeros = nnz;\n}\nprivate void readSparseBlock(DataInput in)\n@@ -1881,13 +1885,14 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nif( !allocateDenseBlock(false) ) //allocate block\ndenseBlock.reset(rlen, clen);\n- double[] a = getDenseBlockValues();\n+ DenseBlock a = getDenseBlock();\nfor(int r=0; r<rlen; r++) {\nint nr = in.readInt();\n+ double[] avals = a.values(r);\n+ int cix = a.pos(r);\nfor( int j=0; j<nr; j++ ) {\nint c = in.readInt();\n- double val = in.readDouble();\n- a[r*clen+c] = val;\n+ avals[cix+c] = in.readDouble();\n}\n}\n}\n@@ -1901,15 +1906,13 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n//adjust size and ensure reuse block is in CSR format\nallocateAndResetSparseRowsBlock(false, SparseBlock.Type.CSR);\n- if( clen > 1 ) //ULTRA-SPARSE BLOCK\n- {\n+ if( clen > 1 ) { //ULTRA-SPARSE BLOCK\n//block: read ijv-triples (ordered by row and column) via custom\n//init to avoid repeated updates of row pointers per append\nSparseBlockCSR sblockCSR = (SparseBlockCSR) sparseBlock;\nsblockCSR.initUltraSparse((int)nonZeros, in);\n}\n- else //ULTRA-SPARSE COL\n- {\n+ else { //ULTRA-SPARSE COL\n//col: read iv-pairs (should never happen since always dense)\nfor(long i=0; i<nonZeros; i++) {\nint r = in.readInt();\n@@ -1926,23 +1929,20 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nif( !allocateDenseBlock(false) ) //allocate block\ndenseBlock.reset(rlen, clen);\n- double[] a = getDenseBlockValues();\nif( clen > 1 ) { //ULTRA-SPARSE BLOCK\n//block: read ijv-triples\n+ DenseBlock a = getDenseBlock();\nfor(long i=0; i<nonZeros; i++) {\nint r = in.readInt();\nint c = in.readInt();\n- double val = in.readDouble();\n- a[r*clen+c] = val;\n+ a.set(r, c, in.readDouble());\n}\n}\nelse { //ULTRA-SPARSE COL\n//col: read iv-pairs\n- for(long i=0; i<nonZeros; i++) {\n- int r = in.readInt();\n- double val = in.readDouble();\n- a[r] = val;\n- }\n+ double[] a = getDenseBlockValues();\n+ for(long i=0; i<nonZeros; i++)\n+ a[in.readInt()] = in.readDouble();\n}\n}\n@@ -2135,13 +2135,14 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nwriteNnzInfo( out, true );\nlong wnnz = 0;\n- double[] a = getDenseBlockValues();\n- if( clen > 1 ) //ULTRA-SPARSE BLOCK\n- {\n+ if( clen > 1 ) { //ULTRA-SPARSE BLOCK\n//block: write ijv-triples\n- for(int r=0, ix=0; r<rlen; r++)\n- for(int c=0; c<clen; c++, ix++) {\n- double aval = a[ix];\n+ DenseBlock a = getDenseBlock();\n+ for( int r=0; r<rlen; r++ ) {\n+ double[] avals = a.values(r);\n+ int aix = a.pos(r);\n+ for( int c=0; c<clen; c++ ) {\n+ double aval = avals[aix+c];\nif( aval != 0 ) {\nout.writeInt(r);\nout.writeInt(c);\n@@ -2150,9 +2151,10 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n}\n}\n}\n- else //ULTRA-SPARSE COL\n- {\n+ }\n+ else { //ULTRA-SPARSE COL\n//col: write iv-pairs\n+ double[] a = getDenseBlockValues();\nfor(int r=0; r<rlen; r++) {\ndouble aval = a[r];\nif( aval != 0 ) {\n@@ -2175,12 +2177,13 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nout.writeByte( BlockType.SPARSE_BLOCK.ordinal() ); //block type\nwriteNnzInfo( out, false );\n- double[] a = getDenseBlockValues();\n- for(int r=0, ix=0; r<rlen; r++, ix+=clen) {\n- int nr = (int)denseBlock.countNonZeros(r, r+1, 0, clen);\n- out.writeInt(nr);\n+ DenseBlock a = getDenseBlock();\n+ for( int r=0; r<rlen; r++ ) {\n+ double[] avals = a.values(r);\n+ int aix = 
a.pos(r);\n+ out.writeInt(a.countNonZeros(r));\nfor( int c=0; c<clen; c++ ) {\n- double aval = a[ix+c];\n+ double aval = avals[aix+c];\nif( aval != 0 ) {\nout.writeInt(c);\nout.writeDouble(aval);\n@@ -2194,7 +2197,6 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n{\n//note: if ultrasparse, int always sufficient because nnz<rlen\n// where rlen is limited to integer\n-\nlong lrlen = (long)rlen;\nlong lclen = (long)clen;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2046] Serialization/deserialization for large dense blocks This patch completes the handling of large dense blocks in the serialization and deserialization code paths. Furthermore, it includes a minor extension of the dense block abstraction as well as the handling of dense blocks in block appends and initialization.
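The default deserialization path above now walks rows through the dense block abstraction instead of indexing a single contiguous array, so blocks larger than one Java array (>16GB) can be filled. A minimal sketch of that access pattern under a row-blocked layout; rowValues and rowOffset are hypothetical stand-ins for DenseBlock.values(i) and DenseBlock.pos(i):

```java
import java.io.DataInput;
import java.io.IOException;

public class DenseReadSketch {
    // read a dense rlen-by-clen matrix row by row, tolerating a layout
    // where each row may live in a different underlying array
    static long readDense(DataInput in, double[][] rowValues, int[] rowOffset,
            int rlen, int clen) throws IOException {
        long nnz = 0;
        for (int i = 0; i < rlen; i++) {
            double[] vals = rowValues[i]; // backing array of row i
            int pos = rowOffset[i];       // offset of row i in that array
            for (int j = 0; j < clen; j++)
                nnz += ((vals[pos + j] = in.readDouble()) != 0) ? 1 : 0;
        }
        return nnz; // caller stores this as the block's non-zero count
    }
}
```

Maintaining the non-zero count while reading, as in the patch, avoids a second full pass over the data just to recompute nnz.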
49,738
06.01.2018 12:11:13
28,800
94d0a1283dbfd72f1a2570f2782337a198c0c36e
[HOTFIX] Fix parfor result merge (maintenance of output nnz)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMerge.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMerge.java", "diff": "@@ -151,7 +151,8 @@ public abstract class ResultMerge\nprotected long computeNonZeros( MatrixObject out, List<MatrixObject> in ) {\n//sum of nnz of input (worker result) - output var existing nnz\n- return -(in.size() * out.getMatrixCharacteristics().getNonZeros())\n- + in.stream().mapToLong(m -> m.getMatrixCharacteristics().getNonZeros()).sum();\n+ long outNNZ = out.getMatrixCharacteristics().getNonZeros();\n+ return outNNZ - in.size() * outNNZ + in.stream()\n+ .mapToLong(m -> m.getMatrixCharacteristics().getNonZeros()).sum();\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Fix parfor result merge (maintenance of output nnz)
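The corrected nnz accounting keeps the original output's nonzeros and adds each worker's delta relative to it; the previous version dropped the original count entirely. A sketch with a worked example, assuming every worker result starts from the same original output: with outNNZ=10 and worker results of 12, 15, and 10 nonzeros, the merged count is 10 + (12-10) + (15-10) + (10-10) = 17.

```java
import java.util.Arrays;
import java.util.List;

public class NnzMerge {
    // merged nnz = outNNZ + sum_i (workerNNZ_i - outNNZ),
    // which is algebraically outNNZ - k*outNNZ + sum_i workerNNZ_i
    static long computeNonZeros(long outNNZ, List<Long> workerNNZ) {
        return outNNZ - workerNNZ.size() * outNNZ
            + workerNNZ.stream().mapToLong(Long::longValue).sum();
    }

    public static void main(String[] args) {
        System.out.println(computeNonZeros(10, Arrays.asList(12L, 15L, 10L))); // 17
    }
}
```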
49,738
07.01.2018 20:54:37
28,800
e9fb661731e53293444f56af7ccf15e61bfeca67
Large dense blocks in transpose reorg operations This patch adds support for large dense blocks in dense-dense, dense-sparse, and sparse-dense transpose operations. Since the general implementation against the dense block abstraction introduced ~2-5% overhead, we keep a slightly modified version of the original code path for dense-dense operations on single-block dense matrices.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java", "diff": "@@ -745,14 +745,15 @@ public class LibMatrixReorg\nfinal int n = in.clen;\nfinal int n2 = out.clen;\n- double[] a = in.getDenseBlockValues();\n- double[] c = out.getDenseBlockValues();\n+ DenseBlock a = in.getDenseBlock();\n+ DenseBlock c = out.getDenseBlock();\nif( m==1 || n==1 ) //VECTOR TRANSPOSE\n{\n//plain memcopy, in case shallow dense copy no applied\n+ //input/output guaranteed single block\nint ix = rl+cl; int len = ru+cu-ix-1;\n- System.arraycopy(a, ix, c, ix, len);\n+ System.arraycopy(a.valuesAt(0), ix, c.valuesAt(0), ix, len);\n}\nelse //MATRIX TRANSPOSE\n{\n@@ -761,17 +762,35 @@ public class LibMatrixReorg\nfinal int blocksizeJ = 128;\n//blocked execution\n- for( int bi = rl; bi<ru; bi+=blocksizeI )\n- for( int bj = cl; bj<cu; bj+=blocksizeJ )\n- {\n+ if( a.numBlocks()==1 && c.numBlocks()==1 ) { //<16GB\n+ double[] avals = a.valuesAt(0);\n+ double[] cvals = c.valuesAt(0);\n+ for( int bi = rl; bi<ru; bi+=blocksizeI ) {\nint bimin = Math.min(bi+blocksizeI, ru);\n+ for( int bj = cl; bj<cu; bj+=blocksizeJ ) {\nint bjmin = Math.min(bj+blocksizeJ, cu);\n//core transpose operation\n- for( int i=bi; i<bimin; i++ )\n- {\n+ for( int i=bi; i<bimin; i++ ) {\nint aix = i * n + bj;\nint cix = bj * n2 + i;\n- transposeRow(a, c, aix, cix, n2, bjmin-bj);\n+ transposeRow(avals, cvals, aix, cix, n2, bjmin-bj);\n+ }\n+ }\n+ }\n+ }\n+ else { //general case > 16GB (multiple blocks)\n+ for( int bi = rl; bi<ru; bi+=blocksizeI ) {\n+ int bimin = Math.min(bi+blocksizeI, ru);\n+ for( int bj = cl; bj<cu; bj+=blocksizeJ ) {\n+ int bjmin = Math.min(bj+blocksizeJ, cu);\n+ //core transpose operation\n+ for( int i=bi; i<bimin; i++ ) {\n+ double[] avals = a.values(i);\n+ int aix = a.pos(i);\n+ for( int j=bj; j<bjmin; j++ )\n+ c.set(j, i, avals[ aix+j ]);\n+ }\n+ }\n}\n}\n}\n@@ -787,13 +806,13 @@ public class LibMatrixReorg\nfinal int n2 = out.clen;\nfinal int ennz2 = (int) (in.nonZeros/m2);\n- double[] a = in.getDenseBlockValues();\n+ DenseBlock a = in.getDenseBlock();\nSparseBlock c = out.getSparseBlock();\nif( out.rlen == 1 ) //VECTOR-VECTOR\n{\nc.allocate(0, (int)in.nonZeros);\n- c.setIndexRange(0, 0, m, a, 0, m);\n+ c.setIndexRange(0, 0, m, a.valuesAt(0), 0, m);\n}\nelse //general case: MATRIX-MATRIX\n{\n@@ -802,17 +821,19 @@ public class LibMatrixReorg\nfinal int blocksizeJ = 128;\n//blocked execution\n- for( int bi = 0; bi<m; bi+=blocksizeI )\n- for( int bj = 0; bj<n; bj+=blocksizeJ )\n- {\n+ for( int bi = 0; bi<m; bi+=blocksizeI ) {\nint bimin = Math.min(bi+blocksizeI, m);\n+ for( int bj = 0; bj<n; bj+=blocksizeJ ) {\nint bjmin = Math.min(bj+blocksizeJ, n);\n//core transpose operation\n- for( int i=bi; i<bimin; i++ )\n- for( int j=bj, aix=i*n+bj; j<bjmin; j++, aix++ )\n- {\n+ for( int i=bi; i<bimin; i++ ) {\n+ double[] avals = a.values(i);\n+ int aix = a.pos(i);\n+ for( int j=bj; j<bjmin; j++ ) {\nc.allocate(j, ennz2, n2);\n- c.append(j, i, a[aix]);\n+ c.append(j, i, avals[aix+j]);\n+ }\n+ }\n}\n}\n}\n@@ -886,10 +907,9 @@ public class LibMatrixReorg\n{\nfinal int m = in.rlen;\nfinal int n = in.clen;\n- final int n2 = out.clen;\nSparseBlock a = in.getSparseBlock();\n- double[] c = out.getDenseBlockValues();\n+ DenseBlock c = out.getDenseBlock();\nif( m==1 ) //ROW VECTOR TRANSPOSE\n{\n@@ -897,8 +917,9 @@ public class LibMatrixReorg\nint alen = a.size(0); //always pos 0\nint[] aix = 
a.indexes(0);\ndouble[] avals = a.values(0);\n+ double[] cvals = c.valuesAt(0);\nfor( int j=0; j<alen; j++ )\n- c[ aix[j] ] = avals[j];\n+ cvals[aix[j]] = avals[j];\n}\nelse //MATRIX TRANSPOSE\n{\n@@ -910,44 +931,35 @@ public class LibMatrixReorg\nint[] ix = new int[blocksizeI];\n//blocked execution\n- for( int bi = rl; bi<ru; bi+=blocksizeI )\n- {\n+ for( int bi = rl; bi<ru; bi+=blocksizeI ) {\nArrays.fill(ix, 0);\n- for( int bj = 0; bj<n; bj+=blocksizeJ )\n- {\nint bimin = Math.min(bi+blocksizeI, ru);\n+ for( int bj = 0; bj<n; bj+=blocksizeJ ) {\nint bjmin = Math.min(bj+blocksizeJ, n);\n-\n//core transpose operation\n- for( int i=bi, iix=0; i<bimin; i++, iix++ )\n- {\n- if( !a.isEmpty(i) ) {\n+ for( int i=bi, iix=0; i<bimin; i++, iix++ ) {\n+ if( a.isEmpty(i) ) continue;\nint apos = a.pos(i);\nint alen = a.size(i);\nint[] aix = a.indexes(i);\ndouble[] avals = a.values(i);\nint j = ix[iix]; //last block boundary\nfor( ; j<alen && aix[apos+j]<bjmin; j++ )\n- c[ aix[apos+j]*n2+i ] = avals[ apos+j ];\n+ c.set(aix[apos+j], i, avals[apos+j]);\nix[iix] = j; //keep block boundary\n}\n}\n}\n}\n}\n- }\n- static void transposeRow( double[] a, double[] c, int aix, int cix, int n2, int len )\n- {\n+ static void transposeRow( double[] a, double[] c, int aix, int cix, int n2, int len ) {\nfinal int bn = len%8;\n-\n//compute rest (not aligned to 8-blocks)\nfor( int j=0; j<bn; j++, aix++, cix+=n2 )\nc[ cix ] = a[ aix+0 ];\n-\n//unrolled 8-blocks\n- for( int j=bn; j<len; j+=8, aix+=8, cix+=8*n2 )\n- {\n+ for( int j=bn; j<len; j+=8, aix+=8, cix+=8*n2 ) {\nc[ cix + 0*n2 ] = a[ aix+0 ];\nc[ cix + 1*n2 ] = a[ aix+1 ];\nc[ cix + 2*n2 ] = a[ aix+2 ];\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2046] Large dense blocks in transpose reorg operations This patch adds support for large dense blocks in dense-dense, dense-sparse, and sparse-dense transpose operations. Since the general implementation against the dense block abstraction introduced ~2-5% overhead, we keep a slightly modified version of the original code path for dense-dense operations on single-block dense matrices.
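For reference, a minimal sketch of the cache-blocked dense-dense transpose structure that the patch retains for single-block (<16GB) matrices; the block sizes are illustrative, and the unrolled transposeRow inner kernel of the real code is omitted. The 2D blocking keeps both the sequential reads of a and the strided writes of c within cache-sized tiles:

```java
public class BlockedTranspose {
    // transpose an m-by-n row-major matrix a into the n-by-m matrix c
    static void transpose(double[] a, double[] c, int m, int n) {
        final int blocksizeI = 128, blocksizeJ = 128; // illustrative tile sizes
        for (int bi = 0; bi < m; bi += blocksizeI) {
            int bimin = Math.min(bi + blocksizeI, m);
            for (int bj = 0; bj < n; bj += blocksizeJ) {
                int bjmin = Math.min(bj + blocksizeJ, n);
                // core transpose operation over one tile
                for (int i = bi; i < bimin; i++)
                    for (int j = bj; j < bjmin; j++)
                        c[j * m + i] = a[i * n + j];
            }
        }
    }
}
```

In the multi-block (>16GB) path, the inner write becomes a position-aware set(j, i, val) through the dense block abstraction, which is exactly the source of the ~2-5% overhead the commit message mentions.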
49,738
09.01.2018 12:49:09
28,800
4dabd2b0f171b4005284ac98bcd67f8b511df485
[HOTFIX][SYSTEMML-2064] Fix javadoc issues of modified rand operations
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDatagen.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDatagen.java", "diff": "@@ -256,7 +256,6 @@ public class LibMatrixDatagen\n*\n* @param out output matrix block\n* @param rgen random matrix generator\n- * @param nnzInBlocks number of non-zeros in blocks\n* @param bigrand Well1024a pseudo-random number generator\n* @param bSeed seed for random generator\n* @param k ?\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -5343,7 +5343,6 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n* (-Inf,+Inf).\n*\n* @param rgen random matrix generator\n- * @param nnzInBlock number of nonzeros in block\n* @param bigrand ?\n* @param bSeed seed value\n* @return matrix block\n@@ -5371,7 +5370,6 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n* (-Inf,+Inf).\n*\n* @param rgen random matrix generator\n- * @param nnzInBlock number of nonzeros in block\n* @param bigrand ?\n* @param bSeed seed value\n* @param k ?\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX][SYSTEMML-2064] Fix javadoc issues of modified rand operations
49,738
09.01.2018 20:40:31
28,800
c9977b736b38e8c2b9ec34645f062337e1273c94
[MINOR] Cleanup tensor index computation in convolution operations
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNHelper.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNHelper.java", "diff": "@@ -38,6 +38,16 @@ import org.apache.sysml.utils.Statistics;\npublic class LibMatrixDNNHelper {\n+ protected static class CellIndex3 {\n+ public int ix1;\n+ public int ix2;\n+ public int ix3;\n+ @Override\n+ public String toString() {\n+ return \"(\"+ix1+\", \"+ix2+\", \"+ix3+\")\";\n+ }\n+ }\n+\n// *********************************** low-level runtime operator selection ***********************************************\n// *********************************** based on runtime properties (sparsity, native, etc) ********************************\n// These methods help reduce branch miss predictions and instruction-cache misses.\n@@ -275,18 +285,26 @@ public class LibMatrixDNNHelper {\n// *********************************** utility methods ******************************************************\n+ protected static CellIndex3 computeTensorIndexes(int j, int H, int W) {\n+ return computeTensorIndexes(j, H, W, new CellIndex3());\n+ }\n+\n/**\n- * Computes tensor indexes from column index such that column index is equal to ret[0]*HW + ret[1]*W + ret[2]\n+ * Computes tensor indexes from a linearized column index such that\n+ * the column index is equal to ix1*NM + ix2*M + ix3\n*\n* @param j column index\n- * @param ret tensor indexes\n- * @param H second last dimension\n- * @param W last dimension\n+ * @param N second last dimension\n+ * @param M last dimension\n+ * @param ret output object for reuse\n+ * @return tensor indexes\n*/\n- static void computeTensorIndexes(int j, int [] ret, int H, int W) {\n- ret[0] = j / (H*W);\n- ret[1] = (j - ret[0]*(H*W))/W;\n- ret[2] = j % W;\n+ protected static CellIndex3 computeTensorIndexes(int j, int N, int M, CellIndex3 ret) {\n+ int tmp = j / M;\n+ ret.ix1 = tmp / N;\n+ ret.ix2 = tmp % N;\n+ ret.ix3 = j % M;\n+ return ret;\n}\n//Split a filter of size [K, CRS] into c filters of [K, RS]\n@@ -310,23 +328,21 @@ public class LibMatrixDNNHelper {\n}\n}\nelse {\n+ SparseBlock sblock = _params.input2.sparseBlock;\n+ CellIndex3 ix = new CellIndex3();\nfor(int k = 0; k < _params.K; k++) {\n- if( !_params.input2.sparseBlock.isEmpty(k) ) {\n- int [] tensorIndexes = new int[3];\n+ if( sblock.isEmpty(k) ) continue;\n// Find maxIndex\n- int apos = _params.input2.sparseBlock.pos(k);\n- int alen = _params.input2.sparseBlock.size(k);\n- int[] aix = _params.input2.sparseBlock.indexes(k);\n- double[] avals = _params.input2.sparseBlock.values(k);\n+ int apos = sblock.pos(k);\n+ int alen = sblock.size(k);\n+ int[] aix = sblock.indexes(k);\n+ double[] avals = sblock.values(k);\nfor(int j=apos; j<apos+alen; j++) {\n- computeTensorIndexes(aix[j], tensorIndexes, _params.R, _params.S);\n- if(c != tensorIndexes[0])\n+ ix = computeTensorIndexes(aix[j], _params.R, _params.S, ix);\n+ if(c != ix.ix1)\ncontinue;\n- int r = tensorIndexes[1];\n- int s = tensorIndexes[2];\n- outputArr[k*RS + r*S + s] = avals[j];\n- nnz += outputArr[k*RS + r*S + s] != 0 ? 1 : 0;\n- }\n+ outputArr[k*RS + ix.ix2*S + ix.ix3] = avals[j];\n+ nnz += outputArr[k*RS + ix.ix2*S + ix.ix3] != 0 ? 
1 : 0;\n}\n}\n}\n@@ -415,8 +431,6 @@ public class LibMatrixDNNHelper {\n* @throws DMLRuntimeException if error occurs\n*/\nstatic int getMaxIndexSparse(int p, int q, int inputOffset, int n, int c, MatrixBlock input, ConvolutionParameters params, boolean performReluBackward) throws DMLRuntimeException {\n- int [] tensorIndexes = new int[3];\n-\nint start_h = params.start_indexes_h[p];\nint end_h = params.end_indexes_h[p];\nint start_w = params.start_indexes_w[q];\n@@ -432,18 +446,19 @@ public class LibMatrixDNNHelper {\n// if start_index_h < 0 || start_index_w < 0 || end_index_h >= params.H || end_index_w >= params.W\n// input.isEmptyBlock() check is done by the caller\n- if( !input.sparseBlock.isEmpty(n) ) {\n+ CellIndex3 ix = new CellIndex3();\n+ SparseBlock sblock = input.sparseBlock;\n+ if( !sblock.isEmpty(n) ) {\n// Find maxIndex\n- int apos = input.sparseBlock.pos(n);\n- int alen = input.sparseBlock.size(n);\n- int[] aix = input.sparseBlock.indexes(n);\n- double[] avals = input.sparseBlock.values(n);\n+ int apos = sblock.pos(n);\n+ int alen = sblock.size(n);\n+ int[] aix = sblock.indexes(n);\n+ double[] avals = sblock.values(n);\nfor(int j=apos; j<apos+alen; j++) {\n- computeTensorIndexes(aix[j], tensorIndexes, params.H, params.W);\n- if(c != tensorIndexes[0])\n- continue;\n- int h = tensorIndexes[1];\n- int w = tensorIndexes[2];\n+ ix = computeTensorIndexes(aix[j], params.H, params.W, ix);\n+ if(c != ix.ix1) continue;\n+ int h = ix.ix2;\n+ int w = ix.ix3;\nif(h >= start_h && h < end_h && w >= start_w && w < end_w) {\ndouble val = performReluBackward && avals[j] < 0 ? 0 : avals[j];\nif(maxVal < val) {\n@@ -514,30 +529,25 @@ public class LibMatrixDNNHelper {\nif(!input.isEmptyBlock()) {\nint outOffset = outputN*params.C*params.H*params.W;\nint HW = params.H*params.W;\n- int [] tensorIndexes = new int[3];\n+ CellIndex3 ix = new CellIndex3();\n+ SparseBlock sblock = input.sparseBlock;\nfor(int i = 0; i < input.getNumRows(); i++) {\n- if( !input.sparseBlock.isEmpty(i) ) {\n- computeTensorIndexes(i, tensorIndexes, params.P, params.Q);\n- int p = tensorIndexes[1];\n- int q = tensorIndexes[2];\n- int tmpP = p*params.stride_h - params.pad_h;\n- int tmpQ = q*params.stride_w - params.pad_w;\n- if(tensorIndexes[0] != 0)\n- throw new DMLRuntimeException(\"Incorrect tensor indexes: \" + tensorIndexes[0] + \" != 0 <\" + p + \" \" + q + \" \" + tensorIndexes[0] + params.P + \" \" + params.Q + \">\");\n-\n- int apos = input.sparseBlock.pos(i);\n- int alen = input.sparseBlock.size(i);\n- int[] aix = input.sparseBlock.indexes(i);\n- double[] avals = input.sparseBlock.values(i);\n+ if( sblock.isEmpty(i) ) continue;\n+ ix = computeTensorIndexes(i, params.P, params.Q, ix);\n+ int tmpP = ix.ix2*params.stride_h - params.pad_h;\n+ int tmpQ = ix.ix3*params.stride_w - params.pad_w;\n+ if(ix.ix1 != 0)\n+ throw new DMLRuntimeException(\"Incorrect tensor indexes: \"+ ix + \", \" + params.P + \" \" + params.Q);\n+ int apos = sblock.pos(i);\n+ int alen = sblock.size(i);\n+ int[] aix = sblock.indexes(i);\n+ double[] avals = sblock.values(i);\nfor(int j = apos; j < apos+alen; j++) {\n- computeTensorIndexes(aix[j], tensorIndexes, params.R, params.S);\n- int c = tensorIndexes[0];\n- int r = tensorIndexes[1];\n- int s = tensorIndexes[2];\n- int h = tmpP + r;\n- int w = tmpQ + s;\n+ ix = computeTensorIndexes(aix[j], params.R, params.S, ix);\n+ int h = tmpP + ix.ix2;\n+ int w = tmpQ + ix.ix3;\nif(h >= 0 && h < params.H && w >= 0 && w < params.W) {\n- int outIndex = outOffset + c*HW + h*params.W + w;\n+ int outIndex = 
outOffset + ix.ix1*HW + h*params.W + w;\noutputArray[outIndex] += avals[j];\n}\n}\n@@ -545,7 +555,6 @@ public class LibMatrixDNNHelper {\n}\n}\n}\n- }\n// Converts input: PQ X CRS matrix and writes to 1 X CHW if inputN == 0\n// Or converts input: NPQ X CRS matrix and writes to N X CHW\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNPoolingBackwardHelper.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNPoolingBackwardHelper.java", "diff": "@@ -21,6 +21,8 @@ package org.apache.sysml.runtime.matrix.data;\nimport java.util.Arrays;\nimport java.util.concurrent.Callable;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixDNNHelper.CellIndex3;\n+\n/**\n* This class contains the set of operators used for performing pooling backward\n*/\n@@ -99,26 +101,24 @@ public class LibMatrixDNNPoolingBackwardHelper {\n@Override\npublic Long call() throws Exception {\n+ CellIndex3 ix = new CellIndex3();\ndouble[] out = output.getDenseBlockValues();\n+ SparseBlock sblock = dout.sparseBlock;\nfor(int n = _rl; n < _ru; n++) {\n- if( !dout.sparseBlock.isEmpty(n) ) {\n- int [] tensorIndexes = new int[3];\n- int apos = dout.sparseBlock.pos(n);\n- int alen = dout.sparseBlock.size(n);\n- int[] aix = dout.sparseBlock.indexes(n);\n- double[] avals = dout.sparseBlock.values(n);\n+ if( sblock.isEmpty(n) ) continue;\n+ int apos = sblock.pos(n);\n+ int alen = sblock.size(n);\n+ int[] aix = sblock.indexes(n);\n+ double[] avals = sblock.values(n);\nfor(int j = apos; j < apos+alen; j++) {\n- LibMatrixDNNHelper.computeTensorIndexes(aix[j], tensorIndexes, P, Q);\n- int c = tensorIndexes[0];\n- int p = tensorIndexes[1];\n- int q = tensorIndexes[2];\n- final int inputOffset = n*CHW + c*HW;\n- int maxIndex = LibMatrixDNNHelper.getMaxIndex(p, q, inputOffset, inputArray, _params, performReluBackward);\n+ ix = LibMatrixDNNHelper.computeTensorIndexes(aix[j], P, Q, ix);\n+ final int inputOffset = n*CHW + ix.ix1*HW;\n+ int maxIndex = LibMatrixDNNHelper.getMaxIndex(ix.ix2, ix.ix3,\n+ inputOffset, inputArray, _params, performReluBackward);\nif(maxIndex != -1)\nout[maxIndex] += avals[j];\n}\n}\n- }\n//thread-local nnz maintenance\nreturn output.recomputeNonZeros(_rl, _ru-1);\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNRotate180Helper.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNRotate180Helper.java", "diff": "*/\npackage org.apache.sysml.runtime.matrix.data;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixDNNHelper.CellIndex3;\n+\n/**\n* This class contains the different implementation of rotate180 operation\n*/\n@@ -90,21 +92,18 @@ public class LibMatrixDNNRotate180Helper {\nif( sblock==null || sblock.isEmpty(inputN) )\nreturn;\n+ CellIndex3 ix = new CellIndex3();\nint outputOffset = outputN*params.P*params.Q;\n- int [] tensorIndexes = new int[3];\nint apos = sblock.pos(inputN);\nint alen = sblock.size(inputN);\nint[] aix = sblock.indexes(inputN);\ndouble[] avals = sblock.values(inputN);\nfor(int j = apos; j < apos+alen; j++) {\n- LibMatrixDNNHelper.computeTensorIndexes(aix[j], tensorIndexes, params.P, params.Q);\n- int k = tensorIndexes[0];\n- int p = tensorIndexes[1];\n- int q = tensorIndexes[2];\n+ ix = LibMatrixDNNHelper.computeTensorIndexes(aix[j], params.P, params.Q, ix);\nif( trans )\n- out.appendValue(k, outputOffset + p*params.Q + q, avals[j]);\n+ out.appendValue(ix.ix1, outputOffset + ix.ix2*params.Q + ix.ix3, 
avals[j]);\nelse\n- out.appendValue(outputOffset + p*params.Q + q, k, avals[j]);\n+ out.appendValue(outputOffset + ix.ix2*params.Q + ix.ix3, ix.ix1, avals[j]);\n}\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Cleanup tensor index computation in convolution operations
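The refactored index computation replaces a per-call int[3] allocation with a reusable CellIndex3 object; the arithmetic itself decomposes a linearized index into three tensor coordinates. A minimal standalone sketch with a worked example: for N=3, M=4, index j=17 maps to (1, 1, 1) because 1*12 + 1*4 + 1 = 17.

```java
public class TensorIndex {
    // decompose j into (ix1, ix2, ix3) such that j == ix1*N*M + ix2*M + ix3
    static int[] computeTensorIndexes(int j, int N, int M) {
        int tmp = j / M; // j with the innermost dimension stripped off
        return new int[] { tmp / N, tmp % N, j % M };
    }

    public static void main(String[] args) {
        int[] ix = computeTensorIndexes(17, 3, 4);
        System.out.println(ix[0] + ", " + ix[1] + ", " + ix[2]); // 1, 1, 1
    }
}
```

Note the cleanup also computes j / M only once (via tmp), whereas the old version recomputed the product H*W twice per call.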
49,738
10.01.2018 17:12:31
28,800
0a9c91a63c4290f6edc3d8107e4338be4de679fb
Large dense blocks in cache-conscious mm operators This patch finalizes the work on supporting large dense blocks >16GB in matrix multiplication operators, handling all remaining cache-conscious implementations, which required detailed microbenchmarks to ensure this generalization did not impact performance.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlock.java", "diff": "@@ -104,6 +104,17 @@ public abstract class DenseBlock implements Serializable\n*/\npublic abstract boolean isContiguous();\n+ /**\n+ * Indicates if the dense block has a single\n+ * underlying block for the given row range.\n+ *\n+ * @param rl row lower index\n+ * @param ru row upper index (inclusive)\n+ * @return true if single block in row range\n+ */\n+ public abstract boolean isContiguous(int rl, int ru);\n+\n+\n/**\n* Get the length of the dense block as the product\n* of row and column dimensions.\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockDRB.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockDRB.java", "diff": "@@ -92,6 +92,11 @@ public class DenseBlockDRB extends DenseBlock\nreturn true;\n}\n+ @Override\n+ public boolean isContiguous(int rl, int ru) {\n+ return true;\n+ }\n+\n@Override\npublic long size() {\nreturn rlen * clen;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/DenseBlockLDRB.java", "diff": "@@ -115,6 +115,11 @@ public class DenseBlockLDRB extends DenseBlock\nreturn rlen <= blen;\n}\n+ @Override\n+ public boolean isContiguous(int rl, int ru) {\n+ return isContiguous() || index(rl)==index(ru);\n+ }\n+\n@Override\npublic long size() {\nreturn (long)rlen * clen;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java", "diff": "@@ -1049,33 +1049,43 @@ public class LibMatrixMult\nprivate static void matrixMultDenseDenseMMShortLHS(DenseBlock a, DenseBlock b, DenseBlock c, int m, int n, int cd, int rl, int ru)\nthrows DMLRuntimeException\n{\n- //TODO robustness large blocks (perf critical)\n- double[] avals = a.valuesAt(0);\n- double[] bvals = b.valuesAt(0);\n- double[] cvals = c.valuesAt(0);\n-\n//cache-conscious parallelization over rows in rhs matrix\nfinal int kn = (ru-rl)%4;\n//rest not aligned to blocks of 2 rows\n- for( int i=0, aix=0, cix=0; i<m; i++, aix+=cd, cix+=n )\n- for( int k=rl, bix=rl*n; k<rl+kn; k++, bix+=n )\n+ for( int i=0; i<m; i++ ) {\n+ double[] avals = a.values(i), cvals = c.values(i);\n+ int aix = a.pos(i), cix = c.pos(i);\n+ for( int k=rl; k<rl+kn; k++ )\nif( avals[aix+k] != 0 )\n- vectMultiplyAdd(avals[aix+k], bvals, cvals, bix, cix, n);\n+ vectMultiplyAdd(avals[aix+k], b.values(k), cvals, b.pos(k), cix, n);\n+ }\nfinal int blocksizeK = 48;\nfinal int blocksizeJ = 1024;\n//blocked execution\n- for( int bk = rl+kn; bk < ru; bk+=blocksizeK )\n- for( int bj = 0, bkmin = Math.min(ru, bk+blocksizeK); bj < n; bj+=blocksizeJ ) {\n+ for( int bk = rl+kn; bk < ru; bk+=blocksizeK ) {\n+ int bkmin = Math.min(ru, bk+blocksizeK);\n+ for( int bj = 0; bj < n; bj+=blocksizeJ ) {\n//compute blocks of 4 rows in rhs w/ IKJ\nint bjlen = Math.min(n, bj+blocksizeJ)-bj;\n- for( int i=0, aix=0, cix=bj; i<m; i++, aix+=cd, cix+=n )\n- for( int k=bk, bix=bk*n+bj; k<bkmin; k+=4, bix+=4*n ) {\n+ for( int i=0; i<m; i++ ) {\n+ double[] avals = a.values(i), cvals = c.values(i);\n+ int aix = a.pos(i), cix = c.pos(i, bj);\n+ if( b.isContiguous(bk, bkmin-1) ) {\n+ double[] bvals = b.values(bk);\n+ for( int k=bk, 
bix=b.pos(bk, bj); k<bkmin; k+=4, bix+=4*n )\nvectMultiplyAdd4(avals[aix+k], avals[aix+k+1], avals[aix+k+2], avals[aix+k+3],\nbvals, cvals, bix, bix+n, bix+2*n, bix+3*n, cix, bjlen);\n}\n+ else {\n+ for( int k=rl; k<rl+kn; k++ )\n+ if( avals[aix+k] != 0 )\n+ vectMultiplyAdd(avals[aix+k], b.values(k), cvals, b.pos(k), cix, n);\n+ }\n+ }\n+ }\n}\n}\n@@ -1095,11 +1105,6 @@ public class LibMatrixMult\nprivate static void matrixMultDenseDenseMM(DenseBlock a, DenseBlock b, DenseBlock c, int n, int cd, int rl, int ru, int cl, int cu)\nthrows DMLRuntimeException\n{\n- //TODO robustness large blocks (perf critical)\n- double[] avals = a.valuesAt(0);\n- double[] bvals = b.valuesAt(0);\n- double[] cvals = c.valuesAt(0);\n-\n//1) Unrolled inner loop (for better instruction-level parallelism)\n//2) Blocked execution (for less cache trashing in parallel exec)\n//3) Asymmetric block sizes (for less misses in inner loop, yet blocks in L1/L2)\n@@ -1121,14 +1126,16 @@ public class LibMatrixMult\nint bjlen = Math.min(cu, bj+blocksizeJ)-bj;\n//core sub block matrix multiplication\n- for( int i = bi; i < bimin; i++)\n- {\n- int aixi = i * cd + bk; //start index on a\n- int cixj = i * n + bj; //scan index on c\n+ for( int i = bi; i < bimin; i++) {\n+ double[] avals = a.values(i), cvals = c.values(i);\n+ int aixi = a.pos(i, bk), cixj = c.pos(i, bj);\n+\n+ if( b.isContiguous(bk, bkmin-1) ) {\n+ double[] bvals = b.values(bk);\n+ int bkpos = b.pos(bk, bj);\n//determine nnz of a (for sparsity-aware skipping of rows)\n- int knnz = copyNonZeroElements(avals, aixi, bk, bj, n, ta, tbi, bklen);\n- //if( knnz > 0 ) //for skipping empty rows\n+ int knnz = copyNonZeroElements(avals, aixi, bkpos, bj, n, ta, tbi, bklen);\n//rest not aligned to blocks of 4 rows\nfinal int bn = knnz % 4;\n@@ -1144,6 +1151,14 @@ public class LibMatrixMult\ntbi[k], tbi[k+1], tbi[k+2], tbi[k+3], cixj, bjlen );\n}\n}\n+ else {\n+ for( int k = bk; k<bkmin; k++ ) {\n+ if( avals[k] != 0 )\n+ vectMultiplyAdd( avals[k], b.values(k),\n+ cvals, b.pos(k, bj), cixj, bjlen );\n+ }\n+ }\n+ }\n}\n}\n@@ -1339,35 +1354,41 @@ public class LibMatrixMult\nprivate static void matrixMultSparseDenseMMShortLHS(SparseBlock a, DenseBlock b, DenseBlock c, int n, int cd, int rl, int ru)\nthrows DMLRuntimeException\n{\n- //TODO robustness large blocks (perf critical)\n- double[] bvals = b.valuesAt(0);\n- double[] cvals = c.valuesAt(0);\n-\nint arlen = a.numRows();\n- for( int i=0, cix=0; i<arlen; i++, cix+=n ) {\n+ for( int i=0; i<arlen; i++ ) {\nif( a.isEmpty(i) ) continue;\nint apos = a.pos(i);\nint alen = a.size(i);\nint[] aix = a.indexes(i);\ndouble[] avals = a.values(i);\n+ double[] cvals = c.values(i);\n+ int cix = c.pos(i);\nint k1 = (rl==0) ? 0 : a.posFIndexGTE(i, rl);\nk1 = (k1>=0) ? apos+k1 : apos+alen;\nint k2 = (ru==cd) ? alen : a.posFIndexGTE(i, ru);\nk2 = (k2>=0) ? 
apos+k2 : apos+alen;\n+ if( b.isContiguous(aix[k1], aix[k2-1]) ) {\n+ double[] bvals = b.values(aix[k1]);\n+ int base = aix[k1]*n - b.pos(aix[k1]);\n//rest not aligned to blocks of 4 rows\nfinal int bn = (k2-k1) % 4;\nswitch( bn ){\n- case 1: vectMultiplyAdd(avals[k1], bvals, cvals, aix[k1]*n, cix, n); break;\n- case 2: vectMultiplyAdd2(avals[k1],avals[k1+1], bvals, cvals, aix[k1]*n, aix[k1+1]*n, cix, n); break;\n- case 3: vectMultiplyAdd3(avals[k1],avals[k1+1],avals[k1+2], bvals, cvals, aix[k1]*n, aix[k1+1]*n, aix[k1+2]*n, cix, n); break;\n+ case 1: vectMultiplyAdd(avals[k1], bvals, cvals, aix[k1]*n-base, cix, n); break;\n+ case 2: vectMultiplyAdd2(avals[k1],avals[k1+1], bvals, cvals, aix[k1]*n-base, aix[k1+1]*n-base, cix, n); break;\n+ case 3: vectMultiplyAdd3(avals[k1],avals[k1+1],avals[k1+2], bvals, cvals, aix[k1]*n-base, aix[k1+1]*n-base, aix[k1+2]*n-base, cix, n); break;\n}\n//compute blocks of 4 rows (core inner loop)\nfor( int k = k1+bn; k<k2; k+=4 ) {\nvectMultiplyAdd4( avals[k], avals[k+1], avals[k+2], avals[k+3], bvals, cvals,\n- aix[k]*n, aix[k+1]*n, aix[k+2]*n, aix[k+3]*n, cix, n );\n+ aix[k]*n-base, aix[k+1]*n-base, aix[k+2]*n-base, aix[k+3]*n-base, cix, n );\n+ }\n+ }\n+ else {\n+ for( int k = k1; k<k2; k++ )\n+ vectMultiplyAdd( avals[k], b.values(aix[k]), cvals, b.pos(aix[k]), cix, n );\n}\n}\n}\n@@ -1375,10 +1396,6 @@ public class LibMatrixMult\nprivate static void matrixMultSparseDenseMMSkinnyRHS(SparseBlock a, DenseBlock b, DenseBlock c, int n, int rl, int ru)\nthrows DMLRuntimeException\n{\n- //TODO robustness large blocks (perf critical)\n- double[] bvals = b.valuesAt(0);\n- double[] cvals = c.valuesAt(0);\n-\n//no blocking since b and c fit into cache anyway\nfor( int i=rl, cix=rl*n; i<ru; i++, cix+=n ) {\nif( a.isEmpty(i) ) continue;\n@@ -1386,11 +1403,13 @@ public class LibMatrixMult\nint alen = a.size(i);\nint[] aix = a.indexes(i);\ndouble[] avals = a.values(i);\n+ double[] cvals = c.values(i);\n//rest not aligned to blocks of 4 rows\n- int bn = alen%4;\n+ int bn = b.isContiguous() ? 
alen%4 : alen;\nfor( int k=apos; k<apos+bn; k++ )\n- vectMultiplyAdd(avals[k], bvals, cvals, aix[k]*n, cix, n);\n+ vectMultiplyAdd(avals[k], b.values(aix[k]), cvals, b.pos(aix[k]), cix, n);\n//compute blocks of 4 rows (core inner loop)\n+ double[] bvals = b.valuesAt(0); //only for contiguous\nfor( int k=apos+bn; k<apos+alen; k+=4 )\nvectMultiplyAdd4( avals[k], avals[k+1], avals[k+2], avals[k+3], bvals, cvals,\naix[k]*n, aix[k+1]*n, aix[k+2]*n, aix[k+3]*n, cix, n );\n@@ -1400,10 +1419,6 @@ public class LibMatrixMult\nprivate static void matrixMultSparseDenseMM(SparseBlock a, DenseBlock b, DenseBlock c, int n, int cd, long xsp, int rl, int ru)\nthrows DMLRuntimeException\n{\n- //TODO robustness large blocks (perf critical)\n- double[] bvals = b.valuesAt(0);\n- double[] cvals = c.valuesAt(0);\n-\n//blocksizes to fit blocks of B (dense) and several rows of A/C in common L2 cache size,\n//while blocking A/C for L1/L2 yet allowing long scans (2 pages) in the inner loop over j\n//in case of almost ultra-sparse matrices, we cannot ensure the blocking for the rhs and\n@@ -1423,19 +1438,22 @@ public class LibMatrixMult\nint bjlen = Math.min(n, bj+blocksizeJ)-bj;\n//core sub block matrix multiplication\n- for( int i=bi, cix=bi*n+bj; i<bimin; i++, cix+=n ) {\n- if( !a.isEmpty(i) ) {\n+ for( int i=bi; i<bimin; i++ ) {\n+ if( a.isEmpty(i) ) continue;\nint apos = a.pos(i);\nint alen = a.size(i);\nint[] aix = a.indexes(i);\ndouble[] avals = a.values(i);\n+ double[] cvals = c.values(i);\n+ int cix = c.pos(i, bj);\nint k = curk[i-bi] + apos;\n//rest not aligned to blocks of 4 rows\n- int bn = alen%4;\n+ int bn = b.isContiguous() ? alen%4 : alen;\nfor( ; k<apos+bn && aix[k]<bkmin; k++ )\n- vectMultiplyAdd(avals[k], bvals, cvals, aix[k]*n+bj, cix, bjlen);\n+ vectMultiplyAdd(avals[k], b.values(aix[k]), cvals, b.pos(aix[k],bj), cix, bjlen);\n//compute blocks of 4 rows (core inner loop), allowed to exceed bkmin\n+ double[] bvals = b.valuesAt(0); //only for contiguous\nfor( ; k<apos+alen && aix[k]<bkmin; k+=4 )\nvectMultiplyAdd4( avals[k], avals[k+1], avals[k+2], avals[k+3], bvals, cvals,\naix[k]*n+bj, aix[k+1]*n+bj, aix[k+2]*n+bj, aix[k+3]*n+bj, cix, bjlen );\n@@ -1447,7 +1465,6 @@ public class LibMatrixMult\n}\n}\n}\n- }\nprivate static void matrixMultSparseSparse(MatrixBlock m1, MatrixBlock m2, MatrixBlock ret, boolean pm2, int rl, int ru)\nthrows DMLRuntimeException\n@@ -1765,10 +1782,6 @@ public class LibMatrixMult\n}\nelse //MATRIX\n{\n- //TODO robustness large blocks (perf critical)\n- double[] avals = a.valuesAt(0);\n- double[] cvals = c.valuesAt(0);\n-\n//1) Unrolled inner loop (for better instruction-level parallelism)\n//2) Blocked execution (for less cache trashing in parallel exec)\n//3) Asymmetric block sizes (for less misses in inner loop, yet blocks in L1/L2)\n@@ -1796,8 +1809,12 @@ public class LibMatrixMult\n//core sub block matrix multiplication\nfor( int i = bi; i < bimin; i++)\n{\n- int aixi = bk*n +i; //start index on a (logical t(X))\n- int cixj = i * nx + bj; //scan index on c\n+ double[] cvals = c.values(i);\n+ int cixj = c.pos(i, bj);\n+\n+ if( a.isContiguous(bk, bkmin-1) ) {\n+ double[] avals = a.values(bk);\n+ int aixi = a.pos(bk, i);\n//determine nnz of a (for sparsity-aware skipping of rows)\nint knnz = copyNonZeroElements(avals, aixi, bk, bj, n, nx, ta, tbi, bklen);\n@@ -1816,21 +1833,31 @@ public class LibMatrixMult\ntbi[k], tbi[k+1], tbi[k+2], tbi[k+3], cixj, bjlen );\n}\n}\n+ else {\n+ for( int k = bk; k<bkmin; k++ ) {\n+ double[] avals = a.values(bk);\n+ int aix = a.pos(bk, 
i);\n+ if( avals[aix] != 0 )\n+ vectMultiplyAdd( avals[aix], a.values(k),\n+ cvals, a.pos(k, bj), cixj, bjlen );\n+ }\n}\n}\n}\n- else\n- {\n- //TODO robustness large blocks (perf critical)\n- double[] avals = a.valuesAt(0);\n- double[] cvals = c.valuesAt(0);\n-\n- for(int k = 0, ix1 = 0; k < m; k++, ix1+=n)\n- for(int i = rl, ix3 = 0; i < ru; i++, ix3+=n) {\n- double val = avals[ ix1+i ];\n+ }\n+ }\n+ else {\n+ for( int k = 0; k < m; k++ ) {\n+ double[] avals = a.values(k);\n+ int aix = a.pos(k);\n+ for( int i = rl; i < ru; i++ ) {\n+ double[] cvals = c.values(i);\n+ int cix = c.pos(i);\n+ double val = avals[ aix+i ];\nif( val != 0 ) {\nfor(int j = i; j < n; j++) //from i due to symmetry\n- cvals[ ix3+j ] += val * avals[ ix1+j ];\n+ cvals[ cix+j ] += val * avals[ aix+j ];\n+ }\n}\n}\n}\n@@ -1846,10 +1873,6 @@ public class LibMatrixMult\n}\nelse //MATRIX\n{\n- //TODO robustness large blocks (perf critical)\n- double[] avals = a.valuesAt(0);\n- double[] cvals = c.valuesAt(0);\n-\n//algorithm: scan c, foreach ci,j: scan row of a and t(a) (IJK)\n//1) Unrolled inner loop, for better ILP\n@@ -1865,27 +1888,29 @@ public class LibMatrixMult\nfor( int bj = bi, bklen = Math.min(blocksizeK, n-bk); bj<m; bj+=blocksizeIJ ) {\n//core tsmm block operation (15x15 vectors of length 1K elements)\nint bjmin = Math.min(m, bj+blocksizeIJ);\n- for(int i=bi, ix1=bi*n+bk, ix3=bi*m; i<bimin; i++, ix1+=n, ix3+=m) {\n+ for( int i=bi; i<bimin; i++ ) {\nfinal int bjmax = Math.max(i,bj); //from i due to symmetry\n- for(int j=bjmax, ix2=bjmax*n+bk; j <bjmin; j++, ix2+=n)\n- cvals[ ix3+j ] += dotProduct(avals, avals, ix1, ix2, bklen);\n+ double[] avals = a.values(i), cvals = c.values(i);\n+ int aix = a.pos(i, bk), cix = c.pos(i);\n+ for(int j=bjmax; j <bjmin; j++)\n+ cvals[ cix+j ] += dotProduct(avals, a.values(j), aix, a.pos(j, bk), bklen);\n}\n}\n}\n}\nelse\n{\n- //TODO robustness large blocks (perf critical)\n- double[] avals = a.valuesAt(0);\n- double[] cvals = c.valuesAt(0);\n-\n- for(int i = rl, ix1 = 0, ix3 = 0; i < ru; i++, ix1+=n, ix3+=m)\n- for(int j = i, ix2 = i*n; j < m; j++, ix2+=n) //from i due to symmetry\n- {\n+ for( int i = rl; i < ru; i++ ) {\n+ double[] avals1 = a.values(i), cvals = c.values(i);\n+ int aix1 = a.pos(i), cix = c.pos(i);\n+ for(int j = i; j < m; j++) { //from i due to symmetry\n+ double[] avals2 = a.values(j);\n+ int aix2 = a.pos(j);\ndouble val = 0;\nfor(int k = 0; k < n; k++)\n- val += avals[ ix1+k ] * avals[ix2+k];\n- cvals[ ix3+j ] = val;\n+ val += avals1[aix1+k] * avals2[aix2+k];\n+ cvals[cix+j] = val;\n+ }\n}\n}\n}\n@@ -1899,13 +1924,9 @@ public class LibMatrixMult\nSparseBlock a = m1.sparseBlock;\nDenseBlock c = ret.getDenseBlock();\nint m = m1.rlen;\n- int n = m1.clen;\nif( leftTranspose ) // t(X)%*%X\n{\n- //TODO robustness large blocks (perf critical)\n- double[] cvals = c.valuesAt(0);\n-\n//only general case (because vectors always dense)\n//algorithm: scan rows, foreach row self join (KIJ)\nif( LOW_LEVEL_OPTIMIZATION )\n@@ -1922,10 +1943,9 @@ public class LibMatrixMult\nint len = apos + alen;\nfor(int i = rlix; i < len && aix[i]<ru; i++) {\ndouble val = avals[i];\n- if( val != 0 ) {\n- int ix2 = aix[i]*n;\n- vectMultiplyAdd(val, avals, cvals, aix, i, ix2, len-i);\n- }\n+ if( val != 0 )\n+ vectMultiplyAdd(val, avals, c.values(aix[i]),\n+ aix, i, c.pos(aix[i]), len-i);\n}\n}\n}\n@@ -1941,9 +1961,12 @@ public class LibMatrixMult\nrlix = (rlix>=0) ? 
apos+rlix : apos+alen;\nfor(int i = rlix; i < apos+alen && aix[i]<ru; i++) {\ndouble val = avals[i];\n- if( val != 0 )\n- for(int j = i, ix2 = aix[i]*n; j < apos+alen; j++)\n- cvals[ix2+aix[j]] += val * avals[j];\n+ if( val != 0 ) {\n+ double[] cvals = c.values(aix[i]);\n+ int cix = c.pos(aix[i]);\n+ for(int j = i; j < apos+alen; j++)\n+ cvals[cix+aix[j]] += val * avals[j];\n+ }\n}\n}\n}\n@@ -1960,14 +1983,10 @@ public class LibMatrixMult\n}\nelse //MATRIX\n{\n- //TODO robustness large blocks (perf critical)\n- double[] cvals = c.valuesAt(0);\n-\n//note: reorg to similar layout as t(X)%*%X because faster than\n//direct computation with IJK (no dependencies/branches in inner loop)\n//see preprocessMatrixMultTransposeSelf m1<-tmpBlock\nm = m1.clen;\n- n = m1.rlen;\n//algorithm: scan rows, foreach row self join (KIJ)\nif( LOW_LEVEL_OPTIMIZATION )\n@@ -1981,13 +2000,11 @@ public class LibMatrixMult\ndouble[] avals = a.values(r);\nint rlix = (rl==0) ? 0 : a.posFIndexGTE(r, rl);\nrlix = (rlix>=0) ? apos+rlix : apos+alen;\n-\nfor(int i = rlix; i < apos+alen && aix[i]<ru; i++) {\ndouble val = avals[i];\n- if( val != 0 ) {\n- int ix2 = aix[i]*m;\n- vectMultiplyAdd(val, avals, cvals, aix, i, ix2, alen-i);\n- }\n+ if( val != 0 )\n+ vectMultiplyAdd(val, avals, c.values(aix[i]),\n+ aix, i, c.pos(aix[i]), alen-i);\n}\n}\n}\n@@ -2003,9 +2020,12 @@ public class LibMatrixMult\nrlix = (rlix>=0) ? apos+rlix : apos+alen;\nfor(int i = rlix; i < apos+alen && aix[i]<ru; i++) {\ndouble val = avals[i];\n- if( val != 0 )\n- for(int j = i, ix2 = aix[i]*m; j < alen; j++)\n- cvals[ix2+aix[j]] += val * avals[j];\n+ if( val != 0 ) {\n+ double[] cvals = c.values(aix[i]);\n+ int cix = c.pos(aix[i]);\n+ for( int j = i; j < alen; j++ )\n+ cvals[cix+aix[j]] += val * avals[j];\n+ }\n}\n}\n}\n@@ -3752,13 +3772,13 @@ public class LibMatrixMult\nreturn ret;\n}\n- private static int copyNonZeroElements( double[] a, final int aixi, final int bk, final int bj, final int n, double[] tmpa, int[] tmpbi, final int bklen )\n+ private static int copyNonZeroElements( double[] a, final int aixi, final int bixk, final int bj, final int n, double[] tmpa, int[] tmpbi, final int bklen )\n{\nint knnz = 0;\nfor( int k = 0; k < bklen; k++ )\nif( a[ aixi+k ] != 0 ) {\ntmpa[ knnz ] = a[ aixi+k ];\n- tmpbi[ knnz ] = (bk+k) * n + bj; //scan index on b\n+ tmpbi[ knnz ] = bixk + k*n;\nknnz ++;\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2046] Large dense blocks in cache-conscious mm operators This patch finalizes the work on supporting large dense blocks >16GB in matrix multiplication operators, handling all remaining cache-conscious implementations, which required detailed microbenchmarks to ensure this generalization did not impact performance.
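The cache-conscious kernels above share a common structure: blocked loops over k and j with sparsity-aware skipping of zero entries, now routed through values(i)/pos(i) so each row may live in a different physical block. A minimal single-array sketch of that blocked IKJ structure, with illustrative block sizes and without the 4-way loop unrolling of the real code:

```java
public class BlockedMM {
    // dense matrix multiply C += A(m x k) %*% B(k x n), IKJ loop order,
    // blocked over k (rows of B in L2) and j (long scans in the inner loop)
    static void matmult(double[] a, double[] b, double[] c, int m, int k, int n) {
        final int blocksizeK = 48, blocksizeJ = 1024; // illustrative tiles
        for (int bk = 0; bk < k; bk += blocksizeK) {
            int bkmin = Math.min(k, bk + blocksizeK);
            for (int bj = 0; bj < n; bj += blocksizeJ) {
                int bjmin = Math.min(n, bj + blocksizeJ);
                for (int i = 0; i < m; i++) {
                    for (int l = bk; l < bkmin; l++) {
                        double aval = a[i * k + l];
                        if (aval == 0) continue; // sparsity-aware skipping
                        for (int j = bj; j < bjmin; j++)
                            c[i * n + j] += aval * b[l * n + j];
                    }
                }
            }
        }
    }
}
```

The patch keeps this tight single-array form wherever isContiguous() confirms one backing block, and only falls back to per-row values()/pos() lookups across block boundaries, which is how it avoids a measurable slowdown on the common <16GB case.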
49,738
11.01.2018 20:46:06
28,800
fe5ed59474e14214a14ce0b5dbd5f1d162821a62
[MINOR] Reduced recompilation overhead and various cleanups This minor patch reduces the recompilation overhead by (1) better memoization across the size expressions of an operation, and (2) the removal of unnecessary list copies in specific rewrites and operator cloning.
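The memoization improvement shares one memo table across the several size expressions an operator evaluates (for example, its rows and cols expressions), so subtrees common to both are computed only once. A minimal sketch under that assumption, with a toy expression DAG standing in for the real HOP nodes:

```java
import java.util.HashMap;
import java.util.Map;

public class SizeExprMemo {
    // node of a simple size-expression DAG; ids identify shared subtrees
    static class Node {
        final long id; final long literal; final Node left, right;
        Node(long id, long literal) { this.id = id; this.literal = literal; left = right = null; }
        Node(long id, Node l, Node r) { this.id = id; this.literal = 0; left = l; right = r; }
    }

    // evaluate with memoization keyed by node id; sharing the memo across
    // the rows and cols expressions avoids re-evaluating common subtrees
    static long eval(Node n, Map<Long, Long> memo) {
        Long cached = memo.get(n.id);
        if (cached != null) return cached;
        long v = (n.left == null) ? n.literal : eval(n.left, memo) + eval(n.right, memo);
        memo.put(n.id, v);
        return v;
    }

    public static void main(String[] args) {
        Node shared = new Node(1, 7);
        Node rows = new Node(2, shared, new Node(3, 5)); // 7 + 5
        Node cols = new Node(4, shared, new Node(5, 3)); // 7 + 3, reuses node 1
        Map<Long, Long> memo = new HashMap<>();          // one table for both
        System.out.println(eval(rows, memo) + " " + eval(cols, memo)); // 12 10
    }
}
```

This mirrors the diff below, where refreshRowsParameterInformation and refreshColsParameterInformation now accept a caller-provided HashMap instead of each allocating a fresh one.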
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/Hop.java", "new_path": "src/main/java/org/apache/sysml/hops/Hop.java", "diff": "@@ -1649,87 +1649,72 @@ public abstract class Hop implements ParseInfo\nreturn ret;\n}\n- public void refreshRowsParameterInformation( Hop input, LocalVariableMap vars )\n- {\n- long size = computeSizeInformation(input, vars);\n-\n//always set the computed size not just if known (positive) in order to allow\n//recompile with unknowns to reset sizes (otherwise potential for incorrect results)\n- setDim1( size );\n+\n+ public void refreshRowsParameterInformation( Hop input, LocalVariableMap vars ) {\n+ setDim1(computeSizeInformation(input, vars));\n}\n- public void refreshColsParameterInformation( Hop input, LocalVariableMap vars )\n- {\n- long size = computeSizeInformation(input, vars);\n+ public void refreshRowsParameterInformation( Hop input, LocalVariableMap vars, HashMap<Long,Long> memo ) {\n+ setDim1(computeSizeInformation(input, vars, memo));\n+ }\n- //always set the computed size not just if known (positive) in order to allow\n- //recompile with unknowns to reset sizes (otherwise potential for incorrect results)\n- setDim2( size );\n+ public void refreshColsParameterInformation( Hop input, LocalVariableMap vars ) {\n+ setDim2(computeSizeInformation(input, vars));\n}\n- public long computeSizeInformation( Hop input, LocalVariableMap vars )\n- {\n- long ret = -1;\n+ public void refreshColsParameterInformation( Hop input, LocalVariableMap vars, HashMap<Long,Long> memo ) {\n+ setDim2(computeSizeInformation(input, vars, memo));\n+ }\n- try\n+ public long computeSizeInformation( Hop input, LocalVariableMap vars ) {\n+ return computeSizeInformation(input, vars, new HashMap<Long,Long>());\n+ }\n+\n+ public long computeSizeInformation( Hop input, LocalVariableMap vars, HashMap<Long,Long> memo )\n{\n- long tmp = OptimizerUtils.rEvalSimpleLongExpression(input, new HashMap<Long,Long>(), vars);\n+ long ret = -1;\n+ try {\n+ long tmp = OptimizerUtils.rEvalSimpleLongExpression(input, memo, vars);\nif( tmp!=Long.MAX_VALUE )\nret = tmp;\n}\n- catch(Exception ex)\n- {\n+ catch(Exception ex) {\nLOG.error(\"Failed to compute size information.\", ex);\nret = -1;\n}\n-\nreturn ret;\n}\n- public double computeBoundsInformation( Hop input )\n- {\n+ public double computeBoundsInformation( Hop input ) {\ndouble ret = Double.MAX_VALUE;\n-\n- try\n- {\n+ try {\nret = OptimizerUtils.rEvalSimpleDoubleExpression(input, new HashMap<Long, Double>());\n}\n- catch(Exception ex)\n- {\n+ catch(Exception ex) {\nLOG.error(\"Failed to compute bounds information.\", ex);\nret = Double.MAX_VALUE;\n}\n-\nreturn ret;\n}\n- /**\n- * Computes bound information for sequence if possible, otherwise returns\n- * Double.MAX_VALUE\n- *\n- * @param input high-level operator\n- * @param vars local variable map\n- * @return bounds information\n- */\n- public double computeBoundsInformation( Hop input, LocalVariableMap vars )\n- {\n- double ret = Double.MAX_VALUE;\n-\n- try\n- {\n- ret = OptimizerUtils.rEvalSimpleDoubleExpression(input, new HashMap<Long, Double>(), vars);\n+ public double computeBoundsInformation( Hop input, LocalVariableMap vars ) {\n+ return computeBoundsInformation(input, vars, new HashMap<Long, Double>());\n+ }\n+ public double computeBoundsInformation( Hop input, LocalVariableMap vars, HashMap<Long, Double> memo ) {\n+ double ret = Double.MAX_VALUE;\n+ try {\n+ ret = OptimizerUtils.rEvalSimpleDoubleExpression(input, memo, vars);\n}\n- catch(Exception ex)\n- {\n+ 
catch(Exception ex) {\nLOG.error(\"Failed to compute bounds information.\", ex);\nret = Double.MAX_VALUE;\n}\n-\nreturn ret;\n}\n-\n/**\n* Compute worst case estimate for size expression based on worst-case\n* statistics of inputs. Limited set of supported operations in comparison\n@@ -1860,8 +1845,8 @@ public abstract class Hop implements ParseInfo\n_updateType = that._updateType;\n//no copy of lops (regenerated)\n- _parent = new ArrayList<>();\n- _input = new ArrayList<>();\n+ _parent = new ArrayList<>(_parent.size());\n+ _input = new ArrayList<>(_input.size());\n_lops = null;\n_etype = that._etype;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/recompile/Recompiler.java", "new_path": "src/main/java/org/apache/sysml/hops/recompile/Recompiler.java", "diff": "@@ -740,31 +740,22 @@ public class Recompiler\nreturn ret;\n}\n- private static Hop rDeepCopyHopsDag( Hop hops, HashMap<Long,Hop> memo )\n+ private static Hop rDeepCopyHopsDag( Hop hop, HashMap<Long,Hop> memo )\nthrows CloneNotSupportedException\n{\n- Hop ret = memo.get(hops.getHopID());\n+ Hop ret = memo.get(hop.getHopID());\n//create clone if required\n- if( ret == null )\n- {\n- ret = (Hop) hops.clone();\n- ArrayList<Hop> tmp = new ArrayList<>();\n+ if( ret == null ) {\n+ ret = (Hop) hop.clone();\n- //create new childs\n- for( Hop in : hops.getInput() )\n- {\n- Hop newIn = rDeepCopyHopsDag(in, memo);\n- tmp.add(newIn);\n+ //create new childs and modify references\n+ for( Hop in : hop.getInput() ) {\n+ Hop tmp = rDeepCopyHopsDag(in, memo);\n+ ret.getInput().add(tmp);\n+ tmp.getParent().add(ret);\n}\n- //modify references of childs\n- for( Hop in : tmp )\n- {\n- ret.getInput().add(in);\n- in.getParent().add(ret);\n- }\n-\n- memo.put(hops.getHopID(), ret);\n+ memo.put(hop.getHopID(), ret);\n}\nreturn ret;\n@@ -1548,8 +1539,9 @@ public class Recompiler\nint ix1 = params.get(DataExpression.RAND_ROWS);\nint ix2 = params.get(DataExpression.RAND_COLS);\n//update rows/cols by evaluating simple expression of literals, nrow, ncol, scalars, binaryops\n- d.refreshRowsParameterInformation(d.getInput().get(ix1), vars);\n- d.refreshColsParameterInformation(d.getInput().get(ix2), vars);\n+ HashMap<Long, Long> memo = new HashMap<>();\n+ d.refreshRowsParameterInformation(d.getInput().get(ix1), vars, memo);\n+ d.refreshColsParameterInformation(d.getInput().get(ix2), vars, memo);\nupdatedSizeExpr = initUnknown & d.dimsKnown();\n}\nelse if ( d.getOp() == DataGenMethod.SEQ )\n@@ -1558,9 +1550,10 @@ public class Recompiler\nint ix1 = params.get(Statement.SEQ_FROM);\nint ix2 = params.get(Statement.SEQ_TO);\nint ix3 = params.get(Statement.SEQ_INCR);\n- double from = d.computeBoundsInformation(d.getInput().get(ix1), vars);\n- double to = d.computeBoundsInformation(d.getInput().get(ix2), vars);\n- double incr = d.computeBoundsInformation(d.getInput().get(ix3), vars);\n+ HashMap<Long, Double> memo = new HashMap<>();\n+ double from = d.computeBoundsInformation(d.getInput().get(ix1), vars, memo);\n+ double to = d.computeBoundsInformation(d.getInput().get(ix2), vars, memo);\n+ double incr = d.computeBoundsInformation(d.getInput().get(ix3), vars, memo);\n//special case increment\nif ( from!=Double.MAX_VALUE && to!=Double.MAX_VALUE ) {\n@@ -1584,8 +1577,9 @@ public class Recompiler\n{\nReorgOp d = (ReorgOp) hop;\nboolean initUnknown = !d.dimsKnown();\n- d.refreshRowsParameterInformation(d.getInput().get(1), vars);\n- d.refreshColsParameterInformation(d.getInput().get(2), vars);\n+ HashMap<Long, Long> memo = new HashMap<>();\n+ 
d.refreshRowsParameterInformation(d.getInput().get(1), vars, memo);\n+ d.refreshColsParameterInformation(d.getInput().get(2), vars, memo);\nupdatedSizeExpr = initUnknown & d.dimsKnown();\n}\n//update size expression for indexing according to symbol table entries\n@@ -1597,10 +1591,11 @@ public class Recompiler\nHop input4 = iop.getInput().get(3); //inpColL\nHop input5 = iop.getInput().get(4); //inpColU\nboolean initUnknown = !iop.dimsKnown();\n- double rl = iop.computeBoundsInformation(input2, vars);\n- double ru = iop.computeBoundsInformation(input3, vars);\n- double cl = iop.computeBoundsInformation(input4, vars);\n- double cu = iop.computeBoundsInformation(input5, vars);\n+ HashMap<Long, Double> memo = new HashMap<>();\n+ double rl = iop.computeBoundsInformation(input2, vars, memo);\n+ double ru = iop.computeBoundsInformation(input3, vars, memo);\n+ double cl = iop.computeBoundsInformation(input4, vars, memo);\n+ double cu = iop.computeBoundsInformation(input5, vars, memo);\nif( rl!=Double.MAX_VALUE && ru!=Double.MAX_VALUE )\niop.setDim1( (long)(ru-rl+1) );\nif( cl!=Double.MAX_VALUE && cu!=Double.MAX_VALUE )\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteSplitDagDataDependentOperators.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteSplitDagDataDependentOperators.java", "diff": "@@ -77,7 +77,7 @@ public class RewriteSplitDagDataDependentOperators extends StatementBlockRewrite\n//DAG splits not required for forced single node\nif( DMLScript.rtplatform == RUNTIME_PLATFORM.SINGLE_NODE\n|| !HopRewriteUtils.isLastLevelStatementBlock(sb) )\n- return new ArrayList<>(Arrays.asList(sb));\n+ return Arrays.asList(sb);\nArrayList<StatementBlock> ret = new ArrayList<>();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupUncompressed.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupUncompressed.java", "diff": "@@ -23,7 +23,6 @@ package org.apache.sysml.runtime.compress;\nimport java.io.DataInput;\nimport java.io.DataOutput;\nimport java.io.IOException;\n-import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.Iterator;\nimport java.util.List;\n@@ -128,7 +127,7 @@ public class ColGroupUncompressed extends ColGroup\n* compressed columns to subsume. 
Must contain at least one\n* element.\n*/\n- public ColGroupUncompressed(ArrayList<ColGroup> groupsToDecompress)\n+ public ColGroupUncompressed(List<ColGroup> groupsToDecompress)\n{\nsuper(mergeColIndices(groupsToDecompress),\ngroupsToDecompress.get(0)._numRows);\n@@ -186,7 +185,7 @@ public class ColGroupUncompressed extends ColGroup\n* UncompressedColGroup\n* @return a merged set of column indices across all those groups\n*/\n- private static int[] mergeColIndices(ArrayList<ColGroup> groupsToDecompress)\n+ private static int[] mergeColIndices(List<ColGroup> groupsToDecompress)\n{\n// Pass 1: Determine number of columns\nint sz = 0;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/cocode/ColumnGroupPartitionerBinPacking.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/cocode/ColumnGroupPartitionerBinPacking.java", "diff": "@@ -90,7 +90,7 @@ public class ColumnGroupPartitionerBinPacking extends ColumnGroupPartitioner\n//create new bin at end of list\nif( !assigned ) {\n- bins.add(new ArrayList<>(Arrays.asList(items[i])));\n+ bins.add(Arrays.asList(items[i]));\nbinWeights.add(BIN_CAPACITY-itemWeights[i]);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/cocode/PlanningCoCoder.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/cocode/PlanningCoCoder.java", "diff": "@@ -135,8 +135,7 @@ public class PlanningCoCoder\nif( LOG.isTraceEnabled() )\nLOG.trace(\"Cocoding: process \"+singletonGroups.length);\n- List<PlanningCoCodingGroup> workset =\n- new ArrayList<>(Arrays.asList(singletonGroups));\n+ List<PlanningCoCodingGroup> workset = Arrays.asList(singletonGroups);\n//establish memo table for extracted column groups\nPlanningMemoTable memo = new PlanningMemoTable();\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/utils/ConverterUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/utils/ConverterUtils.java", "diff": "package org.apache.sysml.runtime.compress.utils;\n-import java.util.ArrayList;\nimport java.util.Arrays;\nimport org.apache.sysml.runtime.compress.ColGroup;\n@@ -68,18 +67,9 @@ public class ConverterUtils\nreturn DataConverter.convertToDoubleVector(vector, false);\n}\n- public static MatrixBlock getUncompressedColBlock( ColGroup group )\n- {\n- MatrixBlock ret = null;\n- if( group instanceof ColGroupUncompressed ) {\n- ret = ((ColGroupUncompressed) group).getData();\n- }\n- else {\n- ArrayList<ColGroup> tmpGroup = new ArrayList<>(Arrays.asList(group));\n- ColGroupUncompressed decompressedCols = new ColGroupUncompressed(tmpGroup);\n- ret = decompressedCols.getData();\n- }\n-\n- return ret;\n+ public static MatrixBlock getUncompressedColBlock( ColGroup group ) {\n+ return (group instanceof ColGroupUncompressed) ?\n+ ((ColGroupUncompressed) group).getData() :\n+ new ColGroupUncompressed(Arrays.asList(group)).getData();\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/ParForProgramBlock.java", "diff": "@@ -29,6 +29,7 @@ import java.util.Collection;\nimport java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.List;\n+import java.util.Set;\nimport java.util.stream.IntStream;\nimport org.apache.hadoop.fs.FileSystem;\n@@ -1301,7 +1302,7 @@ public class ParForProgramBlock extends ForProgramBlock\nthrows 
CacheException\n{\nParForStatementBlock sb = (ParForStatementBlock)getStatementBlock();\n- HashSet<String> blacklist = new HashSet<>(Arrays.asList(blacklistNames));\n+ Set<String> blacklist = UtilFunctions.asSet(blacklistNames);\nif( LIVEVAR_AWARE_EXPORT && sb != null)\n{\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/util/UtilFunctions.java", "new_path": "src/main/java/org/apache/sysml/runtime/util/UtilFunctions.java", "diff": "@@ -606,4 +606,12 @@ public class UtilFunctions\nret.add(element);\nreturn ret;\n}\n+\n+ @SafeVarargs\n+ public static <T> Set<T> asSet(T... inputs) {\n+ Set<T> ret = new HashSet<>();\n+ for( T element : inputs )\n+ ret.add(element);\n+ return ret;\n+ }\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Reduced recompilation overhead and various cleanups This minor patch reduces the recompilation overhead by (1) better memoization across size expressions of operations, and (2) the removal of unnecessary list copies in specific rewrites and operator cloning.
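A minimal standalone sketch of the memoization pattern in the patch above, assuming a simplified expression tree; the `Node` class and `eval` method are hypothetical stand-ins for SystemML's Hop DAG and `rEvalSimpleLongExpression`, not the actual API. The point is that sibling size expressions (e.g., rows and cols of the same operator) often share subexpressions, so evaluating both against one shared memo table avoids redundant recursion.

```java
import java.util.HashMap;
import java.util.Map;

public class SizeExprMemoSketch {
    // Hypothetical expression node with a stable id and child references.
    static class Node {
        final long id; final long literal; final Node[] inputs;
        Node(long id, long literal, Node... inputs) {
            this.id = id; this.literal = literal; this.inputs = inputs;
        }
    }

    // Evaluate a node as the sum of its children (a stand-in for the real
    // size-expression evaluation), caching results by node id.
    static long eval(Node n, Map<Long, Long> memo) {
        Long cached = memo.get(n.id);
        if (cached != null)
            return cached;
        long ret = n.literal;
        for (Node in : n.inputs)
            ret += eval(in, memo);
        memo.put(n.id, ret);
        return ret;
    }

    public static void main(String[] args) {
        Node shared = new Node(1, 7);            // subexpression used by both dims
        Node rows = new Node(2, 0, shared);
        Node cols = new Node(3, 3, shared);
        Map<Long, Long> memo = new HashMap<>();  // one table for both, as in the patch
        System.out.println(eval(rows, memo) + " x " + eval(cols, memo)); // 7 x 10
    }
}
```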
49,738
12.01.2018 11:15:47
28,800
523f82fb0d3afd6279f33c3054064d06fdfa931e
[HOTFIX] Fix use of unmodifiable lists in compression planning
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/cocode/ColumnGroupPartitionerBinPacking.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/cocode/ColumnGroupPartitionerBinPacking.java", "diff": "@@ -90,7 +90,7 @@ public class ColumnGroupPartitionerBinPacking extends ColumnGroupPartitioner\n//create new bin at end of list\nif( !assigned ) {\n- bins.add(Arrays.asList(items[i]));\n+ bins.add(new ArrayList<>(Arrays.asList(items[i])));\nbinWeights.add(BIN_CAPACITY-itemWeights[i]);\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/compress/cocode/PlanningCoCoder.java", "new_path": "src/main/java/org/apache/sysml/runtime/compress/cocode/PlanningCoCoder.java", "diff": "@@ -135,7 +135,7 @@ public class PlanningCoCoder\nif( LOG.isTraceEnabled() )\nLOG.trace(\"Cocoding: process \"+singletonGroups.length);\n- List<PlanningCoCodingGroup> workset = Arrays.asList(singletonGroups);\n+ List<PlanningCoCodingGroup> workset = new ArrayList<>(Arrays.asList(singletonGroups));\n//establish memo table for extracted column groups\nPlanningMemoTable memo = new PlanningMemoTable();\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Fix use of unmodifiable lists in compression planning
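For context on the hotfix above: `Arrays.asList` returns a fixed-size view backed by the argument array, so any later structural modification fails at runtime. A self-contained illustration of the pitfall and the fix:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class FixedSizeListSketch {
    public static void main(String[] args) {
        List<Integer> fixed = Arrays.asList(1, 2, 3);
        try {
            fixed.add(4); // structural modification of the fixed-size view
        } catch (UnsupportedOperationException ex) {
            System.out.println("fixed-size view rejects add(): " + ex);
        }
        // wrapping in ArrayList yields an independent, resizable copy
        List<Integer> growable = new ArrayList<>(Arrays.asList(1, 2, 3));
        growable.add(4); // fine
        System.out.println(growable); // [1, 2, 3, 4]
    }
}
```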
49,738
14.01.2018 15:59:05
28,800
4f1223a9a4193a036de1a9d0e7891d0aa5eb11cc
[MINOR] Improved recompilation of size expressions and checkpoints This patch improves the recompilation of size expressions with floor/ceil functions as well as the recompilation of checkpoints in parfor body programs. For example, for the nn testsuite, this reduced the number of (unnecessarily) executed spark instructions from 62 to 0.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/Hop.java", "new_path": "src/main/java/org/apache/sysml/hops/Hop.java", "diff": "@@ -956,8 +956,10 @@ public abstract class Hop implements ParseInfo\n//reset recompile flag\nif( (et == null || getExecType() == et || getExecType() == null)\n- && (reset==ResetType.RESET || (reset==ResetType.RESET_KNOWN_DIMS && dimsKnown())) )\n+ && (reset==ResetType.RESET || (reset==ResetType.RESET_KNOWN_DIMS && dimsKnown()))\n+ && !(_requiresCheckpoint && getLops() instanceof Checkpoint && !dimsKnown(true)) ) {\n_requiresRecompile = false;\n+ }\nsetVisited();\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java", "new_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java", "diff": "@@ -1296,6 +1296,8 @@ public class OptimizerUtils\n{\ncase SQRT: ret = Math.sqrt(lval); break;\ncase ROUND: ret = Math.round(lval); break;\n+ case CEIL: ret = Math.ceil(lval); break;\n+ case FLOOR: ret = Math.floor(lval); break;\ncase CAST_AS_BOOLEAN: ret = (lval!=0)? 1 : 0; break;\ncase CAST_AS_INT: ret = UtilFunctions.toLong(lval); break;\ncase CAST_AS_DOUBLE: ret = lval; break;\n@@ -1333,6 +1335,8 @@ public class OptimizerUtils\n{\ncase SQRT: ret = Math.sqrt(lval); break;\ncase ROUND: ret = Math.round(lval); break;\n+ case CEIL: ret = Math.ceil(lval); break;\n+ case FLOOR: ret = Math.floor(lval); break;\ncase CAST_AS_BOOLEAN: ret = (lval!=0)? 1 : 0; break;\ncase CAST_AS_INT: ret = UtilFunctions.toLong(lval); break;\ncase CAST_AS_DOUBLE: ret = lval; break;\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/codegenalg/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/codegenalg/ZPackageSuite.java", "diff": "@@ -36,6 +36,7 @@ import org.junit.runners.Suite;\nAlgorithmMLogreg.class,\nAlgorithmMSVM.class,\nAlgorithmPNMF.class,\n+ AlgorithmStepwiseRegression.class,\n})\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Improved recompilation of size expressions and checkpoints This patch improves the recompilation of size expressions with floor/ceil functions as well as the recompilation of checkpoints in parfor body programs. For example, for the nn testsuite, this reduced the number of (unnecessarily) executed spark instructions from 62 to 0.
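A small sketch of the constant-folding extension in this patch, assuming a simplified enum; the real code lives in `OptimizerUtils` with a much larger switch, so treat the names here as illustrative only:

```java
public class UnaryFoldSketch {
    enum OpOp1 { SQRT, ROUND, CEIL, FLOOR }

    // Fold a unary op over a literal double, mirroring the new CEIL/FLOOR cases.
    static double fold(OpOp1 op, double lval) {
        switch (op) {
            case SQRT:  return Math.sqrt(lval);
            case ROUND: return Math.round(lval);
            case CEIL:  return Math.ceil(lval);  // newly folded by the patch
            case FLOOR: return Math.floor(lval); // newly folded by the patch
            default:    throw new IllegalArgumentException("unsupported op");
        }
    }

    public static void main(String[] args) {
        System.out.println(fold(OpOp1.CEIL, 2.1));  // 3.0
        System.out.println(fold(OpOp1.FLOOR, 2.9)); // 2.0
    }
}
```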
49,738
15.01.2018 11:43:42
28,800
e7994b7459316ec0955b87dee9079bbf60088851
[HOTFIX][SYSTEMML-2070] Fix compressed LinregCG (R script location)
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/compress/CompressedLinregCG.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/compress/CompressedLinregCG.java", "diff": "@@ -105,7 +105,7 @@ public class CompressedLinregCG extends AutomatedTestBase\n/* This is for running the junit test the new way, i.e., construct the arguments directly */\nString HOME1 = SCRIPT_DIR + \"functions/compress/\";\n- String HOME2 = SCRIPT_DIR + \"functions/codegen/\";\n+ String HOME2 = SCRIPT_DIR + \"functions/codegenalg/\";\nfullDMLScriptName = \"scripts/algorithms/LinearRegCG.dml\";\nprogramArgs = new String[]{ \"-explain\", \"-stats\", \"-nvargs\", \"X=\"+input(\"X\"), \"Y=\"+input(\"y\"),\n\"icpt=\"+String.valueOf(intercept), \"tol=\"+String.valueOf(epsilon),\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX][SYSTEMML-2070] Fix compressed LinregCG (R script location)
49,738
15.01.2018 19:45:48
28,800
185ab0e3b34ecd8e8281027839a3d53fbacb9e42
Large dense blocks in rotate180, relu, incl cleanups This patch adds support for large dense blocks in convolution rotate180 and relu backward operations. Furthermore, it includes some minor cleanups of unnecessary code duplication and inefficiencies.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/InstructionUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/InstructionUtils.java", "diff": "@@ -594,10 +594,8 @@ public class InstructionUtils\n* @param opcode the opcode\n* @param arg1IsScalar ?\n* @return scalar operator\n- * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\npublic static ScalarOperator parseScalarBinaryOperator(String opcode, boolean arg1IsScalar)\n- throws DMLRuntimeException\n{\n//for all runtimes that set constant dynamically (cp/spark)\ndouble default_constant = 0;\n@@ -612,10 +610,8 @@ public class InstructionUtils\n* @param arg1IsScalar ?\n* @param constant ?\n* @return scalar operator\n- * @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\npublic static ScalarOperator parseScalarBinaryOperator(String opcode, boolean arg1IsScalar, double constant)\n- throws DMLRuntimeException\n{\n//commutative operators\nif ( opcode.equalsIgnoreCase(\"+\") ){\n@@ -754,7 +750,7 @@ public class InstructionUtils\nreturn new LeftScalarOperator(Divide.getDivideFnObject(), constant);\n}\n- throw new DMLRuntimeException(\"Unknown binary opcode \" + opcode);\n+ throw new RuntimeException(\"Unknown binary opcode \" + opcode);\n}\npublic static BinaryOperator parseExtendedBinaryOperator(String opcode)\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNRelu.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNRelu.java", "diff": "@@ -23,17 +23,15 @@ import java.util.concurrent.Callable;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n-import org.apache.sysml.runtime.functionobjects.Plus;\nimport org.apache.sysml.runtime.instructions.InstructionUtils;\n-import org.apache.sysml.runtime.matrix.operators.BinaryOperator;\n-import org.apache.sysml.runtime.util.ConvolutionUtils;\n+import org.apache.sysml.runtime.matrix.operators.ScalarOperator;\n/**\n* This class contains the different implementation of rotate180 operation\n*/\npublic class LibMatrixDNNRelu\n{\n- private static BinaryOperator PLUS = new BinaryOperator(Plus.getPlusFnObject());\n+ private static ScalarOperator GT0 = InstructionUtils.parseScalarBinaryOperator(\">\", false, 0);\n/**\n@@ -57,33 +55,92 @@ public class LibMatrixDNNRelu\n*/\npublic static class ReluBackward implements Callable<Long>\n{\n- public int _rl; public int _ru;\n+ public final int _rl, _ru;\nprivate final ConvolutionParameters _params;\n- double [] outputArray; int numOutCols;\npublic ReluBackward(int rl, int ru, ConvolutionParameters params) {\n_rl = rl; _ru = ru;\n_params = params;\n- outputArray= params.output.getDenseBlockValues();\n- numOutCols = params.input1.getNumColumns();\n}\n@Override\npublic Long call() throws Exception {\n+ //note: X (m x n), dout (m x n) -> out (m x n)\n+ DenseBlock out = _params.output.getDenseBlock();\n+ final int n = _params.input1.getNumColumns();\nif(!_params.input1.isInSparseFormat() && !_params.input2.isInSparseFormat()) {\n- double [] inputArr = _params.input1.getDenseBlockValues();\n- double [] doutArr = _params.input2.getDenseBlockValues();\n- for(int i = _rl*numOutCols; i < _ru*numOutCols; i++) {\n- outputArray[i] = inputArr[i] > 0 ? 
doutArr[i] : 0;\n+ DenseBlock x = _params.input1.getDenseBlock();\n+ DenseBlock dout = _params.input2.getDenseBlock();\n+ for(int i = _rl; i < _ru; i++) {\n+ double[] xvals = x.values(i), doutvals = dout.values(i), cvals = out.values(i);\n+ int xpos = x.pos(i), doutpos = dout.pos(i), cpos = out.pos(i);\n+ for(int j=0; j<n; j++)\n+ cvals[cpos+j] = xvals[xpos+j] > 0 ? doutvals[doutpos +j] : 0;\n}\n}\nelse {\n- // Perform (X > 0)\n- ConvolutionUtils.scalarOperations(_params.input1, outputArray, _rl*numOutCols, numOutCols, _rl, _ru,\n- InstructionUtils.parseScalarBinaryOperator(\">\", false, 0));\n- // Then perform (X > 0) * dout\n- ConvolutionUtils.binaryOperationInPlace(_params.input2, outputArray, _rl*numOutCols, numOutCols, _rl, _ru, PLUS);\n+ scalarOperations(_params.input1, out, n, _rl, _ru, GT0); // (X > 0)\n+ binaryOperationInPlacePlus(_params.input2, out, n, _rl, _ru); // (X > 0) * dout\n}\nreturn 0L;\n}\n}\n+\n+ private static void scalarOperations(MatrixBlock src, DenseBlock c,\n+ int destNumCols, int src_rl, int src_ru, ScalarOperator op)\n+ throws DMLRuntimeException\n+ {\n+ if(src.isInSparseFormat()) {\n+ for(int i = src_rl; i < src_ru; i++) {\n+ if( src.getSparseBlock().isEmpty(i) ) continue;\n+ int apos = src.getSparseBlock().pos(i);\n+ int alen = src.getSparseBlock().size(i);\n+ int[] aix = src.getSparseBlock().indexes(i);\n+ double[] avals = src.getSparseBlock().values(i);\n+ double[] cvals = c.values(i);\n+ int cix = c.pos(i);\n+ for(int j = apos; j < apos+alen; j++)\n+ cvals[ cix+aix[j] ] = op.executeScalar(avals[j]);\n+ }\n+ }\n+ else {\n+ DenseBlock a = src.getDenseBlock();\n+ for(int i = src_rl; i < src_ru; i++) {\n+ double[] avals = a.values(i), cvals = c.values(i);\n+ int aix = a.pos(i), cix = c.pos(i);\n+ for(int j=0; j<destNumCols; j++)\n+ cvals[cix+j] = op.executeScalar(avals[aix+j]);\n+ }\n+ }\n+ }\n+\n+ private static void binaryOperationInPlacePlus(MatrixBlock src,\n+ DenseBlock c, int destNumCols, int src_rl, int src_ru)\n+ throws DMLRuntimeException\n+ {\n+ if( src.isEmptyBlock(false) )\n+ return; //do nothing (add 0);\n+\n+ if(src.isInSparseFormat()) {\n+ for(int i = src_rl; i < src_ru; i++) {\n+ if( src.getSparseBlock().isEmpty(i) ) continue;\n+ int apos = src.getSparseBlock().pos(i);\n+ int alen = src.getSparseBlock().size(i);\n+ int[] aix = src.getSparseBlock().indexes(i);\n+ double[] avals = src.getSparseBlock().values(i);\n+ double[] cvals = c.values(i);\n+ int cix = c.pos(i);\n+ for(int j = apos; j < apos+alen; j++)\n+ cvals[ cix+aix[j] ] += avals[j];\n+ }\n+ }\n+ else { //DENSE\n+ DenseBlock a = src.getDenseBlock();\n+ for(int i = src_rl; i < src_ru; i++) {\n+ double[] avals = a.values(i), cvals = c.values(i);\n+ int aix = a.pos(i), cix = c.pos(i);\n+ for(int j=0; j<destNumCols; j++)\n+ cvals[cix+j] += avals[aix+j];\n+ }\n+ }\n+ }\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNRotate180.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNRotate180.java", "diff": "@@ -29,10 +29,9 @@ public class LibMatrixDNNRotate180\npublic void execute(int inputN, int outputN);\npublic static Rotate180Worker getWorker(MatrixBlock in, MatrixBlock out,\nConvolutionParameters params, boolean zeroOutSparseOutput, boolean trans) {\n- if(!in.isInSparseFormat())\n- return new DenseRotate180Worker(in, out.getDenseBlockValues(), params);\n- else\n- return new SparseRotate180Worker(in, out, params, trans);\n+ return in.isInSparseFormat() ?\n+ new SparseRotate180Worker(in, out, params, 
trans) :\n+ new DenseRotate180Worker(in, out, params);\n}\n}\n@@ -40,27 +39,24 @@ public class LibMatrixDNNRotate180\n* Performing dense rotate180 (general case)\n*/\nprivate static class DenseRotate180Worker implements Rotate180Worker {\n- private final double[] inputArray, outputArray;\n+ private final DenseBlock in, out;\nprivate final ConvolutionParameters params;\n- public DenseRotate180Worker(MatrixBlock input, double[] outputArray, ConvolutionParameters params) {\n- this.outputArray = outputArray;\n+ public DenseRotate180Worker(MatrixBlock input, MatrixBlock output, ConvolutionParameters params) {\n+ this.in = input.getDenseBlock();\n+ this.out = output.getDenseBlock();\nthis.params = params;\n- inputArray = input.getDenseBlockValues();\n- if(inputArray == null || outputArray == null)\n- throw new RuntimeException(\"Incorrect usage: empty inputs\");\n}\n@Override\npublic void execute(int inputN, int outputN) {\n- int outputOffset = outputN*params.K*params.P*params.Q;\n- for (int k = 0; k < params.K; k++) {\n- for (int p = 0; p < params.P; p++) {\n- for (int q = 0; q < params.Q; q++) {\n- outputArray[outputOffset + p*params.Q*params.K + q*params.K + k] =\n- inputArray[inputN*params.K*params.P*params.Q + k*params.P*params.Q + p*params.Q + q];\n- }\n- }\n- }\n+ //note: in (m x KPQ) -> out (m x KPQ)\n+ double[] avals = in.values(inputN), cvals = out.values(outputN);\n+ int aix = in.pos(inputN), cix = out.pos(outputN);\n+ int K = params.K, P = params.P, Q = params.Q;\n+ for (int k = 0; k < K; k++)\n+ for (int p = 0; p < P; p++)\n+ for (int q = 0; q < Q; q++)\n+ cvals[cix + p*Q*K + q*K + k] = avals[aix + k*P*Q + p*Q + q];\n}\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/util/ConvolutionUtils.java", "new_path": "src/main/java/org/apache/sysml/runtime/util/ConvolutionUtils.java", "diff": "@@ -22,11 +22,7 @@ package org.apache.sysml.runtime.util;\nimport java.util.Arrays;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n-import org.apache.sysml.runtime.functionobjects.Multiply;\n-import org.apache.sysml.runtime.functionobjects.Plus;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n-import org.apache.sysml.runtime.matrix.operators.BinaryOperator;\n-import org.apache.sysml.runtime.matrix.operators.ScalarOperator;\npublic class ConvolutionUtils {\n@@ -74,102 +70,6 @@ public class ConvolutionUtils {\nreturn ret;\n}\n-\n- // Performs dest[destPos...] 
op= thatValue[src_rl:src_ru,]\n- public static void binaryOperationInPlace(MatrixBlock src, double [] dest,\n- int destPos, int destNumCols, int src_rl, int src_ru, BinaryOperator op) throws DMLRuntimeException {\n- if(src.isInSparseFormat()) {\n- if(src.isEmptyBlock() && op.fn == Plus.getPlusFnObject()) {\n- // Do nothing: Inplace addition by zero\n- }\n- else if(src.isEmptyBlock() && op.fn == Multiply.getMultiplyFnObject()) {\n- // Inplace multiplication by zero\n- Arrays.fill(dest, destPos, destPos + (src_ru-src_rl)*destNumCols, 0);\n- }\n- else if(op.fn == Plus.getPlusFnObject()) {\n- for(int i = src_rl, cix = destPos; i < src_ru; i++, cix += destNumCols) {\n- if( !src.getSparseBlock().isEmpty(i) ) {\n- int apos = src.getSparseBlock().pos(i);\n- int alen = src.getSparseBlock().size(i);\n- int[] aix = src.getSparseBlock().indexes(i);\n- double[] avals = src.getSparseBlock().values(i);\n- for(int j = apos; j < apos+alen; j++) {\n- dest[ cix+aix[j] ] += avals[j];\n- }\n- }\n- }\n- }\n- else if(op.fn == Multiply.getMultiplyFnObject()) {\n- // Unsafe operation\n- for(int i = src_rl, cix = destPos; i < src_ru; i++, cix += destNumCols) {\n- if( !src.getSparseBlock().isEmpty(i) ) {\n- int apos = src.getSparseBlock().pos(i);\n- int alen = src.getSparseBlock().size(i);\n- int[] aix = src.getSparseBlock().indexes(i);\n- double[] avals = src.getSparseBlock().values(i);\n- int prevDestIndex = 0;\n- for(int j = apos; j < apos+alen; j++) {\n- // Multiplication by zero. Assumption: aix is sorted.\n- Arrays.fill(dest, cix+prevDestIndex, cix+aix[j], 0);\n- prevDestIndex = aix[j]+1;\n- dest[ cix+aix[j] ] *= avals[j];\n- }\n- Arrays.fill(dest, cix+prevDestIndex, cix+destNumCols, 0);\n- }\n- else {\n- Arrays.fill(dest, cix, cix + destNumCols, 0);\n- }\n- }\n- }\n- else {\n- // As operation could be safe or unsafe. This will be caught at development time.\n- throw new DMLRuntimeException(\"Unimplemented sparse operation\");\n- }\n- }\n- else {\n- double [] inputArr = src.getDenseBlockValues();\n- if(op.fn == Plus.getPlusFnObject()) {\n- for(int i = destPos; i < src_ru*destNumCols; i++) {\n- dest[i] += inputArr[i];\n- }\n- }\n- else if(op.fn == Multiply.getMultiplyFnObject()) {\n- for(int i = destPos; i < src_ru*destNumCols; i++) {\n- dest[i] *= inputArr[i];\n- }\n- }\n- else {\n- for(int i = destPos; i < src_ru*destNumCols; i++) {\n- dest[i] = op.fn.execute(dest[i], inputArr[i]);\n- }\n- }\n- }\n- }\n-\n- // Performs dest[destPos...] = src[src_rl:src_ru,] op scalar\n- public static void scalarOperations(MatrixBlock src, double [] dest,\n- int destPos, int destNumCols, int src_rl, int src_ru, ScalarOperator scalarOp) throws DMLRuntimeException {\n- if(src.isInSparseFormat()) {\n- for(int i = src_rl, cix = destPos; i < src_ru; i++, cix += destNumCols) {\n- if( !src.getSparseBlock().isEmpty(i) ) {\n- int apos = src.getSparseBlock().pos(i);\n- int alen = src.getSparseBlock().size(i);\n- int[] aix = src.getSparseBlock().indexes(i);\n- double[] avals = src.getSparseBlock().values(i);\n- for(int j = apos; j < apos+alen; j++) {\n- dest[ cix+aix[j] ] = scalarOp.executeScalar(avals[j]);\n- }\n- }\n- }\n- }\n- else {\n- double [] inputArr = src.getDenseBlockValues();\n- for(int i = destPos; i < src_ru*destNumCols; i++) {\n- dest[i] = scalarOp.executeScalar(inputArr[i]);\n- }\n- }\n- }\n-\npublic static void fillBias(MatrixBlock bias, double [] outputArray, int src_rl, int src_ru, int N, int K, int PQ) throws DMLRuntimeException {\n// bias.getNumColumns() == 1 checked outside\nif(bias.isInSparseFormat()) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2046] Large dense blocks in rotate180, relu, incl cleanups This patch adds support for large dense blocks in convolution rotate180 and relu backward operations. Furthermore, it includes some minor cleanups of unnecessary code duplication and inefficiencies.
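The dense rotate180 worker above rewrites each row from (k,p,q)-major to (p,q,k)-major layout. A standalone sketch with the same index arithmetic, using a hypothetical helper method (the real code operates on `DenseBlock` rows rather than raw arrays):

```java
public class Rotate180Sketch {
    // One row of length K*P*Q in (k,p,q) order is rewritten into (p,q,k) order.
    static void rotate180Row(double[] in, int aix, double[] out, int cix,
                             int K, int P, int Q) {
        for (int k = 0; k < K; k++)
            for (int p = 0; p < P; p++)
                for (int q = 0; q < Q; q++)
                    out[cix + p*Q*K + q*K + k] = in[aix + k*P*Q + p*Q + q];
    }

    public static void main(String[] args) {
        int K = 2, P = 2, Q = 2;
        double[] in = {0, 1, 2, 3, 4, 5, 6, 7}; // value == (k,p,q) linear index
        double[] out = new double[K * P * Q];
        rotate180Row(in, 0, out, 0, K, P, Q);
        // element (p,q,k) of out equals element (k,p,q) of in
        System.out.println(java.util.Arrays.toString(out));
        // [0.0, 4.0, 1.0, 5.0, 2.0, 6.0, 3.0, 7.0]
    }
}
```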
49,738
16.01.2018 16:07:53
28,800
d03396f20ecf3ed46e11fb8b3b960a54d94d329a
Fix robustness of toString for scalars, tests The runtime instruction for toString does not support scalars, but because the ParameterizedCPInstruction requires instruction patching, the resulting error message is unclear. This patch makes the parser more robust by compiling toString only for matrices and frames and directly converting scalars to strings.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "new_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java", "diff": "@@ -2166,21 +2166,17 @@ public class DMLTranslator\nbreak;\ncase TOSTRING:\n- currBuiltinOp = new ParameterizedBuiltinOp(\n- target.getName(), target.getDataType(),\n- target.getValueType(), ParamBuiltinOp.TOSTRING,\n- paramHops);\n+ //check for input data type and only compile toString Hop for matrices/frames,\n+ //for scalars, we compile (s + \"\") to ensure consistent string output value types\n+ currBuiltinOp = !paramHops.get(\"target\").getDataType().isScalar() ?\n+ new ParameterizedBuiltinOp(target.getName(), target.getDataType(),\n+ target.getValueType(), ParamBuiltinOp.TOSTRING, paramHops) :\n+ HopRewriteUtils.createBinary(paramHops.get(\"target\"), new LiteralOp(\"\"), OpOp2.PLUS);\nbreak;\ndefault:\n-\n- LOG.error(source.printErrorLocation() +\n- \"processParameterizedBuiltinFunctionExpression() -- Unknown operation: \"\n- + source.getOpCode());\n-\nthrow new ParseException(source.printErrorLocation() +\n- \"processParameterizedBuiltinFunctionExpression() -- Unknown operation: \"\n- + source.getOpCode());\n+ \"processParameterizedBuiltinFunctionExpression() -- Unknown operation: \" + source.getOpCode());\n}\nsetIdentifierParams(currBuiltinOp, source.getOutput());\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/InputToStringTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.jmlc;\n+\n+import org.apache.sysml.api.DMLException;\n+import org.apache.sysml.api.jmlc.Connection;\n+import org.apache.sysml.api.jmlc.PreparedScript;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.util.DataConverter;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.junit.Test;\n+\n+public class InputToStringTest extends AutomatedTestBase\n+{\n+ @Override\n+ public void setUp() { }\n+\n+ @Test\n+ public void testScalartoString() throws DMLException {\n+ try( Connection conn = new Connection() ) {\n+ PreparedScript pscript = conn.prepareScript(\n+ \"s = read(\\\"tmp\\\", data_type=\\\"scalar\\\"); print(toString(s));\",\n+ new String[]{\"s\"}, new String[]{});\n+ pscript.setScalar(\"s\", 7);\n+ pscript.executeScript();\n+ }\n+ }\n+\n+ @Test\n+ public void testMatrixtoString() throws DMLException {\n+ try( Connection conn = new Connection() ) {\n+ PreparedScript pscript = conn.prepareScript(\n+ \"m = read(\\\"tmp\\\", data_type=\\\"matrix\\\"); print(toString(m));\",\n+ new String[]{\"m\"}, new String[]{});\n+ pscript.setMatrix(\"m\", MatrixBlock.randOperations(7, 3, 1.0, 0, 1, \"uniform\", 7), false);\n+ pscript.executeScript();\n+ }\n+ }\n+\n+ @Test\n+ public void testFrametoString() throws DMLException {\n+ try( Connection conn = new Connection() ) {\n+ PreparedScript pscript = conn.prepareScript(\n+ \"f = read(\\\"tmp\\\", data_type=\\\"frame\\\"); print(toString(f));\",\n+ new String[]{\"f\"}, new String[]{});\n+ pscript.setFrame(\"f\", DataConverter.convertToFrameBlock(\n+ MatrixBlock.randOperations(7, 3, 1.0, 0, 1, \"uniform\", 7)), false);\n+ pscript.executeScript();\n+ }\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/jmlc/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/jmlc/ZPackageSuite.java", "diff": "@@ -34,6 +34,7 @@ import org.junit.runners.Suite;\nFrameLeftIndexingTest.class,\nFrameReadMetaTest.class,\nFrameTransformTest.class,\n+ InputToStringTest.class,\nJMLCClonedPreparedScriptTest.class,\nJMLCInputOutputTest.class,\nJMLCInputStreamReadTest.class,\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1175] Fix robustness of toString for scalars, tests The runtime instruction for toString does not support scalars, but because the ParameterizedCPInstruction requires instruction patching, the resulting error message is unclear. This patch makes the parser more robust by compiling toString only for matrices and frames and directly converting scalars to strings.
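A minimal JMLC usage sketch mirroring the `InputToStringTest` added above; after the fix, `toString` on a scalar input compiles to `(s + "")` and prints the value as expected (exception handling omitted for brevity):

```java
import org.apache.sysml.api.jmlc.Connection;
import org.apache.sysml.api.jmlc.PreparedScript;

public class ScalarToStringExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = new Connection()) {
            // toString on a scalar previously failed at instruction patching
            PreparedScript pscript = conn.prepareScript(
                "s = read(\"tmp\", data_type=\"scalar\"); print(toString(s));",
                new String[]{"s"}, new String[]{});
            pscript.setScalar("s", 7);
            pscript.executeScript(); // prints 7
        }
    }
}
```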
49,738
17.01.2018 12:21:43
28,800
5b0fb0ccf2e6413e40da81727dc0a53d24f54152
[HOTFIX][SYSTEMML-1570] Fix rewrite for relu (selp) expressions
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java", "diff": "@@ -1296,7 +1296,7 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nif( left2 instanceof LiteralOp &&\nHopRewriteUtils.getDoubleValue((LiteralOp)left2)==0 &&\n- left1 == right && (bleft.getOp() == OpOp2.GREATER || bleft.getOp() == OpOp2.GREATEREQUAL ) )\n+ left1 == right && (bleft.getOp() == OpOp2.GREATER ) )\n{\nBinaryOp binary = HopRewriteUtils.createBinary(right, new LiteralOp(0), OpOp2.MAX);\nHopRewriteUtils.replaceChildReference(parent, bop, binary, pos);\n@@ -1315,7 +1315,7 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nif( right2 instanceof LiteralOp &&\nHopRewriteUtils.getDoubleValue((LiteralOp)right2)==0 &&\n- right1 == left && bright.getOp() == OpOp2.GREATER || bright.getOp() == OpOp2.GREATEREQUAL )\n+ right1 == left && bright.getOp() == OpOp2.GREATER )\n{\nBinaryOp binary = HopRewriteUtils.createBinary(left, new LiteralOp(0), OpOp2.MAX);\nHopRewriteUtils.replaceChildReference(parent, bop, binary, pos);\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX][SYSTEMML-1570] Fix rewrite for relu (selp) expressions
49,738
18.01.2018 11:51:30
28,800
82e42957f7e1a2e3d7a9c1e4f6685b52db61c9b4
[MINOR] Cleanup jmlc build lite jar tests, incl remove temp files
[ { "change_type": "DELETE", "old_path": "A.csv", "new_path": null, "diff": "-0.0146817556045713,0.5112049868172497\n-1.8844105118944472,1.575721197916694\n-0.31179904293595984,1.9060943669721677\n" }, { "change_type": "DELETE", "old_path": "A.csv.mtd", "new_path": null, "diff": "-{\n- \"data_type\": \"matrix\",\n- \"value_type\": \"double\",\n- \"rows\": 3,\n- \"cols\": 2,\n- \"nnz\": 6,\n- \"format\": \"csv\",\n- \"header\": false,\n- \"sep\": \",\",\n- \"author\": \"mboehm\",\n- \"created\": \"2018-01-16 16:09:58 PST\"\n-}\n\\ No newline at end of file\n" }, { "change_type": "DELETE", "old_path": "B.csv", "new_path": null, "diff": "-0.0146817556045713,0.5112049868172497,1.0,0,1.0,2.5258867424218208\n-1.8844105118944472,1.575721197916694,1.0,1.0,1.8844105118944472,7.344542221705589\n-0.31179904293595984,1.9060943669721677,1.0,2.0,2.0,7.217893409908127\n" }, { "change_type": "DELETE", "old_path": "B.csv.mtd", "new_path": null, "diff": "-{\n- \"data_type\": \"matrix\",\n- \"value_type\": \"double\",\n- \"rows\": 3,\n- \"cols\": 6,\n- \"nnz\": 17,\n- \"format\": \"csv\",\n- \"header\": false,\n- \"sep\": \",\",\n- \"author\": \"mboehm\",\n- \"created\": \"2018-01-16 16:09:58 PST\"\n-}\n\\ No newline at end of file\n" }, { "change_type": "DELETE", "old_path": "functions/jmlc/temp/m.binary", "new_path": "functions/jmlc/temp/m.binary", "diff": "Binary files a/functions/jmlc/temp/m.binary and /dev/null differ\n" }, { "change_type": "DELETE", "old_path": "functions/jmlc/temp/m.binary.mtd", "new_path": null, "diff": "-{\n- \"data_type\": \"matrix\",\n- \"value_type\": \"double\",\n- \"rows\": 4,\n- \"cols\": 3,\n- \"rows_in_block\": 1000,\n- \"cols_in_block\": 1000,\n- \"nnz\": 6,\n- \"format\": \"binary\",\n- \"author\": \"mboehm\",\n- \"created\": \"2018-01-16 16:10:56 PST\"\n-}\n\\ No newline at end of file\n" }, { "change_type": "DELETE", "old_path": "functions/jmlc/temp/m.csv", "new_path": null, "diff": "-1.0,2.0,3.0\n-0,0,0\n-7.0,8.0,9.0\n-0,0,0\n" }, { "change_type": "DELETE", "old_path": "functions/jmlc/temp/m.csv.mtd", "new_path": null, "diff": "-{\n- \"data_type\": \"matrix\",\n- \"value_type\": \"double\",\n- \"rows\": 4,\n- \"cols\": 3,\n- \"nnz\": 6,\n- \"format\": \"csv\",\n- \"header\": false,\n- \"sep\": \",\",\n- \"author\": \"mboehm\",\n- \"created\": \"2018-01-16 16:10:56 PST\"\n-}\n\\ No newline at end of file\n" }, { "change_type": "DELETE", "old_path": "functions/jmlc/temp/m.mm", "new_path": null, "diff": "-%%MatrixMarket matrix coordinate real general\n-4 3 6\n-1 1 1.0\n-1 2 2.0\n-1 3 3.0\n-3 1 7.0\n-3 2 8.0\n-3 3 9.0\n" }, { "change_type": "DELETE", "old_path": "functions/jmlc/temp/m.txt", "new_path": null, "diff": "-1 1 1.0\n-1 2 2.0\n-1 3 3.0\n-3 1 7.0\n-3 2 8.0\n-3 3 9.0\n" }, { "change_type": "DELETE", "old_path": "functions/jmlc/temp/m.txt.mtd", "new_path": null, "diff": "-{\n- \"data_type\": \"matrix\",\n- \"value_type\": \"double\",\n- \"rows\": 4,\n- \"cols\": 3,\n- \"nnz\": 6,\n- \"format\": \"text\",\n- \"author\": \"mboehm\",\n- \"created\": \"2018-01-16 16:10:56 PST\"\n-}\n\\ No newline at end of file\n" }, { "change_type": "DELETE", "old_path": "functions/jmlc/temp/scoring-example.dml", "new_path": null, "diff": "-X = read(\"./tmp/X\", rows=-1, cols=-1);\n-W = read(\"./tmp/W\", rows=-1, cols=-1);\n-\n-numRows = nrow(X);\n-numCols = ncol(X);\n-b = W[numCols+1,]\n-scores = X %*% W[1:numCols,] + b;\n-predicted_y = rowIndexMax(scores);\n-\n-print('pred:' + toString(predicted_y))\n-\n-write(predicted_y, \"./tmp\", format=\"text\");\n" }, { "change_type": "DELETE", 
"old_path": "functions/jmlc/temp/x.csv", "new_path": null, "diff": "-1.0,2.0\n-3.0,4.0\n" }, { "change_type": "DELETE", "old_path": "functions/jmlc/temp/x.csv.mtd", "new_path": null, "diff": "-{\n- \"data_type\": \"matrix\",\n- \"value_type\": \"double\",\n- \"rows\": 2,\n- \"cols\": 2,\n- \"nnz\": 4,\n- \"format\": \"csv\",\n- \"header\": false,\n- \"sep\": \",\",\n- \"author\": \"mboehm\",\n- \"created\": \"2018-01-16 16:10:56 PST\"\n-}\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/lite/BuildLiteExecution.java", "new_path": "src/main/java/org/apache/sysml/utils/lite/BuildLiteExecution.java", "diff": "@@ -44,10 +44,19 @@ import org.apache.sysml.runtime.util.DataConverter;\n* the execution of that code.\n*\n*/\n-public class BuildLiteExecution {\n-\n+public class BuildLiteExecution\n+{\nprivate static Logger log = Logger.getLogger(BuildLiteExecution.class);\nprivate static final String ROOT = \"functions/jmlc/temp/\";\n+ private static String _rootPrefix = null;\n+\n+ public static void setRootPrefix(String prefix) {\n+ _rootPrefix = prefix;\n+ }\n+\n+ public static String getRoot() {\n+ return (_rootPrefix != null) ? _rootPrefix + ROOT : ROOT;\n+ }\npublic static void main(String[] args) throws Exception {\n@@ -79,7 +88,6 @@ public class BuildLiteExecution {\n}\npublic static void jmlcScoringExample() throws Exception {\n- /* @formatter:off */\nString scriptString =\n\"X = read(\\\"./tmp/X\\\", rows=-1, cols=-1);\\n\" +\n\"W = read(\\\"./tmp/W\\\", rows=-1, cols=-1);\\n\" +\n@@ -93,15 +101,15 @@ public class BuildLiteExecution {\n\"print('pred:' + toString(predicted_y))\\n\" +\n\"\\n\" +\n\"write(predicted_y, \\\"./tmp\\\", format=\\\"text\\\");\\n\";\n- /* @formatter:on */\n- File file = new File(ROOT+\"scoring-example.dml\");\n+ File file = new File(getRoot()+\"scoring-example.dml\");\n+ System.out.println(file.toString());\nFileUtils.writeStringToFile(file, scriptString);\nConnection conn = getConfiguredConnection();\n- String dml = conn.readScript(ROOT+\"scoring-example.dml\");\n- PreparedScript script = conn.prepareScript(dml, new String[] { \"W\", \"X\" }, new String[] { \"predicted_y\" },\n- false);\n+ String dml = conn.readScript(getRoot()+\"scoring-example.dml\");\n+ PreparedScript script = conn.prepareScript(dml,\n+ new String[] { \"W\", \"X\" }, new String[] { \"predicted_y\" }, false);\ndouble[][] mtx = matrix(4, 3, new double[] { 1, 2, 3, 4, 5, 6, 7, 8, 9 });\ndouble[][] result = null;\n@@ -132,8 +140,8 @@ public class BuildLiteExecution {\nMap<String, String> m = new HashMap<>();\nm.put(\"$CONSOLE_OUTPUT\", \"TRUE\");\n- PreparedScript script = conn.prepareScript(dml, m, new String[] { \"A\", \"K\" }, new String[] { \"baseStats\" },\n- false);\n+ PreparedScript script = conn.prepareScript(dml,\n+ m, new String[] { \"A\", \"K\" }, new String[] { \"baseStats\" }, false);\ndouble[][] data = new double[100][4];\nfor (int i = 0; i < 100; i++) {\n@@ -159,18 +167,16 @@ public class BuildLiteExecution {\npublic static void jmlcWriteMatrix() throws Exception {\nConnection conn = getConfiguredConnection();\nPreparedScript script = conn.prepareScript(\n- \"x=matrix('1 2 3 4',rows=2,cols=2);write(x,'\"+ROOT+\"x.csv',format='csv');\", new String[] {},\n+ \"x=matrix('1 2 3 4',rows=2,cols=2);write(x,'\"+getRoot()+\"x.csv',format='csv');\", new String[] {},\nnew String[] {}, false);\nscript.executeScript();\n- /* @formatter:off */\nString scriptString =\n\"m = matrix('1 2 3 0 0 0 7 8 9 0 0 0', rows=4, cols=3)\\n\" +\n- 
\"write(m, '\"+ROOT+\"m.txt', format='text')\\n\" +\n- \"write(m, '\"+ROOT+\"m.mm', format='mm')\\n\" +\n- \"write(m, '\"+ROOT+\"m.csv', format='csv')\\n\" +\n- \"write(m, '\"+ROOT+\"m.binary', format='binary')\\n\";\n- /* @formatter:on */\n+ \"write(m, '\"+getRoot()+\"m.txt', format='text')\\n\" +\n+ \"write(m, '\"+getRoot()+\"m.mm', format='mm')\\n\" +\n+ \"write(m, '\"+getRoot()+\"m.csv', format='csv')\\n\" +\n+ \"write(m, '\"+getRoot()+\"m.binary', format='binary')\\n\";\nscript = conn.prepareScript(scriptString, new String[] {}, new String[] {}, false);\nscript.executeScript();\n@@ -180,12 +186,11 @@ public class BuildLiteExecution {\npublic static void jmlcReadMatrix() throws Exception {\nConnection conn = getConfiguredConnection();\n- PreparedScript script = conn.prepareScript(\"x=read('\"+ROOT+\"x.csv',format='csv');y=x*2;print(toString(y));\",\n+ PreparedScript script = conn.prepareScript(\"x=read('\"+getRoot()+\"x.csv',format='csv');y=x*2;print(toString(y));\",\nnew String[] {}, new String[] {}, false);\nscript.executeScript();\n- /* @formatter:off */\n- String scriptString = \"m = read('\"+ROOT+\"m.csv',format='csv')\\n\" +\n+ String scriptString = \"m = read('\"+getRoot()+\"m.csv',format='csv')\\n\" +\n\"print(toString(m))\\n\" +\n\"print('min:' + min(m))\\n\" +\n\"print('max:' + max(m))\\n\" +\n@@ -199,17 +204,6 @@ public class BuildLiteExecution {\n\" print('col ' + i + ' sum:' + as.scalar(mColSums[1,i]))\\n\" +\n\"}\\n\";\n- // note: the following can be set to work using the following setting\n- // in the Connection class: cconf.set(ConfigType.IGNORE_READ_WRITE_METADATA, false);\n-\n- // \"m2=read('\"+ROOT+\"m.txt', format='text')\\n\" +\n- // \"m3=read('\"+ROOT+\"m.mm', format='mm')\\n\" +\n- // \"m4=read('\"+ROOT+\"m.binary', format='binary')\\n\" +\n- // \"print('m2:'+toString(m2))\\n\" +\n- // \"print('m3:'+toString(m3))\\n\" +\n- // \"print('m4:'+toString(m4))\\n\";\n- /* @formatter:on */\n-\nscript = conn.prepareScript(scriptString, new String[] {}, new String[] {}, false);\nscript.executeScript();\n@@ -218,7 +212,6 @@ public class BuildLiteExecution {\npublic static void jmlcBasics() throws Exception {\n- /* @formatter:off */\nString dml =\n\"A = matrix(\\\"1 2 3 4 5 6\\\", rows=3, cols=2)\\n\"+\n\"print(toString(A))\\n\"+\n@@ -274,9 +267,8 @@ public class BuildLiteExecution {\n\"\\n\"+\n\"A = rand(rows=3, cols=2, min=0, max=2) # random 3x2 matrix with values 0 to 2\\n\"+\n\"B = doSomething(A)\\n\"+\n- \"write(A, \\\"A.csv\\\", format=\\\"csv\\\")\\n\"+\n- \"write(B, \\\"B.csv\\\", format=\\\"csv\\\")\\n\";\n- /* @formatter:on */\n+ \"write(A, \\\"\"+getRoot()+\"A.csv\\\", format=\\\"csv\\\")\\n\"+\n+ \"write(B, \\\"\"+getRoot()+\"B.csv\\\", format=\\\"csv\\\")\\n\";\nConnection conn = getConfiguredConnection();\nPreparedScript script = conn.prepareScript(dml, new String[] {}, new String[] {}, false);\n@@ -705,5 +697,4 @@ public class BuildLiteExecution {\nConfigType.PARALLEL_CP_MATRIX_OPERATIONS);\nreturn conn;\n}\n-\n}\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/BuildLiteJarTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/jmlc/BuildLiteJarTest.java", "diff": "@@ -27,7 +27,7 @@ public class BuildLiteJarTest extends AutomatedTestBase\n{\n@Override\npublic void setUp() {\n- //do nothing\n+ BuildLiteExecution.setRootPrefix(baseDirectory);\n}\n@Test\n@@ -80,4 +80,9 @@ public class BuildLiteJarTest extends AutomatedTestBase\npublic void testJMLCTests() throws Exception 
{\nBuildLiteExecution.jmlcTests();\n}\n+\n+ @Override\n+ public void tearDown() {\n+ BuildLiteExecution.setRootPrefix(null);\n+ }\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Cleanup jmlc build lite jar tests, incl remove temp files
49,738
18.01.2018 13:02:57
28,800
6bf3e78364942c305018a526e80fd3dad441bd86
Minor simplifications of GLM, Kmeans, Linreg, MLogreg This patch makes some minor script-level simplifications using the new ifelse function to ease understanding and increase the potential for rewrites and inlining (by avoiding unnecessary DAG cuts).
[ { "change_type": "MODIFY", "old_path": "scripts/algorithms/GLM.dml", "new_path": "scripts/algorithms/GLM.dml", "diff": "@@ -1062,14 +1062,8 @@ get_trust_boundary_point =\nf_extra = 0.5 * sum (z * (r + g));\nf_change_1 = f_extra + (0.5 * tau_1 * pq + zq + gp) * tau_1;\nf_change_2 = f_extra + (0.5 * tau_2 * pq + zq + gp) * tau_2;\n- if (f_change_1 < f_change_2) {\n- new_z = z + (tau_1 * p);\n- f_change = f_change_1;\n- }\n- else {\n- new_z = z + (tau_2 * p);\n- f_change = f_change_2;\n- }\n+ new_z = z + ifelse(f_change_1<f_change_2, tau_1, tau_2) * p;\n+ f_change = min(f_change_1, f_change_2);\n}\n" }, { "change_type": "MODIFY", "old_path": "scripts/algorithms/Kmeans.dml", "new_path": "scripts/algorithms/Kmeans.dml", "diff": "@@ -118,11 +118,8 @@ for (i in 1 : num_centroids)\n# Update min_distances to preserve the loop invariant:\ndistances = X_samples_sq_norms + samples_vs_runs_map %*% rowSums (centroids ^ 2)\n- 2 * rowSums (X_samples * (samples_vs_runs_map %*% centroids));\n- if (i == 1) {\n- min_distances = is_row_in_samples * distances;\n- } else {\n- min_distances = min (min_distances, distances);\n-} }\n+ min_distances = ifelse(i==1, is_row_in_samples*distances, min(min_distances,distances));\n+}\n# STEP 2: PERFORM K-MEANS ITERATIONS FOR ALL RUNS:\n" }, { "change_type": "MODIFY", "old_path": "scripts/algorithms/LinearRegCG.dml", "new_path": "scripts/algorithms/LinearRegCG.dml", "diff": "@@ -224,13 +224,8 @@ ss_res = sum (y_residual ^ 2);\nss_avg_res = ss_res - n * avg_res ^ 2;\nR2 = 1 - ss_res / ss_avg_tot;\n-if (n > m_ext) {\n- dispersion = ss_res / (n - m_ext);\n- adjusted_R2 = 1 - dispersion / (ss_avg_tot / (n - 1));\n-} else {\n- dispersion = 0.0 / 0.0;\n- adjusted_R2 = 0.0 / 0.0;\n-}\n+dispersion = ifelse(n > m_ext, ss_res / (n - m_ext), 0.0/0.0);\n+adjusted_R2 = ifelse(n > m_ext, 1 - dispersion / (ss_avg_tot / (n - 1)), 0.0/0.0);\nR2_nobias = 1 - ss_avg_res / ss_avg_tot;\ndeg_freedom = n - m - 1;\n@@ -244,11 +239,7 @@ if (deg_freedom > 0) {\n}\nR2_vs_0 = 1 - ss_res / ss_tot;\n-if (n > m) {\n- adjusted_R2_vs_0 = 1 - (ss_res / (n - m)) / (ss_tot / n);\n-} else {\n- adjusted_R2_vs_0 = 0.0 / 0.0;\n-}\n+adjusted_R2_vs_0 = ifelse(n > m, 1 - (ss_res / (n - m)) / (ss_tot / n), 0.0/0.0);\nstr = \"AVG_TOT_Y,\" + avg_tot; # Average of the response value Y\nstr = append (str, \"STDEV_TOT_Y,\" + sqrt (var_tot)); # Standard Deviation of the response value Y\n" }, { "change_type": "MODIFY", "old_path": "scripts/algorithms/LinearRegDS.dml", "new_path": "scripts/algorithms/LinearRegDS.dml", "diff": "@@ -166,13 +166,8 @@ ss_res = sum (y_residual ^ 2);\nss_avg_res = ss_res - n * avg_res ^ 2;\nR2 = 1 - ss_res / ss_avg_tot;\n-if (n > m_ext) {\n- dispersion = ss_res / (n - m_ext);\n- adjusted_R2 = 1 - dispersion / (ss_avg_tot / (n - 1));\n-} else {\n- dispersion = 0.0 / 0.0;\n- adjusted_R2 = 0.0 / 0.0;\n-}\n+dispersion = ifelse(n > m_ext, ss_res / (n - m_ext), 0.0/0.0);\n+adjusted_R2 = ifelse(n > m_ext, 1 - dispersion / (ss_avg_tot / (n - 1)), 0.0/0.0);\nR2_nobias = 1 - ss_avg_res / ss_avg_tot;\ndeg_freedom = n - m - 1;\n@@ -186,11 +181,7 @@ if (deg_freedom > 0) {\n}\nR2_vs_0 = 1 - ss_res / ss_tot;\n-if (n > m) {\n- adjusted_R2_vs_0 = 1 - (ss_res / (n - m)) / (ss_tot / n);\n-} else {\n- adjusted_R2_vs_0 = 0.0 / 0.0;\n-}\n+adjusted_R2_vs_0 = ifelse(n > m, 1 - (ss_res / (n - m)) / (ss_tot / n), 0.0/0.0);\nstr = \"AVG_TOT_Y,\" + avg_tot; # Average of the response value Y\nstr = append (str, \"STDEV_TOT_Y,\" + sqrt (var_tot)); # Standard Deviation of the response value Y\n" }, { "change_type": "MODIFY", 
"old_path": "scripts/algorithms/MultiLogReg.dml", "new_path": "scripts/algorithms/MultiLogReg.dml", "diff": "@@ -287,29 +287,16 @@ while (! converge)\n}\nalpha2 = obj_new - obj - gs;\n- if (alpha2 <= 0) {\n- alpha = sigma3;\n- }\n- else {\n- alpha = max (sigma1, -0.5 * gs / alpha2);\n- }\n+ alpha = ifelse(alpha2 <= 0, sigma3, max(sigma1, -0.5 * gs / alpha2));\n- if (rho < eta0) {\n+ if (rho < eta0)\ndelta = min (max (alpha, sigma1) * snorm, sigma2 * delta);\n- }\n- else {\n- if (rho < eta1) {\n+ else if (rho < eta1)\ndelta = max (sigma1 * delta, min (alpha * snorm, sigma2 * delta));\n- }\n- else {\n- if (rho < eta2) {\n+ else if (rho < eta2)\ndelta = max (sigma1 * delta, min (alpha * snorm, sigma3 * delta));\n- }\n- else {\n+ else\ndelta = max (delta, min (alpha * snorm, sigma3 * delta));\n- }\n- }\n- }\nif (is_trust_boundary_reached == 1)\n{\n" }, { "change_type": "MODIFY", "old_path": "scripts/nn/layers/dropout.dml", "new_path": "scripts/nn/layers/dropout.dml", "diff": "@@ -46,11 +46,9 @@ forward = function(matrix[double] X, double p, int seed)\n# `mask = rand(rows=nrow(X), cols=ncol(X), min=0, max=1, seed=seed) <= p`\n# to create a dropout mask. Fortunately, SystemML has a `sparsity` parameter on\n# the `rand` function that allows use to create a mask directly.\n- if (seed == -1) {\n- mask = rand(rows=nrow(X), cols=ncol(X), min=1, max=1, sparsity=p)\n- } else {\n- mask = rand(rows=nrow(X), cols=ncol(X), min=1, max=1, sparsity=p, seed=seed)\n- }\n+ mask = ifelse(seed == -1,\n+ rand(rows=nrow(X), cols=ncol(X), min=1, max=1, sparsity=p),\n+ rand(rows=nrow(X), cols=ncol(X), min=1, max=1, sparsity=p, seed=seed));\nout = X * mask / p\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java", "diff": "@@ -2788,17 +2788,6 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n{\n//TODO perf for special cases like ifelse\n- final int m = Math.max(Math.max(rlen, m2.rlen), m3.rlen);\n- final int n = Math.max(Math.max(clen, m2.clen), m3.clen);\n-\n- //error handling\n- if( (rlen != 1 && rlen != m) || (clen != 1 && clen != n)\n- || (m2.rlen != 1 && m2.rlen != m) || (m2.clen != 1 && m2.clen != n)\n- || (m3.rlen != 1 && m3.rlen != m) || (m3.clen != 1 && m3.clen != n) ) {\n- throw new DMLRuntimeException(\"Block sizes are not matched for ternary cell operations: \"\n- + rlen + \"x\" + clen + \" vs \" + m2.rlen + \"x\" + m2.clen + \" vs \" + m3.rlen + \"x\" + m3.clen);\n- }\n-\n//prepare inputs\nfinal boolean s1 = (rlen==1 && clen==1);\nfinal boolean s2 = (m2.rlen==1 && m2.clen==1);\n@@ -2806,6 +2795,16 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nfinal double d1 = s1 ? quickGetValue(0, 0) : Double.NaN;\nfinal double d2 = s2 ? m2.quickGetValue(0, 0) : Double.NaN;\nfinal double d3 = s3 ? m3.quickGetValue(0, 0) : Double.NaN;\n+ final int m = Math.max(Math.max(rlen, m2.rlen), m3.rlen);\n+ final int n = Math.max(Math.max(clen, m2.clen), m3.clen);\n+\n+ //error handling\n+ if( (!s1 && (rlen != m || clen != n))\n+ || (!s2 && (m2.rlen != m || m2.clen != n))\n+ || (!s3 && (m3.rlen != m || m3.clen != n)) ) {\n+ throw new DMLRuntimeException(\"Block sizes are not matched for ternary cell operations: \"\n+ + rlen + \"x\" + clen + \" vs \" + m2.rlen + \"x\" + m2.clen + \" vs \" + m3.rlen + \"x\" + m3.clen);\n+ }\n//prepare result\nret.reset(m, n, false);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2050] Minor simplifications of GLM, Kmeans, Linreg, MLogreg This patch makes some minor script-level simplifications using the new ifelse function to ease understanding and increase the potential for rewrites and inlining (by avoiding unnecessary DAG cuts).
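For intuition, a small sketch of the elementwise `ifelse` semantics these scripts now rely on, assuming flattened double arrays; the `ifelse` helper here is hypothetical and only models the 1x1-scalar broadcasting that the revised `MatrixBlock` dimension check permits:

```java
public class IfElseSketch {
    // out[i] = cond[i] != 0 ? a[i] : b[i], where any operand of length 1
    // broadcasts against the full result length.
    static double[] ifelse(double[] cond, double[] a, double[] b) {
        int n = Math.max(cond.length, Math.max(a.length, b.length));
        double[] out = new double[n];
        for (int i = 0; i < n; i++) {
            double c = cond[cond.length == 1 ? 0 : i];
            out[i] = (c != 0) ? a[a.length == 1 ? 0 : i]
                              : b[b.length == 1 ? 0 : i];
        }
        return out;
    }

    public static void main(String[] args) {
        double[] x = {-1, 0, 2};
        double[] gt0 = {0, 0, 1};                        // X > 0
        double[] relu = ifelse(gt0, x, new double[]{0}); // scalar 0 broadcasts
        System.out.println(java.util.Arrays.toString(relu)); // [0.0, 0.0, 2.0]
    }
}
```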
49,736
19.01.2018 11:29:15
28,800
269aef35f12273b6a82a7a0c24fc63c0a9c9e071
[MINOR] Limit the size of intermediate im2col matrix to MAX_WORKSPACE_LIMIT_BYTES
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java", "diff": "@@ -207,7 +207,8 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {\nif(NCHW < maxNumElementsOfCuDNNTensor && NKPQ < maxNumElementsOfCuDNNTensor && KCRS < maxNumElementsOfCuDNNTensor) {\nif(isSparseFilter &&\n- (OptimizerUtils.estimateSizeExactSparsity(CRS, NPQ, 1.0) + OptimizerUtils.estimateSizeExactSparsity(K, NPQ, 1.0)) < intermediateMemoryBudget) {\n+ (OptimizerUtils.estimateSizeExactSparsity(CRS, NPQ, 1.0) + OptimizerUtils.estimateSizeExactSparsity(K, NPQ, 1.0)) <\n+ Math.min(LibMatrixCuDNNConvolutionAlgorithm.MAX_WORKSPACE_LIMIT_BYTES, intermediateMemoryBudget)) {\n// Sparse filter conv2d\n// Perform dense im2col\nPointer im2colPointer = denseIm2col(gCtx, instName, image, isSparseImage,\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNNConvolutionAlgorithm.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNNConvolutionAlgorithm.java", "diff": "@@ -57,7 +57,7 @@ import static jcuda.jcudnn.cudnnTensorFormat.CUDNN_TENSOR_NCHW;\n*/\npublic class LibMatrixCuDNNConvolutionAlgorithm implements java.lang.AutoCloseable {\n// Limit the workspace available to cudnn convolution operation to 1 GB\n- private static long MAX_WORKSPACE_LIMIT_BYTES = (long) 1e+9;\n+ static long MAX_WORKSPACE_LIMIT_BYTES = (long) 1e+9;\npublic int algo = -1;\npublic Pointer workSpace = new Pointer();\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Limit the size of intermediate im2col matrix to MAX_WORKSPACE_LIMIT_BYTES
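A sketch of the budgeting decision in the patch above: the sparse-filter conv2d path with a dense im2col intermediate is only taken when the estimated sizes fit under both the 1 GB cuDNN workspace cap and the user-configured intermediate memory budget. The method name and sizes below are illustrative, not the actual `LibMatrixCuDNN` API:

```java
public class WorkspaceBudgetSketch {
    static final long MAX_WORKSPACE_LIMIT_BYTES = (long) 1e9; // 1 GB cap

    // Effective limit is the tighter of the workspace cap and the user budget.
    static boolean fitsSparseFilterPath(long estIm2colBytes, long estOutBytes,
                                        long intermediateMemoryBudget) {
        long limit = Math.min(MAX_WORKSPACE_LIMIT_BYTES, intermediateMemoryBudget);
        return estIm2colBytes + estOutBytes < limit;
    }

    public static void main(String[] args) {
        System.out.println(fitsSparseFilterPath(
            600_000_000L, 300_000_000L, 2_000_000_000L)); // true
        System.out.println(fitsSparseFilterPath(
            900_000_000L, 300_000_000L, 2_000_000_000L)); // false: hits 1 GB cap
    }
}
```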
49,736
19.01.2018 13:17:56
28,800
f702c03be29407b11f049d34eb272f47a8395b2d
Bugfix in GPU axpy instruction due to recent ternary changes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/MatrixMatrixAxpyGPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/MatrixMatrixAxpyGPUInstruction.java", "diff": "@@ -62,7 +62,7 @@ public class MatrixMatrixAxpyGPUInstruction extends ArithmeticBinaryGPUInstructi\nOperator operator = (dt1 != dt2) ?\nInstructionUtils.parseScalarBinaryOperator(opcode, (dt1 == DataType.SCALAR)) :\n- InstructionUtils.parseBinaryOperator(opcode);\n+ InstructionUtils.parseTernaryOperator(opcode);\nif(dt1 == DataType.MATRIX && dt2 == DataType.MATRIX && dt3 == DataType.MATRIX) {\nreturn new MatrixMatrixAxpyGPUInstruction(operator, in1, constant, multiplier, in2, out, opcode, str);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-540] Bugfix in GPU axpy instruction due to recent ternary changes
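For context on the axpy fix above: at the script level, the fused axpy instruction covers elementwise A + s*B patterns, and with three operands (matrix, scalar, matrix) it must be parsed as a ternary operator rather than a binary one, which is exactly what the one-line change restores. A minimal DML sketch of the triggering pattern, assuming the usual +*/-* fusion applies; sizes and seeds are hypothetical:

A = rand(rows=4, cols=4, seed=7);
B = rand(rows=4, cols=4, seed=11);
s = 2.5;

C1 = A + s * B;  # candidate for the fused +* (axpy) instruction
C2 = A - s * B;  # candidate for the fused -* instruction
print(sum(C1) + " " + sum(C2));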
49,736
19.01.2018 16:14:51
28,800
b9b273d87acd2643962307692c156783f7ce7543
Added support for GPU relu, scalar min and max operations
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java", "new_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java", "diff": "@@ -161,7 +161,7 @@ public class BinaryOp extends Hop\nOpOp2 [] supportedOps = { OpOp2.MULT, OpOp2.PLUS, OpOp2.MINUS, OpOp2.DIV, OpOp2.POW, OpOp2.MINUS1_MULT,\nOpOp2.MODULUS, OpOp2.INTDIV, OpOp2.LESS, OpOp2.LESSEQUAL, OpOp2.EQUAL, OpOp2.NOTEQUAL, OpOp2.GREATER, OpOp2.GREATEREQUAL};\n- if(isMatrixScalar && op == OpOp2.MINUS_NZ) {\n+ if(isMatrixScalar && (op == OpOp2.MINUS_NZ || op == OpOp2.MIN || op == OpOp2.MAX)) {\n// Only supported for matrix scalar:\nreturn true;\n}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java", "diff": "@@ -103,6 +103,8 @@ public class GPUInstructionParser extends InstructionParser\n// Binary Builtin functions\nString2GPUInstructionType.put( \"solve\", GPUINSTRUCTION_TYPE.BuiltinBinary);\n+ String2GPUInstructionType.put( \"min\", GPUINSTRUCTION_TYPE.BuiltinBinary);\n+ String2GPUInstructionType.put( \"max\", GPUINSTRUCTION_TYPE.BuiltinBinary);\n// Aggregate Unary\nString2GPUInstructionType.put( \"ua+\" , GPUINSTRUCTION_TYPE.AggregateUnary); // Sum\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/BuiltinBinaryGPUInstruction.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/BuiltinBinaryGPUInstruction.java", "diff": "@@ -65,13 +65,19 @@ public abstract class BuiltinBinaryGPUInstruction extends GPUInstruction {\n// Determine appropriate Function Object based on opcode\nValueFunction func = Builtin.getBuiltinFnObject(opcode);\n- // Only for \"solve\"\n+ boolean isMatrixMatrix = in1.getDataType() == Expression.DataType.MATRIX && in2.getDataType() == Expression.DataType.MATRIX;\n+ boolean isMatrixScalar = (in1.getDataType() == Expression.DataType.MATRIX && in2.getDataType() == Expression.DataType.SCALAR) ||\n+ (in1.getDataType() == Expression.DataType.SCALAR && in2.getDataType() == Expression.DataType.MATRIX);\n+\nif ( in1.getDataType() == Expression.DataType.SCALAR && in2.getDataType() == Expression.DataType.SCALAR )\nthrow new DMLRuntimeException(\"GPU : Unsupported GPU builtin operations on 2 scalars\");\n- else if ( in1.getDataType() == Expression.DataType.MATRIX && in2.getDataType() == Expression.DataType.MATRIX )\n+ else if ( isMatrixMatrix && opcode.equals(\"solve\") )\nreturn new MatrixMatrixBuiltinGPUInstruction(new BinaryOperator(func), in1, in2, out, opcode, str, 2);\n+ else if ( isMatrixScalar && (opcode.equals(\"min\") || opcode.equals(\"max\")) )\n+ return new ScalarMatrixBuiltinGPUInstruction(new BinaryOperator(func), in1, in2, out, opcode, str, 2);\n+\nelse\n- throw new DMLRuntimeException(\"GPU : Unsupported GPU builtin operations on a matrix and a scalar\");\n+ throw new DMLRuntimeException(\"GPU : Unsupported GPU builtin operations on a matrix and a scalar:\" + opcode);\n}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/ScalarMatrixBuiltinGPUInstruction.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. 
The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.runtime.instructions.gpu;\n+\n+import org.apache.sysml.parser.Expression.DataType;\n+import org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\n+import org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\n+import org.apache.sysml.runtime.instructions.InstructionUtils;\n+import org.apache.sysml.runtime.instructions.cp.CPOperand;\n+import org.apache.sysml.runtime.instructions.cp.ScalarObject;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixCUDA;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixCuDNN;\n+import org.apache.sysml.runtime.matrix.operators.Operator;\n+import org.apache.sysml.utils.GPUStatistics;\n+\n+public class ScalarMatrixBuiltinGPUInstruction extends BuiltinBinaryGPUInstruction {\n+\n+ protected ScalarMatrixBuiltinGPUInstruction(Operator op, CPOperand input1, CPOperand input2, CPOperand output,\n+ String opcode, String istr, int _arity) {\n+ super(op, input1, input2, output, opcode, istr, _arity);\n+ _gputype = GPUINSTRUCTION_TYPE.BuiltinUnary;\n+ }\n+\n+ @Override\n+ public void processInstruction(ExecutionContext ec) throws DMLRuntimeException {\n+ GPUStatistics.incrementNoOfExecutedGPUInst();\n+\n+ String opcode = getOpcode();\n+ CPOperand mat = ( input1.getDataType() == DataType.MATRIX ) ? input1 : input2;\n+ CPOperand scalar = ( input1.getDataType() == DataType.MATRIX ) ? 
input2 : input1;\n+ MatrixObject in1 = getMatrixInputForGPUInstruction(ec, mat.getName());\n+ ScalarObject constant = (ScalarObject) ec.getScalarInput(scalar.getName(), scalar.getValueType(), scalar.isLiteral());\n+\n+ if(opcode.equals(\"max\")) {\n+ ec.setMetaData(output.getName(), in1.getNumRows(), in1.getNumColumns());\n+ double constVal = constant.getDoubleValue();\n+ if(constVal == 0)\n+ LibMatrixCuDNN.relu(ec, ec.getGPUContext(0), getExtendedOpcode(), in1, output.getName());\n+ else\n+ LibMatrixCUDA.matrixScalarOp(ec, ec.getGPUContext(0), getExtendedOpcode(), in1, output.getName(), false,\n+ InstructionUtils.parseScalarBinaryOperator(opcode, false, constVal));\n+ } else if(opcode.equals(\"min\")) {\n+ ec.setMetaData(output.getName(), in1.getNumRows(), in1.getNumColumns());\n+ double constVal = constant.getDoubleValue();\n+ LibMatrixCUDA.matrixScalarOp(ec, ec.getGPUContext(0), getExtendedOpcode(), in1, output.getName(), false,\n+ InstructionUtils.parseScalarBinaryOperator(opcode, false, constVal));\n+ } else {\n+ throw new DMLRuntimeException(\"Unsupported GPU operator:\" + opcode);\n+ }\n+ ec.releaseMatrixInputForGPUInstruction(mat.getName());\n+ ec.releaseMatrixOutputForGPUInstruction(output.getName());\n+ }\n+\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java", "diff": "@@ -61,6 +61,7 @@ import org.apache.sysml.runtime.functionobjects.ReduceCol;\nimport org.apache.sysml.runtime.functionobjects.ReduceDiag;\nimport org.apache.sysml.runtime.functionobjects.ReduceRow;\nimport org.apache.sysml.runtime.functionobjects.ValueFunction;\n+import org.apache.sysml.runtime.functionobjects.Builtin.BuiltinCode;\nimport org.apache.sysml.runtime.instructions.cp.DoubleObject;\nimport org.apache.sysml.runtime.instructions.gpu.GPUInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.context.CSRPointer;\n@@ -1310,7 +1311,7 @@ public class LibMatrixCUDA {\n* @param op operator\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\n- private static void matrixScalarOp(ExecutionContext ec, GPUContext gCtx, String instName, MatrixObject in, String outputName, boolean isInputTransposed,\n+ public static void matrixScalarOp(ExecutionContext ec, GPUContext gCtx, String instName, MatrixObject in, String outputName, boolean isInputTransposed,\nScalarOperator op) throws DMLRuntimeException {\nif (ec.getGPUContext(0) != gCtx)\nthrow new DMLRuntimeException(\"GPU : Invalid internal state, the GPUContext set with the ExecutionContext is not the same used to run this LibMatrixCUDA function\");\n@@ -1604,6 +1605,8 @@ public class LibMatrixCUDA {\nelse if(fn instanceof MinusNz) return 16;\nelse if(fn instanceof Modulus) return 17;\nelse if(fn instanceof IntegerDivide) return 18;\n+ else if(fn instanceof Builtin && ((Builtin)fn).getBuiltinCode()==BuiltinCode.MIN) return 11;\n+ else if(fn instanceof Builtin && ((Builtin)fn).getBuiltinCode()==BuiltinCode.MAX) return 12;\nthrow new DMLRuntimeException(\"The given value function is not supported:\" + fn.getClass().getName());\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-540] Added support for GPU relu, scalar min and max operations
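The commit above routes scalar min/max through generic GPU kernels and special-cases max with constant 0 as relu (see the constVal == 0 branch in ScalarMatrixBuiltinGPUInstruction). A small DML sketch of the three script-level cases; sizes and seeds are hypothetical:

X = rand(rows=4, cols=4, min=-1, max=1, seed=3);

R = max(X, 0);    # scalar max with constant 0, i.e., relu -> cuDNN relu path
U = min(X, 0.5);  # generic scalar min kernel
L = max(X, 0.1);  # generic scalar max kernel (nonzero constant)
print(sum(R) + " " + sum(U) + " " + sum(L));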
49,738
19.01.2018 20:07:44
28,800
d5fd4230e38ffda0186a8af9a8812e124181f5b9
New codegen algorithm tests (linreg/logreg datagen) This patch adds the data generation scripts for linear and logistic regression to the codegen algorithm test suite. It also includes some minor cleanups of the datagen DML scripts because they also serve as examples.
[ { "change_type": "MODIFY", "old_path": "scripts/datagen/genRandData4LinearRegression.dml", "new_path": "scripts/datagen/genRandData4LinearRegression.dml", "diff": "@@ -49,7 +49,7 @@ Y = X %*% w\nif( b != 0 ) {\nb_mat = Rand(rows=1, cols=1, min=b, max=b, pdf=\"uniform\")\n- w = t(cbind(t(w), b_mat))\n+ w = rbind(w, t(b_mat))\nY = Y + b\n}\n" }, { "change_type": "MODIFY", "old_path": "scripts/datagen/genRandData4LogisticRegression.dml", "new_path": "scripts/datagen/genRandData4LogisticRegression.dml", "diff": "@@ -50,22 +50,22 @@ w = w * maxWeight\not = X %*% w\nif( b != 0) {\nb_mat = Rand(rows=1, cols=1, min=b, max=b, pdf=\"uniform\")\n- w = t(cbind(t(w), b_mat))\n+ w = rbind(w, t(b_mat))\not = ot + b\n}\nprob = 1 / (1 + exp(-ot))\nif( addNoise == 1 ){\nr = Rand(rows=numSamples, cols=1, min=0, max=1, pdf=\"uniform\", seed=0)\n-}else{\n+}\n+else {\nprint(\"this data generator generates the same dataset for both noise=0 and noise=1\")\nr = Rand(rows=numSamples, cols=1, min=0, max=1, pdf=\"uniform\", seed=0)\n- #r = Rand(rows=numSamples, cols=1, min=0.5, max=0.5, pdf=\"uniform\")\n}\n+\nY = 1 - 2 * (prob < r)\n-if( $12 == 1 ) {\n- Y = (Y+3)/2;\n-}\n+if( $12 == 1 )\n+ Y = (Y + 3) / 2\nwrite(w, $5, format=$11)\nwrite(X, $6, format=$11)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegenalg/AlgorithmDatagen.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.codegenalg;\n+\n+import java.io.File;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.lops.LopProperties.ExecType;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+public class AlgorithmDatagen extends AutomatedTestBase\n+{\n+ private final static String TEST_NAME1 = \"Algorithm_Datagen\";\n+ private final static String TEST_DIR = \"functions/codegenalg/\";\n+ private final static String TEST_CLASS_DIR = TEST_DIR + AlgorithmDatagen.class.getSimpleName() + \"/\";\n+ private final static String TEST_CONF = \"SystemML-config-codegen.xml\";\n+ private final static File TEST_CONF_FILE = new File(SCRIPT_DIR + TEST_DIR, TEST_CONF);\n+\n+ private final static int rows = 2468;\n+ private final static int cols = 200;\n+\n+ private final static double sparsity1 = 0.9; //dense\n+ private final static double sparsity2 = 0.1; //sparse\n+\n+ public enum DatagenType {\n+ LINREG,\n+ LOGREG,\n+ }\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration(TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"X\",\"Y\",\"w\" }));\n+ }\n+\n+ @Test\n+ public void testDatagenLinregDenseRewritesCP() {\n+ runStepwiseTest(DatagenType.LINREG, false, true, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testDatagenLinregSparseRewritesCP() {\n+ runStepwiseTest(DatagenType.LINREG, true, true, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testDatagenLinregDenseNoRewritesCP() {\n+ runStepwiseTest(DatagenType.LINREG, false, false, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testDatagenLinregSparseNoRewritesCP() {\n+ runStepwiseTest(DatagenType.LINREG, true, false, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testDatagenLogregDenseRewritesCP() {\n+ runStepwiseTest(DatagenType.LOGREG, false, true, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testDatagenLogregSparseRewritesCP() {\n+ runStepwiseTest(DatagenType.LOGREG, true, true, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testDatagenLogregDenseNoRewritesCP() {\n+ runStepwiseTest(DatagenType.LOGREG, false, false, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testDatagenLogregSparseNoRewritesCP() {\n+ runStepwiseTest(DatagenType.LOGREG, true, false, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testDatagenLinregDenseRewritesSP() {\n+ runStepwiseTest(DatagenType.LINREG, false, true, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testDatagenLinregSparseRewritesSP() {\n+ runStepwiseTest(DatagenType.LINREG, true, true, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testDatagenLinregDenseNoRewritesSP() {\n+ runStepwiseTest(DatagenType.LINREG, false, false, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testDatagenLinregSparseNoRewritesSP() {\n+ runStepwiseTest(DatagenType.LINREG, true, false, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testDatagenLogregDenseRewritesSP() {\n+ runStepwiseTest(DatagenType.LOGREG, false, true, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testDatagenLogregSparseRewritesSP() {\n+ runStepwiseTest(DatagenType.LOGREG, true, true, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void 
testDatagenLogregDenseNoRewritesSP() {\n+ runStepwiseTest(DatagenType.LOGREG, false, false, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testDatagenLogregSparseNoRewritesSP() {\n+ runStepwiseTest(DatagenType.LOGREG, true, false, ExecType.SPARK);\n+ }\n+\n+ private void runStepwiseTest( DatagenType type, boolean sparse, boolean rewrites, ExecType instType)\n+ {\n+ boolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n+ RUNTIME_PLATFORM platformOld = rtplatform;\n+ switch( instType ){\n+ case SPARK: rtplatform = RUNTIME_PLATFORM.SPARK; break;\n+ default: rtplatform = RUNTIME_PLATFORM.HYBRID_SPARK; break;\n+ }\n+\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK || rtplatform == RUNTIME_PLATFORM.HYBRID_SPARK )\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n+\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = rewrites;\n+\n+ try\n+ {\n+ String TEST_NAME = TEST_NAME1;\n+ TestConfiguration config = getTestConfiguration(TEST_NAME);\n+ loadTestConfiguration(config);\n+\n+ double sparsity = sparse ? sparsity2 : sparsity1;\n+\n+ if( type == DatagenType.LINREG) {\n+ fullDMLScriptName = \"scripts/datagen/genRandData4LinearRegression.dml\";\n+ programArgs = new String[]{ \"-explain\", \"-stats\", \"-args\",\n+ String.valueOf(rows), String.valueOf(cols), \"10\", \"1\", output(\"w\"),\n+ output(\"X\"), output(\"y\"), \"1\", \"1\", String.valueOf(sparsity), \"binary\"};\n+ }\n+ else { //LOGREG\n+ fullDMLScriptName = \"scripts/datagen/genRandData4LogisticRegression.dml\";\n+ programArgs = new String[]{ \"-explain\", \"-stats\", \"-args\",\n+ String.valueOf(rows), String.valueOf(cols), \"10\", \"1\", output(\"w\"),\n+ output(\"X\"), output(\"y\"), \"1\", \"1\", String.valueOf(sparsity), \"binary\", \"1\"};\n+ }\n+\n+ runTest(true, false, null, -1);\n+\n+ Assert.assertTrue(heavyHittersContainsSubString(\"spoof\")\n+ || heavyHittersContainsSubString(\"sp_spoof\"));\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = oldFlag;\n+ OptimizerUtils.ALLOW_AUTO_VECTORIZATION = true;\n+ OptimizerUtils.ALLOW_OPERATOR_FUSION = true;\n+ }\n+ }\n+\n+ /**\n+ * Override default configuration with custom test configuration to ensure\n+ * scratch space and local temporary directory locations are also updated.\n+ */\n+ @Override\n+ protected File getConfigTemplateFile() {\n+ // Instrumentation in this test's output log to show custom configuration file used for template.\n+ System.out.println(\"This test case overrides default configuration with \" + TEST_CONF_FILE.getPath());\n+ return TEST_CONF_FILE;\n+ }\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/codegenalg/ZPackageSuite.java", "new_path": "src/test_suites/java/org/apache/sysml/test/integration/functions/codegenalg/ZPackageSuite.java", "diff": "@@ -28,6 +28,7 @@ import org.junit.runners.Suite;\[email protected]({\nAlgorithmARIMA.class,\nAlgorithmAutoEncoder.class,\n+ AlgorithmDatagen.class,\nAlgorithmGLM.class,\nAlgorithmKMeans.class,\nAlgorithmL2SVM.class,\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2070] New codegen algorithm tests (linreg/logreg datagen) This patch adds the data generation scripts for linear and logistic regression to the codegen algorithm test suite. It also includes some minor cleanups of the datagen DML scripts because they also serve as examples.
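One of the datagen cleanups above replaces a transpose-cbind-transpose chain with a direct rbind when appending the intercept term. A minimal DML sketch checking the equivalence; sizes and seeds are hypothetical:

w = rand(rows=5, cols=1, seed=2);
b_mat = rand(rows=1, cols=1, seed=3);

w1 = t(cbind(t(w), b_mat));  # old form: append the bias via double transpose
w2 = rbind(w, t(b_mat));     # new form: append the bias row directly
print(sum(w1 != w2));        # 0, i.e., both forms are equivalent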
49,766
21.01.2018 19:27:49
28,800
f1c25c93f610a0ed68a70cc1648820d9298bb018
[MINOR] Fix license header formatting
[ { "change_type": "MODIFY", "old_path": "pom.xml", "new_path": "pom.xml", "diff": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n-<!-- * Licensed to the Apache Software Foundation (ASF) under one * or more\n- contributor license agreements. See the NOTICE file * distributed with this\n- work for additional information * regarding copyright ownership. The ASF\n- licenses this file * to you under the Apache License, Version 2.0 (the *\n- \"License\"); you may not use this file except in compliance * with the License.\n- You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0\n- * * Unless required by applicable law or agreed to in writing, * software\n- distributed under the License is distributed on an * \"AS IS\" BASIS, WITHOUT\n- WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the\n- License for the * specific language governing permissions and limitations\n- * under the License. -->\n+<!--\n+ ~ Licensed to the Apache Software Foundation (ASF) under one or more\n+ ~ contributor license agreements. See the NOTICE file distributed with\n+ ~ this work for additional information regarding copyright ownership.\n+ ~ The ASF licenses this file to You under the Apache License, Version 2.0\n+ ~ (the \"License\"); you may not use this file except in compliance with\n+ ~ the License. You may obtain a copy of the License at\n+ ~\n+ ~ http://www.apache.org/licenses/LICENSE-2.0\n+ ~\n+ ~ Unless required by applicable law or agreed to in writing, software\n+ ~ distributed under the License is distributed on an \"AS IS\" BASIS,\n+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+ ~ See the License for the specific language governing permissions and\n+ ~ limitations under the License.\n+ -->\n<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n<modelVersion>4.0.0</modelVersion>\n<parent>\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Fix license header formatting
49,736
22.01.2018 15:05:16
28,800
47973a9055976ae9a8a7b294e1868cadfedc50cc
Bugfix for fused ReLU-maxpooling and ReLU-maxpooling backward operators
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java", "new_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java", "diff": "@@ -164,10 +164,24 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\n}\n}\n- private static boolean isInputReLU(Hop input) {\n- return HopRewriteUtils.isBinary(input, OpOp2.MAX)\n- && (HopRewriteUtils.isLiteralOfValue(input.getInput().get(0), 0)\n- || HopRewriteUtils.isLiteralOfValue(input.getInput().get(1), 0));\n+ /**\n+ * Returns parent matrix X or null\n+ * @param input input hop\n+ * @return either null or X if input is max(X,0) or max(0,X)\n+ */\n+ private static Hop isInputReLU(Hop input) {\n+ if(HopRewriteUtils.isBinary(input, OpOp2.MAX)) {\n+ if(HopRewriteUtils.isLiteralOfValue(input.getInput().get(0), 0)) {\n+ return input.getInput().get(1);\n+ }\n+ else if(HopRewriteUtils.isLiteralOfValue(input.getInput().get(1), 0)) {\n+ return input.getInput().get(0);\n+ }\n+ else\n+ return null;\n+ }\n+ else\n+ return null;\n}\nprivate static boolean isInputConv2d(Hop input) {\n@@ -228,12 +242,13 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\n// RELU_MAX_POOLING and RELU_MAX_POOLING_BACKWARD is extremely useful for CP backend\n// by reducing unnecessary sparse-to-dense-to-sparse conversion.\n// For other backends, this operators is not necessary as it reduces an additional relu operator.\n- if(OptimizerUtils.ALLOW_OPERATOR_FUSION && et == ExecType.CP && op == ConvOp.MAX_POOLING && isInputReLU(inputs.get(0))) {\n- lhsInputLop = inputs.get(0).getInput().get(0).constructLops();\n+ Hop parentReLU = isInputReLU(inputs.get(0));\n+ if(OptimizerUtils.ALLOW_OPERATOR_FUSION && et == ExecType.CP && op == ConvOp.MAX_POOLING && parentReLU != null) {\n+ lhsInputLop = parentReLU.constructLops();\nlopOp = OperationTypes.RELU_MAX_POOLING;\n}\n- else if(OptimizerUtils.ALLOW_OPERATOR_FUSION && et == ExecType.CP && op == ConvOp.MAX_POOLING_BACKWARD && isInputReLU(inputs.get(0))) {\n- lhsInputLop = inputs.get(0).getInput().get(0).constructLops();\n+ else if(OptimizerUtils.ALLOW_OPERATOR_FUSION && et == ExecType.CP && op == ConvOp.MAX_POOLING_BACKWARD && parentReLU != null) {\n+ lhsInputLop = parentReLU.constructLops();\nlopOp = OperationTypes.RELU_MAX_POOLING_BACKWARD;\n}\nelse if(OptimizerUtils.ALLOW_OPERATOR_FUSION && op == ConvOp.BIAS_ADD && isInputConv2d(inputs.get(0))) {\n@@ -651,11 +666,14 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\n*\n* @throws DMLRuntimeException if error occurs\n*/\n- private void inferCHWPQFromParentOp() throws DMLRuntimeException {Hop tmp = getInput().get(0);\n- while(isInputReLU(tmp) || isInputBiasAdd(tmp)) {\n- // Skip ReLU and bias_add and go to its parent\n- tmp = tmp.getInput().get(0);\n- }\n+ private void inferCHWPQFromParentOp() throws DMLRuntimeException {\n+ Hop tmp = getInput().get(0);\n+ // Skip bias_add and go to its parent\n+ tmp = isInputBiasAdd(tmp) ? tmp.getInput().get(0) : tmp;\n+ Hop parentReLU = isInputReLU(tmp);\n+ // Skip ReLU and go to its parent\n+ tmp = (parentReLU != null) ? parentReLU : tmp;\n+\n// Cast tmp as parent\nConvolutionOp parentOp = (tmp instanceof ConvolutionOp) ? ((ConvolutionOp) tmp) : null;\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-540] Bugfix for fused ReLU-maxpooling and ReLU-maxpooling backward operators
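The fix above concerns matching ReLU inputs for fusion: max(X, 0) and max(0, X) both denote ReLU, but the old code always took the first input of the max, which is the literal 0 in the second form. A minimal DML sketch; the max_pool parameter names follow the nn library scripts and should be treated as an assumption here:

X = rand(rows=2, cols=16, min=-1, max=1, seed=5);  # 2 images: 1 channel, 4x4

# both orderings denote ReLU; with this fix either one can fuse with max pooling
P1 = max_pool(max(X, 0), input_shape=[2,1,4,4], pool_size=[2,2], stride=[2,2], padding=[0,0]);
P2 = max_pool(max(0, X), input_shape=[2,1,4,4], pool_size=[2,2], stride=[2,2], padding=[0,0]);
print(sum(P1 != P2));  # 0, i.e., identical results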
49,738
24.01.2018 18:47:04
28,800
52aae222c688859de18d84fd92ae7171dec5dae0
Fix invalid constant folding of matrix-scalar and/or This patch fixes an issue of invalid constant folding of matrix-scalar logical operations, which occurred due to the recently added support for elementwise logical operations.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteConstantFolding.java", "new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteConstantFolding.java", "diff": "@@ -268,14 +268,14 @@ public class RewriteConstantFolding extends HopRewriteRule\nprivate static boolean isApplicableFalseConjunctivePredicate( Hop hop ) throws HopsException {\nArrayList<Hop> in = hop.getInput();\n- return ( HopRewriteUtils.isBinary(hop, OpOp2.AND)\n+ return ( HopRewriteUtils.isBinary(hop, OpOp2.AND) && hop.getDataType().isScalar()\n&& ( (in.get(0) instanceof LiteralOp && !((LiteralOp)in.get(0)).getBooleanValue())\n||(in.get(1) instanceof LiteralOp && !((LiteralOp)in.get(1)).getBooleanValue())) );\n}\nprivate static boolean isApplicableTrueDisjunctivePredicate( Hop hop ) throws HopsException {\nArrayList<Hop> in = hop.getInput();\n- return ( HopRewriteUtils.isBinary(hop, OpOp2.OR)\n+ return ( HopRewriteUtils.isBinary(hop, OpOp2.OR) && hop.getDataType().isScalar()\n&& ( (in.get(0) instanceof LiteralOp && ((LiteralOp)in.get(0)).getBooleanValue())\n||(in.get(1) instanceof LiteralOp && ((LiteralOp)in.get(1)).getBooleanValue())) );\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2075] Fix invalid constant folding of matrix-scalar and/or This patch fixes an issue of invalid constant folding of matrix-scalar logical operations, which occurred due to the recently added support for elementwise logical operations.
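A worked example of why the added isScalar() guard is needed: the conjunctive/disjunctive folding is only valid for scalar predicates, because a matrix-scalar logical operation yields a matrix, not a boolean scalar. A minimal DML sketch, assuming boolean literals are allowed in elementwise logical ops (as the folded code paths suggest):

M = matrix(1, rows=2, cols=2);

C1 = M & FALSE;  # a 2x2 matrix of zeros, NOT the scalar FALSE
C2 = M | TRUE;   # a 2x2 matrix of ones, NOT the scalar TRUE
print(nrow(C1) + "x" + ncol(C1) + ", sum=" + sum(C2));  # 2x2, sum=4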
49,698
24.01.2018 19:08:48
28,800
240297bd5776458f5580d89e9df31c4d4f670803
New tests for matrix-scalar logical operations Closes
[ { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalMatrixTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/binary/matrix_full_other/FullLogicalMatrixTest.java", "diff": "@@ -35,7 +35,7 @@ import org.apache.sysml.test.utils.TestUtils;\n/**\n* The main purpose of this test is to verify various input combinations for\n- * matrix-matrix logical operations that internally translate to binary operations.\n+ * matrix-scalar logical operations that internally translate to binary operations.\n*\n*/\npublic class FullLogicalMatrixTest extends AutomatedTestBase\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/java/org/apache/sysml/test/integration/functions/binary/scalar/LogicalTest.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.binary.scalar;\n+\n+\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.lops.LopProperties.ExecType;\n+import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+import org.junit.Test;\n+\n+import java.util.HashMap;\n+\n+\n+public class LogicalTest extends AutomatedTestBase {\n+\n+ private final static String TEST_NAME1 = \"LogicalAndTest\";\n+ private final static String TEST_NAME2 = \"LogicalOrTest\";\n+ private final static String TEST_NAME3 = \"LogicalXorTest\";\n+\n+ private final static String TEST_DIR = \"functions/binary/scalar/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + LogicalTest.class.getSimpleName() + \"/\";\n+\n+ private final static int rows = 2100;\n+ private final static int cols = 70;\n+ private final static double sparsity1 = 0.1; //sparse\n+ private final static double sparsity2 = 0.9; //dense\n+ private final static double eps = 1e-10;\n+\n+ @Override\n+ public void setUp() {\n+ addTestConfiguration(TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"C\" }));\n+ addTestConfiguration(TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"C\" }));\n+ addTestConfiguration(TEST_NAME3, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME3, new String[] { \"C\" }));\n+ }\n+\n+ // And Tests\n+ @Test\n+ public void testAndDenseCP() {\n+ runLogical(TEST_NAME1, false, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testAndSparseCP() {\n+ runLogical(TEST_NAME1, true, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testAndDenseSP() {\n+ runLogical(TEST_NAME1, false, 
ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testAndSparseSP() {\n+ runLogical(TEST_NAME1, true, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testAndDenseMR() {\n+ runLogical(TEST_NAME1, false, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testAndSparseMR() {\n+ runLogical(TEST_NAME1, true, ExecType.MR);\n+ }\n+\n+ //Or Tests\n+ @Test\n+ public void testOrDenseCP() {\n+ runLogical(TEST_NAME2, false, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testOrSparseCP() {\n+ runLogical(TEST_NAME2, true, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testOrDenseSP() {\n+ runLogical(TEST_NAME2, false, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testOrSparseSP() {\n+ runLogical(TEST_NAME2, true, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testOrDenseMR() {\n+ runLogical(TEST_NAME2, false, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testOrSparseMR() {\n+ runLogical(TEST_NAME2, true, ExecType.MR);\n+ }\n+\n+ //XOR Tests\n+ @Test\n+ public void testXorDenseCP() {\n+ runLogical(TEST_NAME3, false, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testXorSparseCP() {\n+ runLogical(TEST_NAME3, true, ExecType.CP);\n+ }\n+\n+ @Test\n+ public void testXorDenseSP() {\n+ runLogical(TEST_NAME3, false, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testXorSparseSP() {\n+ runLogical(TEST_NAME3, true, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testXorDenseMR() {\n+ runLogical(TEST_NAME3, false, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testXorSparseMR() {\n+ runLogical(TEST_NAME3, true, ExecType.MR);\n+ }\n+\n+\n+ public void runLogical(String testname, boolean sparse, ExecType et) {\n+ //rtplatform for MR\n+ RUNTIME_PLATFORM platformOld = rtplatform;\n+ switch( et ){\n+ case MR: rtplatform = RUNTIME_PLATFORM.HADOOP; break;\n+ case SPARK: rtplatform = RUNTIME_PLATFORM.SPARK; break;\n+ default: rtplatform = RUNTIME_PLATFORM.HYBRID_SPARK; break;\n+ }\n+\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK || rtplatform == RUNTIME_PLATFORM.HYBRID_SPARK )\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n+\n+ try {\n+ String TEST_NAME = testname;\n+ getAndLoadTestConfiguration(TEST_NAME);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + TEST_NAME + \".dml\";\n+ programArgs = new String[]{\"-explain\", \"-args\", input(\"A\"), output(\"C\")};\n+\n+ fullRScriptName = HOME + TEST_NAME + \".R\";\n+ rCmd = \"Rscript\" + \" \" + fullRScriptName + \" \" + inputDir() + \" \" + expectedDir();\n+\n+ //get a random matrix of values with (-0.5, 0, 1)\n+ double[][] A = getRandomMatrix(rows, cols, -0.5, 1, sparse ? sparsity1:sparsity2, 1234);\n+ writeInputMatrixWithMTD(\"A\", A, true);\n+\n+ //run tests\n+ runTest(true, false, null, -1);\n+ runRScript(true);\n+\n+ //compare matrices\n+ HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(\"C\");\n+ HashMap<CellIndex, Double> rfile = readRMatrixFromFS(\"C\");\n+ TestUtils.compareMatrices(dmlfile, rfile, eps, \"Stat-DML\", \"Stat-R\");\n+ }\n+ finally {\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ rtplatform = platformOld;\n+ }\n+ }\n+}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/binary/scalar/LogicalAndTest.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. 
The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# for matrix-scalar\n+\n+args <- commandArgs(TRUE)\n+options(digits=22)\n+\n+library(\"Matrix\")\n+\n+A <- readMM(paste(args[1], \"A.mtx\", sep=\"\"))\n+\n+n = 0;\n+C = A & n;\n+\n+writeMM(as(C, \"CsparseMatrix\"), paste(args[2], \"C\", sep=\"\"));\n+\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/binary/scalar/LogicalAndTest.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = read($1);\n+n = 0;\n+\n+C = (A & n);\n+\n+write(C, $2);\n+\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/binary/scalar/LogicalOrTest.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. 
See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args <- commandArgs(TRUE)\n+options(digits=22)\n+\n+library(\"Matrix\")\n+\n+A <- readMM(paste(args[1], \"A.mtx\", sep=\"\"))\n+n = 0;\n+\n+C = A | n;\n+\n+writeMM(as(C, \"CsparseMatrix\"), paste(args[2], \"C\", sep=\"\"));\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/binary/scalar/LogicalOrTest.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = read($1);\n+n = 0;\n+\n+C = (A | n);\n+\n+write(C, $2);\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/binary/scalar/LogicalXorTest.R", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args <- commandArgs(TRUE)\n+options(digits=22)\n+\n+library(\"Matrix\")\n+\n+A <- readMM(paste(args[1], \"A.mtx\", sep=\"\"))\n+\n+n = 0;\n+\n+C = xor(A, n);\n+\n+writeMM(as(C, \"CsparseMatrix\"), paste(args[2], \"C\", sep=\"\"));\n+\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/binary/scalar/LogicalXorTest.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+A = read($1);\n+n = 0;\n+\n+C = xor(A, n);\n+\n+write(C, $2);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-1931] New tests for matrix-scalar logical operations Closes #715.
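For intuition on the new tests above: with the scalar n = 0 and nonzero-as-true semantics, A & n is all zeros, while A | n and xor(A, n) both reduce to an elementwise (A != 0). A minimal DML sketch; size and seed mirror the tests but are otherwise hypothetical:

A = rand(rows=3, cols=3, min=-0.5, max=1, seed=1234);
n = 0;

C1 = A & n;      # all zeros: anything AND false
C2 = A | n;      # 1 exactly where A != 0
C3 = xor(A, n);  # same as C2 when n == 0
print(sum(C2 != C3));  # 0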
49,738
24.01.2018 22:01:28
28,800
a3ab197686b96873b0a7686710e083906903aa69
Fix correctness of sparse relu-backward CP operations This patch fixes a correctness issue of relu backward over sparse inputs that had been introduced by a recent refactoring and cleanup. In detail, the binary in-place multiply was mistakenly replaced by a binary in-place plus.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNRelu.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNNRelu.java", "diff": "@@ -79,7 +79,7 @@ public class LibMatrixDNNRelu\n}\nelse {\nscalarOperations(_params.input1, out, n, _rl, _ru, GT0); // (X > 0)\n- binaryOperationInPlacePlus(_params.input2, out, n, _rl, _ru); // (X > 0) * dout\n+ binaryOperationInPlaceMult(_params.input2, out, n, _rl, _ru); // (X > 0) * dout\n}\nreturn 0L;\n}\n@@ -113,24 +113,35 @@ public class LibMatrixDNNRelu\n}\n}\n- private static void binaryOperationInPlacePlus(MatrixBlock src,\n+ private static void binaryOperationInPlaceMult(MatrixBlock src,\nDenseBlock c, int destNumCols, int src_rl, int src_ru)\nthrows DMLRuntimeException\n{\n- if( src.isEmptyBlock(false) )\n- return; //do nothing (add 0);\n+ if( src.isEmptyBlock(false) ) {\n+ c.set(src_rl, src_rl, 0, destNumCols, 0);\n+ return;\n+ }\nif(src.isInSparseFormat()) {\nfor(int i = src_rl; i < src_ru; i++) {\n- if( src.getSparseBlock().isEmpty(i) ) continue;\n+ if( !src.getSparseBlock().isEmpty(i) ) {\nint apos = src.getSparseBlock().pos(i);\nint alen = src.getSparseBlock().size(i);\nint[] aix = src.getSparseBlock().indexes(i);\ndouble[] avals = src.getSparseBlock().values(i);\ndouble[] cvals = c.values(i);\nint cix = c.pos(i);\n- for(int j = apos; j < apos+alen; j++)\n- cvals[ cix+aix[j] ] += avals[j];\n+ int prevDestIndex = 0;\n+ for(int j = apos; j < apos+alen; j++) {\n+ c.set(i, i+1, prevDestIndex, aix[j], 0);\n+ prevDestIndex = aix[j]+1;\n+ cvals[ cix+aix[j] ] *= avals[j];\n+ }\n+ c.set(i, i+1, prevDestIndex, destNumCols, 0);\n+ }\n+ else {\n+ c.set(i, i+1, 0, destNumCols, 0);\n+ }\n}\n}\nelse { //DENSE\n@@ -139,7 +150,7 @@ public class LibMatrixDNNRelu\ndouble[] avals = a.values(i), cvals = c.values(i);\nint aix = a.pos(i), cix = c.pos(i);\nfor(int j=0; j<destNumCols; j++)\n- cvals[cix+j] += avals[aix+j];\n+ cvals[cix+j] *= avals[aix+j];\n}\n}\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2080] Fix correctness of sparse relu-backward CP operations This patch fixes a correctness issue of relu backward over sparse inputs that had been introduced by a recent refactoring and cleanup. In detail, the binary in-place multiply was mistakenly replaced by a binary in-place plus.
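The semantics being fixed above: relu backward masks the incoming gradient by the positive entries of X, i.e., (X > 0) * dout; with the accidental plus, an empty sparse row of dout would leave the 0/1 mask in the output instead of zeroing it. A minimal DML reference sketch; sizes and seeds are hypothetical:

X = rand(rows=3, cols=3, min=-1, max=1, seed=42);
dout = rand(rows=3, cols=3, seed=43);

correct = (X > 0) * dout;  # gradient flows only where the input was positive
wrong = (X > 0) + dout;    # the regression: adds the mask instead of applying it
print(sum(abs(correct - wrong)));  # > 0 whenever X has positive entries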
49,738
27.01.2018 22:50:41
28,800
2ef6342fa013a64cdc0a5d0aa667e60aeeecaa84
Extended spark/mr parfor result merge w/ accumulators This patch completes the runtime integration of parfor result merge with accumulators (for the += operator) for all distributed MR and Spark operations, along with related tests.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMerge.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMerge.java", "diff": "package org.apache.sysml.runtime.controlprogram.parfor;\n+import java.io.Serializable;\nimport java.util.List;\nimport org.apache.commons.logging.Log;\n@@ -37,8 +38,12 @@ import org.apache.sysml.runtime.matrix.operators.BinaryOperator;\n* These properties allow us to realize result merging in parallel without any synchronization.\n*\n*/\n-public abstract class ResultMerge\n+public abstract class ResultMerge implements Serializable\n{\n+ //note: this class needs to be serializable to ensure that all attributes of\n+ //ResultMergeRemoteSparkWCompare are included in the task closure\n+ private static final long serialVersionUID = 2620430969346516677L;\n+\nprotected static final Log LOG = LogFactory.getLog(ResultMerge.class.getName());\nprotected static final String NAME_SUFFIX = \"_rm\";\nprotected static final BinaryOperator PLUS = InstructionUtils.parseBinaryOperator(\"+\");\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeLocalAutomatic.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeLocalAutomatic.java", "diff": "@@ -29,6 +29,8 @@ import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\npublic class ResultMergeLocalAutomatic extends ResultMerge\n{\n+ private static final long serialVersionUID = 1600893100602101732L;\n+\nprivate ResultMerge _rm = null;\npublic ResultMergeLocalAutomatic( MatrixObject out, MatrixObject[] in, String outputFilename, boolean accum ) {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeLocalFile.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeLocalFile.java", "diff": "@@ -71,6 +71,8 @@ import org.apache.sysml.runtime.util.MapReduceTool;\n*/\npublic class ResultMergeLocalFile extends ResultMerge\n{\n+ private static final long serialVersionUID = -6905893742840020489L;\n+\n//NOTE: if we allow simple copies, this might result in a scattered file and many MR tasks for subsequent jobs\npublic static final boolean ALLOW_COPY_CELLFILES = false;\n@@ -687,7 +689,7 @@ public class ResultMergeLocalFile extends ResultMerge\n}\n//sort sparse due to append-only\n- if( appendOnly )\n+ if( appendOnly && !_isAccum )\nmb.sortSparseRows();\n//change sparsity if required after\n@@ -709,7 +711,7 @@ public class ResultMergeLocalFile extends ResultMerge\n}\n//sort sparse due to append-only\n- if( appendOnly )\n+ if( appendOnly && _isAccum )\nmb.sortSparseRows();\n//change sparsity if required after\n@@ -779,7 +781,7 @@ public class ResultMergeLocalFile extends ResultMerge\n}\n//sort sparse and exam sparsity due to append-only\n- if( appendOnly )\n+ if( appendOnly && !_isAccum )\nmb.sortSparseRows();\n//change sparsity if required after\n@@ -801,7 +803,7 @@ public class ResultMergeLocalFile extends ResultMerge\n}\n//sort sparse due to append-only\n- if( appendOnly )\n+ if( appendOnly && !_isAccum )\nmb.sortSparseRows();\n//change sparsity if required after\n@@ -903,7 +905,7 @@ public class ResultMergeLocalFile extends ResultMerge\n}\n//sort sparse due to append-only\n- if( appendOnly )\n+ if( appendOnly && _isAccum )\nmb.sortSparseRows();\n//change sparsity if required after\n@@ -925,7 +927,7 @@ public class ResultMergeLocalFile extends 
ResultMerge\n}\n//sort sparse due to append-only\n- if( appendOnly )\n+ if( appendOnly && _isAccum )\nmb.sortSparseRows();\n//change sparsity if required after\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeLocalMemory.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeLocalMemory.java", "diff": "@@ -42,6 +42,8 @@ import org.apache.sysml.runtime.util.DataConverter;\n*/\npublic class ResultMergeLocalMemory extends ResultMerge\n{\n+ private static final long serialVersionUID = -3543612508601511701L;\n+\n//internal comparison matrix\nprivate DenseBlock _compare = null;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeRemoteMR.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeRemoteMR.java", "diff": "@@ -59,6 +59,7 @@ import org.apache.sysml.utils.Statistics;\n*/\npublic class ResultMergeRemoteMR extends ResultMerge\n{\n+ private static final long serialVersionUID = 575681838941682037L;\npublic static final byte COMPARE_TAG = 'c';\npublic static final byte DATA_TAG = 'd';\n@@ -185,11 +186,12 @@ public class ResultMergeRemoteMR extends ResultMerge\nif( withCompare ) {\nFileSystem fs = IOUtilFunctions.getFileSystem(pathNew, job);\npathCompare = new Path(fname).makeQualified(fs);\n- MRJobConfiguration.setResultMergeInfo(job, pathCompare.toString(), ii,\n+ MRJobConfiguration.setResultMergeInfo(job, pathCompare.toString(), _isAccum, ii,\nLocalFileUtils.getWorkingDir(LocalFileUtils.CATEGORY_RESULTMERGE), rlen, clen, brlen, bclen);\n}\nelse\n- MRJobConfiguration.setResultMergeInfo(job, \"null\", ii, LocalFileUtils.getWorkingDir(LocalFileUtils.CATEGORY_RESULTMERGE), rlen, clen, bclen, bclen);\n+ MRJobConfiguration.setResultMergeInfo(job, \"null\", _isAccum, ii,\n+ LocalFileUtils.getWorkingDir(LocalFileUtils.CATEGORY_RESULTMERGE), rlen, clen, bclen, bclen);\n//set mappers, reducers, combiners\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeRemoteReducer.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeRemoteReducer.java", "diff": "@@ -74,18 +74,17 @@ public class ResultMergeRemoteReducer\nString compareFname = MRJobConfiguration.getResultMergeInfoCompareFilename(job);\n//determine compare required\n- boolean requiresCompare = false;\n- if( !compareFname.equals(\"null\") )\n- requiresCompare = true;\n+ boolean requiresCompare = !compareFname.equals(\"null\");\n+ boolean isAccum = MRJobConfiguration.getResultMergeInfoAccumulator(job);\nif( ii == InputInfo.TextCellInputInfo )\n_reducer = new ResultMergeReducerTextCell(requiresCompare);\nelse if( ii == InputInfo.BinaryCellInputInfo )\n_reducer = new ResultMergeReducerBinaryCell(requiresCompare);\nelse if( ii == InputInfo.BinaryBlockInputInfo )\n- _reducer = new ResultMergeReducerBinaryBlock(requiresCompare, job);\n+ _reducer = new ResultMergeReducerBinaryBlock(requiresCompare, isAccum, job);\nelse\n- throw new RuntimeException(\"Unable to configure mapper with unknown input info: \"+ii.toString());\n+ throw new RuntimeException(\"Unable to configure mapper with unknown input info: \"+ii.toString()+\" \"+isAccum);\n}\n@Override\n@@ -266,12 +265,15 @@ public class ResultMergeRemoteReducer\nprivate static class ResultMergeReducerBinaryBlock extends ResultMerge implements ResultMergeReducer\n{\n+ private static final long 
serialVersionUID = 84399890805869855L;\n+\nprivate boolean _requiresCompare;\nprivate JobConf _job = null;\n- public ResultMergeReducerBinaryBlock(boolean requiresCompare, JobConf job) {\n+ public ResultMergeReducerBinaryBlock(boolean requiresCompare, boolean isAccum, JobConf job) {\n_requiresCompare = requiresCompare;\n_job = job;\n+ _isAccum = isAccum;\n}\n@Override\n@@ -323,7 +325,7 @@ public class ResultMergeRemoteReducer\n}\n//sort sparse due to append-only\n- if( appendOnly )\n+ if( appendOnly && !_isAccum )\nmbOut.sortSparseRows();\n//change sparsity if required after\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeRemoteSpark.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeRemoteSpark.java", "diff": "@@ -43,12 +43,9 @@ import org.apache.sysml.runtime.matrix.data.MatrixIndexes;\nimport org.apache.sysml.runtime.matrix.data.OutputInfo;\nimport org.apache.sysml.utils.Statistics;\n-/**\n- * MR job class for submitting parfor result merge MR jobs.\n- *\n- */\npublic class ResultMergeRemoteSpark extends ResultMerge\n{\n+ private static final long serialVersionUID = -6924566953903424820L;\nprivate ExecutionContext _ec = null;\nprivate int _numMappers = -1;\n@@ -162,13 +159,12 @@ public class ResultMergeRemoteSpark extends ResultMerge\n//Step 2a: merge with compare\nJavaPairRDD<MatrixIndexes, MatrixBlock> out = null;\n- if( withCompare )\n- {\n+ if( withCompare ) {\nJavaPairRDD<MatrixIndexes, MatrixBlock> compareRdd = (JavaPairRDD<MatrixIndexes, MatrixBlock>)\nsec.getRDDHandleForMatrixObject(compare, InputInfo.BinaryBlockInputInfo);\n//merge values which differ from compare values\n- ResultMergeRemoteSparkWCompare cfun = new ResultMergeRemoteSparkWCompare();\n+ ResultMergeRemoteSparkWCompare cfun = new ResultMergeRemoteSparkWCompare(_isAccum);\nout = rdd.groupByKey(numRed) //group all result blocks per key\n.join(compareRdd) //join compare block and result blocks\n.mapToPair(cfun); //merge result blocks w/ compare\n@@ -176,7 +172,9 @@ public class ResultMergeRemoteSpark extends ResultMerge\n//Step 2b: merge without compare\nelse {\n//direct merge in any order (disjointness guaranteed)\n- out = RDDAggregateUtils.mergeByKey(rdd, false);\n+ out = _isAccum ?\n+ RDDAggregateUtils.sumByKeyStable(rdd, false) :\n+ RDDAggregateUtils.mergeByKey(rdd, false);\n}\n//Step 3: create output rdd handle w/ lineage\n@@ -203,9 +201,7 @@ public class ResultMergeRemoteSpark extends ResultMerge\nprivate static int determineNumReducers(long rlen, long clen, int brlen, int bclen, long numRed) {\n//set the number of mappers and reducers\nlong reducerGroups = Math.max(rlen/brlen,1) * Math.max(clen/bclen, 1);\n- int ret = (int)Math.min( numRed, reducerGroups );\n-\n- return ret;\n+ return (int)Math.min( numRed, reducerGroups );\n}\n@SuppressWarnings(\"unchecked\")\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeRemoteSparkWCompare.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeRemoteSparkWCompare.java", "diff": "@@ -36,6 +36,10 @@ public class ResultMergeRemoteSparkWCompare extends ResultMerge implements PairF\n{\nprivate static final long serialVersionUID = -5970805069405942836L;\n+ public ResultMergeRemoteSparkWCompare(boolean accum) {\n+ _isAccum = accum;\n+ }\n+\n@Override\npublic Tuple2<MatrixIndexes, MatrixBlock> call(Tuple2<MatrixIndexes, Tuple2<Iterable<MatrixBlock>, MatrixBlock>> 
arg)\nthrows Exception\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/mapred/MRJobConfiguration.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/mapred/MRJobConfiguration.java", "diff": "@@ -160,6 +160,7 @@ public class MRJobConfiguration\n//result merge info\nprivate static final String RESULTMERGE_INPUT_INFO_CONFIG=\"resultmerge.input.inputinfo\";\nprivate static final String RESULTMERGE_COMPARE_FILENAME_CONFIG=\"resultmerge.compare.filename\";\n+ private static final String RESULTMERGE_ACCUMULATOR_CONFIG=\"resultmerge.accumulator\";\nprivate static final String RESULTMERGE_STAGING_DIR_CONFIG=\"resultmerge.staging.dir\";\nprivate static final String RESULTMERGE_MATRIX_NUM_ROW_CONFIG=\"resultmerge.matrix.num.row\";\nprivate static final String RESULTMERGE_MATRIX_NUM_COLUMN_CONFIG=\"resultmerge.matrix.num.column\";\n@@ -632,10 +633,11 @@ public class MRJobConfiguration\nreturn job.getBoolean(PARTITIONING_TRANSPOSE_COL_CONFIG, false);\n}\n- public static void setResultMergeInfo( JobConf job, String fnameNew, InputInfo ii, String stagingDir, long rlen, long clen, int brlen, int bclen )\n+ public static void setResultMergeInfo( JobConf job, String fnameNew, boolean accum, InputInfo ii, String stagingDir, long rlen, long clen, int brlen, int bclen )\nthrows DMLRuntimeException\n{\njob.set(RESULTMERGE_COMPARE_FILENAME_CONFIG, fnameNew);\n+ job.set(RESULTMERGE_ACCUMULATOR_CONFIG, String.valueOf(accum));\njob.set(RESULTMERGE_INPUT_INFO_CONFIG, InputInfo.inputInfoToString(ii));\njob.set(RESULTMERGE_STAGING_DIR_CONFIG, stagingDir);\njob.set(RESULTMERGE_MATRIX_NUM_ROW_CONFIG, String.valueOf(rlen));\n@@ -644,11 +646,14 @@ public class MRJobConfiguration\njob.set(RESULTMERGE_BLOCK_NUM_COLUMN_CONFIG, String.valueOf(bclen));\n}\n- public static String getResultMergeInfoCompareFilename( JobConf job )\n- {\n+ public static String getResultMergeInfoCompareFilename( JobConf job ) {\nreturn job.get(RESULTMERGE_COMPARE_FILENAME_CONFIG);\n}\n+ public static boolean getResultMergeInfoAccumulator( JobConf job ) {\n+ return Boolean.parseBoolean(job.get(RESULTMERGE_ACCUMULATOR_CONFIG));\n+ }\n+\npublic static InputInfo getResultMergeInputInfo( JobConf job )\n{\nreturn InputInfo.stringToInputInfo( job.get(RESULTMERGE_INPUT_INFO_CONFIG) );\n" }, { "change_type": "MODIFY", "old_path": "src/test/java/org/apache/sysml/test/integration/functions/parfor/ParForAccumulatorResultMergeTest.java", "new_path": "src/test/java/org/apache/sysml/test/integration/functions/parfor/ParForAccumulatorResultMergeTest.java", "diff": "@@ -71,6 +71,46 @@ public class ParForAccumulatorResultMergeTest extends AutomatedTestBase\nrunParForAccumulatorResultMergeTest(TEST_NAME1, true, true, ExecType.CP);\n}\n+ @Test\n+ public void testParForAccumulatorRemoteEmptyDenseMR() {\n+ runParForAccumulatorResultMergeTest(TEST_NAME2, false, false, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testParForAccumulatorRemoteEmptySparseMR() {\n+ runParForAccumulatorResultMergeTest(TEST_NAME2, false, true, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testParForAccumulatorRemoteInitDenseMR() {\n+ runParForAccumulatorResultMergeTest(TEST_NAME2, true, false, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testParForAccumulatorRemoteInitSparseMR() {\n+ runParForAccumulatorResultMergeTest(TEST_NAME2, true, true, ExecType.MR);\n+ }\n+\n+ @Test\n+ public void testParForAccumulatorRemoteEmptyDenseSP() {\n+ runParForAccumulatorResultMergeTest(TEST_NAME3, false, false, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public 
void testParForAccumulatorRemoteEmptySparseSP() {\n+ runParForAccumulatorResultMergeTest(TEST_NAME3, false, true, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testParForAccumulatorRemoteInitDenseSP() {\n+ runParForAccumulatorResultMergeTest(TEST_NAME3, true, false, ExecType.SPARK);\n+ }\n+\n+ @Test\n+ public void testParForAccumulatorRemoteInitSparseSP() {\n+ runParForAccumulatorResultMergeTest(TEST_NAME3, true, true, ExecType.SPARK);\n+ }\n+\nprivate void runParForAccumulatorResultMergeTest( String test, boolean init, boolean sparse, ExecType et )\n{\nRUNTIME_PLATFORM platformOld = rtplatform;\n@@ -96,8 +136,7 @@ public class ParForAccumulatorResultMergeTest extends AutomatedTestBase\nprogramArgs = new String[]{\"-args\",\nString.valueOf(rows), String.valueOf(cols), String.valueOf(init).toUpperCase(),\nString.valueOf(sparse).toUpperCase(), output(\"R\") };\n-\n- fullRScriptName = HOME + TEST_NAME + \".R\";\n+ fullRScriptName = HOME + TEST_NAME.substring(0, TEST_NAME.length()-1) + \".R\";\nrCmd = \"Rscript\" + \" \" + fullRScriptName + \" \" +\nString.valueOf(rows) + \" \" + String.valueOf(cols) + \" \" + String.valueOf(init).toUpperCase()\n+ \" \" + String.valueOf(sparse).toUpperCase() + \" \" + expectedDir();\n" }, { "change_type": "RENAME", "old_path": "src/test/scripts/functions/parfor/parfor_accumulator1.R", "new_path": "src/test/scripts/functions/parfor/parfor_accumulator.R", "diff": "" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/parfor/parfor_accumulator2.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+rlen = $1;\n+clen = $2;\n+init = $3\n+sparse = $4;\n+\n+R = matrix(ifelse(init, 7, 0), rlen, clen);\n+if( sparse )\n+ R[,50:300] = matrix(0, rlen, 251);\n+\n+parfor(i in 1:10, opt=CONSTRAINED, resultmerge=REMOTE_MR)\n+ R += matrix(i, rlen, clen);\n+\n+write(R, $5);\n" }, { "change_type": "ADD", "old_path": null, "new_path": "src/test/scripts/functions/parfor/parfor_accumulator3.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. 
You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+rlen = $1;\n+clen = $2;\n+init = $3\n+sparse = $4;\n+\n+R = matrix(ifelse(init, 7, 0), rlen, clen);\n+if( sparse )\n+ R[,50:300] = matrix(0, rlen, 251);\n+\n+parfor(i in 1:10, opt=CONSTRAINED, resultmerge=REMOTE_SPARK)\n+ R += matrix(i, rlen, clen);\n+\n+write(R, $5);\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2096] Extended spark/mr parfor result merge w/ accumulators This patch completes the runtime integration of parfor result merge with accumulators (for the += operator) across all distributed MR and Spark operations, as well as related tests.
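A minimal sketch of what the accumulator (+=) result merge computes per worker result, assuming plain dense arrays; the real runtime operates on MatrixBlock with sparse handling and compare data, and the class/method names below are hypothetical.

public class AccumMergeSketch {
    // out += in, element-wise; applied once per parfor worker result so that
    // partial results accumulate instead of overwriting each other
    static void mergeAccum(double[] out, double[] in) {
        for (int i = 0; i < out.length; i++)
            out[i] += in[i];
    }
}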
49,738
27.01.2018 23:15:14
28,800
00d72a092ebf3abefd006b83275c8288f06afa12
Minor simplifications of nn library w/ += and ifelse
[ { "change_type": "MODIFY", "old_path": "scripts/nn/layers/conv2d.dml", "new_path": "scripts/nn/layers/conv2d.dml", "diff": "@@ -126,14 +126,10 @@ backward = function(matrix[double] dout, int Hout, int Wout,\nN = nrow(X)\nF = nrow(W)\n- # Create gradient volumes\n- # Note: Create convenience gradient volumes for dW and db that will\n- # allow for one gradient to be stored per example, allowing for\n- # parallel computation at the expense of memory. We will reduce at\n- # the end.\n+ # Create output gradient volumes\ndX = matrix(0, rows=N, cols=C*Hin*Win)\n- dWN = matrix(0, rows=N, cols=F*C*Hf*Wf) # dW = matrix(0, rows=F, cols=C*Hf*Wf)\n- dbN = matrix(0, rows=N, cols=F) # db = matrix(0, rows=F, cols=1)\n+ dW = matrix(0, rows=F, cols=C*Hf*Wf)\n+ db = matrix(0, rows=F, cols=1)\n# Partial derivatives for convolution - im2col implementation\nparfor (n in 1:N) { # all examples\n@@ -143,12 +139,10 @@ backward = function(matrix[double] dout, int Hout, int Wout,\nXn = matrix(X[n,], rows=C, cols=Hin*Win) # reshape\nXn_padded = util::pad_image(Xn, Hin, Win, padh, padw, 0) # shape (C, (Hin+2*padh)*(Win+2*padw))\nXn_padded_cols = util::im2col(Xn_padded, Hin+2*padh, Win+2*padw, Hf, Wf, strideh, stridew)\n- # dW = dW + doutn %*% t(Xn_padded_cols)\n- dWN[n,] = matrix(doutn %*% t(Xn_padded_cols), rows=1, cols=F*C*Hf*Wf)\n+ dW += doutn %*% t(Xn_padded_cols)\n# Compute db\n- # db = db + rowSums(doutn)\n- dbN[n,] = matrix(rowSums(doutn), rows=1, cols=F)\n+ db += rowSums(doutn)\n# Compute dX\ndXn_padded_cols = t(W) %*% doutn # shape (C*Hf*Wf, Hout*Wout)\n@@ -157,11 +151,6 @@ backward = function(matrix[double] dout, int Hout, int Wout,\ndXn = util::unpad_image(dXn_padded, Hin, Win, padh, padw)\ndX[n,] = matrix(dXn, rows=1, cols=C*Hin*Win) # reshape\n}\n-\n- # Reduce convenience gradient volumes with one gradient per example\n- # into single gradients for W and b.\n- dW = matrix(colSums(dWN), rows=F, cols=C*Hf*Wf)\n- db = matrix(colSums(dbN), rows=F, cols=1)\n}\ninit = function(int F, int C, int Hf, int Wf)\n" }, { "change_type": "MODIFY", "old_path": "scripts/nn/layers/lstm.dml", "new_path": "scripts/nn/layers/lstm.dml", "diff": "@@ -79,12 +79,8 @@ forward = function(matrix[double] X, matrix[double] W, matrix[double] b, int T,\nout_prev = out0\nc_prev = c0\nc = c_prev\n- if (return_sequences) {\n- out = matrix(0, rows=N, cols=T*M)\n- }\n- else {\n- out = matrix(0, rows=N, cols=M)\n- }\n+ out = matrix(0, rows=N, cols=ifelse(return_sequences,T*M, M))\n+\n# caches to be used during the backward pass for performance\ncache_out = matrix(0, rows=T, cols=N*M)\ncache_c = matrix(0, rows=T, cols=N*M)\n" }, { "change_type": "MODIFY", "old_path": "scripts/nn/layers/rnn.dml", "new_path": "scripts/nn/layers/rnn.dml", "diff": "@@ -146,8 +146,8 @@ backward = function(matrix[double] dout, matrix[double] X, matrix[double] W, mat\n}\ninput = cbind(X_t, out_prev) # shape (N, D+M)\ndout_t_raw = (1-out_t^2) * dout_t # into tanh, shape (N, M)\n- dW = dW + t(input) %*% dout_t_raw # shape (D+M, M)\n- db = db + colSums(dout_t_raw) # shape (1, M)\n+ dW += t(input) %*% dout_t_raw # shape (D+M, M)\n+ db += colSums(dout_t_raw) # shape (1, M)\ndinput = dout_t_raw %*% t(W) # shape (N, D+M)\ndX[,(t-1)*D+1:t*D] = dinput[,1:D]\ndout_prev = dinput[,D+1:D+M] # shape (N, M)\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2097] Minor simplifications of nn library w/ += and ifelse
49,738
28.01.2018 01:15:46
28,800
c7604ea26de0705655310adaa3b40d320d389bed
[MINOR] Performance binary inplace ops for result merge, cleanups
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeLocalFile.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeLocalFile.java", "diff": "@@ -711,7 +711,7 @@ public class ResultMergeLocalFile extends ResultMerge\n}\n//sort sparse due to append-only\n- if( appendOnly && _isAccum )\n+ if( appendOnly && !_isAccum )\nmb.sortSparseRows();\n//change sparsity if required after\n@@ -905,7 +905,7 @@ public class ResultMergeLocalFile extends ResultMerge\n}\n//sort sparse due to append-only\n- if( appendOnly && _isAccum )\n+ if( appendOnly && !_isAccum )\nmb.sortSparseRows();\n//change sparsity if required after\n@@ -927,7 +927,7 @@ public class ResultMergeLocalFile extends ResultMerge\n}\n//sort sparse due to append-only\n- if( appendOnly && _isAccum )\n+ if( appendOnly && !_isAccum )\nmb.sortSparseRows();\n//change sparsity if required after\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeLocalMemory.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/ResultMergeLocalMemory.java", "diff": "@@ -111,7 +111,7 @@ public class ResultMergeLocalMemory extends ResultMerge\n}\n//sort sparse due to append-only\n- if( appendOnly )\n+ if( appendOnly && !_isAccum )\noutMBNew.sortSparseRows();\n//change sparsity if required after\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixBincell.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixBincell.java", "diff": "@@ -1084,62 +1084,58 @@ public class LibMatrixBincell\nreturn;\n}\n- int rlen = m1ret.rlen;\n- int clen = m1ret.clen;\n-\nif(m1ret.sparse && m2.sparse)\n+ safeBinaryInPlaceSparse(m1ret, m2, op);\n+ else if(!m1ret.sparse && !m2.sparse)\n+ safeBinaryInPlaceDense(m1ret, m2, op);\n+ else //GENERIC\n+ safeBinaryInPlaceGeneric(m1ret, m2, op);\n+ }\n+\n+ private static void safeBinaryInPlaceSparse(MatrixBlock m1ret, MatrixBlock m2, BinaryOperator op)\n+ throws DMLRuntimeException\n{\nif(m1ret.sparseBlock!=null)\nm1ret.allocateSparseRowsBlock(false);\nif(m2.sparseBlock!=null)\nm2.allocateSparseRowsBlock(false);\n-\nSparseBlock c = m1ret.sparseBlock;\nSparseBlock b = m2.sparseBlock;\n+ final int rlen = m1ret.rlen;\n+ final int clen = m1ret.clen;\n+\nif( c!=null && b!=null )\n{\nfor(int r=0; r<rlen; r++)\n{\n- if(c.isEmpty(r) && b.isEmpty(r))\n- continue;\n+ if(c.isEmpty(r) && b.isEmpty(r)) continue;\n- if( b.isEmpty(r) )\n- {\n+ if( b.isEmpty(r) ) {\nint apos = c.pos(r);\nint alen = c.size(r);\ndouble[] values=c.values(r);\nfor(int i=apos; i<apos+alen; i++)\nvalues[i]=op.fn.execute(values[i], 0);\n- }else\n- {\n- int estimateSize=0;\n- if( !c.isEmpty(r) )\n- estimateSize+=c.size(r);\n- if( !b.isEmpty(r))\n- estimateSize+=b.size(r);\n- estimateSize=Math.min(clen, estimateSize);\n-\n- //temp\n+ }\n+ else {\n+ int estimateSize = Math.min(clen, (!c.isEmpty(r) ?\n+ c.size(r) : 0) + (!b.isEmpty(r) ? 
b.size(r) : 0));\nSparseRow thisRow = c.get(r);\nc.set(r, new SparseRowVector(estimateSize, clen), false);\n- if(thisRow!=null)\n- {\n+ if(thisRow!=null) {\nm1ret.nonZeros-=thisRow.size();\nmergeForSparseBinary(op, thisRow.values(), thisRow.indexes(), 0,\nthisRow.size(), b.values(r), b.indexes(r), b.pos(r), b.size(r), r, m1ret);\n-\n}\n- else\n- {\n+ else {\nappendRightForSparseBinary(op, b.values(r), b.indexes(r), b.pos(r), b.size(r), 0, r, m1ret);\n}\n}\n}\n}\n- else if(m1ret.sparseBlock==null)\n- {\n+ else if( c == null ) {\nm1ret.sparseBlock = SparseBlockFactory.createSparseBlock(rlen);\nfor(int r=0; r<rlen; r++) {\nif( !b.isEmpty(r) ) {\n@@ -1169,9 +1165,49 @@ public class LibMatrixBincell\n}\n}\n}\n+\n+ m1ret.recomputeNonZeros();\n}\n- else //one side dense\n+\n+ private static void safeBinaryInPlaceDense(MatrixBlock m1ret, MatrixBlock m2, BinaryOperator op)\n+ throws DMLRuntimeException\n{\n+ //prepare outputs\n+ m1ret.allocateDenseBlock();\n+ DenseBlock a = m1ret.getDenseBlock();\n+ DenseBlock b = m2.getDenseBlock();\n+ final int rlen = m1ret.rlen;\n+ final int clen = m1ret.clen;\n+\n+ long lnnz = 0;\n+ if( m2.isEmptyBlock(false) ) {\n+ for(int r=0; r<rlen; r++) {\n+ double[] avals = a.values(r);\n+ for(int c=0, ix=a.pos(r); c<clen; c++, ix++) {\n+ double tmp = op.fn.execute(avals[ix], 0);\n+ lnnz += (avals[ix] = tmp) != 0 ? 1: 0;\n+ }\n+ }\n+ }\n+ else {\n+ for(int r=0; r<rlen; r++) {\n+ double[] avals = a.values(r), bvals = b.values(r);\n+ for(int c=0, ix=a.pos(r); c<clen; c++, ix++) {\n+ double tmp = op.fn.execute(avals[ix], bvals[ix]);\n+ lnnz += (avals[ix] = tmp) != 0 ? 1 : 0;\n+ }\n+ }\n+ }\n+\n+ m1ret.setNonZeros(lnnz);\n+ }\n+\n+ private static void safeBinaryInPlaceGeneric(MatrixBlock m1ret, MatrixBlock m2, BinaryOperator op)\n+ throws DMLRuntimeException\n+ {\n+ final int rlen = m1ret.rlen;\n+ final int clen = m1ret.clen;\n+\nfor(int r=0; r<rlen; r++)\nfor(int c=0; c<clen; c++) {\ndouble thisvalue = m1ret.quickGetValue(r, c);\n@@ -1180,7 +1216,6 @@ public class LibMatrixBincell\nm1ret.quickSetValue(r, c, resultvalue);\n}\n}\n- }\nprivate static void unsafeBinaryInPlace(MatrixBlock m1ret, MatrixBlock m2, BinaryOperator op) throws DMLRuntimeException\n{\n" } ]
Java
Apache License 2.0
apache/systemds
[MINOR] Performance binary inplace ops for result merge, cleanups
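The dense-dense path introduced above updates in place while maintaining the non-zero count in the same pass. A condensed sketch of that pattern, assuming flat arrays (the actual code walks DenseBlock rows and uses the configured operator):

static long plusInPlace(double[] a, double[] b) {
    long nnz = 0;
    for (int i = 0; i < a.length; i++) {
        a[i] += b[i];                 // in-place update, no output allocation
        nnz += (a[i] != 0) ? 1 : 0;   // count non-zeros while touching the data anyway
    }
    return nnz; // caller stores this as the new nnz of the result block
}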
49,738
28.01.2018 13:59:06
28,800
864bfc9123f43856fdcd15cae5aa9c4435f5bf1f
[HOTFIX][SYSTEMML-2095] Fix runtime integration parfor result variables This patch fixes the recent runtime integration of extended parfor result variables (from variable names to metadata handles), which led to wrong parfor optimizer decisions, resulting in, for example, OOMs on sparse use cases.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/parser/ParForStatementBlock.java", "new_path": "src/main/java/org/apache/sysml/parser/ParForStatementBlock.java", "diff": "@@ -21,6 +21,7 @@ package org.apache.sysml.parser;\nimport java.io.IOException;\nimport java.util.ArrayList;\n+import java.util.Collection;\nimport java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.LinkedList;\n@@ -1812,9 +1813,9 @@ public class ParForStatementBlock extends ForStatementBlock\n}\n@Override\npublic boolean equals(Object that) {\n- if( !(that instanceof ResultVar) )\n- return false;\n- return _name.equals(((ResultVar)that)._name);\n+ String varname = (that instanceof ResultVar) ?\n+ ((ResultVar)that)._name : that.toString();\n+ return _name.equals(varname);\n}\n@Override\npublic int hashCode() {\n@@ -1824,6 +1825,11 @@ public class ParForStatementBlock extends ForStatementBlock\npublic String toString() {\nreturn _name;\n}\n+ public static boolean contains(Collection<ResultVar> list, String varName) {\n+ //helper function which is necessary because list.contains checks\n+ //varName.equals(rvar) which always returns false because it not a string\n+ return list.stream().anyMatch(rvar -> rvar.equals(varName));\n+ }\n}\nprivate static class Candidate {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "diff": "@@ -657,7 +657,7 @@ public class OptimizerRuleBased extends Optimizer\nbase = h.getInput().get(0);\n//check result variable\n- if( !resultVars.contains(base.getName()) )\n+ if( !ResultVar.contains(resultVars, base.getName()) )\nret = false;\n}\n@@ -1734,7 +1734,7 @@ public class OptimizerRuleBased extends Optimizer\n}\nelse if( n.getNodeType()== NodeType.HOP) {\nHop h = OptTreeConverter.getAbstractPlanMapping().getMappedHop(n.getID());\n- if( h instanceof LeftIndexingOp && retVars.contains( h.getInput().get(0).getName() ) )\n+ if( h instanceof LeftIndexingOp && ResultVar.contains(retVars, h.getInput().get(0).getName() ) )\nret &= (h.getParent().size()==1\n&& h.getParent().get(0).getName().equals(h.getInput().get(0).getName()));\n}\n@@ -1811,7 +1811,7 @@ public class OptimizerRuleBased extends Optimizer\nif( h.getInput() != null )\nfor( Hop cn : h.getInput() )\nif( cn instanceof DataOp && ((DataOp)cn).isRead() //read data\n- && !inplaceResultVars.contains(cn.getName())) //except in-place result vars\n+ && !ResultVar.contains(inplaceResultVars, cn.getName())) //except in-place result vars\nsum += cn.getMemEstimate();\n}\n}\n@@ -1868,7 +1868,6 @@ public class OptimizerRuleBased extends Optimizer\nif( ch instanceof DataOp && ch.getDataType() == DataType.MATRIX\n&& inputVars.contains(ch.getName()) )\n- //&& !partitionedVars.contains(ch.getName()))\n{\nret = true;\nsharedVars.add(ch.getName());\n@@ -1876,7 +1875,6 @@ public class OptimizerRuleBased extends Optimizer\nelse if( HopRewriteUtils.isTransposeOperation(ch)\n&& ch.getInput().get(0) instanceof DataOp && ch.getInput().get(0).getDataType() == DataType.MATRIX\n&& inputVars.contains(ch.getInput().get(0).getName()) )\n- //&& !partitionedVars.contains(ch.getInput().get(0).getName()))\n{\nret = true;\nsharedVars.add(ch.getInput().get(0).getName());\n@@ -2278,7 +2276,7 @@ public class OptimizerRuleBased extends Optimizer\nLeftIndexingOp hop = (LeftIndexingOp) OptTreeConverter.getAbstractPlanMapping().getMappedHop(n.getID());\n//check 
agains set of varname\nString varName = hop.getInput().get(0).getName();\n- if( resultVars.contains(varName) )\n+ if( ResultVar.contains(resultVars, varName) )\n{\nret = true;\nif( checkSize && vars.keySet().contains(varName) )\n@@ -2380,7 +2378,7 @@ public class OptimizerRuleBased extends Optimizer\nLeftIndexingOp hop = (LeftIndexingOp) OptTreeConverter.getAbstractPlanMapping().getMappedHop(n.getID());\n//check agains set of varname\nString varName = hop.getInput().get(0).getName();\n- if( resultVars.contains(varName) && vars.keySet().contains(varName) )\n+ if( ResultVar.contains(resultVars, varName) && vars.keySet().contains(varName) )\n{\n//dims of result vars must be known at this point in time\nMatrixObject mo = (MatrixObject) vars.get( hop.getInput().get(0).getName() );\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX][SYSTEMML-2095] Fix runtime integration parfor result variables This patch fixes the recent runtime integration of extended parfor result variables (from variable names to metadata handles), which led to wrong parfor optimizer decisions, resulting in, for example, OOMs on sparse use cases.
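The root cause is Java's Collection.contains semantics: contains(o) evaluates o.equals(element), so String.equals(ResultVar) is always false and the membership test silently fails. A condensed sketch of the stream-based helper the patch adds (ResultVar is the class from the diff; its toString returns the variable name):

static boolean containsVar(java.util.Collection<ResultVar> vars, String name) {
    // compare by variable name instead of relying on asymmetric equals
    return vars.stream().anyMatch(v -> v.toString().equals(name));
}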
49,738
28.01.2018 15:57:13
28,800
e63eb2e7a3dc7100dc400cbdad6af26e67e29cad
Fix robustness CSR resizing w/ initial size zero This patch fixes a special case of CSR resizing where the initial capacity was set to zero, which occurred during sparse result merge of an initially empty update-in-place variable.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SparseBlockCSR.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/SparseBlockCSR.java", "diff": "@@ -847,12 +847,11 @@ public class SparseBlockCSR extends SparseBlock\nprivate int newCapacity(int minsize) {\n//compute new size until minsize reached\n- double tmpCap = _values.length;\n+ double tmpCap = Math.max(_values.length, 1);\nwhile( tmpCap < minsize ) {\ntmpCap *= (tmpCap <= 1024) ?\nRESIZE_FACTOR1 : RESIZE_FACTOR2;\n}\n-\nreturn (int)Math.min(tmpCap, Integer.MAX_VALUE);\n}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2098] Fix robustness CSR resizing w/ initial size zero This patch fixes a special case of CSR resizing where the initial capacity was set to zero, which occurred during sparse result merge of an initially empty update-in-place variable.
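The bug is a classic fixed point of geometric growth: 0 times any factor stays 0, so a resize loop starting from a zero capacity never reaches minsize. A condensed sketch of the fixed logic, using a factor of 2 as a stand-in for RESIZE_FACTOR1/RESIZE_FACTOR2:

static int newCapacity(int curLength, int minsize) {
    double tmpCap = Math.max(curLength, 1); // the fix: clamp away the 0 fixed point
    while (tmpCap < minsize)
        tmpCap *= 2;
    return (int) Math.min(tmpCap, Integer.MAX_VALUE);
}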
49,698
29.01.2018 12:11:00
28,800
87d7fee7321a70729f4272fa68f108e722ec6221
Covariance Kernels Closes
[ { "change_type": "ADD", "old_path": null, "new_path": "scripts/staging/gaussian_process/covariance.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+cov = function(matrix[double] X)\n+ return(matrix[double] out) {\n+\n+ K = -2 * X %*% t(X) + rowSums(X^2) + t( rowSums(X^2) )\n+ out = exp(- 0.5 * K);\n+\n+}\n+\n+/*\n+# for each dimension\n+for( di in 1:d) {\n+\n+ Xd = X[1:n,di] %*% matrix(1, rows=1, cols=n);\n+\n+ diff = Xd - t(Xd)\n+\n+}\n+*/\n" }, { "change_type": "ADD", "old_path": null, "new_path": "scripts/staging/gaussian_process/test/covariance.dml", "diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# imports\n+source(\"staging/gaussian_process/covariance.dml\") as covariance\n+\n+X = matrix(\"1 2 3\n+ 4 5 6\n+ 7 8 9\", rows=3, cols=3);\n+\n+# ability to give cholesky factorization, tests for the positive\n+# definiteness of the covariance matrix.\n+tmp = covariance::cov(X);\n+\n+for(ri in 1:nrow(X)) {\n+ for(ci in 1:ncol(X)) {\n+ print(as.scalar(tmp[ri, ci]) )\n+ }\n+}\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2004] Covariance Kernels Closes #719.
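The script computes a squared-exponential (RBF) covariance via the identity ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2<x_i, x_j>, which is exactly what the rowSums/matrix-multiply formulation in covariance.dml expresses. A plain-Java sketch of the same computation (hypothetical helper, O(n^2 * d)):

static double[][] rbfCov(double[][] X) {
    int n = X.length, d = X[0].length;
    double[] sq = new double[n];          // squared row norms ||x_i||^2
    for (int i = 0; i < n; i++)
        for (int k = 0; k < d; k++)
            sq[i] += X[i][k] * X[i][k];
    double[][] K = new double[n][n];
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++) {
            double dot = 0;               // inner product <x_i, x_j>
            for (int k = 0; k < d; k++)
                dot += X[i][k] * X[j][k];
            K[i][j] = Math.exp(-0.5 * (sq[i] + sq[j] - 2 * dot));
        }
    return K;
}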
49,736
29.01.2018 14:56:42
28,800
9b270d61a16cab35b4cb66bbae36f09c5d738289
Added two-step strategy to deal with potential fragmentation on GPU Closes
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java", "diff": "@@ -292,6 +292,9 @@ public class GPUContext {\npublic Pointer allocate(String instructionName, long size, int statsCount) throws DMLRuntimeException {\nlong t0 = 0, t1 = 0, end = 0;\nPointer A;\n+ if(size < 0) {\n+ throw new DMLRuntimeException(\"Cannot allocate memory of size \" + size);\n+ }\nif (freeCUDASpaceMap.containsKey(size)) {\nif (LOG.isTraceEnabled()) {\nLOG.trace(\n@@ -321,7 +324,41 @@ public class GPUContext {\nt0 = System.nanoTime();\nensureFreeSpace(instructionName, size);\nA = new Pointer();\n+ try {\n+ cudaMalloc(A, size);\n+ } catch(jcuda.CudaException e) {\n+ if(!DMLScript.EAGER_CUDA_FREE) {\n+ // Strategy to avoid memory allocation due to potential fragmentation (a rare event):\n+ // Step 1. First clear up lazy matrices and try cudaMalloc again.\n+ // Step 2. Even if the issue persists, then evict all the allocated GPU objects and and try cudaMalloc again.\n+ // After Step 2, SystemML will hold no pointers on GPU and the hope is that cudaMalloc will start afresh\n+ // by allocating objects sequentially with no holes.\n+\n+ // Step 1:\n+ LOG.debug(\"Eagerly deallocating rmvar-ed matrices to avoid memory allocation error due to potential fragmentation.\");\n+ clearFreeCUDASpaceMap(instructionName, -1);\n+ try {\ncudaMalloc(A, size);\n+ } catch(jcuda.CudaException e1) {\n+ // Step 2:\n+ GPUStatistics.cudaForcedClearUnpinnedMatCount.add(1);\n+ LOG.warn(\"Eagerly deallocating unpinned matrices to avoid memory allocation error due to potential fragmentation. \"\n+ + \"If you see this warning often, we recommend that you set systemml.gpu.eager.cudaFree configuration property to true\");\n+ for(GPUObject toBeRemoved : allocatedGPUObjects) {\n+ if (!toBeRemoved.isLocked()) {\n+ if (toBeRemoved.dirty) {\n+ toBeRemoved.copyFromDeviceToHost(instructionName, true);\n+ }\n+ toBeRemoved.clearData(true);\n+ }\n+ }\n+ cudaMalloc(A, size);\n+ }\n+ }\n+ else {\n+ throw new DMLRuntimeException(\"Unable to allocate memory of size \" + size + \" using cudaMalloc\", e);\n+ }\n+ }\nif (DMLScript.STATISTICS)\nGPUStatistics.cudaAllocTime.add(System.nanoTime() - t0);\nif (DMLScript.STATISTICS)\n@@ -466,30 +503,24 @@ public class GPUContext {\n}\n/**\n- * Memory on the GPU is tried to be freed up until either a chunk of needed size is freed up\n- * or it fails.\n- * First the set of reusable blocks is freed up. If that isn't enough, the set of allocated matrix\n- * blocks with zero locks on them is freed up.\n- * The process cycles through the sorted list of allocated {@link GPUObject} instances. Sorting is based on\n- * number of (read) locks that have been obtained on it (reverse order). 
It repeatedly frees up\n- * blocks on which there are zero locks until the required size has been freed up.\n- * // TODO: update it with hybrid policy\n+ * Release the set of free blocks maintained in a GPUObject.freeCUDASpaceMap to free up space\n*\n* @param instructionName name of the instruction for which performance measurements are made\n- * @param neededSize desired size to be freed up on the GPU\n+ * @param neededSize desired size to be freed up on the GPU (-1 if we want to eagerly free up all the blocks)\n* @throws DMLRuntimeException If no reusable memory blocks to free up or if not enough matrix blocks with zero locks on them.\n*/\n- protected void evict(String instructionName, final long neededSize) throws DMLRuntimeException {\n- if (LOG.isTraceEnabled()) {\n- LOG.trace(\"GPU : evict called from \" + instructionName + \" for size \" + neededSize + \" on \" + this);\n+ protected void clearFreeCUDASpaceMap(String instructionName, final long neededSize) throws DMLRuntimeException {\n+ if(neededSize < 0) {\n+ GPUStatistics.cudaForcedClearLazyFreedMatCount.add(1);\n+ while(freeCUDASpaceMap.size() > 0) {\n+ Entry<Long, Set<Pointer>> toFreeListPair = freeCUDASpaceMap.removeAndGetLRUEntry();\n+ freeCUDASpaceMap.remove(toFreeListPair.getKey());\n+ for(Pointer toFree : toFreeListPair.getValue()) {\n+ cudaFreeHelper(instructionName, toFree, true);\n}\n- GPUStatistics.cudaEvictionCount.add(1);\n- if (LOG.isDebugEnabled()) {\n- printMemoryInfo(\"EVICTION_CUDA_FREE_SPACE\");\n}\n-\n- // Release the set of free blocks maintained in a GPUObject.freeCUDASpaceMap\n- // to free up space\n+ }\n+ else {\nLRUCacheMap<Long, Set<Pointer>> lruCacheMap = freeCUDASpaceMap;\nwhile (lruCacheMap.size() > 0) {\nif (neededSize <= getAvailableMemory())\n@@ -506,6 +537,33 @@ public class GPUContext {\nlruCacheMap.remove(size);\ncudaFreeHelper(instructionName, toFree, true);\n}\n+ }\n+ }\n+\n+ /**\n+ * Memory on the GPU is tried to be freed up until either a chunk of needed size is freed up\n+ * or it fails.\n+ * First the set of reusable blocks is freed up. If that isn't enough, the set of allocated matrix\n+ * blocks with zero locks on them is freed up.\n+ * The process cycles through the sorted list of allocated {@link GPUObject} instances. Sorting is based on\n+ * number of (read) locks that have been obtained on it (reverse order). 
It repeatedly frees up\n+ * blocks on which there are zero locks until the required size has been freed up.\n+ * // TODO: update it with hybrid policy\n+ *\n+ * @param instructionName name of the instruction for which performance measurements are made\n+ * @param neededSize desired size to be freed up on the GPU\n+ * @throws DMLRuntimeException If no reusable memory blocks to free up or if not enough matrix blocks with zero locks on them.\n+ */\n+ protected void evict(String instructionName, final long neededSize) throws DMLRuntimeException {\n+ if (LOG.isTraceEnabled()) {\n+ LOG.trace(\"GPU : evict called from \" + instructionName + \" for size \" + neededSize + \" on \" + this);\n+ }\n+ GPUStatistics.cudaEvictionCount.add(1);\n+ if (LOG.isDebugEnabled()) {\n+ printMemoryInfo(\"EVICTION_CUDA_FREE_SPACE\");\n+ }\n+\n+ clearFreeCUDASpaceMap(instructionName, neededSize);\nif (neededSize <= getAvailableMemory())\nreturn;\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/GPUStatistics.java", "new_path": "src/main/java/org/apache/sysml/utils/GPUStatistics.java", "diff": "@@ -57,6 +57,8 @@ public class GPUStatistics {\npublic static LongAdder cudaToDevCount = new LongAdder();\npublic static LongAdder cudaFromDevCount = new LongAdder();\npublic static LongAdder cudaEvictionCount = new LongAdder();\n+ public static LongAdder cudaForcedClearLazyFreedMatCount = new LongAdder();\n+ public static LongAdder cudaForcedClearUnpinnedMatCount = new LongAdder();\n// Per instruction miscellaneous timers.\n// Used to record events in a CP Heavy Hitter instruction and\n@@ -89,6 +91,8 @@ public class GPUStatistics {\ncudaToDevCount.reset();\ncudaFromDevCount.reset();\ncudaEvictionCount.reset();\n+ cudaForcedClearLazyFreedMatCount.reset();\n+ cudaForcedClearUnpinnedMatCount.reset();\nresetMiscTimers();\n}\n@@ -193,14 +197,16 @@ public class GPUStatistics {\n+ String.format(\"%.3f\", cudaMemSet0Time.longValue()*1e-9) + \"/\"\n+ String.format(\"%.3f\", cudaToDevTime.longValue()*1e-9) + \"/\"\n+ String.format(\"%.3f\", cudaFromDevTime.longValue()*1e-9) + \" sec.\\n\");\n- sb.append(\"GPU mem tx count (alloc/dealloc/set0/toDev/fromDev/evict):\\t\"\n+ sb.append(\"GPU mem tx count (alloc/dealloc/set0/toDev/fromDev/evict/forcedEvict(lazy/unpinned)):\\t\"\n+ cudaAllocCount.longValue() + \"/\"\n+ cudaDeAllocCount.longValue() + \"/\"\n+ cudaMemSet0Count.longValue() + \"/\"\n+ cudaSparseConversionCount.longValue() + \"/\"\n+ cudaToDevCount.longValue() + \"/\"\n+ cudaFromDevCount.longValue() + \"/\"\n- + cudaEvictionCount.longValue() + \".\\n\");\n+ + cudaEvictionCount.longValue() + \"/(\"\n+ + cudaForcedClearLazyFreedMatCount.longValue() + \"/\"\n+ + cudaForcedClearUnpinnedMatCount.longValue() + \").\\n\");\nsb.append(\"GPU conversion time (sparseConv/sp2dense/dense2sp):\\t\"\n+ String.format(\"%.3f\", cudaSparseConversionTime.longValue()*1e-9) + \"/\"\n+ String.format(\"%.3f\", cudaSparseToDenseTime.longValue()*1e-9) + \"/\"\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Added two-step strategy to deal with potential fragmentation on GPU Closes #669.
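A condensed sketch of the two-step recovery wrapped around cudaMalloc; the helper names are hypothetical stand-ins for the GPUContext methods in the patch, which is why this is not runnable as-is:

Pointer tryAllocate(long size) {
    try {
        return doCudaMalloc(size);        // fast path: plain cudaMalloc
    } catch (RuntimeException e1) {
        clearLazilyFreedBlocks();         // step 1: eagerly free rmvar-ed blocks
        try {
            return doCudaMalloc(size);
        } catch (RuntimeException e2) {
            evictAllUnpinnedObjects();    // step 2: copy dirty data to host, free all unpinned pointers
            return doCudaMalloc(size);    // last attempt on a defragmented device; may still throw
        }
    }
}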
49,736
29.01.2018 16:59:40
28,800
91f6fb57293d9adf15714dc55c0bba962aefd107
Display the time taken for eviction as well as for the two-staged fragmentation handling in the statistics.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java", "new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java", "diff": "@@ -336,10 +336,15 @@ public class GPUContext {\n// Step 1:\nLOG.debug(\"Eagerly deallocating rmvar-ed matrices to avoid memory allocation error due to potential fragmentation.\");\n+ long forcedEvictStartTime = DMLScript.STATISTICS ? System.nanoTime() : 0;\nclearFreeCUDASpaceMap(instructionName, -1);\n+ if(DMLScript.STATISTICS) {\n+ GPUStatistics.cudaForcedClearLazyFreedEvictTime.add(System.nanoTime()-forcedEvictStartTime);\n+ }\ntry {\ncudaMalloc(A, size);\n} catch(jcuda.CudaException e1) {\n+ forcedEvictStartTime = DMLScript.STATISTICS ? System.nanoTime() : 0;\n// Step 2:\nGPUStatistics.cudaForcedClearUnpinnedMatCount.add(1);\nLOG.warn(\"Eagerly deallocating unpinned matrices to avoid memory allocation error due to potential fragmentation. \"\n@@ -352,6 +357,9 @@ public class GPUContext {\ntoBeRemoved.clearData(true);\n}\n}\n+ if(DMLScript.STATISTICS) {\n+ GPUStatistics.cudaForcedClearUnpinnedEvictTime.add(System.nanoTime()-forcedEvictStartTime);\n+ }\ncudaMalloc(A, size);\n}\n}\n@@ -555,6 +563,7 @@ public class GPUContext {\n* @throws DMLRuntimeException If no reusable memory blocks to free up or if not enough matrix blocks with zero locks on them.\n*/\nprotected void evict(String instructionName, final long neededSize) throws DMLRuntimeException {\n+ long t0 = DMLScript.STATISTICS ? System.nanoTime() : 0;\nif (LOG.isTraceEnabled()) {\nLOG.trace(\"GPU : evict called from \" + instructionName + \" for size \" + neededSize + \" on \" + this);\n}\n@@ -565,8 +574,12 @@ public class GPUContext {\nclearFreeCUDASpaceMap(instructionName, neededSize);\n- if (neededSize <= getAvailableMemory())\n+ if (neededSize <= getAvailableMemory()) {\n+ if(DMLScript.STATISTICS) {\n+ GPUStatistics.cudaEvictTime.add(System.nanoTime() - t0);\n+ }\nreturn;\n+ }\nif (allocatedGPUObjects.size() == 0) {\nthrow new DMLRuntimeException(\n@@ -628,6 +641,9 @@ public class GPUContext {\n}\ntoBeRemoved.clearData(true);\n}\n+ if(DMLScript.STATISTICS) {\n+ GPUStatistics.cudaEvictTime.add(System.nanoTime() - t0);\n+ }\n}\n/**\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/utils/GPUStatistics.java", "new_path": "src/main/java/org/apache/sysml/utils/GPUStatistics.java", "diff": "@@ -51,6 +51,9 @@ public class GPUStatistics {\npublic static LongAdder cudaMemSet0Time = new LongAdder(); // time spent in setting memory to 0 on the GPU (part of reusing and for new allocates)\npublic static LongAdder cudaToDevTime = new LongAdder(); // time spent in copying data from host (CPU) to device (GPU) memory\npublic static LongAdder cudaFromDevTime = new LongAdder(); // time spent in copying data from device to host\n+ public static LongAdder cudaEvictTime = new LongAdder(); // time spent in eviction\n+ public static LongAdder cudaForcedClearLazyFreedEvictTime = new LongAdder(); // time spent in forced lazy eviction\n+ public static LongAdder cudaForcedClearUnpinnedEvictTime = new LongAdder(); // time spent in forced unpinned eviction\npublic static LongAdder cudaAllocCount = new LongAdder();\npublic static LongAdder cudaDeAllocCount = new LongAdder();\npublic static LongAdder cudaMemSet0Count = new LongAdder();\n@@ -86,6 +89,9 @@ public class GPUStatistics {\ncudaMemSet0Count.reset();\ncudaToDevTime.reset();\ncudaFromDevTime.reset();\n+ cudaEvictTime.reset();\n+ 
cudaForcedClearLazyFreedEvictTime.reset();\n+ cudaForcedClearUnpinnedEvictTime.reset();\ncudaAllocCount.reset();\ncudaDeAllocCount.reset();\ncudaToDevCount.reset();\n@@ -191,12 +197,15 @@ public class GPUStatistics {\nsb.append(\"CUDA/CuLibraries init time:\\t\" + String.format(\"%.3f\", cudaInitTime*1e-9) + \"/\"\n+ String.format(\"%.3f\", cudaLibrariesInitTime*1e-9) + \" sec.\\n\");\nsb.append(\"Number of executed GPU inst:\\t\" + getNoOfExecutedGPUInst() + \".\\n\");\n- sb.append(\"GPU mem tx time (alloc/dealloc/set0/toDev/fromDev):\\t\"\n+ sb.append(\"GPU mem tx time (alloc/dealloc/set0/toDev/fromDev/evict/forcedEvict(lazy/unpinned)):\\t\"\n+ String.format(\"%.3f\", cudaAllocTime.longValue()*1e-9) + \"/\"\n+ String.format(\"%.3f\", cudaDeAllocTime.longValue()*1e-9) + \"/\"\n+ String.format(\"%.3f\", cudaMemSet0Time.longValue()*1e-9) + \"/\"\n+ String.format(\"%.3f\", cudaToDevTime.longValue()*1e-9) + \"/\"\n- + String.format(\"%.3f\", cudaFromDevTime.longValue()*1e-9) + \" sec.\\n\");\n+ + String.format(\"%.3f\", cudaFromDevTime.longValue()*1e-9) + \"/\"\n+ + String.format(\"%.3f\", cudaEvictTime.longValue()*1e-9) + \"/(\"\n+ + String.format(\"%.3f\", cudaForcedClearLazyFreedEvictTime.longValue()*1e-9) + \"/\"\n+ + String.format(\"%.3f\", cudaForcedClearUnpinnedEvictTime.longValue()*1e-9) + \") sec.\\n\");\nsb.append(\"GPU mem tx count (alloc/dealloc/set0/toDev/fromDev/evict/forcedEvict(lazy/unpinned)):\\t\"\n+ cudaAllocCount.longValue() + \"/\"\n+ cudaDeAllocCount.longValue() + \"/\"\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Display the time taken for eviction as well as for the two-staged fragmentation handling in the statistics.
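The instrumentation follows a guard-then-accumulate pattern: System.nanoTime() is read only when statistics are enabled, and elapsed times are folded into contention-friendly LongAdders. A minimal self-contained sketch (STATISTICS stands in for DMLScript.STATISTICS):

static final java.util.concurrent.atomic.LongAdder evictTime = new java.util.concurrent.atomic.LongAdder();
static final boolean STATISTICS = true;

static void timedEviction(Runnable evictionWork) {
    long t0 = STATISTICS ? System.nanoTime() : 0; // avoid the nanoTime call when disabled
    evictionWork.run();
    if (STATISTICS)
        evictTime.add(System.nanoTime() - t0);    // LongAdder scales under concurrent updates
}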
49,736
30.01.2018 16:45:55
28,800
9c3057a34c84d5bf1c698ad0a5c3c34d90412dbb
Support newer as well as older Keras API
[ { "change_type": "MODIFY", "old_path": "src/main/python/systemml/mllearn/keras2caffe.py", "new_path": "src/main/python/systemml/mllearn/keras2caffe.py", "diff": "@@ -71,7 +71,9 @@ supportedLayers = {\ndef _getInboundLayers(layer):\nin_names = []\n- for node in layer.inbound_nodes: # get inbound nodes to current layer\n+ # get inbound nodes to current layer (support newer as well as older APIs)\n+ inbound_nodes = layer.inbound_nodes if hasattr(layer, 'inbound_nodes') else layer._inbound_nodes\n+ for node in inbound_nodes:\nnode_list = node.inbound_layers # get layers pointing to this node\nin_names = in_names + node_list\nif any('flat' in s.name for s in in_names): # For Caffe2DML to reroute any use of Flatten layers\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-445] Support newer as well as older Keras API
49,738
30.01.2018 17:20:58
28,800
e6ee26a0e87ff75499ff27ba24d3dddd0f9ba999
[HOTFIX] Fix sparse binary inplace operations (empty row handling)
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixBincell.java", "new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixBincell.java", "diff": "@@ -1213,6 +1213,7 @@ public class LibMatrixBincell\nreturn;\nSparseBlock b = m2.sparseBlock;\nfor(int r=0; r<rlen; r++) {\n+ if( b.isEmpty(r) ) continue;\nint bpos = b.pos(r);\nint blen = b.size(r);\nint[] bix = b.indexes(r);\n" } ]
Java
Apache License 2.0
apache/systemds
[HOTFIX] Fix sparse binary inplace operations (empty row handling)
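The fixed iteration pattern in one place: a SparseBlock may contain entirely empty rows, and pos/size/indexes must not be consulted for them. A condensed sketch of the corrected loop (b and rlen as in the patched method):

for (int r = 0; r < rlen; r++) {
    if (b.isEmpty(r)) continue;       // the added guard: skip empty rows
    int bpos = b.pos(r);
    int blen = b.size(r);
    int[] bix = b.indexes(r);
    double[] bvals = b.values(r);
    // ... apply the binary operator to the blen entries starting at bpos ...
}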
49,738
30.01.2018 21:14:01
28,800
4b5b14b8b2765b2f5e2d66da94623f71e71a5d9a
Extended JMLC API (configuration handling) This patch extends the JMLC API with set/reset functionality for DML configuration properties. In detail, we introduce an interface for configurable APIs to ensure API consistency across, for example, MLContext and JMLC.
[ { "change_type": "ADD", "old_path": null, "new_path": "src/main/java/org/apache/sysml/api/ConfigurableAPI.java", "diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.api;\n+\n+/**\n+ * This interface defines the programmatic access to dml configuration properties\n+ * (as defined in SystemML-config.xml) to ensure API consistency across all APIs.\n+ */\n+public interface ConfigurableAPI\n+{\n+ /**\n+ * Reset configuration settings to default settings.\n+ */\n+ public void resetConfig();\n+\n+ /**\n+ * Set configuration property, such as\n+ * {@code setConfigProperty(\"sysml.localtmpdir\", \"/tmp/systemml\")}.\n+ *\n+ * @param propertyName\n+ * property name\n+ * @param propertyValue\n+ * property value\n+ */\n+ public void setConfigProperty(String propertyName, String propertyValue);\n+}\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/jmlc/PreparedScript.java", "new_path": "src/main/java/org/apache/sysml/api/jmlc/PreparedScript.java", "diff": "@@ -28,6 +28,7 @@ import java.util.Map.Entry;\nimport org.apache.commons.lang.NotImplementedException;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\n+import org.apache.sysml.api.ConfigurableAPI;\nimport org.apache.sysml.api.DMLException;\nimport org.apache.sysml.conf.CompilerConfig;\nimport org.apache.sysml.conf.ConfigurationManager;\n@@ -37,6 +38,7 @@ import org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.hops.ipa.FunctionCallGraph;\nimport org.apache.sysml.parser.DMLProgram;\nimport org.apache.sysml.parser.Expression.ValueType;\n+import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.FunctionProgramBlock;\nimport org.apache.sysml.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysml.runtime.controlprogram.Program;\n@@ -62,7 +64,7 @@ import org.apache.sysml.utils.Explain;\n/**\n* Representation of a prepared (precompiled) DML/PyDML script.\n*/\n-public class PreparedScript\n+public class PreparedScript implements ConfigurableAPI\n{\nprivate static final Log LOG = LogFactory.getLog(PreparedScript.class.getName());\n@@ -122,6 +124,21 @@ public class PreparedScript\n_cconf = cconf;\n}\n+ @Override\n+ public void resetConfig() {\n+ _dmlconf.set(new DMLConfig());\n+ }\n+\n+ @Override\n+ public void setConfigProperty(String propertyName, String propertyValue) {\n+ try {\n+ _dmlconf.setTextValue(propertyName, propertyValue);\n+ }\n+ catch( DMLRuntimeException e ) {\n+ throw new RuntimeException(e);\n+ }\n+ }\n+\n/**\n* Binds a scalar boolean to a registered input variable.\n*\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContext.java", "new_path": 
"src/main/java/org/apache/sysml/api/mlcontext/MLContext.java", "diff": "@@ -26,6 +26,7 @@ import org.apache.log4j.Logger;\nimport org.apache.spark.SparkContext;\nimport org.apache.spark.api.java.JavaSparkContext;\nimport org.apache.spark.sql.SparkSession;\n+import org.apache.sysml.api.ConfigurableAPI;\nimport org.apache.sysml.api.DMLScript;\nimport org.apache.sysml.conf.ConfigurationManager;\nimport org.apache.sysml.conf.DMLConfig;\n@@ -48,7 +49,8 @@ import org.apache.sysml.utils.MLContextProxy;\n* languages such as Scala, Java, and Python.\n*\n*/\n-public class MLContext {\n+public class MLContext implements ConfigurableAPI\n+{\n/**\n* Logger for MLContext\n*/\n@@ -277,24 +279,12 @@ public class MLContext {\nMLContextUtil.setCompilerConfig();\n}\n- /**\n- * Reset configuration settings to default settings.\n- */\n+ @Override\npublic void resetConfig() {\nMLContextUtil.setDefaultConfig();\n}\n-\n-\n- /**\n- * Set configuration property, such as\n- * {@code setConfigProperty(\"sysml.localtmpdir\", \"/tmp/systemml\")}.\n- *\n- * @param propertyName\n- * property name\n- * @param propertyValue\n- * property value\n- */\n+ @Override\npublic void setConfigProperty(String propertyName, String propertyValue) {\nDMLConfig config = ConfigurationManager.getDMLConfig();\ntry {\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java", "diff": "@@ -55,7 +55,6 @@ import org.xml.sax.SAXException;\npublic class DMLConfig\n{\n-\npublic static final String DEFAULT_SYSTEMML_CONFIG_FILEPATH = \"./SystemML-config.xml\";\nprivate static final Log LOG = LogFactory.getLog(DMLConfig.class.getName());\n@@ -141,8 +140,7 @@ public class DMLConfig\n_defaultVals.put(FLOATING_POINT_PRECISION, \"double\" );\n}\n- public DMLConfig()\n- {\n+ public DMLConfig() {\n}\n@@ -171,11 +169,21 @@ public class DMLConfig\nLOCAL_MR_MODE_STAGING_DIR = getTextValue(LOCAL_TMP_DIR) + \"/hadoop/mapred/staging\";\n}\n- public DMLConfig( Element root )\n- {\n+ public DMLConfig( Element root ) {\n_xmlRoot = root;\n}\n+ public DMLConfig( DMLConfig dmlconf ) {\n+ set(dmlconf);\n+ }\n+\n+ public void set(DMLConfig dmlconf) {\n+ _fileName = dmlconf._fileName;\n+ _xmlRoot = dmlconf._xmlRoot;\n+ _documentBuilder = dmlconf._documentBuilder;\n+ _document = dmlconf._document;\n+ }\n+\n/**\n* Method to parse configuration\n* @throws ParserConfigurationException\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2100] Extended JMLC API (configuration handling) This patch extends the JMLC API with set/reset functionality for DML configuration properties. In detail, we introduce an interface for configurable APIs to ensure API consistency across, for example, MLContext and JMLC.
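A hedged usage sketch of the shared interface from the JMLC side; the script content is illustrative and the exact prepareScript overloads may differ, but the property name comes from the patch's javadoc:

Connection conn = new Connection();
PreparedScript ps = conn.prepareScript(
    "Y = X * 2;", new String[]{"X"}, new String[]{"Y"}, false);
ps.setConfigProperty("sysml.localtmpdir", "/tmp/systemml"); // same call as on MLContext
ps.resetConfig();                                           // revert to DMLConfig defaults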
49,738
30.01.2018 22:56:18
28,800
56205d025bf049cc52acd99c87cbf287ddf2929a
Fix JMLC robustness for missing fs impl classes This patch increases the robustness of JMLC for missing file system implementation classes. Previously, thrown NoClassDefFoundErrors bypassed the existing exception handling, leading to failures even though the persistent reads are later replaced by in-memory inputs.
[ { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java", "diff": "@@ -95,6 +95,7 @@ import org.apache.sysml.runtime.instructions.cp.Data;\nimport org.apache.sysml.runtime.instructions.cp.FunctionCallCPInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\nimport org.apache.sysml.runtime.instructions.spark.data.RDDObject;\n+import org.apache.sysml.runtime.io.IOUtilFunctions;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.MetaDataFormat;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n@@ -1096,11 +1097,10 @@ public class OptimizerRuleBased extends Optimizer\n//account for remaining hdfs capacity\ntry {\n- FileSystem fs = FileSystem.get(ConfigurationManager.getCachedJobConf());\n+ FileSystem fs = IOUtilFunctions.getFileSystem(ConfigurationManager.getCachedJobConf());\nlong hdfsCapacityRemain = fs.getStatus().getRemaining();\nlong sizeInputs = 0; //sum of all input sizes (w/o replication)\n- for( String var : partitionedMatrices.keySet() )\n- {\n+ for( String var : partitionedMatrices.keySet() ) {\nMatrixObject mo = (MatrixObject)vars.get(var);\nPath fname = new Path(mo.getFileName());\nif( fs.exists( fname ) ) //non-existing (e.g., CP) -> small file\n" }, { "change_type": "MODIFY", "old_path": "src/main/java/org/apache/sysml/runtime/io/IOUtilFunctions.java", "new_path": "src/main/java/org/apache/sysml/runtime/io/IOUtilFunctions.java", "diff": "@@ -67,8 +67,20 @@ public class IOUtilFunctions\nConfigurationManager.getCachedJobConf());\n}\n+ public static FileSystem getFileSystem(Configuration conf) throws IOException {\n+ try{\n+ return FileSystem.get(conf);\n+ } catch(NoClassDefFoundError err) {\n+ throw new IOException(err.getMessage());\n+ }\n+ }\n+\npublic static FileSystem getFileSystem(Path fname, Configuration conf) throws IOException {\n+ try {\nreturn FileSystem.get(fname.toUri(), conf);\n+ } catch(NoClassDefFoundError err) {\n+ throw new IOException(err.getMessage());\n+ }\n}\npublic static boolean isSameFileScheme(Path path1, Path path2) {\n" } ]
Java
Apache License 2.0
apache/systemds
[SYSTEMML-2101] Fix JMLC robustness for missing fs impl classes This patch increases the robustness of JMLC for missing file system implementation classes. Previously, thrown NoClassDefFoundErrors bypassed the existing exception handling, leading to failures even though the persistent reads are later replaced by in-memory inputs.
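The essence of the fix: NoClassDefFoundError is an Error, not an Exception, so existing catch(Exception) blocks never saw it; wrapping it as a checked IOException routes it through the existing handling. As in the patch:

public static FileSystem getFileSystem(Configuration conf) throws IOException {
    try {
        return FileSystem.get(conf);
    } catch (NoClassDefFoundError err) {
        throw new IOException(err.getMessage()); // degrade the Error to a catchable exception
    }
}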
49,772
31.01.2018 11:24:13
28,800
9970fd8148e835a65ed45d66e7ebaff57af3b01b
Minor cleanup and formatting of the new Factorization Machines code
[ { "change_type": "MODIFY", "old_path": "scripts/nn/layers/fm.dml", "new_path": "scripts/nn/layers/fm.dml", "diff": "@@ -38,21 +38,22 @@ forward = function(matrix[double] X, matrix[double] w0, matrix[double] W, matrix\n* - V : factorized interaction terms, of shape (d, k).\n*\n* Outputs:\n- * - out : target vector, of shape (n, 1)\n+ * - out : target vector, of shape (n, 1).\n*/\n-\n- out = (X %*% W) + (0.5 * rowSums((X %*% V)^2 - (X^2 %*% V^2)) ) + w0; # target vector, shape (n, 1)\n+ out = (X %*% W) + (0.5 * rowSums((X %*% V)^2 - (X^2 %*% V^2)) ) + w0 # shape (n, 1)\n}\n-backward = function(matrix[double] dout, matrix[double] X, matrix[double] w0, matrix[double] W, matrix[double] V)\n+backward = function(matrix[double] dout, matrix[double] X, matrix[double] w0, matrix[double] W,\n+ matrix[double] V)\nreturn (matrix[double] dw0, matrix[double] dW, matrix[double] dV) {\n-\n/*\n- * This function accepts the upstream gradients w.r.t output target vector, and\n- * returns the gradients of the loss w.r.t the parameters\n+ * This function accepts the upstream gradients w.r.t. output target\n+ * vector, and returns the gradients of the loss w.r.t. the\n+ * parameters.\n*\n* Inputs:\n- * - dout : the gradient of the loss function w.r.t y, of shape (n, 1).\n+ * - dout : the gradient of the loss function w.r.t y, of\n+ * shape (n, 1).\n* - X, w0, W, V are as mentioned in the above forward function.\n*\n* Outputs:\n@@ -61,31 +62,32 @@ backward = function(matrix[double] dout, matrix[double] X, matrix[double] w0, ma\n* - dW : the gradient of loss function w.r.t W, of shape (d, 1).\n* - dV : the gradient of loss function w.r.t V, of shape (d, k).\n*/\n- n = nrow(X);\n- d = ncol(X);\n- k = ncol(V);\n+ n = nrow(X)\n+ d = ncol(X)\n+ k = ncol(V)\n# 1. gradient of target vector w.r.t. w0\n- g_w0 = as.matrix(1); # shape (1, 1)\n+ g_w0 = as.matrix(1) # shape (1, 1)\n## gradient of loss function w.r.t. w0\n- dw0 = colSums(dout) ; # shape (1, 1)\n+ dw0 = colSums(dout) # shape (1, 1)\n# 2. gradient target vector w.r.t. W\n- g_W = X ; # shape (n, d)\n+ g_W = X # shape (n, d)\n## gradient of loss function w.r.t. W\n- dW = t(g_W) %*% dout; # shape (d, 1)\n+ dW = t(g_W) %*% dout # shape (d, 1)\n+ # TODO: VECTORIZE THE FOLLOWING CODE (https://issues.apache.org/jira/browse/SYSTEMML-2102)\n# 3. gradient of target vector w.r.t. V\n- # First term -> g_V1 = t(X) %*% (X %*% V); # shape (d, k)\n+ # First term -> g_V1 = t(X) %*% (X %*% V) # shape (d, k)\n## gradient of loss function w.r.t. V\n# First term -> t(X) %*% X %*% V\n# Second term -> V(i,f) * (X(i))^2\n- Xt = t( X^2 ) %*% dout # of shape (d,1)\n+ Xt = t( X^2 ) %*% dout # shape (d,1)\ng_V2 = Xt[1,] %*% V[1,]\n@@ -105,7 +107,6 @@ backward = function(matrix[double] dout, matrix[double] X, matrix[double] w0, ma\ndV = (t(X) %*% g_V1) - g_V2\n# dV = mean(dout) * (t(X) %*% X %*%V) - g_V2\n-\n}\ninit = function(int n, int d, int k)\n@@ -122,7 +123,6 @@ init = function(int n, int d, int k)\n* - W : the strength of each feature, of shape (d, 1).\n* - V : factorized interaction terms, of shape (d, k).\n*/\n-\nw0 = matrix(0, rows=1, cols=1)\nW = matrix(0, rows=d, cols=1)\nV = rand(rows=d, cols=k, min=0.0, max=1.0, pdf=\"uniform\", sparsity=.08)\n" } ]
Java
Apache License 2.0
apache/systemds
Minor cleanup and formatting of the new Factorization Machines code
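For reference, the forward pass in fm.dml implements the O(d*k) factorization-machine identity sum_{i<j} <v_i, v_j> x_i x_j = 0.5 * sum_f ((sum_i V[i][f] x_i)^2 - sum_i V[i][f]^2 x_i^2). A per-row Java sketch of the same prediction (hypothetical helper; the script computes this for all rows at once with matrix operations):

static double fmPredict(double w0, double[] w, double[][] V, double[] x) {
    int d = x.length, k = V[0].length;
    double y = w0;                        // global bias
    for (int i = 0; i < d; i++)
        y += w[i] * x[i];                 // linear terms
    for (int f = 0; f < k; f++) {         // pairwise interactions, factor by factor
        double s = 0, s2 = 0;
        for (int i = 0; i < d; i++) {
            double t = V[i][f] * x[i];
            s += t;
            s2 += t * t;
        }
        y += 0.5 * (s * s - s2);
    }
    return y;
}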