| code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int64, 5-1M) |
|---|---|---|---|---|---|
package net.mkowalski.sparkfim.model
import net.mkowalski.sparkfim.test.BaseTest
class ItemTest extends BaseTest {
test("Items with same prefix") {
val item1 = Item(1, 2, 3, 4)
val item2 = Item(1, 2, 3, 5)
assert(item1.prefix == item2.prefix)
}
test("Empty prefix when single item id") {
val item = Item(1)
val emptyPrefix = Prefix()
assert(item.prefix == emptyPrefix)
}
test("Valid prefix when multiple item ids") {
val item = Item(1, 2, 3, 4, 5)
val expectedPrefix = Prefix(1, 2, 3, 4)
assert(item.prefix == expectedPrefix)
}
test("Single item extension when single item id") {
val item = Item(1)
assert(item.extension == 1)
}
test("Valid item extension when multiple item ids") {
val item = Item(1, 2, 3)
assert(item.extension == 3)
}
test("Empty items equality") {
assert(Item() == Item())
}
test("Non empty items equality") {
assert(Item(1, 2, 3) == Item(1, 2, 3))
}
test("Empty items hashcode equality") {
// by default arrays (different objects) with same elements will get different hashCode
// so this check ensures correctness of the hashCode() method
assert(Item().hashCode() == Item().hashCode())
}
test("Non empty, single id items hashcode equality") {
assert(Item(1, 2).hashCode() == Item(1, 2).hashCode())
}
test("Can mergeOption items with same prefix") {
val item1 = Item(1, 2, 3, 4)
val item2 = Item(1, 2, 3, 5)
assertResult(true) {
item1.merge(item2).isDefined
}
}
test("Can mergeOption items with empty prefix") {
val item1 = Item(1)
val item2 = Item(2)
assertResult(true) {
item1.merge(item2).isDefined
}
}
test("Can't mergeOption items with different prefix") {
val item1 = Item(1, 2)
val item2 = Item(2)
assertResult(false) {
item1.merge(item2).isDefined
}
}
}
| mjkowalski/spark-fim | src/test/scala/net/mkowalski/sparkfim/model/ItemTest.scala | Scala | mit | 1,892 |
package com.sksamuel.elastic4s.searches.queries
import com.sksamuel.exts.OptionImplicits._
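// Immutable model of an Elasticsearch fuzzy query on a single field; each setter below returns a copy with the corresponding option filled in.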
case class FuzzyQuery(field: String,
termValue: Any,
fuzziness: Option[String] = None,
boost: Option[Double] = None,
transpositions: Option[Boolean] = None,
maxExpansions: Option[Int] = None,
prefixLength: Option[Int] = None,
queryName: Option[String] = None,
rewrite: Option[String] = None)
extends MultiTermQuery {
def fuzziness(fuzziness: String): FuzzyQuery = copy(fuzziness = fuzziness.some)
def boost(boost: Double): FuzzyQuery = copy(boost = boost.some)
def transpositions(transpositions: Boolean): FuzzyQuery = copy(transpositions = transpositions.some)
def maxExpansions(maxExpansions: Int): FuzzyQuery = copy(maxExpansions = maxExpansions.some)
def prefixLength(prefixLength: Int): FuzzyQuery = copy(prefixLength = prefixLength.some)
def queryName(queryName: String): FuzzyQuery = copy(queryName = queryName.some)
def rewrite(rewrite: String): FuzzyQuery = copy(rewrite = rewrite.some)
}
| Tecsisa/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/queries/FuzzyQuery.scala | Scala | apache-2.0 | 1,256 |
import scala.language.experimental.macros
import scala.reflect.macros.blackbox.Context
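// Compiler test (pos/t5744): the macro implementations receive the implicit evidence for the context bounds and the view bound as additional expression parameters.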
object Macros {
def foo[U: Numeric](x: U): Unit = macro foo_impl[U]
def bar[U: Numeric : Equiv, Y <% String](x: U)(implicit s: String): Unit = macro bar_impl[U, Y]
def foo_impl[U](c: Context)(x: c.Expr[U])(numeric: c.Expr[Numeric[U]]) = {
import c.universe._
val plusOne = Apply(Select(numeric.tree, newTermName("plus")), List(x.tree, Literal(Constant(1))))
val body = Apply(Select(Ident(definitions.PredefModule), newTermName("println")), List(plusOne))
c.Expr[Unit](body)
}
def bar_impl[U, Y](c: Context)(x: c.Expr[U])(numeric: c.Expr[Numeric[U]], equiv: c.Expr[Equiv[U]], viewAsString: c.Expr[Y => String], s: c.Expr[String]) = {
import c.universe._
val plusOne = Apply(Select(numeric.tree, newTermName("plus")), List(x.tree, Literal(Constant(1))))
val plusLen = Apply(Select(numeric.tree, newTermName("plus")), List(plusOne, Select(s.tree, newTermName("length"))))
val body = Apply(Select(Ident(definitions.PredefModule), newTermName("println")), List(plusLen))
c.Expr[Unit](body)
}
}
| lrytz/scala | test/files/pos/t5744/Macros_1.scala | Scala | apache-2.0 | 1,127 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataInputStream, DataOutputStream, File, IOException}
import java.security.PrivilegedExceptionAction
import java.text.DateFormat
import java.util.{Arrays, Comparator, Date, Locale}
import scala.collection.JavaConverters._
import scala.collection.immutable.Map
import scala.collection.mutable
import scala.collection.mutable.HashMap
import scala.util.control.NonFatal
import com.google.common.primitives.Longs
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.security.{Credentials, UserGroupInformation}
import org.apache.hadoop.security.token.{Token, TokenIdentifier}
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.BUFFER_SIZE
import org.apache.spark.util.Utils
/**
* Contains util methods to interact with Hadoop from Spark.
*/
private[spark] class SparkHadoopUtil extends Logging {
private val sparkConf = new SparkConf(false).loadFromSystemProperties(true)
val conf: Configuration = newConfiguration(sparkConf)
UserGroupInformation.setConfiguration(conf)
/**
* Runs the given function with a Hadoop UserGroupInformation as a thread local variable
* (distributed to child threads), used for authenticating HDFS and YARN calls.
*
* IMPORTANT NOTE: If this function is going to be called repeatedly in the same process
* you need to look at https://issues.apache.org/jira/browse/HDFS-3545 and possibly
* do a FileSystem.closeAllForUGI in order to avoid leaking FileSystems.
*/
def runAsSparkUser(func: () => Unit) {
createSparkUser().doAs(new PrivilegedExceptionAction[Unit] {
def run: Unit = func()
})
}
def createSparkUser(): UserGroupInformation = {
val user = Utils.getCurrentUserName()
logDebug("creating UGI for user: " + user)
val ugi = UserGroupInformation.createRemoteUser(user)
transferCredentials(UserGroupInformation.getCurrentUser(), ugi)
ugi
}
def transferCredentials(source: UserGroupInformation, dest: UserGroupInformation) {
dest.addCredentials(source.getCredentials())
}
/**
* Appends S3-specific, spark.hadoop.*, and spark.buffer.size configurations to a Hadoop
* configuration.
*/
def appendS3AndSparkHadoopConfigurations(conf: SparkConf, hadoopConf: Configuration): Unit = {
SparkHadoopUtil.appendS3AndSparkHadoopConfigurations(conf, hadoopConf)
}
/**
* Appends spark.hadoop.* configurations from a [[SparkConf]] to a Hadoop
* configuration without the spark.hadoop. prefix.
*/
def appendSparkHadoopConfigs(conf: SparkConf, hadoopConf: Configuration): Unit = {
SparkHadoopUtil.appendSparkHadoopConfigs(conf, hadoopConf)
}
/**
* Appends spark.hadoop.* configurations from a Map to another without the spark.hadoop. prefix.
*/
def appendSparkHadoopConfigs(
srcMap: Map[String, String],
destMap: HashMap[String, String]): Unit = {
// Copy any "spark.hadoop.foo=bar" system properties into destMap as "foo=bar"
for ((key, value) <- srcMap if key.startsWith("spark.hadoop.")) {
destMap.put(key.substring("spark.hadoop.".length), value)
}
}
/**
* Return an appropriate (subclass) of Configuration. Creating config can initialize some Hadoop
* subsystems.
*/
def newConfiguration(conf: SparkConf): Configuration = {
val hadoopConf = SparkHadoopUtil.newConfiguration(conf)
hadoopConf.addResource(SparkHadoopUtil.SPARK_HADOOP_CONF_FILE)
hadoopConf
}
/**
* Add any user credentials to the job conf which are necessary for running on a secure Hadoop
* cluster.
*/
def addCredentials(conf: JobConf): Unit = {
val jobCreds = conf.getCredentials()
jobCreds.mergeAll(UserGroupInformation.getCurrentUser().getCredentials())
}
def addCurrentUserCredentials(creds: Credentials): Unit = {
UserGroupInformation.getCurrentUser.addCredentials(creds)
}
def loginUserFromKeytab(principalName: String, keytabFilename: String): Unit = {
if (!new File(keytabFilename).exists()) {
throw new SparkException(s"Keytab file: ${keytabFilename} does not exist")
} else {
logInfo("Attempting to login to Kerberos " +
s"using principal: ${principalName} and keytab: ${keytabFilename}")
UserGroupInformation.loginUserFromKeytab(principalName, keytabFilename)
}
}
/**
* Add or overwrite current user's credentials with serialized delegation tokens,
* also confirms correct hadoop configuration is set.
*/
private[spark] def addDelegationTokens(tokens: Array[Byte], sparkConf: SparkConf) {
UserGroupInformation.setConfiguration(newConfiguration(sparkConf))
val creds = deserialize(tokens)
logInfo("Updating delegation tokens for current user.")
logDebug(s"Adding/updating delegation tokens ${dumpTokens(creds)}")
addCurrentUserCredentials(creds)
}
/**
* Returns a function that can be called to find Hadoop FileSystem bytes read. If
* getFSBytesReadOnThreadCallback is called from thread r at time t, the returned callback will
* return the bytes read on r since t.
*/
private[spark] def getFSBytesReadOnThreadCallback(): () => Long = {
val f = () => FileSystem.getAllStatistics.asScala.map(_.getThreadStatistics.getBytesRead).sum
val baseline = (Thread.currentThread().getId, f())
/**
* This function may be called in both spawned child threads and parent task thread (in
* PythonRDD), and Hadoop FileSystem uses thread local variables to track the statistics.
* So we need a map to track the bytes read from the child threads and parent thread,
* summing them together to get the bytes read of this task.
*/
new Function0[Long] {
private val bytesReadMap = new mutable.HashMap[Long, Long]()
override def apply(): Long = {
bytesReadMap.synchronized {
bytesReadMap.put(Thread.currentThread().getId, f())
bytesReadMap.map { case (k, v) =>
v - (if (k == baseline._1) baseline._2 else 0)
}.sum
}
}
}
}
/**
* Returns a function that can be called to find Hadoop FileSystem bytes written. If
* getFSBytesWrittenOnThreadCallback is called from thread r at time t, the returned callback will
* return the bytes written on r since t.
*
* @return None if the required method can't be found.
*/
private[spark] def getFSBytesWrittenOnThreadCallback(): () => Long = {
val threadStats = FileSystem.getAllStatistics.asScala.map(_.getThreadStatistics)
val f = () => threadStats.map(_.getBytesWritten).sum
val baselineBytesWritten = f()
() => f() - baselineBytesWritten
}
/**
* Get [[FileStatus]] objects for all leaf children (files) under the given base path. If the
* given path points to a file, return a single-element collection containing [[FileStatus]] of
* that file.
*/
def listLeafStatuses(fs: FileSystem, basePath: Path): Seq[FileStatus] = {
listLeafStatuses(fs, fs.getFileStatus(basePath))
}
/**
* Get [[FileStatus]] objects for all leaf children (files) under the given base path. If the
* given path points to a file, return a single-element collection containing [[FileStatus]] of
* that file.
*/
def listLeafStatuses(fs: FileSystem, baseStatus: FileStatus): Seq[FileStatus] = {
def recurse(status: FileStatus): Seq[FileStatus] = {
val (directories, leaves) = fs.listStatus(status.getPath).partition(_.isDirectory)
leaves ++ directories.flatMap(f => listLeafStatuses(fs, f))
}
if (baseStatus.isDirectory) recurse(baseStatus) else Seq(baseStatus)
}
def listLeafDirStatuses(fs: FileSystem, basePath: Path): Seq[FileStatus] = {
listLeafDirStatuses(fs, fs.getFileStatus(basePath))
}
def listLeafDirStatuses(fs: FileSystem, baseStatus: FileStatus): Seq[FileStatus] = {
def recurse(status: FileStatus): Seq[FileStatus] = {
val (directories, files) = fs.listStatus(status.getPath).partition(_.isDirectory)
val leaves = if (directories.isEmpty) Seq(status) else Seq.empty[FileStatus]
leaves ++ directories.flatMap(dir => listLeafDirStatuses(fs, dir))
}
assert(baseStatus.isDirectory)
recurse(baseStatus)
}
def isGlobPath(pattern: Path): Boolean = {
pattern.toString.exists("{}[]*?\\".toSet.contains)
}
def globPath(pattern: Path): Seq[Path] = {
val fs = pattern.getFileSystem(conf)
globPath(fs, pattern)
}
def globPath(fs: FileSystem, pattern: Path): Seq[Path] = {
Option(fs.globStatus(pattern)).map { statuses =>
statuses.map(_.getPath.makeQualified(fs.getUri, fs.getWorkingDirectory)).toSeq
}.getOrElse(Seq.empty[Path])
}
def globPathIfNecessary(pattern: Path): Seq[Path] = {
if (isGlobPath(pattern)) globPath(pattern) else Seq(pattern)
}
def globPathIfNecessary(fs: FileSystem, pattern: Path): Seq[Path] = {
if (isGlobPath(pattern)) globPath(fs, pattern) else Seq(pattern)
}
/**
* Lists all files in a directory that start with the specified prefix and do not end with the
* given suffix. The returned {{FileStatus}} instances are sorted by the modification times of
* the respective files.
*/
def listFilesSorted(
remoteFs: FileSystem,
dir: Path,
prefix: String,
exclusionSuffix: String): Array[FileStatus] = {
try {
val fileStatuses = remoteFs.listStatus(dir,
new PathFilter {
override def accept(path: Path): Boolean = {
val name = path.getName
name.startsWith(prefix) && !name.endsWith(exclusionSuffix)
}
})
Arrays.sort(fileStatuses, new Comparator[FileStatus] {
override def compare(o1: FileStatus, o2: FileStatus): Int = {
Longs.compare(o1.getModificationTime, o2.getModificationTime)
}
})
fileStatuses
} catch {
case NonFatal(e) =>
logWarning("Error while attempting to list files from application staging dir", e)
Array.empty
}
}
private[spark] def getSuffixForCredentialsPath(credentialsPath: Path): Int = {
val fileName = credentialsPath.getName
fileName.substring(
fileName.lastIndexOf(SparkHadoopUtil.SPARK_YARN_CREDS_COUNTER_DELIM) + 1).toInt
}
private val HADOOP_CONF_PATTERN = "(\\$\\{hadoopconf-[^\\}\\$\\s]+\\})".r.unanchored
/**
* Substitute variables by looking them up in Hadoop configs. Only variables that match the
* ${hadoopconf- .. } pattern are substituted.
*/
def substituteHadoopVariables(text: String, hadoopConf: Configuration): String = {
text match {
case HADOOP_CONF_PATTERN(matched) =>
logDebug(text + " matched " + HADOOP_CONF_PATTERN)
val key = matched.substring(13, matched.length() - 1) // remove ${hadoopconf- .. }
val eval = Option[String](hadoopConf.get(key))
.map { value =>
logDebug("Substituted " + matched + " with " + value)
text.replace(matched, value)
}
if (eval.isEmpty) {
// The variable was not found in Hadoop configs, so return text as is.
text
} else {
// Continue to substitute more variables.
substituteHadoopVariables(eval.get, hadoopConf)
}
case _ =>
logDebug(text + " didn't match " + HADOOP_CONF_PATTERN)
text
}
}
/**
* Dump the credentials' tokens to string values.
*
* @param credentials credentials
* @return an iterator over the string values. If no credentials are passed in: an empty list
*/
private[spark] def dumpTokens(credentials: Credentials): Iterable[String] = {
if (credentials != null) {
credentials.getAllTokens.asScala.map(tokenToString)
} else {
Seq.empty
}
}
/**
* Convert a token to a string for logging.
* If it's an abstract delegation token, attempt to unmarshall it and then
* print more details, including timestamps in human-readable form.
*
* @param token token to convert to a string
* @return a printable string value.
*/
private[spark] def tokenToString(token: Token[_ <: TokenIdentifier]): String = {
val df = DateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.SHORT, Locale.US)
val buffer = new StringBuilder(128)
buffer.append(token.toString)
try {
val ti = token.decodeIdentifier
buffer.append("; ").append(ti)
ti match {
case dt: AbstractDelegationTokenIdentifier =>
// include human times and the renewer, which the HDFS tokens toString omits
buffer.append("; Renewer: ").append(dt.getRenewer)
buffer.append("; Issued: ").append(df.format(new Date(dt.getIssueDate)))
buffer.append("; Max Date: ").append(df.format(new Date(dt.getMaxDate)))
case _ =>
}
} catch {
case e: IOException =>
logDebug(s"Failed to decode $token: $e", e)
}
buffer.toString
}
def serialize(creds: Credentials): Array[Byte] = {
val byteStream = new ByteArrayOutputStream
val dataStream = new DataOutputStream(byteStream)
creds.writeTokenStorageToStream(dataStream)
byteStream.toByteArray
}
def deserialize(tokenBytes: Array[Byte]): Credentials = {
val tokensBuf = new ByteArrayInputStream(tokenBytes)
val creds = new Credentials()
creds.readTokenStorageStream(new DataInputStream(tokensBuf))
creds
}
def isProxyUser(ugi: UserGroupInformation): Boolean = {
ugi.getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.PROXY
}
}
private[spark] object SparkHadoopUtil {
private lazy val instance = new SparkHadoopUtil
val SPARK_YARN_CREDS_TEMP_EXTENSION = ".tmp"
val SPARK_YARN_CREDS_COUNTER_DELIM = "-"
/**
* Number of records to update input metrics when reading from HadoopRDDs.
*
* Each update is potentially expensive because we need to use reflection to access the
* Hadoop FileSystem API of interest (only available in 2.5), so we should do this sparingly.
*/
private[spark] val UPDATE_INPUT_METRICS_INTERVAL_RECORDS = 1000
/**
* Name of the file containing the gateway's Hadoop configuration, to be overlaid on top of the
* cluster's Hadoop config. It is up to the Spark code launching the application to create
* this file if it's desired. If the file doesn't exist, it will just be ignored.
*/
private[spark] val SPARK_HADOOP_CONF_FILE = "__spark_hadoop_conf__.xml"
def get: SparkHadoopUtil = instance
/**
* Returns a Configuration object with Spark configuration applied on top. Unlike
* the instance method, this will always return a Configuration instance, and not a
* cluster manager-specific type.
*/
private[spark] def newConfiguration(conf: SparkConf): Configuration = {
val hadoopConf = new Configuration()
appendS3AndSparkHadoopConfigurations(conf, hadoopConf)
hadoopConf
}
private def appendS3AndSparkHadoopConfigurations(
conf: SparkConf,
hadoopConf: Configuration): Unit = {
// Note: this null check is around more than just access to the "conf" object to maintain
// the behavior of the old implementation of this code, for backwards compatibility.
if (conf != null) {
// Explicitly check for S3 environment variables
val keyId = System.getenv("AWS_ACCESS_KEY_ID")
val accessKey = System.getenv("AWS_SECRET_ACCESS_KEY")
if (keyId != null && accessKey != null) {
hadoopConf.set("fs.s3.awsAccessKeyId", keyId)
hadoopConf.set("fs.s3n.awsAccessKeyId", keyId)
hadoopConf.set("fs.s3a.access.key", keyId)
hadoopConf.set("fs.s3.awsSecretAccessKey", accessKey)
hadoopConf.set("fs.s3n.awsSecretAccessKey", accessKey)
hadoopConf.set("fs.s3a.secret.key", accessKey)
val sessionToken = System.getenv("AWS_SESSION_TOKEN")
if (sessionToken != null) {
hadoopConf.set("fs.s3a.session.token", sessionToken)
}
}
appendSparkHadoopConfigs(conf, hadoopConf)
val bufferSize = conf.get(BUFFER_SIZE).toString
hadoopConf.set("io.file.buffer.size", bufferSize)
}
}
private def appendSparkHadoopConfigs(conf: SparkConf, hadoopConf: Configuration): Unit = {
// Copy any "spark.hadoop.foo=bar" spark properties into conf as "foo=bar"
for ((key, value) <- conf.getAll if key.startsWith("spark.hadoop.")) {
hadoopConf.set(key.substring("spark.hadoop.".length), value)
}
}
// scalastyle:off line.size.limit
/**
* Create a path that uses replication instead of erasure coding (ec), regardless of the default
* configuration in hdfs for the given path. This can be helpful as hdfs ec doesn't support
* hflush(), hsync(), or append()
* https://hadoop.apache.org/docs/r3.0.0/hadoop-project-dist/hadoop-hdfs/HDFSErasureCoding.html#Limitations
*/
// scalastyle:on line.size.limit
def createNonECFile(fs: FileSystem, path: Path): FSDataOutputStream = {
try {
// Use reflection as this uses APIs only available in Hadoop 3
val builderMethod = fs.getClass().getMethod("createFile", classOf[Path])
// the builder api does not resolve relative paths, nor does it create parent dirs, while
// the old api does.
if (!fs.mkdirs(path.getParent())) {
throw new IOException(s"Failed to create parents of $path")
}
val qualifiedPath = fs.makeQualified(path)
val builder = builderMethod.invoke(fs, qualifiedPath)
val builderCls = builder.getClass()
// this may throw a NoSuchMethodException if the path is not on hdfs
val replicateMethod = builderCls.getMethod("replicate")
val buildMethod = builderCls.getMethod("build")
val b2 = replicateMethod.invoke(builder)
buildMethod.invoke(b2).asInstanceOf[FSDataOutputStream]
} catch {
case _: NoSuchMethodException =>
// No createFile() method, we're using an older hdfs client, which doesn't give us control
// over EC vs. replication. Older hdfs doesn't have EC anyway, so just create a file with
// old apis.
fs.create(path)
}
}
}
| WindCanDie/spark | core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala | Scala | apache-2.0 | 19,113 |
package scalariform.formatter
import scalariform.lexer.Token
import scalariform.lexer.Tokens._
import scalariform.parser._
import scalariform.utils.Utils
import scalariform.utils.TextEditProcessor
import scalariform.utils.BooleanLang._
import scalariform.formatter.preferences._
import PartialFunction._
import scala.math.{ max, min }
trait CaseClauseFormatter { self: HasFormattingPreferences with ExprFormatter with HasHiddenTokenInfo with ScalaFormatter ⇒
def format(caseClausesAstNode: CaseClauses)(implicit formatterState: FormatterState): FormatResult = {
val clauseGroups: List[Either[ConsecutiveSingleLineCaseClauses, CaseClause]] =
if (formattingPreferences(AlignSingleLineCaseStatements) && !formattingPreferences(IndentWithTabs))
groupClauses(caseClausesAstNode)
else
caseClausesAstNode.caseClauses.map(Right(_))
var formatResult: FormatResult = NoFormatResult
var isFirstCaseClause = true
// We have to decide whether to indent the hidden tokens before the CASE token (or possibly a preceding
// NEWLINE token from a prior case block).
def handleCaseIndent(caseClause: CaseClause) {
if (!isFirstCaseClause) {
previousCaseClauseTrailingNewlineOpt(caseClause, caseClausesAstNode) match {
case Some(newline) ⇒
formatResult = formatResult.formatNewline(newline, formatterState.currentIndentLevelInstruction)
case None ⇒
if (hiddenPredecessors(caseClause.firstToken).containsNewline)
formatResult = formatResult.before(caseClause.firstToken, formatterState.currentIndentLevelInstruction)
}
}
}
def formatSingleCaseClause(caseClause: CaseClause) {
handleCaseIndent(caseClause)
formatResult ++= formatCaseClause(caseClause)
isFirstCaseClause = false
}
for (clauseGroup ← clauseGroups)
clauseGroup match {
case Left(consecutiveClauses @ ConsecutiveSingleLineCaseClauses(caseClauses, largestCasePatternLength, smallestCasePatternLength)) ⇒
if (consecutiveClauses.patternLengthRange <= formattingPreferences(AlignSingleLineCaseStatements.MaxArrowIndent)) {
for (caseClause @ CaseClause(casePattern, statSeq) ← caseClauses) {
handleCaseIndent(caseClause)
val arrowInstruction = PlaceAtColumn(formatterState.indentLevel, largestCasePatternLength + 1)
formatResult ++= formatCaseClause(caseClause, Some(arrowInstruction))
isFirstCaseClause = false
}
} else {
caseClauses foreach formatSingleCaseClause
}
case Right(caseClause) ⇒
formatSingleCaseClause(caseClause)
}
formatResult
}
private def groupClauses(caseClausesAstNode: CaseClauses): List[Either[ConsecutiveSingleLineCaseClauses, CaseClause]] = {
val clausesAreMultiline = containsNewline(caseClausesAstNode) || hiddenPredecessors(caseClausesAstNode.firstToken).containsNewline
def groupClauses(caseClauses: List[CaseClause], first: Boolean): List[Either[ConsecutiveSingleLineCaseClauses, CaseClause]] =
caseClauses match {
case Nil ⇒ Nil
case (caseClause @ CaseClause(casePattern, statSeq)) :: otherClauses ⇒
val otherClausesGrouped = groupClauses(otherClauses, first = false)
val formattedCasePattern = {
val casePatternSource = getSource(casePattern)
val casePatternFormatResult = formatCasePattern(casePattern)(FormatterState(indentLevel = 0))
val offset = casePattern.firstToken.offset
val edits = writeTokens(casePatternSource, casePattern.tokens, casePatternFormatResult, offset)
TextEditProcessor.runEdits(casePatternSource, edits)
}
val newlineBeforeClause = hiddenPredecessors(caseClause.firstToken).containsNewline ||
previousCaseClauseEndsWithNewline(caseClause, caseClausesAstNode)
// To evaluate whether a clause body is multiline, we ignore a trailing newline:
val prunedStatSeq = pruneTrailingNewline(statSeq)
val clauseBodyIsMultiline = containsNewline(pruneTrailingNewline(statSeq)) ||
statSeq.firstTokenOption.exists(hiddenPredecessors(_).containsNewline)
if (formattedCasePattern.contains('\n') || (first && !clausesAreMultiline) || (!first && !newlineBeforeClause) || clauseBodyIsMultiline)
Right(caseClause) :: otherClausesGrouped
else {
val arrowAdjust = (if (formattingPreferences(RewriteArrowSymbols)) 1 else casePattern.arrow.length) + 1
val casePatternLength = formattedCasePattern.length - arrowAdjust
otherClausesGrouped match {
case Left(consecutiveSingleLineCaseClauses) :: otherGroups ⇒
Left(consecutiveSingleLineCaseClauses.prepend(caseClause, casePatternLength)) :: otherGroups
case _ ⇒
Left(ConsecutiveSingleLineCaseClauses(caseClause :: Nil, casePatternLength, casePatternLength)) :: otherClausesGrouped
}
}
}
groupClauses(caseClausesAstNode.caseClauses, first = true)
}
private case class ConsecutiveSingleLineCaseClauses(clauses: List[CaseClause], largestCasePatternLength: Int, smallestCasePatternLength: Int) {
def prepend(clause: CaseClause, length: Int) =
ConsecutiveSingleLineCaseClauses(clause :: clauses, max(length, largestCasePatternLength), min(length, smallestCasePatternLength))
def patternLengthRange = largestCasePatternLength - smallestCasePatternLength
}
private def formatCasePattern(casePattern: CasePattern, arrowInstructionOpt: Option[PlaceAtColumn] = None)(implicit formatterState: FormatterState): FormatResult = {
val CasePattern(caseToken: Token, pattern: Expr, guardOption: Option[Guard], arrow: Token) = casePattern
var formatResult: FormatResult = NoFormatResult
formatResult ++= format(pattern)
for (guard ← guardOption)
formatResult ++= format(guard)
arrowInstructionOpt foreach { instruction ⇒ formatResult = formatResult.before(arrow, instruction) }
formatResult
}
private def formatCaseClause(caseClause: CaseClause, arrowInstructionOpt: Option[PlaceAtColumn] = None)(implicit formatterState: FormatterState): FormatResult = {
val CaseClause(casePattern: CasePattern, statSeq: StatSeq) = caseClause
var formatResult: FormatResult = NoFormatResult
formatResult ++= formatCasePattern(casePattern, arrowInstructionOpt)
val singleExpr =
cond(statSeq.firstStatOpt) { case Some(Expr(_)) β true } &&
cond(statSeq.otherStats) { case Nil | List((_, None)) β true }
val indentBlock =
statSeq.firstTokenOption.isDefined && newlineBefore(statSeq) ||
containsNewline(statSeq) && !singleExpr
if (indentBlock)
formatResult = formatResult.before(statSeq.firstToken, formatterState.nextIndentLevelInstruction)
val stateForStatSeq = if (singleExpr && !indentBlock) formatterState else formatterState.indent
formatResult ++= format(statSeq)(stateForStatSeq)
formatResult
}
/**
* @return a NEWLINE(S) token at the end of the caseClause, if present, else None
*/
private def getTrailingNewline(caseClause: CaseClause): Option[Token] =
for {
(separator, stat) ← caseClause.statSeq.otherStats.lastOption
if stat.isEmpty
if separator.isNewline
} yield separator
private def previousCaseClauseTrailingNewlineOpt(caseClause: CaseClause, caseClauses: CaseClauses): Option[Token] =
Utils.pairWithPrevious(caseClauses.caseClauses).collect {
case (Some(previousClause), `caseClause`) ⇒ previousClause
}.headOption.flatMap(getTrailingNewline)
private def previousCaseClauseEndsWithNewline(caseClause: CaseClause, caseClauses: CaseClauses): Boolean =
previousCaseClauseTrailingNewlineOpt(caseClause, caseClauses).isDefined
/**
* Remove a trailing NEWLINE / NEWLINES token from the end of the StatSeq.
*/
private def pruneTrailingNewline(statSeq: StatSeq): StatSeq = statSeq.otherStats.lastOption match {
case Some((separator, None)) if separator.isNewline ⇒ statSeq.copy(otherStats = statSeq.otherStats.init)
case _ ⇒ statSeq
}
}
| raboof/scalariform | scalariform/src/main/scala/scalariform/formatter/CaseClauseFormatter.scala | Scala | mit | 8,280 |
val itermax=100000
val nbreplications=100
val vision = Val[Double]
val minimumSeparation = Val[Double]
val maxAlignTurn = Val[Double]
val maxCohereTurn = Val[Double]
val maxSeparateTurn = Val[Double]
val groupCount = Val[Double]
val relativeDiffusion = Val[Double]
val velocity = Val[Double]
val seed = Val[Long]
val modelTask =
ScalaTask("""
|val behaviour = Behaviour(32.0, 32.0, 128, 0.5, vision, minimumSeparation, maxAlignTurn, maxCohereTurn, maxSeparateTurn)
|val groupCount = behaviour(0)
|val relativeDiffusion = if(behaviour(1) < -1.0) -1.0 else behaviour(1)
|val velocity = behaviour(2)""".stripMargin
) set (
usedClasses += classOf[fr.iscpif.flocking.behaviour.Behaviour],
imports += "fr.iscpif.flocking.behaviour._",
inputs += (vision, minimumSeparation, maxAlignTurn, maxCohereTurn, maxSeparateTurn, seed),
outputs += (groupCount, relativeDiffusion, velocity)
)
val model = Capsule(modelTask)
val stat =
StatisticsTask() set (
statistics += (groupCount, groupCount, median),
statistics += (relativeDiffusion, relativeDiffusion, median),
statistics += (velocity, velocity, median)
)
val replicateModel = Capsule(MoleTask(Replicate(model, seed in UniformDistribution[Long]() take nbreplications, stat)))
val scales =
Seq(
vision -> (0.0, 32.0),
minimumSeparation -> (0.0, 32.0),
maxAlignTurn -> (0.0, math.Pi),
maxCohereTurn -> (0.0, math.Pi),
maxSeparateTurn -> (0.0, math.Pi)
)
val evolution =
BehaviourSearch (
termination = itermax,
inputs = scales,
observables = Seq(groupCount, relativeDiffusion, velocity),
gridSize = Seq(1.0, 0.2, 0.05),
reevaluate = 0.01
)
val nsga2 = SteadyGA(evolution)(replicateModel, 5000)
// Define the execution
val env = DIRACEnvironment("biomed", "https://ccdirac06.in2p3.fr:9178", cpuTime = 4 hours, openMOLEMemory = 1200)
def execution(i: Int) = {
val hookCondition = s"${nsga2.generation.name} % 100 == 0"
val savePopulation = SavePopulationHook(nsga2, s"./${i}/") when hookCondition
val display = DisplayHook("Generation ${" + nsga2.generation.name + "} for " + i)
(nsga2.puzzle + (replicateModel by 10 on env) + (nsga2.output hook savePopulation hook display)) toExecution
}
val executions = (0 until 10).map(execution)
executions.foreach(_.start)
| ISCPIF/PSEExperiments | openmolescripts/flocking/pse/flockingRepli.scala | Scala | agpl-3.0 | 2,326 |
package com.eevolution.context.dictionary.infrastructure.service.impl
import java.util.UUID
import com.eevolution.context.dictionary.infrastructure.repository.ReferenceListRepository
import com.eevolution.context.dictionary.infrastructure.service.ReferenceListService
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.persistence.PersistentEntityRegistry
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/EmerisScala
* Created by [email protected] , www.e-evolution.com on 21/11/17.
*/
/**
* Reference List Service Implementation
* @param registry
* @param referenceListRepository
*/
class ReferenceListServiceImpl (registry: PersistentEntityRegistry, referenceListRepository: ReferenceListRepository) extends ReferenceListService {
private val DefaultPageSize = 10
override def getAll() = ServiceCall {_ => referenceListRepository.getAll()}
override def getAllByPage(page : Option[Int], pageSize : Option[Int]) = ServiceCall{_ => referenceListRepository.getAllByPage(page.getOrElse(0) , pageSize.getOrElse(DefaultPageSize))}
override def getById(id: Int) = ServiceCall { _ => referenceListRepository.getById(id)}
override def getByUUID(uuid: UUID) = ServiceCall { _ => referenceListRepository.getByUUID(uuid)}
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/service/impl/ReferenceListServiceImpl.scala | Scala | gpl-3.0 | 2,082 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.common.json
import com.twitter.zipkin.query.TraceTimeline
import com.twitter.finagle.tracing.SpanId
case class JsonTraceTimeline(traceId: String, rootSpanId: String, annotations: Seq[JsonTimelineAnnotation],
binaryAnnotations: Seq[JsonBinaryAnnotation])
extends WrappedJson
object JsonTraceTimeline {
def wrap(t: TraceTimeline) =
new JsonTraceTimeline(SpanId(t.traceId).toString, SpanId(t.rootSpanId).toString, t.annotations map { JsonTimelineAnnotation.wrap(_) }, t.binaryAnnotations map { JsonBinaryAnnotation.wrap(_) })
}
| siddhaism/zipkin | zipkin-web/src/main/scala/com/twitter/zipkin/common/json/JsonTraceTimeline.scala | Scala | apache-2.0 | 1,188 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.r
import java.io.{File, OutputStream}
import java.net.Socket
import java.util.{Map => JMap}
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
import org.apache.spark._
import org.apache.spark.api.java.{JavaPairRDD, JavaRDD, JavaSparkContext}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.security.SocketAuthServer
private abstract class BaseRRDD[T: ClassTag, U: ClassTag](
parent: RDD[T],
numPartitions: Int,
func: Array[Byte],
deserializer: String,
serializer: String,
packageNames: Array[Byte],
broadcastVars: Array[Broadcast[Object]])
extends RDD[U](parent) with Logging {
override def getPartitions: Array[Partition] = parent.partitions
override def compute(partition: Partition, context: TaskContext): Iterator[U] = {
val runner = new RRunner[T, U](
func, deserializer, serializer, packageNames, broadcastVars, numPartitions)
// The parent may be also an RRDD, so we should launch it first.
val parentIterator = firstParent[T].iterator(partition, context)
runner.compute(parentIterator, partition.index)
}
}
/**
* Form an RDD[(Int, Array[Byte])] from key-value pairs returned from R.
* This is used by SparkR's shuffle operations.
*/
private class PairwiseRRDD[T: ClassTag](
parent: RDD[T],
numPartitions: Int,
hashFunc: Array[Byte],
deserializer: String,
packageNames: Array[Byte],
broadcastVars: Array[Object])
extends BaseRRDD[T, (Int, Array[Byte])](
parent, numPartitions, hashFunc, deserializer,
SerializationFormats.BYTE, packageNames,
broadcastVars.map(x => x.asInstanceOf[Broadcast[Object]])) {
lazy val asJavaPairRDD : JavaPairRDD[Int, Array[Byte]] = JavaPairRDD.fromRDD(this)
}
/**
* An RDD that stores serialized R objects as Array[Byte].
*/
private class RRDD[T: ClassTag](
parent: RDD[T],
func: Array[Byte],
deserializer: String,
serializer: String,
packageNames: Array[Byte],
broadcastVars: Array[Object])
extends BaseRRDD[T, Array[Byte]](
parent, -1, func, deserializer, serializer, packageNames,
broadcastVars.map(x => x.asInstanceOf[Broadcast[Object]])) {
lazy val asJavaRDD : JavaRDD[Array[Byte]] = JavaRDD.fromRDD(this)
}
/**
* An RDD that stores R objects as Array[String].
*/
private class StringRRDD[T: ClassTag](
parent: RDD[T],
func: Array[Byte],
deserializer: String,
packageNames: Array[Byte],
broadcastVars: Array[Object])
extends BaseRRDD[T, String](
parent, -1, func, deserializer, SerializationFormats.STRING, packageNames,
broadcastVars.map(x => x.asInstanceOf[Broadcast[Object]])) {
lazy val asJavaRDD : JavaRDD[String] = JavaRDD.fromRDD(this)
}
private[spark] object RRDD {
def createSparkContext(
master: String,
appName: String,
sparkHome: String,
jars: Array[String],
sparkEnvirMap: JMap[Object, Object],
sparkExecutorEnvMap: JMap[Object, Object]): JavaSparkContext = {
val sparkConf = new SparkConf().setAppName(appName)
.setSparkHome(sparkHome)
// Override `master` if we have a user-specified value
if (master != "") {
sparkConf.setMaster(master)
} else {
// If conf has no master set it to "local" to maintain
// backwards compatibility
sparkConf.setIfMissing("spark.master", "local")
}
for ((name, value) <- sparkEnvirMap.asScala) {
sparkConf.set(name.toString, value.toString)
}
for ((name, value) <- sparkExecutorEnvMap.asScala) {
sparkConf.setExecutorEnv(name.toString, value.toString)
}
if (sparkEnvirMap.containsKey("spark.r.sql.derby.temp.dir") &&
System.getProperty("derby.stream.error.file") == null) {
// This must be set before SparkContext is instantiated.
System.setProperty("derby.stream.error.file",
Seq(sparkEnvirMap.get("spark.r.sql.derby.temp.dir").toString, "derby.log")
.mkString(File.separator))
}
val jsc = new JavaSparkContext(SparkContext.getOrCreate(sparkConf))
jars.foreach { jar =>
jsc.addJar(jar)
}
jsc
}
/**
* Create an RRDD given a sequence of byte arrays. Used to create RRDD when `parallelize` is
* called from R.
*/
def createRDDFromArray(jsc: JavaSparkContext, arr: Array[Array[Byte]]): JavaRDD[Array[Byte]] = {
JavaRDD.fromRDD(jsc.sc.parallelize(arr, arr.length))
}
/**
* Create an RRDD given a temporary file name. This is used to create RRDD when parallelize is
* called on large R objects.
*
* @param fileName name of temporary file on driver machine
* @param parallelism number of slices defaults to 4
*/
def createRDDFromFile(jsc: JavaSparkContext, fileName: String, parallelism: Int):
JavaRDD[Array[Byte]] = {
JavaRDD.readRDDFromFile(jsc, fileName, parallelism)
}
private[spark] def serveToStream(
threadName: String)(writeFunc: OutputStream => Unit): Array[Any] = {
SocketAuthServer.serveToStream(threadName, new RAuthHelper(SparkEnv.get.conf))(writeFunc)
}
}
/**
* Helper for making RDD[Array[Byte]] from some R data, by reading the data from R
* over a socket. This is used in preference to writing data to a file when encryption is enabled.
*/
private[spark] class RParallelizeServer(sc: JavaSparkContext, parallelism: Int)
extends SocketAuthServer[JavaRDD[Array[Byte]]](
new RAuthHelper(SparkEnv.get.conf), "sparkr-parallelize-server") {
override def handleConnection(sock: Socket): JavaRDD[Array[Byte]] = {
val in = sock.getInputStream()
JavaRDD.readRDDFromInputStream(sc.sc, in, parallelism)
}
}
| shaneknapp/spark | core/src/main/scala/org/apache/spark/api/r/RRDD.scala | Scala | apache-2.0 | 6,564 |
import scala.reflect.api.{Universe => ApiUniverse}
import scala.reflect.runtime.{universe => ru}
object Test extends dotty.runtime.LegacyApp {
println(ru.typeOf[List[Int]])
def foo[T: ru.TypeTag] = {
println(ru.typeOf[T])
println(implicitly[ApiUniverse#TypeTag[T]])
}
foo[Map[String, String]]
}
| yusuke2255/dotty | tests/pending/run/newTags.scala | Scala | bsd-3-clause | 312 |
package edu.gemini.gsa.client.api
import edu.gemini.model.p1.immutable._
import edu.gemini.spModel.core.Coordinates
import scalaz._
import Scalaz._
// To avoid circular dependencies we bridge the p1 Instrument to GSA in the class below
sealed trait GSAInstrument {
def name: String
}
object GSAInstrument {
// GSAInstrument is a key on a map, make it into a case class to have correct equals and hash code
private case class GSAInstrumentImpl(name: String) extends GSAInstrument
def apply(i: Instrument): Option[GSAInstrument] = i match {
case Instrument.GmosNorth => GSAInstrumentImpl("GMOS-N").some
case Instrument.Gnirs => GSAInstrumentImpl("GNIRS").some
case Instrument.Nifs => GSAInstrumentImpl("NIFS").some
case Instrument.Niri => GSAInstrumentImpl("NIRI").some
case Instrument.Flamingos2 => GSAInstrumentImpl("F2").some
case Instrument.GmosSouth => GSAInstrumentImpl("GMOS-S").some
case Instrument.Gpi => GSAInstrumentImpl("GPI").some
case Instrument.Graces => GSAInstrumentImpl("GRACES").some
case Instrument.Gsaoi => GSAInstrumentImpl("GSAOI").some
case _ => none // Instruments not in GSA
}
}
sealed trait GsaParams
/**
* Sidereal target search params take a fixed ra/dec coordinate.
*/
case class GsaSiderealParams(coords: Coordinates, instrument: GSAInstrument) extends GsaParams
/**
* Non-sidereal target search params just take the target name.
*/
case class GsaNonSiderealParams(targetName: String, instrument: GSAInstrument) extends GsaParams
/**
* GSA doesn't support all instruments
*/
case object GsaUnsupportedParams extends GsaParams
object GsaParams {
/**
* Extracts parameters from the given observation, if possible. If there is
* no target or blueprint, or if the target is ToO, then there are no
* parameters matching that observation.
*/
def get(obs: Observation): Option[GsaParams] =
for {
t <- obs.target
b <- obs.blueprint
q <- get(t, b)
} yield q
/**
* Extracts the parameters for the given target (if not ToO) and blueprint.
*/
def get(target: Target, blueprint: BlueprintBase): Option[GsaParams] =
blueprint match {
case g: GeminiBlueprintBase => getGeminiParams(target, g)
case _ => none
}
private def getGeminiParams(target: Target, blueprint: GeminiBlueprintBase): Option[GsaParams] =
GSAInstrument(blueprint.instrument).fold(GsaUnsupportedParams.some : Option[GsaParams]) { i =>
target match {
case s: SiderealTarget => GsaSiderealParams(s.coords, i).some
case n: NonSiderealTarget => GsaNonSiderealParams(n.name, i).some
case _ => none
}
}
}
| spakzad/ocs | bundle/edu.gemini.gsa.client/src/main/scala/edu/gemini/gsa/client/api/GsaParams.scala | Scala | bsd-3-clause | 2,786 |
package edu.berkeley.cs.amplab.carat.tools
import spark._
import spark.SparkContext._
import edu.berkeley.cs.amplab.carat.dynamodb.DynamoAnalysisUtil
import spark.timeseries.TimeSeriesSpark
import edu.berkeley.cs.amplab.carat.CaratRate
object APrioriRateAnalysis extends App {
val tmpdir = "/mnt/TimeSeriesSpark-unstable/spark-temp-plots/"
val start = DynamoAnalysisUtil.start()
var master = "local[1]"
if (args != null && args.length >= 1) {
master = args(0)
}
// turn off INFO logging for spark:
System.setProperty("hadoop.root.logger", "WARN,console")
// This is misspelled in the spark jar log4j.properties:
System.setProperty("log4j.threshhold", "WARN")
// Include correct spelling to make sure
System.setProperty("log4j.threshold", "WARN")
// Fix Spark running out of space on AWS.
System.setProperty("spark.local.dir", tmpdir)
//System.setProperty("spark.kryo.registrator", classOf[CaratRateRegistrator].getName)
val sc = TimeSeriesSpark.init(master, "default", "CaratDynamoDataToPlots")
val rfile = "apriori-rates.rdd"
val f = new java.io.File(rfile)
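// Reuse the previously saved a priori rates if the object file already exists; otherwise fetch them from DynamoDB and cache them for later runs.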
val rdd = {
if (!f.exists()) {
val allRates = DynamoAnalysisUtil.getRatesUnabriged(sc, tmpdir)
saveApriori(sc, allRates)
} else {
loadApriori(sc)
}
}
val coll = rdd.collect().sortWith((x, y) => {
if (x.uuid < y.uuid)
true
else if (y.uuid == x.uuid)
x.time2 < y.time2
else
false
})
println("A Priori has %d rates. They are (chronological):".format(coll.size))
for (k <- coll)
println(k)
def saveApriori(sc: SparkContext, rdd: RDD[CaratRate]) = {
rdd.saveAsObjectFile("all-rates.rdd")
val pointRates = rdd.filter(!_.isRateRange())
pointRates.saveAsObjectFile(rfile)
pointRates
}
def loadApriori(sc: SparkContext) = {
sc.objectFile[CaratRate](rfile)
}
}
| lagerspetz/TimeSeriesSpark | src/edu/berkeley/cs/amplab/carat/tools/APrioriRateAnalysis.scala | Scala | bsd-3-clause | 1,865 |
package chrome.system.cpu.bindings
import scala.scalajs.js
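// Scala.js facade for the CpuInfo object exposed by Chrome's chrome.system.cpu extension API.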
@js.native
trait CPUInfo extends js.Object {
def numOfProcessors: Int = js.native
def archName: String = js.native
def modelName: String = js.native
def features: js.Array[Feature] = js.native
def processors: js.Array[Processor] = js.native
}
| lucidd/scala-js-chrome | bindings/src/main/scala/chrome/system/cpu/bindings/CPUInfo.scala | Scala | mit | 320 |
package org.jetbrains.plugins.scala.testingSupport.specs2
/**
* @author Roman.Shein
* @since 11.02.2015.
*/
abstract class Specs2WholeSuiteTest extends Specs2TestCase {
def testSpecification() {
addFileToProject("SpecificationTest.scala",
"""
|import org.specs2.mutable.Specification
|
|class SpecificationTest extends Specification {
| "The 'SpecificationTest'" should {
| "run single test" in {
| print(">>TEST: OK<<")
| 1 mustEqual 1
| }
|
| "ignore other test" in {
| print(">>TEST: FAILED<<")
| 1 mustEqual 1
| }
| }
|}
""".stripMargin
)
runTestByLocation(3, 14, "SpecificationTest.scala",
checkConfigAndSettings(_, "SpecificationTest"),
root => checkResultTreeHasExactNamedPath(root, "[root]", "SpecificationTest", "The 'SpecificationTest' should", "run single test") &&
checkResultTreeHasExactNamedPath(root, "[root]", "SpecificationTest", "The 'SpecificationTest' should", "ignore other test"),
debug = true
)
}
}
| LPTK/intellij-scala | test/org/jetbrains/plugins/scala/testingSupport/specs2/Specs2WholeSuiteTest.scala | Scala | apache-2.0 | 1,141 |
package im.actor.server.persist.sequence
import com.google.protobuf.wrappers.StringValue
import im.actor.server.db.ActorPostgresDriver.api._
import im.actor.server.model.{ UpdateMapping, SeqUpdate }
private[sequence] final class UserSequenceTable(tag: Tag) extends Table[SeqUpdate](tag, "user_sequence") {
def userId = column[Int]("user_id", O.PrimaryKey)
def seq = column[Int]("seq", O.PrimaryKey)
def timestamp = column[Long]("timestamp")
def reduceKey = column[Option[StringValue]]("reduce_key")
def mapping = column[Array[Byte]]("mapping")
def * = (userId, seq, timestamp, reduceKey, mapping) <> (applySeqUpdate.tupled, unapplySeqUpdate)
private def applySeqUpdate: (Int, Int, Long, Option[StringValue], Array[Byte]) ⇒ SeqUpdate = {
(userId, seq, timestamp, reduceKey, mapping) ⇒
SeqUpdate(userId, seq, timestamp, reduceKey, Some(UpdateMapping.parseFrom(mapping)))
}
private def unapplySeqUpdate: SeqUpdate ⇒ Option[(Int, Int, Long, Option[StringValue], Array[Byte])] = {
seqUpdate ⇒
Some(
(
seqUpdate.userId,
seqUpdate.seq,
seqUpdate.timestamp,
seqUpdate.reduceKey,
seqUpdate.mapping.map(_.toByteArray).getOrElse(Array.empty)
)
)
}
}
object UserSequenceRepo {
private val sequence = TableQuery[UserSequenceTable]
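// The Compiled wrappers below are prepared once by Slick and reused across calls, so the query is not re-compiled on every invocation.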
private val sequenceC = Compiled(sequence)
private def byUser(userId: Rep[Int]) = sequence.filter(_.userId === userId)
private def byUserAfterSeq(userId: Rep[Int], seq: Rep[Int], limit: ConstColumn[Long]) =
byUser(userId)
.filter(_.seq > seq)
.sortBy(_.seq.asc)
.take(limit)
private val userSequence = Compiled(byUserAfterSeq _)
private val userSequenceSeq = Compiled {
byUser _ andThen (_.sortBy(_.seq.desc).map(_.seq).take(1))
}
def create(updates: Seq[SeqUpdate]) = (sequenceC ++= updates).transactionally
def create(update: SeqUpdate) = sequenceC += update
def fetchSeq(userId: Int) = userSequenceSeq(userId).result.headOption
def fetchAfterSeq(userId: Int, seq: Int, limit: Long) =
userSequence((userId, seq, limit)).result
}
| ljshj/actor-platform | actor-server/actor-persist/src/main/scala/im/actor/server/persist/sequence/UserSequenceRepo.scala | Scala | mit | 2,136 |
/* Copyright (c) 2018 Lucas Satabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package lingua
package fst2
package filter
import semiring._
import scala.language.higherKinds
object EpsilonSequencingFilter extends EpsilonSequencingFilter[NTransition]
class EpsilonSequencingFilter[T[_, _]](implicit trans: Transition[T, Option, Option]) extends Filter[T] {
import Transition._
val states = Set(0, 1, -1)
val initial = 0
val blocking = -1
def step[In, Out1, Out2](t1: T[In, Option[Out1]], t2: T[Option[Out1], Out2], state: State): (T[In, Option[Out1]], T[Option[Out1], Out2], State) =
(t1.out, t2.in) match {
case (Some(Some(x1)), Some(Some(x2))) if x1 == x2 =>
(t1, t2, 0)
case (Some(None), None) if state == 0 =>
(t1, t2, 0)
case (None, Some(None)) =>
(t1, t2, 1)
case _ =>
(t1, t2, -1)
}
}
class WEpsilonSequencingFilter[T[_, _], Weight](implicit trans: Transition[T, Option, Option], sem: Semiring[Weight]) extends EpsilonSequencingFilter[T] with WFilter[T, Weight] {
def finalWeight(s: State): Weight = sem.one
}
| satabin/lingua | fst/src/main/scala/lingua/fst2/filter/EpsilonSequencingFilter.scala | Scala | apache-2.0 | 1,622 |
package ru.finagram.api
/**
* This trait represents an incoming update.
*/
trait Update {
val updateId: Long
}
/**
* This object represents an incoming update with [[Message]].
*
* @param updateId The update's unique identifier.
* Update identifiers start from a certain positive number and increase sequentially.
* @param message New incoming message of any kind - text, photo, sticker, etc.
*/
case class MessageUpdate(updateId: Long, message: Message) extends Update
/**
* This object represents an incoming update with [[CallbackQuery]].
*
* @param updateId The update's unique identifier.
* @param callbackQuery New incoming callback query.
*/
case class CallbackQueryUpdate(updateId: Long, callbackQuery: CallbackQuery) extends Update
| finagram/finagram | src/main/scala/ru/finagram/api/Update.scala | Scala | mit | 788 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
/**
* Trait that contains the <code>note</code> method, which can be used to send a status notification to the reporter.
*
* <p>
* The difference between <code>note</code> and the <code>info</code> method of <a href="Informer.html"><code>Informer</code></a> is that
* <code>info</code> messages provided during a test are recorded and sent as part of test completion event, whereas
* <code>note</code> messages are sent right away as <a href="events/NoteProvided.html"><code>NoteProvided</code></a> messages. For long-running tests,
* <code>note</code> allows you to send "status notifications" to the reporter right away, so users can track the
* progress of the long-running tests. By contrast, <code>info</code> messages will only be seen by the user after the
* test has completed, and are more geared towards specification (such as <a href="GivenWhenThen.html">Given/When/Then</a> messages) than notification.
* </p>
*
* <p>
* The difference between <code>note</code> and the <code>alert</code> method of <a href="Alerting.html"><code>Alerting</code></a> is
* that <code>alert</code> is intended to be used
* for warnings or notifications of potential problems, whereas <code>note</code> is just for status notifications.
* In string reporters for which ANSI color is enabled, <code>note</code> notifications are shown in green and <code>alert</code> notifications
* in yellow.
* </p>
*/
trait Notifying {
/**
* Returns an <code>Notifier</code> that can send a status notification via an <code>NoteProvided</code> event to the reporter.
*/
protected def note: Notifier
}
| travisbrown/scalatest | src/main/scala/org/scalatest/Notifying.scala | Scala | apache-2.0 | 2,230 |
package com.freshsoft.matterbridge.client.rss
import java.time.OffsetDateTime
import java.time.format.DateTimeFormatter
import akka.actor.{Actor, ActorRef, Props}
import akka.event.{Logging, LoggingAdapter}
import com.freshsoft.matterbridge.server.{MatterBridgeContext, RssConfigActorService}
import com.freshsoft.matterbridge.util.MatterBridgeHttpClient
import model.MatterBridgeEntities.{RssReaderIncomingModel, RssReaderModel}
import model.RssEntity
import net.ruippeixotog.scalascraper.browser.JsoupBrowser
import net.ruippeixotog.scalascraper.dsl.DSL.Extract._
import net.ruippeixotog.scalascraper.dsl.DSL._
import org.joda.time.DateTime
import scala.language.postfixOps
import scala.xml.{NodeSeq, XML}
/**
* The rss reader worker actor fetches the rss feed and builds a Message for the rssReaderSenderActor
*/
class RssReaderWorkerActor extends Actor with MatterBridgeContext with RssConfigActorService {
val log: LoggingAdapter = Logging.getLogger(system, this)
val readerActor: ActorRef = context.actorOf(Props(classOf[RssReaderSenderActor]))
override def receive: Receive = {
case x: RssEntity =>
retrieveRssData(x) foreach {
case Some(model) => readerActor ! model
case None =>
log.info("Got no rss items to send. No rss content was sent")
}
}
/**
* Retrieves the raw rss feed data and builds the raw rss model to send
*
* @param rssConfig the rss configuration
* @return A rss reader incoming model or none when parsing was not successful
*/
private def retrieveRssData(rssConfig: RssEntity) = {
val rawRssData = MatterBridgeHttpClient.getUrlContent(rssConfig.rssUrl)
rawRssData map { rssContent =>
if (rssContent.nonEmpty) buildRssModel(rssConfig, rssContent) else None
}
}
/**
* Check if the given pubDate String of an article is newer than the old search time
*
* @param actualPubDate The actual rss feed item article time
* @param lastScanDate The last actor run time saved in a model
* @param isRssFeed Indicator if it is an rss feed or an atom feed
* @return true when the actual rss item (pubDate) is new
*/
private def isArticleNew(actualPubDate: String, lastScanDate: String, isRssFeed: Boolean) = {
val rssFeedTime = (x: String) => OffsetDateTime.parse(x, DateTimeFormatter.RFC_1123_DATE_TIME)
val atomFeedTime = (x: String) => OffsetDateTime.parse(x, DateTimeFormatter.ISO_DATE_TIME)
if (isRssFeed) {
rssFeedTime(actualPubDate).isAfter(atomFeedTime(lastScanDate))
} else {
atomFeedTime(actualPubDate).isAfter(atomFeedTime(lastScanDate))
}
}
/**
* Looks in the description tag for an image link
*
* @param description The content of an rss item description tag
* @return A link to an image, otherwise an empty string (compatibility)
*/
private def extractImageFromContent(description: String) = {
try {
val doc = JsoupBrowser().parseString(description)
(doc >> element("img")).attr("src")
} catch {
case _: Throwable => ""
}
}
/**
   * Builds an optional RssReaderIncomingModel which belongs to an RSS feed config entry
   *
   * @param rssConfig The RSS config entry used to retrieve the necessary information
   * @param content The raw RSS feed content as a string
   * @return An optional RssReaderIncomingModel
*/
private def buildRssModel(rssConfig: RssEntity,
content: String): Option[RssReaderIncomingModel] = {
try {
val xml = XML.loadString(content)
val items = xml \\\\ "item"
val entries = xml \\\\ "entry"
val isRssFeed = items.nonEmpty
val allRssModels =
if (isRssFeed) getRssFeedModels(rssConfig, items)
else getAtomFeedModels(rssConfig, entries)
val lastTime =
        rssConfig.updatedAt.getOrElse(DateTime.now().minusDays(3)).toDateTimeISO().toString
val rssModels =
allRssModels.filter(m => isArticleNew(m.pubDate, lastTime, isRssFeed))
if (rssModels.nonEmpty) {
rssConfigService.update(rssConfig.id)
}
Some(RssReaderIncomingModel(rssConfig, rssModels))
} catch {
case e: Throwable =>
log.error(s"Could not parse rss content $content", e)
None
}
}
private def getRssFeedModels(rssConfig: RssEntity, rssNodeSeq: NodeSeq) = {
(for {
i <- rssNodeSeq
title = (i \\ "title").text
link = (i \\ "link").text
pubDate = (i \\ "pubDate").text
description = (i \\ "description").text
imageLink = extractImageFromContent(description)
} yield RssReaderModel(title, link, pubDate, description, imageLink, rssConfig.name)).toList
}
private def getAtomFeedModels(rssConfig: RssEntity, atomNodeSeq: NodeSeq) = {
(for {
i <- atomNodeSeq
title = (i \\ "title").text
link = (i \\ "link").text
pubDate = (i \\ "updated").text
description = (i \\ "summary").text
imageLink = extractImageFromContent(description)
} yield RssReaderModel(title, link, pubDate, description, imageLink, rssConfig.name)).toList
}
}
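// Hedged, standalone sketch, not part of the original file: it illustrates the date-comparison
// strategy used by isArticleNew above. RSS feeds carry RFC 1123 pubDate strings while Atom feeds
// and the stored scan time use ISO-8601, so the two sides are parsed with different java.time
// formatters before comparing. The sample dates below are made up for illustration.
object FeedDateComparisonSketch {
  import java.time.OffsetDateTime
  import java.time.format.DateTimeFormatter

  def main(args: Array[String]): Unit = {
    val rssPubDate = OffsetDateTime.parse("Tue, 01 Aug 2017 10:15:30 +0200", DateTimeFormatter.RFC_1123_DATE_TIME)
    val lastScan   = OffsetDateTime.parse("2017-08-01T06:00:00Z", DateTimeFormatter.ISO_DATE_TIME)
    // 10:15:30+02:00 is 08:15:30Z, which is after the 06:00Z scan time, so the article counts as new
    println(rssPubDate.isAfter(lastScan)) // prints: true
  }
}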
|
Freshwood/matterbridge
|
src/main/scala/com/freshsoft/matterbridge/client/rss/RssReaderWorkerActor.scala
|
Scala
|
mit
| 5,059 |
/**
* Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.util
import annotation.tailrec
import collection.immutable.HashMap
private[akka] object WildcardTree {
private val empty = new WildcardTree[Nothing]()
def apply[T](): WildcardTree[T] = empty.asInstanceOf[WildcardTree[T]]
}
private[akka] final case class WildcardTree[T](data: Option[T] = None, children: Map[String, WildcardTree[T]] = HashMap[String, WildcardTree[T]]()) {
def insert(elems: Iterator[String], d: T): WildcardTree[T] =
if (!elems.hasNext) {
copy(data = Some(d))
} else {
val e = elems.next()
copy(children = children.updated(e, children.get(e).getOrElse(WildcardTree()).insert(elems, d)))
}
@tailrec final def find(elems: Iterator[String]): WildcardTree[T] =
if (!elems.hasNext) this
else {
(children.get(elems.next()) orElse children.get("*")) match {
        case Some(branch) ⇒ branch.find(elems)
        case None ⇒ WildcardTree()
}
}
}
|
jmnarloch/akka.js
|
akka-js-actor/shared/src/main/scala/akka/util/WildcardTree.scala
|
Scala
|
bsd-3-clause
| 1,023 |
/*
* Copyright 2015-2020 Snowflake Computing
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.snowflake.spark.snowflake
import java.net.Proxy.Type
import java.net.{InetSocketAddress, Proxy}
import java.security.InvalidKeyException
import java.util.Properties
import net.snowflake.client.core.SFSessionProperty
import net.snowflake.client.jdbc.SnowflakeSQLException
import net.snowflake.client.jdbc.internal.amazonaws.ClientConfiguration
import net.snowflake.client.jdbc.internal.fasterxml.jackson.databind.ObjectMapper
import net.snowflake.client.jdbc.internal.fasterxml.jackson.databind.node.ObjectNode
import net.snowflake.client.jdbc.internal.microsoft.azure.storage.OperationContext
import org.apache.spark.sql.SparkSession
import org.scalatest.{FunSuite, Matchers}
/**
 * Unit tests for miscellaneous classes
*/
class MiscSuite01 extends FunSuite with Matchers {
private val mapper = new ObjectMapper()
test("test ProxyInfo with all fields") {
val sfOptions = Map(
Parameters.PARAM_USE_PROXY -> "true",
Parameters.PARAM_PROXY_HOST -> "proxyHost",
Parameters.PARAM_PROXY_PORT -> "1234",
Parameters.PARAM_PROXY_USER -> "proxyUser",
Parameters.PARAM_PROXY_PASSWORD -> "proxyPassword",
Parameters.PARAM_NON_PROXY_HOSTS -> "nonProxyHosts",
)
val param = Parameters.MergedParameters(sfOptions)
val proxyInfo = param.proxyInfo.get
// Set proxy for JDBC
val jdbcProperties = new Properties()
param.setJDBCProxyIfNecessary(jdbcProperties)
assert(jdbcProperties.getProperty(
SFSessionProperty.USE_PROXY.getPropertyKey).equals("true"))
assert(jdbcProperties.getProperty(
SFSessionProperty.PROXY_HOST.getPropertyKey).equals("proxyHost"))
assert(jdbcProperties.getProperty(
SFSessionProperty.PROXY_PORT.getPropertyKey).equals("1234"))
assert(jdbcProperties.getProperty(
SFSessionProperty.PROXY_USER.getPropertyKey).equals("proxyUser"))
assert(jdbcProperties.getProperty(
SFSessionProperty.PROXY_PASSWORD.getPropertyKey).equals("proxyPassword"))
assert(jdbcProperties.getProperty(
SFSessionProperty.NON_PROXY_HOSTS.getPropertyKey).equals("nonProxyHosts"))
// Set proxy for AWS
val clientConfig = new ClientConfiguration()
proxyInfo.setProxyForS3(clientConfig)
assert(clientConfig.getProxyHost.equals("proxyHost"))
assert(clientConfig.getProxyPort.equals(1234))
assert(clientConfig.getProxyUsername.equals("proxyUser"))
assert(clientConfig.getProxyPassword.equals("proxyPassword"))
assert(clientConfig.getNonProxyHosts.equals("nonProxyHosts"))
// Set proxy for Azure
proxyInfo.setProxyForAzure()
assert(OperationContext.getDefaultProxy.equals(new Proxy(
Type.HTTP,
new InetSocketAddress("proxyHost", 1234)
)))
}
test("test ProxyInfo with hostname and port only") {
val sfOptions = Map(
Parameters.PARAM_USE_PROXY -> "true",
Parameters.PARAM_PROXY_HOST -> "proxyHost",
Parameters.PARAM_PROXY_PORT -> "1234"
)
val param = Parameters.MergedParameters(sfOptions)
val proxyInfo = param.proxyInfo.get
// Set proxy for JDBC
val jdbcProperties = new Properties()
param.setJDBCProxyIfNecessary(jdbcProperties)
assert(jdbcProperties.getProperty(
SFSessionProperty.USE_PROXY.getPropertyKey).equals("true"))
assert(jdbcProperties.getProperty(
SFSessionProperty.PROXY_HOST.getPropertyKey).equals("proxyHost"))
assert(jdbcProperties.getProperty(
SFSessionProperty.PROXY_PORT.getPropertyKey).equals("1234"))
// Set proxy for AWS
val clientConfig = new ClientConfiguration()
proxyInfo.setProxyForS3(clientConfig)
assert(clientConfig.getProxyHost.equals("proxyHost"))
assert(clientConfig.getProxyPort.equals(1234))
// Set proxy for Azure
proxyInfo.setProxyForAzure()
assert(OperationContext.getDefaultProxy.equals(new Proxy(
Type.HTTP,
new InetSocketAddress("proxyHost", 1234)
)))
}
test("test ProxyInfo with negative value") {
// Wrong case 1. Don't set proxyport
var sfOptions = Map(
Parameters.PARAM_USE_PROXY -> "true",
Parameters.PARAM_PROXY_HOST -> "proxyHost"
)
var param = Parameters.MergedParameters(sfOptions)
assertThrows[IllegalArgumentException]({
param.proxyInfo.get.setProxyForAzure()
})
// Wrong case 2. port is not number
sfOptions = Map(
Parameters.PARAM_USE_PROXY -> "true",
Parameters.PARAM_PROXY_HOST -> "proxyHost",
Parameters.PARAM_PROXY_PORT -> "notNumber"
)
param = Parameters.MergedParameters(sfOptions)
assertThrows[IllegalArgumentException]({
param.proxyInfo.get.setProxyForAzure()
})
// Wrong case 3. password set, user name is not set
sfOptions = Map(
Parameters.PARAM_USE_PROXY -> "true",
Parameters.PARAM_PROXY_HOST -> "proxyHost",
Parameters.PARAM_PROXY_PORT -> "1234",
Parameters.PARAM_PROXY_PASSWORD -> "proxyPassword"
)
param = Parameters.MergedParameters(sfOptions)
assertThrows[IllegalArgumentException]({
param.proxyInfo.get.setProxyForAzure()
})
}
test("test Parameters.removeQuoteForStageTableName()") {
// Explicitly set it as true
var sfOptions = Map(Parameters.PARAM_INTERNAL_STAGING_TABLE_NAME_REMOVE_QUOTES_ONLY -> "true")
var param = Parameters.MergedParameters(sfOptions)
assert(param.stagingTableNameRemoveQuotesOnly)
// Explicitly set it as false
sfOptions = Map(Parameters.PARAM_INTERNAL_STAGING_TABLE_NAME_REMOVE_QUOTES_ONLY -> "false")
param = Parameters.MergedParameters(sfOptions)
assert(!param.stagingTableNameRemoveQuotesOnly)
// It is false by default.
sfOptions = Map(Parameters.PARAM_USE_PROXY -> "false")
param = Parameters.MergedParameters(sfOptions)
assert(!param.stagingTableNameRemoveQuotesOnly)
}
test("test SnowflakeConnectorUtils.handleS3Exception") {
// positive test
val ex1 = new Exception("test S3Exception",
new InvalidKeyException("test InvalidKeyException"))
assertThrows[SnowflakeConnectorException]({
SnowflakeConnectorUtils.handleS3Exception(ex1)
})
// negative test
val ex2 = new IllegalArgumentException("test IllegalArgumentException")
assertThrows[IllegalArgumentException]({
SnowflakeConnectorUtils.handleS3Exception(ex2)
})
}
test("test SnowflakeFailMessage") {
println(SnowflakeFailMessage.FAIL_PUSHDOWN_AGGREGATE_EXPRESSION)
println(SnowflakeFailMessage.FAIL_PUSHDOWN_GENERATE_QUERY)
println(SnowflakeFailMessage.FAIL_PUSHDOWN_SET_TO_EXPR)
println(SnowflakeFailMessage.FAIL_PUSHDOWN_STATEMENT)
}
test("unit test for SnowflakeTelemetry.getClientConfig") {
// Configure some spark options for the spark session
SparkSession.builder
.master("local")
.appName("test config info sent")
.config("spark.driver.memory", "2G")
.config("spark.executor.memory", "888M")
.config("spark.driver.extraJavaOptions", s"-Duser.timezone=GMT")
.config("spark.executor.extraJavaOptions", s"-Duser.timezone=UTC")
.config("spark.sql.session.timeZone", "America/Los_Angeles")
.getOrCreate()
val metric = SnowflakeTelemetry.getClientConfig()
// Check one version
assert(metric.get(TelemetryClientInfoFields.SPARK_CONNECTOR_VERSION).asText().equals(Utils.VERSION))
// check one JVM option
assert(metric.get(TelemetryClientInfoFields.MAX_MEMORY_IN_MB).asLong() > 0)
// check Spark options
val sparkConfNode = metric.get(TelemetryClientInfoFields.SPARK_CONFIG)
assert(sparkConfNode.get("spark.master").asText().equals("local"))
assert(sparkConfNode.get("spark.app.name").asText().equals("test config info sent"))
assert(sparkConfNode.get("spark.driver.memory").asText().equals("2G"))
assert(sparkConfNode.get("spark.executor.memory").asText().equals("888M"))
assert(sparkConfNode.get("spark.driver.extraJavaOptions").asText().equals("-Duser.timezone=GMT"))
assert(sparkConfNode.get("spark.executor.extraJavaOptions").asText().equals("-Duser.timezone=UTC"))
assert(sparkConfNode.get("spark.sql.session.timeZone").asText().equals("America/Los_Angeles"))
}
test("unit test for SnowflakeTelemetry.addThrowable()") {
val errorMessage = "SnowflakeTelemetry.addThrowable() test exception message"
val queryId = "019840d2-04c3-90c5-0000-0ca911a381c6"
val sqlState = "22018"
val errorCode = 100038
// Test a SnowflakeSQLException Exception
var metric: ObjectNode = mapper.createObjectNode()
SnowflakeTelemetry.addThrowable(metric, new SnowflakeSQLException(queryId, errorMessage, sqlState, errorCode))
assert(metric.get(TelemetryQueryStatusFields.EXCEPTION_CLASS_NAME).asText()
.equals("class net.snowflake.client.jdbc.SnowflakeSQLException"))
var expectedMessage = s"SnowflakeSQLException: ErrorCode=$errorCode SQLState=$sqlState QueryId=$queryId"
assert(metric.get(TelemetryQueryStatusFields.EXCEPTION_MESSAGE).asText().equals(expectedMessage))
assert(metric.get(TelemetryQueryStatusFields.STACKTRACE).asText().contains(expectedMessage))
assert(!metric.get(TelemetryQueryStatusFields.STACKTRACE).asText().contains(errorMessage))
// Test an OutOfMemoryError
metric = mapper.createObjectNode()
SnowflakeTelemetry.addThrowable(metric, new OutOfMemoryError(errorMessage))
assert(metric.get(TelemetryQueryStatusFields.EXCEPTION_CLASS_NAME).asText()
.equals("class java.lang.OutOfMemoryError"))
assert(metric.get(TelemetryQueryStatusFields.EXCEPTION_MESSAGE).asText().equals(errorMessage))
assert(metric.get(TelemetryQueryStatusFields.STACKTRACE).asText().contains(errorMessage))
// Test an Exception
metric = mapper.createObjectNode()
SnowflakeTelemetry.addThrowable(metric, new Exception(errorMessage))
assert(metric.get(TelemetryQueryStatusFields.EXCEPTION_CLASS_NAME).asText()
.equals("class java.lang.Exception"))
assert(metric.get(TelemetryQueryStatusFields.EXCEPTION_MESSAGE).asText().equals(errorMessage))
assert(metric.get(TelemetryQueryStatusFields.STACKTRACE).asText().contains(errorMessage))
}
test("test Parameters invalid option values") {
var sfOptions = Map(
Parameters.PARAM_EXPECTED_PARTITION_SIZE_IN_MB -> "wrong_number",
Parameters.PARAM_UPLOAD_CHUNK_SIZE_IN_MB -> "1"
)
var param = Parameters.MergedParameters(sfOptions)
assertThrows[IllegalArgumentException]({
param.expectedPartitionSize
})
assertThrows[IllegalArgumentException]({
param.uploadChunkSize
})
}
}
|
snowflakedb/spark-snowflakedb
|
src/test/scala/net/snowflake/spark/snowflake/MiscSuite01.scala
|
Scala
|
apache-2.0
| 11,147 |
package com.arcusys.learn.slide
import SlideEntityConverters._
import com.arcusys.learn.persistence.liferay.service.LFSlideLocalServiceUtil
import com.arcusys.valamis.slide.model.SlideModel
import com.arcusys.valamis.slide.storage.SlideRepositoryContract
import scala.collection.JavaConversions._
import com.arcusys.learn.storage.impl.liferay.LiferayCommon._
class SlideRepository extends SlideRepositoryContract {
override def getCount = LFSlideLocalServiceUtil.getLFSlidesCount
override def getAll = LFSlideLocalServiceUtil.getLFSlides(0, getCount).toList
override def getBySlideSetId(slideSetId: Long) = getAll.filter(_.slideSetId == slideSetId)
override def delete(id: Long) = LFSlideLocalServiceUtil.deleteLFSlide(id)
override def create(slideModel: SlideModel) = {
val slide = LFSlideLocalServiceUtil.createLFSlide()
slide.setTitle(slideModel.title)
slide.setBgcolor(slideModel.bgColor)
slide.setBgimage(slideModel.bgImage)
slide.setLeftSlideId(slideModel.leftSlideId)
slide.setTopSlideId(slideModel.topSlideId)
slide.setSlideSetId(slideModel.slideSetId)
slide.setStatementVerb(slideModel.statementVerb)
slide.setStatementObject(slideModel.statementObject)
slide.setStatementCategoryId(slideModel.statementCategoryId)
LFSlideLocalServiceUtil.updateLFSlide(slide)
}
override def update(slideModel: SlideModel) = {
val slide = LFSlideLocalServiceUtil.getLFSlide(slideModel.id.get)
slide.setTitle(slideModel.title)
slide.setBgcolor(slideModel.bgColor)
slide.setBgimage(slideModel.bgImage)
slide.setLeftSlideId(slideModel.leftSlideId)
slide.setTopSlideId(slideModel.topSlideId)
slide.setSlideSetId(slideModel.slideSetId)
slide.setStatementVerb(slideModel.statementVerb)
slide.setStatementObject(slideModel.statementObject)
slide.setStatementCategoryId(slideModel.statementCategoryId)
LFSlideLocalServiceUtil.updateLFSlide(slide)
}
}
|
ViLPy/Valamis
|
learn-persistence-liferay-wrapper/src/main/scala/com/arcusys/learn/slide/SlideRepository.scala
|
Scala
|
lgpl-3.0
| 1,945 |
package org.receiver2d.engine.geometry
import org.receiver2d.engine.math.{Matrix, Vec2}
/**
* Represents a circle
*/
case class Circle(center: Vec2, radius: Float) extends Ellipse(center, radius)
|
Prince781/Receiver2D
|
src/main/scala/org/receiver2d/engine/geometry/Circle.scala
|
Scala
|
gpl-2.0
| 199 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.process.transform
import java.util.Date
import org.locationtech.jts.geom.Point
import org.geotools.data.collection.ListFeatureCollection
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder.EncodedValues
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class BinConversionProcessTest extends Specification {
import scala.collection.JavaConversions._
val sft = SimpleFeatureTypes.createType("bin",
"name:String,track:String,dtg:Date,dtg2:Date,*geom:Point:srid=4326,geom2:Point:srid=4326")
val process = new BinConversionProcess
val features = (0 until 10).map { i =>
val sf = new ScalaSimpleFeature(sft, s"0$i")
sf.setAttribute("name", s"name$i")
sf.setAttribute("track", s"$i")
sf.setAttribute("dtg", s"2017-02-20T00:00:0$i.000Z")
sf.setAttribute("dtg2", s"2017-02-21T00:00:0$i.000Z")
sf.setAttribute("geom", s"POINT(40 ${50 + i})")
sf.setAttribute("geom2", s"POINT(20 ${30 + i})")
sf
}
val ids = features.map(_.getID.hashCode)
val names = features.map(_.getAttribute("name").hashCode)
val tracks = features.map(_.getAttribute("track").hashCode)
val dates = features.map(_.getAttribute("dtg").asInstanceOf[Date].getTime)
val dates2 = features.map(_.getAttribute("dtg2").asInstanceOf[Date].getTime)
val lonlat = features.map(_.getAttribute("geom").asInstanceOf[Point]).map(p => (p.getY.toFloat, p.getX.toFloat))
val latlon = lonlat.map(_.swap)
val lonlat2 = features.map(_.getAttribute("geom2").asInstanceOf[Point]).map(p => (p.getY.toFloat, p.getX.toFloat))
val latlon2 = lonlat2.map(_.swap)
val listCollection = new ListFeatureCollection(sft, features)
// converts to tuples that we can compare to zipped values
def toTuples(value: EncodedValues): Any = value match {
case EncodedValues(trackId, lat, lon, dtg, label) if label == -1L => ((trackId, dtg), (lat, lon))
case EncodedValues(trackId, lat, lon, dtg, label) => (((trackId, dtg), (lat, lon)), label)
}
"BinConversionProcess" should {
"encode an empty feature collection" in {
val bytes = process.execute(new ListFeatureCollection(sft), null, null, null, null, "lonlat")
bytes must beEmpty
}
"encode a generic feature collection" in {
val bytes = process.execute(listCollection, null, null, null, null, "lonlat").toList
bytes must haveLength(10)
val decoded = bytes.map(BinaryOutputEncoder.decode).map(toTuples)
decoded must containTheSameElementsAs(ids.zip(dates).zip(lonlat))
}
"encode a generic feature collection with alternate values" in {
val bytes = process.execute(listCollection, "name", "geom2", "dtg2", null, "lonlat").toList
bytes must haveLength(10)
val decoded = bytes.map(BinaryOutputEncoder.decode).map(toTuples)
decoded must containTheSameElementsAs(names.zip(dates2).zip(lonlat2))
}
"encode a generic feature collection with labels" in {
val bytes = process.execute(listCollection, null, null, null, "track", "lonlat").toList
bytes must haveLength(10)
val decoded = bytes.map(BinaryOutputEncoder.decode).map(toTuples)
decoded must containTheSameElementsAs(ids.zip(dates).zip(lonlat).zip(tracks))
}
}
}
|
aheyne/geomesa
|
geomesa-process/geomesa-process-vector/src/test/scala/org/locationtech/geomesa/process/transform/BinConversionProcessTest.scala
|
Scala
|
apache-2.0
| 3,989 |
package validator
import models.{LineStation, StopStation}
import scala.collection.breakOut
class StationStopValidator(allStops: Seq[StopStation]) {
import StationStopValidator._
def validate(stations: Seq[LineStation]): Seq[Error] = {
val stationIds: Set[Long] = allStops.map(_.lineStationId)(breakOut)
stations.flatMap { st =>
if (stationIds.contains(st.id)) None
else {
Some(new UndefinedTrainError(st))
}
}
}
}
object StationStopValidator {
class UndefinedTrainError(station: LineStation) extends Error {
override def message: String = s"Undefined arrived train at ${lineName}:${stationName}"
override def url: Option[String] = None
def lineName = station.line.map(_.name).getOrElse(s"(lineId = ${station.lineId})")
def stationName = station.station.map(_.name).getOrElse(s"(stationId = ${station.stationId})")
}
}
|
ponkotuy/train-stamp-rally
|
app/validator/StationStopValidator.scala
|
Scala
|
apache-2.0
| 888 |
package se.lu.nateko.cp.meta.api
import scala.collection.AbstractIterator
trait CloseableIterator[+T] extends Iterator[T] with AutoCloseable{self =>
def ++[A >: T](other: => CloseableIterator[A]) = new CloseableIterator[A]{
private[this] var thatInitialized = false
private[this] lazy val that = {
thatInitialized = true
other
}
def hasNext = self.hasNext || that.hasNext
def next(): A = if(self.hasNext) self.next() else that.next()
def close(): Unit = {
self.close()
if(thatInitialized) that.close()
}
}
}
object CloseableIterator{
def empty = new CloseableIterator[Nothing]{
def hasNext = false
def next(): Nothing = throw new NoSuchElementException("Empty iterator cannot have next element")
def close(): Unit = {}
}
class Wrap[T](inner: Iterator[T], closer: () => Unit) extends AbstractIterator[T] with CloseableIterator[T]{
private[this] var closed: Boolean = false
def close(): Unit = synchronized{
if(!closed){
closer()
closed = true;
}
}
def hasNext: Boolean = !closed && {
try{
val has = inner.hasNext
if(!has) close()
has
}
catch{
case err: Throwable =>
close()
throw err
}
}
def next(): T =
try{
inner.next()
}
catch{
case err: Throwable =>
close()
throw err
}
}
}
|
ICOS-Carbon-Portal/meta
|
src/main/scala/se/lu/nateko/cp/meta/api/CloseableIterator.scala
|
Scala
|
gpl-3.0
| 1,325 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.helptosavefrontend.connectors
import cats.instances.int._
import cats.syntax.eq._
import com.google.inject.{ImplementedBy, Inject, Singleton}
import play.mvc.Http.Status.OK
import uk.gov.hmrc.helptosavefrontend.config.FrontendAppConfig
import uk.gov.hmrc.helptosavefrontend.http.HttpClient.HttpClientOps
import uk.gov.hmrc.helptosavefrontend.models.iv._
import uk.gov.hmrc.helptosavefrontend.util.{Logging, toFuture}
import uk.gov.hmrc.http.{HeaderCarrier, HttpClient}
import scala.concurrent.{ExecutionContext, Future}
@ImplementedBy(classOf[IvConnectorImpl])
trait IvConnector {
def getJourneyStatus(
journeyId: JourneyId
)(implicit hc: HeaderCarrier, ec: ExecutionContext): Future[Option[IvResponse]]
}
@Singleton
class IvConnectorImpl @Inject() (http: HttpClient)(implicit val frontendAppConfig: FrontendAppConfig)
extends IvConnector with Logging {
override def getJourneyStatus(
journeyId: JourneyId
)(implicit hc: HeaderCarrier, ec: ExecutionContext): Future[Option[IvResponse]] =
http
.get(frontendAppConfig.ivJourneyResultUrl(journeyId))
.flatMap {
        case r if r.status === OK ⇒
val result = (r.json \\ "result").as[String]
IvSuccessResponse.fromString(result)
        case r ⇒
logger.warn(
s"Unexpected ${r.status} response getting IV journey status from identity-verification-frontend-service"
)
Some(IvUnexpectedResponse(r))
}
.recoverWith {
        case e: Exception ⇒
logger.warn("Error getting IV journey status from identity-verification-frontend-service", e)
Some(IvErrorResponse(e))
}
}
|
hmrc/help-to-save-frontend
|
app/uk/gov/hmrc/helptosavefrontend/connectors/IvConnector.scala
|
Scala
|
apache-2.0
| 2,285 |
package com.twitter.scalding.serialization.macros.impl.ordered_serialization.providers
import scala.reflect.macros.whitebox.Context
/**
* The `knownDirectSubclasses` method doesn't provide stable ordering
* since it returns an unordered `Set` and the `Type` AST nodes don't
* override the `hashCode` method, relying on the default identity
* `hashCode`.
*
 * This function makes the ordering stable by returning a list ordered by
 * the full names of the types.
*/
object StableKnownDirectSubclasses {
def apply(c: Context)(tpe: c.Type): List[c.universe.TypeSymbol] =
tpe.typeSymbol.asClass.knownDirectSubclasses.map(_.asType).toList.sortBy(_.fullName)
}
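// Hedged, standalone sketch, not part of the original file: the same "sort by fullName" idea
// shown with runtime reflection instead of a macro Context, purely for illustration. It assumes
// the scala-reflect library is available on the classpath.
object StableSubclassesRuntimeSketch {
  import scala.reflect.runtime.universe._

  sealed trait Animal
  final case class Cat(name: String) extends Animal
  final case class Dog(name: String) extends Animal

  // knownDirectSubclasses returns an unordered Set, so sorting by fullName gives a
  // deterministic ordering across compiler and JVM runs.
  def stableSubclasses[T: TypeTag]: List[TypeSymbol] =
    typeOf[T].typeSymbol.asClass.knownDirectSubclasses.map(_.asType).toList.sortBy(_.fullName)

  def main(args: Array[String]): Unit =
    stableSubclasses[Animal].foreach(s => println(s.fullName)) // Cat is always listed before Dog
}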
|
tdyas/scalding
|
scalding-serialization/src/main/scala/com/twitter/scalding/serialization/macros/impl/ordered_serialization/providers/StableKnownDirectSubclasses.scala
|
Scala
|
apache-2.0
| 662 |
package demo
package components
package materialui.svgicons
import chandu0101.scalajs.react.components.materialui.MuiSvgIcon
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
object FileFolder {
val component = ReactComponentB[Unit]("FileFolder")
.render(P => {
MuiSvgIcon()(
<.svg.path(^.key := "acg", ^.svg.d := "M10 4H4c-1.1 0-1.99.9-1.99 2L2 18c0 1.1.9 2 2 2h16c1.1 0 2-.9 2-2V8c0-1.1-.9-2-2-2h-8l-2-2z")
)
}).buildU
def apply() = component()
}
|
tpdi/scalajs-react-components
|
demo/src/main/scala/demo/components/materialui/svgicons/FileFolder.scala
|
Scala
|
apache-2.0
| 514 |
package im.actor.server.util
import scala.concurrent._
import scala.language.postfixOps
import akka.actor.ActorSystem
import slick.dbio.Effect.Read
import slick.dbio.{ DBIO, DBIOAction, Effect, NoStream }
import slick.driver.PostgresDriver.api._
import slick.profile.SqlAction
import im.actor.api.rpc._
import im.actor.api.rpc.users._
import im.actor.server.db.DbExtension
import im.actor.server.models.UserPhone
import im.actor.server.{ models, persist }
object UserUtils {
def defaultUserContactRecords(phones: Vector[Long], emails: Vector[String]): Vector[ContactRecord] = {
    val phoneRecords = phones map { phone ⇒
ContactRecord(ContactType.Phone, stringValue = None, longValue = Some(phone), title = Some("Mobile phone"), subtitle = None)
}
    val emailRecords = emails map { email ⇒
ContactRecord(ContactType.Email, stringValue = Some(email), longValue = None, title = Some("Email"), subtitle = None)
}
phoneRecords ++ emailRecords
}
def userContactRecords(phones: Vector[models.UserPhone], emails: Vector[models.UserEmail]): Vector[ContactRecord] = {
    val phoneRecords = phones map { phone ⇒
ContactRecord(ContactType.Phone, stringValue = None, longValue = Some(phone.number), title = Some(phone.title), subtitle = None)
}
    val emailRecords = emails map { email ⇒
ContactRecord(ContactType.Email, stringValue = Some(email.email), longValue = None, title = Some(email.title), subtitle = None)
}
phoneRecords ++ emailRecords
}
def userStruct(u: models.User, localName: Option[String], senderAuthId: Long)(
implicit
ec: ExecutionContext,
s: ActorSystem
): DBIOAction[User, NoStream, Read with Read with Read with Read] =
for {
      phones ← persist.UserPhone.findByUserId(u.id) map (_.toVector)
      emails ← persist.UserEmail.findByUserId(u.id)
      adOpt ← persist.AvatarData.findByUserId(u.id).headOption
} yield {
users.User(
id = u.id,
accessHash = ACLUtils.userAccessHash(senderAuthId, u),
name = u.name,
localName = normalizeLocalName(localName),
        sex = u.sex.toOption map (sex ⇒ users.Sex.apply(sex.toInt)),
avatar = adOpt flatMap (ImageUtils.avatar),
phone = userPhone(u, phones),
isBot = Some(u.isBot),
contactInfo = userContactRecords(phones.toVector, emails.toVector),
nick = u.nickname,
about = u.about
)
}
def userStruct(u: models.User, senderUserId: Int, senderAuthId: Long)(
implicit
ec: ExecutionContext,
s: ActorSystem
): DBIOAction[User, NoStream, Read with Read with Read with Read with Read] =
for {
      localName ← persist.contact.UserContact.findName(senderUserId: Int, u.id).headOption map (_.getOrElse(None))
      phones ← persist.UserPhone.findByUserId(u.id) map (_.toVector)
      emails ← persist.UserEmail.findByUserId(u.id)
      adOpt ← persist.AvatarData.findByUserId(u.id).headOption
} yield {
users.User(
id = u.id,
accessHash = ACLUtils.userAccessHash(senderAuthId, u),
name = u.name,
localName = normalizeLocalName(localName),
        sex = u.sex.toOption map (sex ⇒ users.Sex.apply(sex.toInt)),
avatar = adOpt flatMap (ImageUtils.avatar),
phone = userPhone(u, phones),
isBot = Some(u.isBot),
contactInfo = userContactRecords(phones.toVector, emails.toVector),
nick = u.nickname,
about = u.about
)
}
def userPhone(u: models.User, phones: Seq[UserPhone]): Option[Long] = {
phones.headOption match {
      case Some(phone) ⇒ Some(phone.number)
      case None ⇒ Some(0L)
}
}
def getUserStructOpt(userId: Int, senderUserId: Int, senderAuthId: Long)(implicit ec: ExecutionContext, s: ActorSystem): DBIOAction[Option[User], NoStream, Read with Read with Read with Read with Read with Read] =
persist.User.find(userId).headOption flatMap {
      case Some(userModel) ⇒ userStruct(userModel, senderUserId, senderAuthId) map (Some(_))
      case None ⇒ DBIO.successful(None)
}
def getUserStructs(userIds: Set[Int], senderUserId: Int, senderAuthId: Long)(implicit ec: ExecutionContext, s: ActorSystem): DBIOAction[Seq[User], NoStream, Read with Read with Read with Read with Read with Read] = {
DBIO.sequence(userIds.toSeq map (getUserStructOpt(_, senderUserId, senderAuthId))) map (_.flatten)
}
def getUserStructs(userIds: Set[Int])(implicit client: AuthorizedClientData, ec: ExecutionContext, s: ActorSystem): DBIOAction[Seq[User], NoStream, Read with Read with Read with Read with Read with Read] =
getUserStructs(userIds, client.userId, client.authId)
def getUserStructsPar(userIds: Set[Int], senderUserId: Int, senderAuthId: Long)(implicit ec: ExecutionContext, s: ActorSystem, db: Database): DBIOAction[Seq[User], NoStream, Effect] = {
    DBIO.sequence(userIds.toSeq map (userId ⇒ DBIO.from(db.run(getUserStructOpt(userId, senderUserId, senderAuthId))))) map (_.flatten)
}
def getUserStructsPar(userIds: Set[Int])(implicit client: AuthorizedClientData, ec: ExecutionContext, s: ActorSystem, db: Database): DBIOAction[Seq[User], NoStream, Effect] =
getUserStructsPar(userIds, client.userId, client.authId)
def getUser(userId: Int) = {
persist.User.find(userId).headOption
}
def getUserUnsafe(userId: Int)(implicit ec: ExecutionContext) = {
getUser(userId) map {
      case Some(user) ⇒ user
      case None ⇒ throw new Exception(s"User ${userId} not found")
}
}
def getClientUser(implicit client: AuthorizedClientData): SqlAction[Option[models.User], NoStream, Read] = {
getUser(client.userId)
}
def getClientUserUnsafe(implicit client: AuthorizedClientData, ec: ExecutionContext): DBIOAction[models.User, NoStream, Read] = {
getUserUnsafe(client.userId)
}
def getClientUserPhone(implicit client: AuthorizedClientData, ec: ExecutionContext): DBIOAction[Option[(models.User, models.UserPhone)], NoStream, Read with Read] = {
getClientUser.flatMap {
      case Some(user) ⇒
        persist.UserPhone.findByUserId(client.userId).headOption map {
          case Some(userPhone) ⇒ Some((user, userPhone))
          case None ⇒ None
        }
      case None ⇒ DBIO.successful(None)
}
}
def getClientUserPhoneUnsafe(implicit client: AuthorizedClientData, ec: ExecutionContext): DBIOAction[(models.User, models.UserPhone), NoStream, Read with Read] = {
getClientUserPhone map {
      case Some(user_phone) ⇒ user_phone
      case None ⇒ throw new Exception("Client user phone not found")
}
}
def normalizeLocalName(name: Option[String]) = name match {
    case n @ Some(name) if name.nonEmpty ⇒ n
    case _ ⇒ None
}
}
|
supertanglang/actor-platform
|
actor-server/actor-utils/src/main/scala/im/actor/server/util/UserUtils.scala
|
Scala
|
mit
| 6,837 |
package cwe.scala.library.runtime.test.math.bignumbers
import cwe.scala.library.runtime.test.ScalaTest
import cwe.scala.library.math.bignumbers._
import cwe.scala.library.boxes.Numerics._
import cwe.scala.library.audit.AuditServiceProvider
class TestRealImplBaseN extends ScalaTest {
override def run(): Unit = {
AuditServiceProvider.registerTraceServiceWithDebug
//val baseN: Int = 46340
//val baseN: Int = 11547
val baseN: Int = 10500
BigNumbersServiceProvider.setInstance(new BigNumbersServiceProvider() {
override def createReal(i: Int): Real = new RealImplBaseN(baseN, i).asInstanceOf[Real]
override def createReal(i: String): Real = new RealImplBaseN(baseN, i).asInstanceOf[Real]
override def createReal(integerPart: Int, decimalPart: Int): Real = new RealImplBaseN(baseN, integerPart, decimalPart).asInstanceOf[Real]
override def createReal(integerPart: String, decimalPart: String): Real = new RealImplBaseN(baseN, integerPart, decimalPart).asInstanceOf[Real]
})
val rops = BigNumbersServiceProvider.getRealOperationsService()
rops.MAX_DECIMAL_PRECISION = Natural.create(80)
rops.MAX_DIVISION_PRECISION = 90
/*dummy operation to force lazy loading*/ Real.create("2.3") * Real.create("-3.4")
AuditServiceProvider.unregisterTraceService
this.log("****** START OF TEST ****")
/*
test1(baseN)
test3(baseN)
test2(baseN)
*/
//test4(baseN)
test5
}
def test1(baseN: Int) = {
val pf = AuditServiceProvider.createProfiler()
pf.startProfiling()
var r1 = Real.create("123456.789")
pf.stopProfiling()
this.log(pf.getResultsAsXml("r1 creation"))
this.inspect(r1)("r1 memory")
pf.startProfiling()
val r12 = (r1 * r1).asInstanceOf[RealBaseN[_]]
pf.stopProfiling()
this.log(pf.getResultsAsXml("r1*r1"))
this.log((r12.getIntegerPartBaseN, r12.getDecimalPartBaseN()))
this.inspect(r12.asInstanceOf[Real])
val r12s = r12.asInstanceOf[Real].toString()
this.log(r12s)
val r121 = Real.create(r12s).asInstanceOf[RealBaseN[_]]
this.log((r121.getIntegerPartBaseN, r121.getDecimalPartBaseN()))
this.assertEqual((r12.getIntegerPartBaseN, r12.getDecimalPartBaseN()), (r121.getIntegerPartBaseN, r121.getDecimalPartBaseN()), "change base OK")
//AuditServiceProvider.registerTraceServiceWithDebug
this.inspect(r12 / r1)("r12/r1=r1")
this.assertEqual(r12 / r1, r1, "sqrt(r*r)=r")
val rops = BigNumbersServiceProvider.getRealOperationsService()
rops.MAX_DECIMAL_PRECISION = Natural.create(200)
rops.MAX_DIVISION_PRECISION = 210
val m1 = Real.create("12845798247598247520859082757892475")
this.log(m1 / (m1 * m1 * m1 * m1))
this.assert(m1 / (m1 * m1 * m1 * m1) > Real.ZERO, "test division")
this.assert(m1 / (m1 * m1 * m1 * m1) < Real.ONE, "test division")
rops.MAX_DECIMAL_PRECISION = Natural.create(80)
rops.MAX_DIVISION_PRECISION = 90
}
def test2(baseN: Int) = {
val r1 = Real.create("987654321").asInstanceOf[RealImplBaseN[Int]]
this.assertEqual(baseN, r1.getBaseN(), "bases are equal")
this.log("integer part base " + r1.getBaseN() + ": " + r1.getIntegerPartBaseN())
this.log("integer part base 10: " + r1.getIntegerPart())
this.log("integer part base 2: " + r1.getIntegerPartBaseX(byteNumeric.two))
this.log("integer part base 16: " + r1.getIntegerPartBaseX(16.asInstanceOf[Byte]))
var a: Real = null
this.inspect({ a = Real.create("89898989898989898989898989898989898989898989898998988989898989898998"); a })
this.log("inspects multiplication")
this.inspect(a * a)
val r4_i = "123456123456123456123456123456123456123456123456123456123456"
val r4_d = "789789789789789789789789789789789789789789"
this.assertEqual(Real.create(r4_i, r4_d).toString(), r4_i + "." + r4_d, "test big numbers")
// Test leading and extra decimal zeros removing
val extraZeros = "000000000000000000000000000000"
val r5 = Real.create(extraZeros + r4_d + extraZeros)
this.inspect(r5)("r5")
this.log(r5.asInstanceOf[RealImplBaseN[Int]].getIntegerPartBaseN)
this.assertEqual(r5.toString(), r4_d + extraZeros, "test big numbers with extra zeros removal")
val r4 = Real.create(extraZeros + r4_i + extraZeros, extraZeros + r4_d + extraZeros)
this.inspect(r4)("r4")
this.log(r4.asInstanceOf[RealImplBaseN[Int]].getDecimalPartBaseN)
this.assertEqual(r4.toString(), r4_i + extraZeros + "." + extraZeros + r4_d, "test big numbers with extra zeros removal")
val two = Real.TWO
this.assertEqual(Real.create(1) / two, Real.create("0.5"), "division by two")
this.assertEqual(Real.TWO / two, Real.create("1"), "division by two")
this.assertEqual(Real.create(3) / two, Real.create("1.5"), "division by two")
this.assertEqual(Real.create(4) / two, Real.create("2"), "division by two")
this.assertEqual(Real.create(5) / two, Real.create("2.5"), "division by two")
this.assertEqual(Real.create(6) / two, Real.create("3"), "division by two")
this.assertEqual(Real.create(7) / two, Real.create("3.5"), "division by two")
this.assertEqual(Real.create(8) / two, Real.create("4"), "division by two")
this.assertEqual(Real.create("4.5") * two, Real.create("9"), "multiplication by two")
AuditServiceProvider.registerTraceServiceWithDebug
val x = (Real.create(1) / two).asInstanceOf[RealBaseN[_]]
this.log((x.getDecimalPart(), x.getDecimalPartBaseN()))
val ten = Real.create(10)
this.assertEqual((x * ten), Real.create(5), "0.5*10 = 5")
this.assertEqual(Real.create(9) / Real.TWO, Real.create((9.0 / 2).toString), "test division")
AuditServiceProvider.unregisterTraceService
this.runOtherTest(new TestReal)
}
def test3(baseN: Int) = {
val r1 = new RealImplBaseN[Long](12, "123456789", "123123")
val r2 = new RealImplBaseN[Byte](12.asInstanceOf[Byte], "123456789", "123123")
this.log(r1)
this.log(r2)
this.assertEqual(r1, r2, "equals should match if base is identical")
}
def test4(baseN: Int) = {
val rops = BigNumbersServiceProvider.getRealOperationsService()
rops.MAX_DECIMAL_PRECISION = Natural.create(10)
rops.MAX_DIVISION_PRECISION = 10
AuditServiceProvider.registerTraceServiceWithDebug
var x = Real.create("123456.789012")
this.log("x*x")
x *= x
this.log("1/x")
x = Real.ONE / x
this.log("1/x to base 10")
this.log(x)
AuditServiceProvider.unregisterTraceService()
}
def test5() = {
this.runOtherTest(new TestReal)
}
}
|
wwwigii-system/research
|
cwe-scala-library/src/cwe/scala/library/runtime/test/math/bignumbers/TestRealImplBaseN.scala
|
Scala
|
gpl-3.0
| 6,305 |
package im.actor.server.activation
object Activation {
sealed trait Code {
def code: String
}
sealed trait PhoneCode extends Code {
def phone: Long
}
final case class SmsCode(phone: Long, code: String) extends PhoneCode {
override def equals(that: Any): Boolean =
that match {
        case that: SmsCode ⇒ this.phone == that.phone
        case _ ⇒ false
}
override def hashCode(): Int = phone.hashCode()
}
final case class CallCode(phone: Long, code: String, language: String) extends PhoneCode {
override def equals(that: Any): Boolean =
that match {
        case that: CallCode ⇒ this.phone == that.phone
        case _ ⇒ false
}
override def hashCode(): Int = phone.hashCode()
}
final case class EmailCode(email: String, code: String) extends Code {
override def equals(that: Any): Boolean =
that match {
        case that: EmailCode ⇒ this.email == that.email
        case _ ⇒ false
}
override def hashCode(): Int = email.hashCode()
}
}
|
lzpfmh/actor-platform
|
actor-server/actor-activation/src/main/scala/im/actor/server/activation/Activation.scala
|
Scala
|
mit
| 1,080 |
import java.io.File
import java.text.SimpleDateFormat
import java.util.TimeZone
import com.github.tototoshi.csv._
import org.joda.time.{DateTimeZone, LocalDateTime}
object PLDIAssignments extends App {
if (args.size == 0) {
println("Usage:")
println("\\tsbt \\"run <users.csv> <assignments.csv> <print times? true/false>\\"")
sys.exit(1)
}
val csv_users = args(0)
val csv_assignments = args(1)
val with_times = if (args.size == 2) { false } else { args(2).toBoolean }
val reader_users = CSVReader.open(new File(csv_users))
val users: List[Map[String, String]] = reader_users.allWithHeaders()
reader_users.close()
val user_db = users.map { row =>
val vol_id = row("id")
val last = row("last")
val first = row("first")
val email = row("email")
val gender = row("gender")
vol_id -> Map('first -> first, 'last -> last, 'gender -> gender, 'email -> email)
}.toMap
val reader_assignments = CSVReader.open(new File(csv_assignments))
val assignments: List[Map[String, String]] = reader_assignments.allWithHeaders()
reader_assignments.close()
// define date format parser
val df = new SimpleDateFormat("dd.MM.yyyy kk:mm zzzz")
// for each user, find all of that user's assignments
// want:
// user -> List((assignment1,time1),...,(assignmentn,timen))
val user_assignments = users.flatMap { row =>
val vol_id = row("id")
val assns = assignments.flatMap { arow =>
val volunteers = arow("assigned_volunteers").split(";")
if(volunteers.contains(vol_id)) {
// parse dates & convert to PDT
val start_d = date_conv(df.parse(arow("start")))
val end_d = date_conv(df.parse(arow("end")))
// unfortunately, the string representation is still wrong
val start_conv = start_d.toString.replace("EDT","PDT")
val end_conv = end_d.toString.replace("EDT","PDT")
// compute duration in mins
val duration = (end_d.getTime - start_d.getTime)/1000/60
Some((arow("event"),start_conv,end_conv,duration,arow("total_duration"),start_d,end_d))
} else {
None
}
}
if (assns.isEmpty) {
None
} else {
Some(vol_id -> assns)
}
}
user_assignments.foreach { case (vol_id, assns) =>
val total_duration = assns.foldLeft(0L){ case (acc,(_,_,_,_,admin_duration,_,_)) => acc + admin_duration.toInt}
val duration_txt = if (with_times) { " (" + total_duration + " total minutes)" } else { "" }
println(user_db(vol_id)('first) +
" " + user_db(vol_id)('last) +
" <" + user_db(vol_id)('email) + ">" +
duration_txt
)
assns.foreach { case(event, start, end, duration, admin_duration, start_d, end_d) =>
println("\\t" + start + " UNTIL " + end + " (" + admin_duration + " minutes)" + "\\t" + event)
}
println()
}
val hourly_schedule = user_assignments.map { case (volunteer_id, assignments) =>
assignments.map { case (event, start, end, duration, admin_duration, start_d, end_d) =>
// unfortunately, the string representation is still wrong
val start_conv = start_d.toString.replace("EDT","PDT")
val end_conv = end_d.toString.replace("EDT","PDT")
(event, start_conv, end_conv, duration, admin_duration, volunteer_id, start_d, end_d)
}
}.flatten.sortBy { case (event, start, end, duration, admin_duration, volunteer_id, start_d, end_d) => start_d }
hourly_schedule.foreach { case (event, start, end, duration, admin_duration, volunteer_id, start_d, end_d) =>
println(List(start, end, event, admin_duration, (user_db(volunteer_id)('first) + " " + user_db(volunteer_id)('last))).mkString(", "))
}
// assignments.sortBy{ arow => arow("start") }.foreach { arow =>
// val event = arow("event")
// val start_d = date_conv(df.parse(arow("start")))
// val end_d = date_conv(df.parse(arow("end")))
// val duration = (end_d.getTime - start_d.getTime)/1000/60
// println(List(event,arow("start"),arow("end"),duration).mkString(","))
// }
def date_conv(date: java.util.Date) : java.util.Date = {
val start_dj = new LocalDateTime(date)
val srcDateTime = start_dj.toDateTime(DateTimeZone.forID("US/Eastern"))
val dstDateTime = srcDateTime.withZone(DateTimeZone.forID("US/Pacific"))
dstDateTime.toLocalDateTime.toDateTime.toDate
}
}
|
dbarowy/PLDI_15_SV_assignments
|
src/main/scala/PLDIAssignments.scala
|
Scala
|
bsd-2-clause
| 4,346 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.support.matching
/**
* Encapsulates word matching strategies for parsing text and acronyms
*/
object WordMatchStrategies {
/**
* Matches whole words with lowercase
*/
case object FullLowerCaseWordStrategy extends WordMatchStrategy with FullLowerCaseMatchStrategy
/**
* Matches whole words with uppercase
*/
case object FullUpperCaseWordStrategy extends WordMatchStrategy with FullUpperCaseMatchStrategy
/**
* Matches whole words using TitleCase
*/
case object TitleCaseWordStrategy extends WordMatchStrategy with FullUpperCaseMatchStrategy
/**
* Matches whole words ignoring and implicitly preserving case
*/
case object IgnoreCaseWordStrategy extends WordMatchStrategy with FullIgnoreCaseMatchStrategy
}
|
adarro/ddo-calc
|
subprojects/common/ddo-util/src/main/scala/io/truthencode/ddo/support/matching/WordMatchStrategies.scala
|
Scala
|
apache-2.0
| 1,424 |
package com.example.http4s
import cats.effect._
import cats.implicits._
import fs2.{Scheduler, Stream}
import io.circe.Json
import org.http4s._
import org.http4s.MediaType._
import org.http4s.circe._
import org.http4s.dsl.Http4sDsl
import org.http4s.headers._
import org.http4s.server._
import org.http4s.server.middleware.authentication.BasicAuth
import org.http4s.server.middleware.authentication.BasicAuth.BasicAuthenticator
import org.http4s.twirl._
import scala.concurrent._
import scala.concurrent.duration._
class ExampleService[F[_]](implicit F: Effect[F]) extends Http4sDsl[F] {
// A Router can mount multiple services to prefixes. The request is passed to the
// service with the longest matching prefix.
def service(
implicit scheduler: Scheduler,
executionContext: ExecutionContext = ExecutionContext.global): HttpService[F] =
Router[F](
"" -> rootService,
"/auth" -> authService,
"/science" -> new ScienceExperiments[F].service
)
def rootService(
implicit scheduler: Scheduler,
executionContext: ExecutionContext): HttpService[F] =
HttpService[F] {
case GET -> Root =>
// Supports Play Framework template -- see src/main/twirl.
Ok(html.index())
case _ -> Root =>
// The default route result is NotFound. Sometimes MethodNotAllowed is more appropriate.
MethodNotAllowed()
case GET -> Root / "ping" =>
// EntityEncoder allows for easy conversion of types to a response body
Ok("pong")
case GET -> Root / "future" =>
// EntityEncoder allows rendering asynchronous results as well
Ok(Future("Hello from the future!"))
case GET -> Root / "streaming" =>
// It's also easy to stream responses to clients
Ok(dataStream(100))
case req @ GET -> Root / "ip" =>
        // It's possible to define an EntityEncoder anywhere, so you're not limited to built-in types
val json = Json.obj("origin" -> Json.fromString(req.remoteAddr.getOrElse("unknown")))
Ok(json)
case GET -> Root / "redirect" =>
        // Not every response must be Ok using an EntityEncoder: some have meaning only for specific types
TemporaryRedirect(Location(uri("/http4s/")))
case GET -> Root / "content-change" =>
// EntityEncoder typically deals with appropriate headers, but they can be overridden
Ok("<h2>This will have an html content type!</h2>", `Content-Type`(`text/html`))
case req @ GET -> "static" /: path =>
// captures everything after "/static" into `path`
// Try http://localhost:8080/http4s/static/nasa_blackhole_image.jpg
// See also org.http4s.server.staticcontent to create a mountable service for static content
StaticFile.fromResource(path.toString, Some(req)).getOrElseF(NotFound())
///////////////////////////////////////////////////////////////
//////////////// Dealing with the message body ////////////////
case req @ POST -> Root / "echo" =>
// The body can be used in the response
Ok(req.body).map(_.putHeaders(`Content-Type`(`text/plain`)))
case GET -> Root / "echo" =>
Ok(html.submissionForm("echo data"))
case req @ POST -> Root / "echo2" =>
// Even more useful, the body can be transformed in the response
Ok(req.body.drop(6), `Content-Type`(`text/plain`))
case GET -> Root / "echo2" =>
Ok(html.submissionForm("echo data"))
case req @ POST -> Root / "sum" =>
// EntityDecoders allow turning the body into something useful
req
.decode[UrlForm] { data =>
data.values.get("sum") match {
case Some(Seq(s, _*)) =>
val sum = s.split(' ').filter(_.length > 0).map(_.trim.toInt).sum
Ok(sum.toString)
case None => BadRequest(s"Invalid data: " + data)
}
}
.handleErrorWith { // We can handle errors using effect methods
case e: NumberFormatException => BadRequest("Not an int: " + e.getMessage)
}
case GET -> Root / "sum" =>
Ok(html.submissionForm("sum"))
///////////////////////////////////////////////////////////////
////////////////////// Blaze examples /////////////////////////
// You can use the same service for GET and HEAD. For HEAD request,
// only the Content-Length is sent (if static content)
case GET -> Root / "helloworld" =>
helloWorldService
case HEAD -> Root / "helloworld" =>
helloWorldService
// HEAD responses with Content-Length, but empty content
case HEAD -> Root / "head" =>
Ok("", `Content-Length`.unsafeFromLong(1024))
// Response with invalid Content-Length header generates
// an error (underflow causes the connection to be closed)
case GET -> Root / "underflow" =>
Ok("foo", `Content-Length`.unsafeFromLong(4))
// Response with invalid Content-Length header generates
// an error (overflow causes the extra bytes to be ignored)
case GET -> Root / "overflow" =>
Ok("foo", `Content-Length`.unsafeFromLong(2))
///////////////////////////////////////////////////////////////
//////////////// Form encoding example ////////////////////////
case GET -> Root / "form-encoded" =>
Ok(html.formEncoded())
case req @ POST -> Root / "form-encoded" =>
// EntityDecoders return an F[A] which is easy to sequence
req.decode[UrlForm] { m =>
val s = m.values.mkString("\\n")
Ok(s"Form Encoded Data\\n$s")
}
///////////////////////////////////////////////////////////////
//////////////////////// Server Push //////////////////////////
/*
case req @ GET -> Root / "push" =>
// http4s intends to be a forward looking library made with http2.0 in mind
val data = <html><body><img src="image.jpg"/></body></html>
Ok(data)
.withContentType(Some(`Content-Type`(`text/html`)))
.push("/image.jpg")(req)
*/
case req @ GET -> Root / "image.jpg" =>
StaticFile
.fromResource("/nasa_blackhole_image.jpg", Some(req))
.getOrElseF(NotFound())
///////////////////////////////////////////////////////////////
//////////////////////// Multi Part //////////////////////////
/* TODO fs2 port
case req @ GET -> Root / "form" =>
Ok(html.form())
case req @ POST -> Root / "multipart" =>
req.decode[Multipart] { m =>
Ok(s"""Multipart Data\\nParts:${m.parts.length}\\n${m.parts.map { case f: Part => f.name }.mkString("\\n")}""")
}
*/
}
def helloWorldService: F[Response[F]] = Ok("Hello World!")
// This is a mock data source, but could be a Process representing results from a database
def dataStream(n: Int)(implicit scheduler: Scheduler, ec: ExecutionContext): Stream[F, String] = {
val interval = 100.millis
val stream =
scheduler
.awakeEvery[F](interval)
.map(_ => s"Current system time: ${System.currentTimeMillis()} ms\\n")
.take(n.toLong)
Stream.emit(s"Starting $interval stream intervals, taking $n results\\n\\n") ++ stream
}
// Services can be protected using HTTP authentication.
val realm = "testrealm"
val authStore: BasicAuthenticator[F, String] = (creds: BasicCredentials) =>
if (creds.username == "username" && creds.password == "password") F.pure(Some(creds.username))
else F.pure(None)
// An AuthedService[A, F] is a Service[F, (A, Request[F]), Response[F]] for some
// user type A. `BasicAuth` is an auth middleware, which binds an
// AuthedService to an authentication store.
val basicAuth: AuthMiddleware[F, String] = BasicAuth(realm, authStore)
def authService: HttpService[F] =
basicAuth(AuthedService[String, F] {
// AuthedServices look like Services, but the user is extracted with `as`.
case GET -> Root / "protected" as user =>
Ok(s"This page is protected using HTTP authentication; logged in as $user")
})
}
|
reactormonk/http4s
|
examples/src/main/scala/com/example/http4s/ExampleService.scala
|
Scala
|
apache-2.0
| 8,118 |
package org.judal.examples.scala.jdbc
import org.junit.Test
import org.judal.Using._
import org.scalatest.Suite
import org.judal.storage.scala.TableOperation
import org.judal.examples.scala.model.Student
/**
 * Use the TableOperation wrapper to fetch students whose last name is "Kol",
 * then change the last name to "Col".
 * Data is read from the database using the DataSource provided by
 * EngineFactory.DefaultThreadDataSource.get()
*/
class E13_FetchByNonUniqueIndexUsingOperationWrapper extends Suite {
@Test def demo() = {
E13_FetchByNonUniqueIndexUsingOperationWrapper.setUp
val s = new Student()
var op : TableOperation[Student] = null
using (op) {
op = new TableOperation(s)
// Fetch students whose last name is "Kol" returning the results sorted by first_name in ascending order
val students : Iterable[Student] = op.fetchAsc(s.fetchGroup, "last_name", "Kol", "first_name")
for (t <- students) {
// Change last name and update record in the database
t.setLastName("Col")
t.store()
}
}
E13_FetchByNonUniqueIndexUsingOperationWrapper.tearDown
}
}
object E13_FetchByNonUniqueIndexUsingOperationWrapper {
def setUp() = {
E10_WriteCSVDataIntoTheDatabase.setUp
}
def tearDown() = {
E10_WriteCSVDataIntoTheDatabase.tearDown
}
}
|
sergiomt/judal
|
aexample/src/main/scala/org/judal/examples/scala/jdbc/E13_FetchByNonUniqueIndexUsingOperationWrapper.scala
|
Scala
|
apache-2.0
| 1,352 |
package go3d.testing
import org.junit.{Assert, Test}
import go3d.*
class TestConstants:
@Test def testMinBoardSize(): Unit =
Assert.assertEquals(MinBoardSize, 3)
@Test def testMaxBoardSize(): Unit =
Assert.assertEquals(MaxBoardSize, 25)
@Test def testMaxHandicaps(): Unit =
Assert.assertEquals(MaxHandicaps, 27)
@Test def testMaxPlayers(): Unit =
Assert.assertEquals(MaxPlayers, 2)
@Test def testDefaultPlayers(): Unit =
Assert.assertEquals(DefaultPlayers, 2)
|
lene/go-3
|
src/test/scala/TestConstants.scala
|
Scala
|
gpl-2.0
| 496 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kafka010
import java.io.File
import java.lang.{Integer => JInt}
import java.net.InetSocketAddress
import java.util.{Map => JMap, Properties}
import java.util.concurrent.TimeoutException
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.language.postfixOps
import scala.util.control.NonFatal
import kafka.admin.AdminUtils
import kafka.api.Request
import kafka.server.{KafkaConfig, KafkaServer}
import kafka.utils.ZkUtils
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.zookeeper.server.{NIOServerCnxnFactory, ZooKeeperServer}
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.streaming.Time
import org.apache.spark.util.Utils
/**
 * This is a helper class for Kafka test suites. It can set up
 * and tear down local Kafka servers, and push data using Kafka producers.
 *
 * The reason to put the Kafka test utility class in src is to test Python-related Kafka APIs.
*/
private[kafka010] class KafkaTestUtils extends Logging {
// Zookeeper related configurations
private val zkHost = "localhost"
private var zkPort: Int = 0
private val zkConnectionTimeout = 60000
private val zkSessionTimeout = 6000
private var zookeeper: EmbeddedZookeeper = _
private var zkUtils: ZkUtils = _
// Kafka broker related configurations
private val brokerHost = "localhost"
private var brokerPort = 0
private var brokerConf: KafkaConfig = _
// Kafka broker server
private var server: KafkaServer = _
// Kafka producer
private var producer: KafkaProducer[String, String] = _
// Flag to test whether the system is correctly started
private var zkReady = false
private var brokerReady = false
def zkAddress: String = {
assert(zkReady, "Zookeeper not setup yet or already torn down, cannot get zookeeper address")
s"$zkHost:$zkPort"
}
def brokerAddress: String = {
assert(brokerReady, "Kafka not setup yet or already torn down, cannot get broker address")
s"$brokerHost:$brokerPort"
}
def zookeeperClient: ZkUtils = {
assert(zkReady, "Zookeeper not setup yet or already torn down, cannot get zookeeper client")
Option(zkUtils).getOrElse(
throw new IllegalStateException("Zookeeper client is not yet initialized"))
}
// Set up the Embedded Zookeeper server and get the proper Zookeeper port
private def setupEmbeddedZookeeper(): Unit = {
// Zookeeper server startup
zookeeper = new EmbeddedZookeeper(s"$zkHost:$zkPort")
// Get the actual zookeeper binding port
zkPort = zookeeper.actualPort
zkUtils = ZkUtils(s"$zkHost:$zkPort", zkSessionTimeout, zkConnectionTimeout, false)
zkReady = true
}
// Set up the Embedded Kafka server
private def setupEmbeddedKafkaServer(): Unit = {
assert(zkReady, "Zookeeper should be set up beforehand")
// Kafka broker startup
Utils.startServiceOnPort(brokerPort, port => {
brokerPort = port
brokerConf = new KafkaConfig(brokerConfiguration, doLog = false)
server = new KafkaServer(brokerConf)
server.startup()
brokerPort = server.boundPort()
(server, brokerPort)
}, new SparkConf(), "KafkaBroker")
brokerReady = true
}
  /** Set up all the embedded servers, including Zookeeper and the Kafka broker */
def setup(): Unit = {
setupEmbeddedZookeeper()
setupEmbeddedKafkaServer()
}
  /** Tear down all the servers, including the Kafka broker and Zookeeper */
def teardown(): Unit = {
brokerReady = false
zkReady = false
if (producer != null) {
producer.close()
producer = null
}
if (server != null) {
server.shutdown()
server = null
}
brokerConf.logDirs.foreach { f => Utils.deleteRecursively(new File(f)) }
if (zkUtils != null) {
zkUtils.close()
zkUtils = null
}
if (zookeeper != null) {
zookeeper.shutdown()
zookeeper = null
}
}
/** Create a Kafka topic and wait until it is propagated to the whole cluster */
def createTopic(topic: String, partitions: Int): Unit = {
AdminUtils.createTopic(zkUtils, topic, partitions, 1)
// wait until metadata is propagated
(0 until partitions).foreach { p =>
waitUntilMetadataIsPropagated(topic, p)
}
}
/** Create a Kafka topic and wait until it is propagated to the whole cluster */
def createTopic(topic: String): Unit = {
createTopic(topic, 1)
}
/** Java-friendly function for sending messages to the Kafka broker */
def sendMessages(topic: String, messageToFreq: JMap[String, JInt]): Unit = {
sendMessages(topic, Map(messageToFreq.asScala.mapValues(_.intValue()).toSeq: _*))
}
/** Send the messages to the Kafka broker */
def sendMessages(topic: String, messageToFreq: Map[String, Int]): Unit = {
val messages = messageToFreq.flatMap { case (s, freq) => Seq.fill(freq)(s) }.toArray
sendMessages(topic, messages)
}
/** Send the array of messages to the Kafka broker */
def sendMessages(topic: String, messages: Array[String]): Unit = {
producer = new KafkaProducer[String, String](producerConfiguration)
messages.foreach { message =>
producer.send(new ProducerRecord[String, String](topic, message))
}
producer.close()
producer = null
}
private def brokerConfiguration: Properties = {
val props = new Properties()
props.put("broker.id", "0")
props.put("host.name", "localhost")
props.put("port", brokerPort.toString)
props.put("log.dir", Utils.createTempDir().getAbsolutePath)
props.put("zookeeper.connect", zkAddress)
props.put("log.flush.interval.messages", "1")
props.put("replica.socket.timeout.ms", "1500")
props
}
private def producerConfiguration: Properties = {
val props = new Properties()
props.put("bootstrap.servers", brokerAddress)
props.put("value.serializer", classOf[StringSerializer].getName)
// Key serializer is required.
props.put("key.serializer", classOf[StringSerializer].getName)
// wait for all in-sync replicas to ack sends
props.put("acks", "all")
props
}
// A simplified version of scalatest eventually, rewritten here to avoid adding extra test
// dependency
def eventually[T](timeout: Time, interval: Time)(func: => T): T = {
def makeAttempt(): Either[Throwable, T] = {
try {
Right(func)
} catch {
case e if NonFatal(e) => Left(e)
}
}
val startTime = System.currentTimeMillis()
@tailrec
def tryAgain(attempt: Int): T = {
makeAttempt() match {
case Right(result) => result
case Left(e) =>
val duration = System.currentTimeMillis() - startTime
if (duration < timeout.milliseconds) {
Thread.sleep(interval.milliseconds)
} else {
throw new TimeoutException(e.getMessage)
}
tryAgain(attempt + 1)
}
}
tryAgain(1)
}
private def waitUntilMetadataIsPropagated(topic: String, partition: Int): Unit = {
def isPropagated = server.apis.metadataCache.getPartitionInfo(topic, partition) match {
case Some(partitionState) =>
val leaderAndInSyncReplicas = partitionState.leaderIsrAndControllerEpoch.leaderAndIsr
zkUtils.getLeaderForPartition(topic, partition).isDefined &&
Request.isValidBrokerId(leaderAndInSyncReplicas.leader) &&
leaderAndInSyncReplicas.isr.size >= 1
case _ =>
false
}
eventually(Time(10000), Time(100)) {
assert(isPropagated, s"Partition [$topic, $partition] metadata not propagated after timeout")
}
}
private class EmbeddedZookeeper(val zkConnect: String) {
val snapshotDir = Utils.createTempDir()
val logDir = Utils.createTempDir()
val zookeeper = new ZooKeeperServer(snapshotDir, logDir, 500)
val (ip, port) = {
val splits = zkConnect.split(":")
(splits(0), splits(1).toInt)
}
val factory = new NIOServerCnxnFactory()
factory.configure(new InetSocketAddress(ip, port), 16)
factory.startup(zookeeper)
val actualPort = factory.getLocalPort
def shutdown() {
factory.shutdown()
Utils.deleteRecursively(snapshotDir)
Utils.deleteRecursively(logDir)
}
}
}
|
gioenn/xSpark
|
external/kafka-0-10/src/main/scala/org/apache/spark/streaming/kafka010/KafkaTestUtils.scala
|
Scala
|
apache-2.0
| 9,198 |
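Because the class above is package-private, a usage sketch has to live in the same package. The object name, topic name and message frequencies below are illustrative and not part of the original file; the sketch only exercises methods defined above (setup, createTopic, sendMessages, brokerAddress, teardown) and assumes the Spark Kafka test dependencies are on the classpath.
package org.apache.spark.streaming.kafka010

// Hypothetical usage sketch for the helper above (not part of the original suite).
object KafkaTestUtilsExample {
  def main(args: Array[String]): Unit = {
    val kafkaTestUtils = new KafkaTestUtils
    kafkaTestUtils.setup()                              // start embedded Zookeeper + Kafka broker
    try {
      kafkaTestUtils.createTopic("example-topic", 2)    // create a topic with 2 partitions
      kafkaTestUtils.sendMessages("example-topic", Map("a" -> 3, "b" -> 1))
      println(s"Broker listening at ${kafkaTestUtils.brokerAddress}")
    } finally {
      kafkaTestUtils.teardown()                         // always stop the broker and Zookeeper
    }
  }
}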
/*
* Licensed to Intel Corporation under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Intel Corporation licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl._
import org.scalatest.{FlatSpec, Matchers}
import scala.math.abs
@com.intel.analytics.bigdl.tags.Parallel
class TanhSpec extends FlatSpec with Matchers {
"A Tanh Module " should "generate correct output and grad" in {
val module = new Tanh[Double]()
val input = Tensor[Double](2, 2, 2)
input(Array(1, 1, 1)) = -0.17020166106522
input(Array(1, 1, 2)) = 0.57785657607019
input(Array(1, 2, 1)) = -1.3404131438583
input(Array(1, 2, 2)) = 1.0938102817163
input(Array(2, 1, 1)) = 1.120370157063
input(Array(2, 1, 2)) = -1.5014141565189
input(Array(2, 2, 1)) = 0.3380249235779
input(Array(2, 2, 2)) = -0.625677742064
val gradOutput = Tensor[Double](2, 2, 2)
gradOutput(Array(1, 1, 1)) = 0.79903302760795
gradOutput(Array(1, 1, 2)) = 0.019753993256018
gradOutput(Array(1, 2, 1)) = 0.63136631483212
gradOutput(Array(1, 2, 2)) = 0.29849314852618
gradOutput(Array(2, 1, 1)) = 0.94380705454387
gradOutput(Array(2, 1, 2)) = 0.030344664584845
gradOutput(Array(2, 2, 1)) = 0.33804601291195
gradOutput(Array(2, 2, 2)) = 0.8807330634445
val expectedOutput = Tensor[Double](2, 2, 2)
expectedOutput(Array(1, 1, 1)) = -0.16857698275003
expectedOutput(Array(1, 1, 2)) = 0.52110579963112
expectedOutput(Array(1, 2, 1)) = -0.87177144344863
expectedOutput(Array(1, 2, 2)) = 0.79826462420686
expectedOutput(Array(2, 1, 1)) = 0.80769763073281
expectedOutput(Array(2, 1, 2)) = -0.90540347425835
expectedOutput(Array(2, 2, 1)) = 0.32571298952384
expectedOutput(Array(2, 2, 2)) = -0.55506882753488
val expectedGrad = Tensor[Double](2, 2, 2)
expectedGrad(Array(1, 1, 1)) = 0.77632594793144
expectedGrad(Array(1, 1, 2)) = 0.014389771607755
expectedGrad(Array(1, 2, 1)) = 0.15153710218424
expectedGrad(Array(1, 2, 2)) = 0.1082854310036
expectedGrad(Array(2, 1, 1)) = 0.32809049064441
expectedGrad(Array(2, 1, 2)) = 0.0054694603766104
expectedGrad(Array(2, 2, 1)) = 0.3021830658283
expectedGrad(Array(2, 2, 2)) = 0.6093779706637
val inputOrg = input.clone()
val gradOutputOrg = gradOutput.clone()
val output = module.forward(input)
val gradInput = module.backward(input, gradOutput)
expectedOutput.map(output, (v1, v2) => {
assert(abs(v1 - v2) < 1e-6);
v1
})
expectedGrad.map(gradInput, (v1, v2) => {
assert(abs(v1 - v2) < 1e-6);
v1
})
assert(input == inputOrg)
assert(gradOutput == gradOutputOrg)
}
"A Tanh Module " should "be good in gradient check" in {
val module = new Tanh[Double]()
val input = Tensor[Double](2, 2, 2).rand()
val checker = new GradientChecker(1e-4, 1e-2)
checker.checkLayer[Double](module, input) should be(true)
}
}
|
SeaOfOcean/BigDL
|
dl/src/test/scala/com/intel/analytics/bigdl/nn/TanhSpec.scala
|
Scala
|
apache-2.0
| 3,659 |
package com.wandoulabs.onecache.core
import com.wandoulabs.onecache.core.cache.Cache
trait CacheManager {
def getCache(name: Any): Cache
}
|
cowboy129/onecache
|
onecache-core/src/main/scala/com/wandoulabs/onecache/core/CacheManager.scala
|
Scala
|
apache-2.0
| 143 |
package berlin.jentsch.modelchecker.akka.example
import akka.Done
import akka.actor.typed._
import akka.actor.typed.scaladsl.Behaviors._
import berlin.jentsch.modelchecker.akka._
import berlin.jentsch.modelchecker.akka.example.Philosophers.stickFree
object Philosophers {
def apply(): Behavior[Unit] = setup { ctx =>
val stick1 = ctx.spawn(stick, "Stick1")
val stick2 = ctx.spawn(stick, "Stick2")
val stick3 = ctx.spawn(stick, "Stick3")
ctx.spawn(philosophers(stick1, stick2), "Philosopher1")
ctx.spawn(philosophers(stick2, stick3), "Philosopher2")
ctx.spawn(philosophers(stick1, stick3), "Philosopher3")
empty
}
def deadlock: Behavior[Unit] = setup { ctx =>
val stick1 = ctx.spawn(stick, "Stick1")
val stick2 = ctx.spawn(stick, "Stick2")
val stick3 = ctx.spawn(stick, "Stick3")
ctx.spawn(philosophers(stick1, stick2), "Philosopher1")
ctx.spawn(philosophers(stick2, stick3), "Philosopher2")
ctx.spawn(philosophers(stick3, stick1), "Philosopher3")
empty
}
sealed trait Messages
case class Req(sender: ActorRef[Done]) extends Messages
case object Free extends Messages
def stick: Behavior[Messages] = stickFree
lazy val stickFree: Behavior[Messages] = receiveMessagePartial {
case Req(sender) =>
sender ! Done
stickInUse
}
lazy val stickInUse: Behavior[Messages] = receiveMessage {
case Free => stickFree
case Req(sender) =>
receiveMessagePartial {
case Free =>
sender ! Done
stickInUse
}
}
def philosophers(
stick1: ActorRef[Messages],
stick2: ActorRef[Messages]
): Behavior[Done] = {
def acquireFirstStick: Behavior[Done] = setup { ctx =>
stick1 ! Req(ctx.self)
receiveMessage { case Done => acquireSecondStick }
}
def acquireSecondStick: Behavior[Done] = setup { ctx =>
stick2 ! Req(ctx.self)
receiveMessage { case Done => release }
}
def release: Behavior[Done] = setup { _ =>
stick2 ! Free
stick1 ! Free
acquireFirstStick
}
acquireFirstStick
}
}
class PhilosophersSpec extends AkkaSpec {
behavior of "philosophers"
Philosophers() should "always progress" in (
invariantly(progressIsPossible),
alwaysEventually(root / "Stick1" is stickFree)
)
Philosophers.deadlock should "deadlock sometimes" in
potentially(!progressIsPossible)
}
|
Jentsch/modelchecker
|
akka/src/test/scala/berlin/jentsch/modelchecker/akka/example/Philosophers.scala
|
Scala
|
mit
| 2,404 |
/*
* Copyright (c) 2013-2014 TelefΓ³nica InvestigaciΓ³n y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.common
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
/** Implicit method extensions that can be used to do short circuit logical operations
* on Boolean futures only evaluating the right part of the operation if necessary.
*/
object BooleanFutures {
implicit class BooleanFutureOps(val left: Future[Boolean]) extends AnyVal {
/** An OR, logical inclusive disjunction operation.
* Maps this future to the right part only if this future's value evaluates to false.
* @param right the right boolean future of the OR
* @return true if at least one of the futures is true
*/
def or(right: => Future[Boolean]): Future[Boolean] =
left.flatMap(value => if (value) Future.successful(value) else right)
/** An AND, logical conjunction operation.
* Maps this future to the right part only if this future's value evaluates to true.
* @param right the right boolean future of the AND
     * @return true only if both futures are true
*/
def and(right: => Future[Boolean]): Future[Boolean] =
left.flatMap(value => if (!value) Future.successful(value) else right)
}
}
|
telefonicaid/fiware-cosmos-platform
|
common/src/main/scala/es/tid/cosmos/common/BooleanFutures.scala
|
Scala
|
apache-2.0
| 1,851 |
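A minimal sketch of the short-circuit behaviour of `or` and `and` above; the object name and the stand-in check are illustrative and not part of the original file.
package es.tid.cosmos.common

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

import es.tid.cosmos.common.BooleanFutures._

// Hypothetical example, not part of the original file.
object BooleanFuturesExample extends App {
  // Stand-in for an expensive check; because `or`/`and` take the right side by name,
  // it is only evaluated when the left side does not already decide the result.
  def expensiveCheck(): Future[Boolean] = Future.successful(true)

  val orResult = Future.successful(true) or expensiveCheck()    // right side skipped: left is already true
  val andResult = Future.successful(false) and expensiveCheck() // right side skipped: left is already false

  println(Await.result(orResult, 1.second))  // true
  println(Await.result(andResult, 1.second)) // false
}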
package es.weso.typing
import util._
case class ReasonTypingAsMap[Node, Label, ReasonPos, ReasonNeg](
m: Map[Node,ReasonTypeRow[Label,ReasonPos,ReasonNeg]]
) extends ReasonPosNegTyping[Node,Label,ReasonPos,ReasonNeg] {
override def nodes = m.keys.toSeq
override def addPosTypeReason(n: Node, label: Label, reason: ReasonPos) = {
val typeRow =
if (m contains n) m(n)
else ReasonTypeRow.empty[Label,ReasonPos,ReasonNeg]
for {
tr <- typeRow.addPos(label,reason)
} yield {
ReasonTypingAsMap(m + (n -> tr))
}
}
override def addNegTypeReason(n: Node, label: Label, reason: ReasonNeg) = {
val typeRow =
if (m contains n) m(n)
else ReasonTypeRow.empty[Label,ReasonPos,ReasonNeg]
for {
tr <- typeRow.addNeg(label,reason)
} yield {
ReasonTypingAsMap(m + (n -> tr))
}
}
override def getPosTypesReason(node: Node): Seq[(Label,ReasonPos)] = {
m.get(node).getOrElse(ReasonTypeRow.empty).pos.toSeq
}
override def getNegTypesReason(node: Node): Seq[(Label,ReasonNeg)] = {
m.get(node).getOrElse(ReasonTypeRow.empty).neg.toSeq
}
override def getAllTypesReason(node: Node): (Seq[(Label,ReasonPos)],Seq[(Label,ReasonNeg)]) = {
(getPosTypesReason(node),getNegTypesReason(node))
}
override def toString: String = {
"Typing:" + m.toString
}
}
|
labra/ShExcala
|
src/main/scala/es/weso/typing/ReasonTypingAsMap.scala
|
Scala
|
mit
| 1,382 |
package org.scalacoin.script.crypto
import org.scalacoin.script.ScriptOperationFactory
/**
* Created by chris on 1/8/16.
*/
trait CryptoOperationFactory extends ScriptOperationFactory[CryptoOperation] {
override def operations = Seq(OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY, OP_CHECKSIG, OP_CHECKSIGVERIFY,
OP_CODESEPARATOR, OP_HASH160, OP_HASH256, OP_RIPEMD160, OP_SHA1, OP_SHA256)
}
object CryptoOperationFactory extends CryptoOperationFactory
|
TomMcCabe/scalacoin
|
src/main/scala/org/scalacoin/script/crypto/CryptoOperationFactory.scala
|
Scala
|
mit
| 457 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.Equality
import org.scalactic.Explicitly
import org.scalactic.StringNormalizations._
import org.scalactic.Uniformity
import org.scalactic.Prettifier
import SharedHelpers._
import Matchers._
class TheSameElementsInOrderAsContainMatcherDeciderSpec extends FunSpec with Explicitly {
private val prettifier = Prettifier.default
val incremented: Uniformity[Int] =
new Uniformity[Int] {
var count = 0
def normalized(s: Int): Int = {
count += 1
s + count
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[Int]
def normalizedOrSame(b: Any) =
b match {
case i: Int => normalized(i)
case _ => b
}
}
val lowerCaseEquality =
new Equality[String] {
def areEqual(left: String, right: Any) =
left.toLowerCase == (right match {
case s: String => s.toLowerCase
case other => other
})
}
val reverseEquality =
new Equality[String] {
def areEqual(left: String, right: Any) =
left.reverse == (right match {
case s: String => s.toLowerCase
case other => other
})
}
describe("theSameElementsInOrderAs ") {
def checkShouldContainStackDepth(e: exceptions.StackDepthException, left: Any, right: Any, lineNumber: Int): Unit = {
val leftText = FailureMessages.decorateToStringValue(prettifier, left)
val rightText = FailureMessages.decorateToStringValue(prettifier, right)
e.message should be (Some(leftText + " did not contain the same elements in the same (iterated) order as " + rightText))
e.failedCodeFileName should be (Some("TheSameElementsInOrderAsContainMatcherDeciderSpec.scala"))
e.failedCodeLineNumber should be (Some(lineNumber))
}
def checkShouldNotContainStackDepth(e: exceptions.StackDepthException, left: Any, right: Any, lineNumber: Int): Unit = {
val leftText = FailureMessages.decorateToStringValue(prettifier, left)
val rightText = FailureMessages.decorateToStringValue(prettifier, right)
e.message should be (Some(leftText + " contained the same elements in the same (iterated) order as " + rightText))
e.failedCodeFileName should be (Some("TheSameElementsInOrderAsContainMatcherDeciderSpec.scala"))
e.failedCodeLineNumber should be (Some(lineNumber))
}
it("should take specified normalization when 'should contain' is used") {
(List("1 ", " 2", "3 ") should contain theSameElementsInOrderAs List(" 1", "2 ", " 3")) (after being trimmed)
(Array("1 ", " 2", "3 ") should contain theSameElementsInOrderAs List(" 1", "2 ", " 3")) (after being trimmed)
// SKIP-SCALATESTJS,NATIVE-START
(javaList("1 ", " 2", "3 ") should contain theSameElementsInOrderAs List(" 1", "2 ", " 3")) (after being trimmed)
// SKIP-SCALATESTJS,NATIVE-END
}
it("should take specified normalization when 'should not contain' is used") {
(List(1, 2, 3) should not contain theSameElementsInOrderAs (List(1, 2, 3))) (after being incremented)
(Array(1, 2, 3) should not contain theSameElementsInOrderAs (List(1, 2, 3))) (after being incremented)
// SKIP-SCALATESTJS,NATIVE-START
(javaList(1, 2, 3) should not contain theSameElementsInOrderAs (List(1, 2, 3))) (after being incremented)
// SKIP-SCALATESTJS,NATIVE-END
}
it("should throw TestFailedException with correct stack depth and message when 'should contain custom matcher' failed with specified normalization") {
val left1 = List(1, 2, 3)
val right1 = List(1, 2, 3)
val e1 = intercept[exceptions.TestFailedException] {
(left1 should contain theSameElementsInOrderAs right1) (after being incremented)
}
checkShouldContainStackDepth(e1, left1, right1, thisLineNumber - 2)
val left2 = Array(1, 2, 3)
val right2 = List(1, 2, 3)
val e2 = intercept[exceptions.TestFailedException] {
(left2 should contain theSameElementsInOrderAs right2) (after being incremented)
}
checkShouldContainStackDepth(e2, left2, right2, thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-START
val left3 = javaList(1, 2, 3)
val right3 = List(1, 2, 3)
val e3 = intercept[exceptions.TestFailedException] {
(left3 should contain theSameElementsInOrderAs right3) (after being incremented)
}
checkShouldContainStackDepth(e3, left3, right3, thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-END
}
it("should throw TestFailedException with correct stack depth and message when 'should not contain custom matcher' failed with specified normalization") {
val left1 = List("1 ", " 2", "3 ")
val right1 = List(" 1", "2 ", " 3")
val e1 = intercept[exceptions.TestFailedException] {
(left1 should not contain theSameElementsInOrderAs (right1)) (after being trimmed)
}
checkShouldNotContainStackDepth(e1, left1, right1, thisLineNumber - 2)
val left2 = Array("1 ", " 2", "3 ")
val right2 = List(" 1", "2 ", " 3")
val e2 = intercept[exceptions.TestFailedException] {
(left2 should not contain theSameElementsInOrderAs (right2)) (after being trimmed)
}
checkShouldNotContainStackDepth(e2, left2, right2, thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-START
val left3 = javaList("1 ", " 2", "3 ")
val right3 = List(" 1", "2 ", " 3")
val e3 = intercept[exceptions.TestFailedException] {
(left3 should not contain theSameElementsInOrderAs (right3)) (after being trimmed)
}
checkShouldNotContainStackDepth(e3, left3, right3, thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-END
}
it("should take passed in custom explicit equality when 'should contain' is used") {
(List("A ", " B", "C ") should contain theSameElementsInOrderAs List(" a", "b ", " c")) (decided by lowerCaseEquality afterBeing trimmed)
(Array("A ", " B", "C ") should contain theSameElementsInOrderAs List(" a", "b ", " c")) (decided by lowerCaseEquality afterBeing trimmed)
// SKIP-SCALATESTJS,NATIVE-START
(javaList("A ", " B", "C ") should contain theSameElementsInOrderAs List(" a", "b ", " c")) (decided by lowerCaseEquality afterBeing trimmed)
// SKIP-SCALATESTJS,NATIVE-END
}
it("should take passed in custom explicit equality when 'should not contain' is used") {
(List("one ", " two", "three ") should not contain theSameElementsInOrderAs (List(" one", "two ", " three"))) (decided by reverseEquality afterBeing trimmed)
(Array("one ", " two", "three ") should not contain theSameElementsInOrderAs (List(" one", "two ", " three"))) (decided by reverseEquality afterBeing trimmed)
// SKIP-SCALATESTJS,NATIVE-START
(javaList("one ", " two", "three ") should not contain theSameElementsInOrderAs (List(" one", "two ", " three"))) (decided by reverseEquality afterBeing trimmed)
// SKIP-SCALATESTJS,NATIVE-END
}
it("should throw TestFailedException with correct stack depth and message when 'should contain custom matcher' failed with custom explicit equality") {
val left1 = List("one ", " two", "three ")
val right1 = List(" one", "two ", " three")
val e1 = intercept[exceptions.TestFailedException] {
(left1 should contain theSameElementsInOrderAs right1) (decided by reverseEquality afterBeing trimmed)
}
checkShouldContainStackDepth(e1, left1, right1, thisLineNumber - 2)
val left2 = Array("one ", " two", "three ")
val right2 = List(" one", "two ", " three")
val e2 = intercept[exceptions.TestFailedException] {
(left2 should contain theSameElementsInOrderAs right2) (decided by reverseEquality afterBeing trimmed)
}
checkShouldContainStackDepth(e2, left2, right2, thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-START
val left3 = javaList("one ", " two", "three ")
val right3 = List(" one", "two ", " three")
val e3 = intercept[exceptions.TestFailedException] {
(left3 should contain theSameElementsInOrderAs right3) (decided by reverseEquality afterBeing trimmed)
}
checkShouldContainStackDepth(e3, left3, right3, thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-END
}
it("should throw TestFailedException with correct stack depth and message when 'should not contain custom matcher' failed with custom explicit equality") {
val left1 = List("ONE ", " TWO", "THREE ")
val right1 = List("one", "two ", " three")
val e1 = intercept[exceptions.TestFailedException] {
(left1 should not contain theSameElementsInOrderAs (right1)) (decided by lowerCaseEquality afterBeing trimmed)
}
checkShouldNotContainStackDepth(e1, left1, right1, thisLineNumber - 2)
val left2 = Array("ONE ", " TWO", "THREE ")
val right2 = List("one", "two ", " three")
val e2 = intercept[exceptions.TestFailedException] {
(left2 should not contain theSameElementsInOrderAs (right2)) (decided by lowerCaseEquality afterBeing trimmed)
}
checkShouldNotContainStackDepth(e2, left2, right2, thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-START
val left3 = javaList("ONE ", " TWO", "THREE ")
val right3 = List("one", "two ", " three")
val e3 = intercept[exceptions.TestFailedException] {
(left3 should not contain theSameElementsInOrderAs (right3)) (decided by lowerCaseEquality afterBeing trimmed)
}
checkShouldNotContainStackDepth(e3, left3, right3, thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-END
}
}
}
|
dotty-staging/scalatest
|
scalatest-test/src/test/scala/org/scalatest/TheSameElementsInOrderAsContainMatcherDeciderSpec.scala
|
Scala
|
apache-2.0
| 10,359 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.keras.layers
import com.intel.analytics.bigdl.dllib.nn.abstractnn._
import com.intel.analytics.bigdl.dllib.nn.VolumetricMaxPooling
import com.intel.analytics.bigdl.dllib.nn.{Sequential => TSequential}
import com.intel.analytics.bigdl.dllib.nn.internal.GlobalPooling3D
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.Shape
import com.intel.analytics.bigdl.dllib.keras.Net
import com.intel.analytics.bigdl.dllib.keras.layers.utils.KerasUtils
import scala.reflect.ClassTag
/**
* Applies global max pooling operation for 3D data.
* Data format currently supported for this layer is 'CHANNEL_FIRST' (dimOrdering='th').
* Border mode currently supported for this layer is 'valid'.
* The input of this layer should be 5D.
*
* When you use this layer as the first layer of a model, you need to provide the argument
* inputShape (a Single Shape, does not include the batch dimension).
*
* @param dimOrdering Format of input data. Please use 'CHANNEL_FIRST' (dimOrdering='th').
* @param inputShape A Single Shape, does not include the batch dimension.
 * @tparam T The numeric type of the parameters (e.g. weight, bias). Only Float and Double are supported for now.
*/
class GlobalMaxPooling3D[T: ClassTag](
override val dimOrdering: String = "CHANNEL_FIRST",
override val inputShape: Shape = null)(implicit ev: TensorNumeric[T])
extends GlobalPooling3D[T](dimOrdering, inputShape) with Net {
override def doBuild(inputShape: Shape): AbstractModule[Tensor[T], Tensor[T], T] = {
val input = inputShape.toSingle().toArray
val model = TSequential[T]()
val layer = VolumetricMaxPooling(
kT = input(2),
kW = input(4),
kH = input(3),
dT = 1,
dW = 1,
dH = 1)
model.add(layer)
model.add(com.intel.analytics.bigdl.dllib.nn.Squeeze(5))
model.add(com.intel.analytics.bigdl.dllib.nn.Squeeze(4))
model.add(com.intel.analytics.bigdl.dllib.nn.Squeeze(3))
model.asInstanceOf[AbstractModule[Tensor[T], Tensor[T], T]]
}
}
object GlobalMaxPooling3D {
def apply[@specialized(Float, Double) T: ClassTag](
dimOrdering: String = "th",
inputShape: Shape = null)(implicit ev: TensorNumeric[T]) : GlobalMaxPooling3D[T] = {
new GlobalMaxPooling3D[T](KerasUtils.toBigDLFormat5D(dimOrdering), inputShape)
}
}
|
intel-analytics/BigDL
|
scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/keras/layers/GlobalMaxPooling3D.scala
|
Scala
|
apache-2.0
| 3,033 |
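A small shape sketch that drives the layer above through its doBuild method directly (bypassing the usual Keras build flow); the tensor sizes are illustrative, and the example assumes a BigDL runtime on the classpath.
package com.intel.analytics.bigdl.dllib.keras.layers

import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.Shape

// Hypothetical example, not part of the original file.
object GlobalMaxPooling3DExample {
  def main(args: Array[String]): Unit = {
    val layer = GlobalMaxPooling3D[Float]()
    // doBuild expects the 5D shape including the batch dimension:
    // (batch, channels, dim1, dim2, dim3) in CHANNEL_FIRST ordering.
    val module = layer.doBuild(Shape(2, 3, 4, 5, 6))
    val output = module.forward(Tensor[Float](2, 3, 4, 5, 6).rand())
    println(output.size().mkString("x")) // expected 2x3: one max per (sample, channel)
  }
}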
// -----------------------------------------------------------------------------
//
// Scalax - The Scala Community Library
// Copyright (c) 2005-8 The Scalax Project. All rights reserved.
//
// The primary distribution site is http://scalax.scalaforge.org/
//
// This software is released under the terms of the Revised BSD License.
// There is NO WARRANTY. See the file LICENSE for the full text.
//
// -----------------------------------------------------------------------------
package com.codahale.jerkson.util
package scalax
package rules
import language.postfixOps
import scala.collection.immutable.ArraySeq
/**
* A workaround for the difficulties of dealing with
* a contravariant 'In' parameter type...
*/
class InRule[In, +Out, +A, +X](rule: Rule[In, Out, A, X]) {
def mapRule[Out2, B, Y](f: Result[Out, A, X] => In => Result[Out2, B, Y]): Rule[In, Out2, B, Y] = rule.factory.rule {
in: In => f(rule(in))(in)
}
/** Creates a rule that succeeds only if the original rule would fail on the given context. */
def unary_! : Rule[In, In, Unit, Nothing] = mapRule {
case Success(_, _) => in: In => Failure
case _ => in: In => Success(in, ())
}
/** Creates a rule that succeeds if the original rule succeeds, but returns the original input. */
def & : Rule[In, In, A, X] = mapRule {
case Success(_, a) => in: In => Success(in, a)
case Failure => in: In => Failure
case Error(x) => in: In => Error(x)
}
}
class SeqRule[S, +A, +X](rule: Rule[S, S, A, X]) {
import rule.factory._
def ? = rule mapRule {
case Success(out, a) => in: S => Success(out, Some(a))
case Failure => in: S => Success(in, None)
case Error(x) => in: S => Error(x)
}
/** Creates a rule that always succeeds with a Boolean value.
* Value is 'true' if this rule succeeds, 'false' otherwise */
def -? = ? map (_.isDefined)
def * = from[S] {
// tail-recursive function with reverse list accumulator
def rep(in: S, acc: List[A]): Result[S, List[A], X] = rule(in) match {
case Success(out, a) => rep(out, a :: acc)
case Failure => Success(in, acc.reverse)
case err: Error[_] => err
}
in => rep(in, Nil)
}
def + = rule ~++ *
def ~>?[B >: A, X2 >: X](f: => Rule[S, S, B => B, X2]) = for (a <- rule; fs <- f?) yield fs.foldLeft[B](a) { (b, f) => f(b) }
def ~>*[B >: A, X2 >: X](f: => Rule[S, S, B => B, X2]) = for (a <- rule; fs <- f*) yield fs.foldLeft[B](a) { (b, f) => f(b) }
def ~*~[B >: A, X2 >: X](join: => Rule[S, S, (B, B) => B, X2]) = {
this ~>* (for (f <- join; a <- rule) yield f(_: B, a))
}
/** Repeats this rule one or more times with a separator (which is discarded) */
def +/[X2 >: X](sep: => Rule[S, S, Any, X2]) = rule ~++ (sep -~ rule *)
/** Repeats this rule zero or more times with a separator (which is discarded) */
def */[X2 >: X](sep: => Rule[S, S, Any, X2]) = +/(sep) | state[S].nil
def *~-[Out, X2 >: X](end: => Rule[S, Out, Any, X2]) = (rule - end *) ~- end
def +~-[Out, X2 >: X](end: => Rule[S, Out, Any, X2]) = (rule - end +) ~- end
/** Repeats this rule num times */
def times(num: Int): Rule[S, S, Seq[A], X] = from[S] {
val result = new Array[AnyRef](num)
// more compact using HoF but written this way so it's tail-recursive
def rep(i: Int, in: S): Result[S, Seq[A], X] = {
if (i == num) Success(in, ArraySeq.unsafeWrapArray(result).asInstanceOf[ArraySeq[A]])
else rule(in) match {
case Success(out, a) => {
result(i) = a.asInstanceOf[AnyRef]
rep(i + 1, out)
}
case Failure => Failure
case err: Error[_] => err
}
}
in => rep(0, in)
}
}
|
rememberthemilk/jerkson
|
src/main/scala/com/codahale/jerkson/util/scalax/rules/SeqRule.scala
|
Scala
|
mit
| 3,683 |
package org.jetbrains.plugins.scala
package editor.enterHandler
import com.intellij.codeInsight.CodeInsightSettings
import com.intellij.codeInsight.editorActions.enter.EnterHandlerDelegate.Result
import com.intellij.codeInsight.editorActions.enter.EnterHandlerDelegateAdapter
import com.intellij.openapi.actionSystem.DataContext
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.editor.actionSystem.EditorActionHandler
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.util.{Ref, TextRange}
import com.intellij.psi.PsiFile
import org.jetbrains.plugins.scala.format.StringConcatenationParser
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.ScLiteral
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.util.MultilineStringSettings
import org.jetbrains.plugins.scala.util.MultilineStringUtil._
/**
* User: Dmitry Naydanov
* Date: 2/27/12
*/
class MultilineStringEnterHandler extends EnterHandlerDelegateAdapter {
private var wasInMultilineString: Boolean = false
private var whiteSpaceAfterCaret: String = ""
override def preprocessEnter(file: PsiFile, editor: Editor, caretOffsetRef: Ref[Integer], caretAdvance: Ref[Integer],
dataContext: DataContext, originalHandler: EditorActionHandler): Result = {
val document = editor.getDocument
val text = document.getText
val caretOffset = caretOffsetRef.get.intValue
if (caretOffset == 0 || caretOffset >= text.length()) return Result.Continue
val element = file findElementAt caretOffset
if (!inMultilineString(element)) return Result.Continue
else wasInMultilineString = true
val ch1 = text.charAt(caretOffset - 1)
val ch2 = text.charAt(caretOffset)
    whiteSpaceAfterCaret = text.substring(caretOffset).takeWhile(c => c == ' ' || c == '\t')
document.deleteString(caretOffset, caretOffset + whiteSpaceAfterCaret.length)
    if ((ch1 != '(' || ch2 != ')') && (ch1 != '{' || ch2 != '}') || !CodeInsightSettings.getInstance.SMART_INDENT_ON_ENTER)
return Result.Continue
originalHandler.execute(editor, editor.getCaretModel.getCurrentCaret, dataContext)
Result.DefaultForceIndent
}
override def postProcessEnter(file: PsiFile, editor: Editor, dataContext: DataContext): Result = {
if (!file.isInstanceOf[ScalaFile]) return Result.Continue
val caretModel = editor.getCaretModel
val document = editor.getDocument
val offset = caretModel.getOffset
val caretMarker = document.createRangeMarker(offset, offset)
caretMarker.setGreedyToRight(true)
def caretOffset = caretMarker.getEndOffset
val project = file.getProject
val element = file.findElementAt(offset)
if (!wasInMultilineString) return Result.Continue
wasInMultilineString = false
val marginChar = getMarginChar(element)
val settings = new MultilineStringSettings(project)
import settings._
val literal = findParentMLString(element).getOrElse(return Result.Continue)
val literalOffset = literal.getTextRange.getStartOffset
val firstMLQuote = interpolatorPrefix(literal) + multilineQuotes
val firstMLQuoteLength = firstMLQuote.length
if (supportLevel == ScalaCodeStyleSettings.MULTILINE_STRING_NONE || offset - literalOffset < firstMLQuoteLength) return Result.Continue
def getLineByNumber(number: Int): String =
document.getText(new TextRange(document.getLineStartOffset(number), document.getLineEndOffset(number)))
def getSpaces(count: Int) = StringUtil.repeat(" ", count)
def getSmartSpaces(count: Int) = if (useTabs) {
      StringUtil.repeat("\t", count/tabSize) + StringUtil.repeat(" ", count%tabSize)
} else {
StringUtil.repeat(" ", count)
}
    def getSmartLength(line: String) = if (useTabs) line.length + line.count(_ == '\t')*(tabSize - 1) else line.length
def insertNewLine(nlOffset: Int, indent: Int, trimPreviousLine: Boolean) {
      document.insertString(nlOffset, "\n")
forceIndent(nlOffset + 1, indent, None)
if (trimPreviousLine) {
val line = getLineByNumber(document.getLineNumber(nlOffset))
var i = 0
def charToCheck = line.charAt(line.length - 1 - i)
        while (i <= line.length - 1 && (charToCheck == ' ' || charToCheck == '\t')) {
i += 1
}
document.deleteString(nlOffset - i, nlOffset)
}
}
def forceIndent(offset: Int, indent: Int, marginChar: Option[Char]) {
val lineNumber = document.getLineNumber(offset)
val lineStart = document.getLineStartOffset(lineNumber)
val line = getLineByNumber(lineNumber)
      val wsPrefix = line.takeWhile(c => c == ' ' || c == '\t')
document.replaceString(lineStart, lineStart + wsPrefix.length, getSmartSpaces(indent) + marginChar.getOrElse(""))
}
extensions inWriteAction {
val prevLineNumber = document.getLineNumber(offset) - 1
assert(prevLineNumber >= 0)
val prevLine = getLineByNumber(prevLineNumber)
val currentLine = getLineByNumber(prevLineNumber + 1)
val nextLine = if (document.getLineCount > prevLineNumber + 2) getLineByNumber(prevLineNumber + 2) else ""
def prevLinePrefixAfterDelimiter(offsetInLine: Int): Int =
        if (prevLine.length > offsetInLine) prevLine.substring(offsetInLine).prefixLength(c => c == ' ' || c == '\t') else 0
      val wasSingleLine = literal.getText.indexOf("\n") == literal.getText.lastIndexOf("\n")
      val lines = literal.getText.split("\n")
val marginCharOpt = if (needAddByType(literal)) selectBySettings[Option[Char]](None)(Some(marginChar)) else None
if (wasSingleLine || lines.length == 3 &&
(lines(0).endsWith("(") && lines(2).trim.startsWith(")") || lines(0).endsWith("{") && lines(2).trim.startsWith("}"))) {
val trimmedStartLine = getLineByNumber(document.getLineNumber(offset) - 1).trim()
val inConcatenation = literal.getParent match {
case ScInfixExpr(lit: ScLiteral, op, `literal`) if op.refName == "+" && lit.isString => Option(lit)
case ScInfixExpr(expr, op, `literal`) if op.refName == "+" && StringConcatenationParser.isString(expr) => Option(expr)
case _ => None
}
val needInsertNLBefore = (!trimmedStartLine.startsWith(firstMLQuote) || inConcatenation.isDefined) && quotesOnNewLine
selectBySettings()(if (needAddByType(literal)) insertStripMargin(document, literal, marginChar))
val prevIndent =
if (inConcatenation.isDefined) inConcatenation.map { expr =>
val exprStart = expr.getTextRange.getStartOffset
val lineStart = document.getLineStartOffset(document.getLineNumber(exprStart))
getSmartLength(document.getText.substring(lineStart, exprStart))
}.get
else prefixLength(prevLine)
val needInsertIndentInt =
if (needInsertNLBefore && !inConcatenation.isDefined) regularIndent
else 0
if (needInsertNLBefore) {
insertNewLine(literalOffset, prevIndent + needInsertIndentInt, trimPreviousLine = true)
}
val indentSize = prevIndent + needInsertIndentInt + interpolatorPrefixLength(literal) + marginIndent
if (literal.getText.substring(offset - literalOffset) == multilineQuotes) {
forceIndent(caretOffset, indentSize, marginCharOpt)
caretMarker.setGreedyToRight(false)
insertNewLine(caretOffset, indentSize - marginIndent, trimPreviousLine = false)
caretMarker.setGreedyToRight(true)
} else {
forceIndent(caretOffset, indentSize, marginCharOpt)
}
if (!wasSingleLine) {
val currentPrefix = getPrefix(getLineByNumber(document.getLineNumber(caretOffset)))
forceIndent(caretOffset + 1, getSmartLength(currentPrefix), marginCharOpt)
}
} else {
val isCurrentLineEmpty = currentLine.trim.length == 0
val currentLineOffset = document.getLineStartOffset(prevLineNumber + 1)
val isPrevLineFirst = prevLine startsWith firstMLQuote
val isPrevLineTrimmedFirst = prevLine.trim startsWith firstMLQuote
val prevLineStartOffset = document getLineStartOffset prevLineNumber
val wsPrefix =
if (isPrevLineFirst) prevLinePrefixAfterDelimiter(firstMLQuoteLength) + firstMLQuoteLength
        else prevLine.prefixLength(c => c == ' ' || c == '\t')
val prefixStriped = prevLine.substring(wsPrefix)
if (supportLevel == ScalaCodeStyleSettings.MULTILINE_STRING_QUOTES_AND_INDENT ||
!prefixStriped.startsWith(Seq(marginChar)) && !prefixStriped.startsWith(firstMLQuote) ||
!lines.map(_.trim).exists(_.startsWith(Seq(marginChar)))) {
if (prevLineStartOffset < literalOffset) {
val beforeQuotes = prevLinePrefixAfterDelimiter(0)
val elementStart = prevLine.indexOf(firstMLQuote) + firstMLQuoteLength
val prevLineWsPrefixAfterQuotes = prevLinePrefixAfterDelimiter(elementStart)
val spacesToInsert =
if (isPrevLineTrimmedFirst) beforeQuotes + firstMLQuoteLength + prevLineWsPrefixAfterQuotes
else (if (isCurrentLineEmpty) elementStart else elementStart - wsPrefix) + prevLineWsPrefixAfterQuotes
forceIndent(currentLineOffset, getSmartLength(getSmartSpaces(spacesToInsert)), None)
}
else if (isCurrentLineEmpty && prevLine.length > 0)
forceIndent(caretOffset, wsPrefix, None)
else if (prevLine.trim.length == 0)
forceIndent(caretOffset, prevLine.length, None)
else if (isPrevLineTrimmedFirst) {
val wsAfterQuotes = prevLinePrefixAfterDelimiter(wsPrefix + firstMLQuoteLength) + firstMLQuoteLength
forceIndent(caretOffset, wsAfterQuotes, None)
}
} else {
val wsAfterMargin =
if (isPrevLineFirst) firstMLQuoteLength else prevLinePrefixAfterDelimiter(wsPrefix + 1)
if (!currentLine.trim.startsWith(Seq(marginChar))) {
val inBraces = prevLine.endsWith("{") && nextLine.trim.startsWith("}") || prevLine.endsWith("(") && nextLine.trim.startsWith(")")
val prefix =
if (inBraces) getPrefix(nextLine)
else if (prevLine.trim.startsWith(Seq(marginChar))) getPrefix(prevLine)
else if (nextLine.trim.startsWith(Seq(marginChar))) getPrefix(nextLine)
else getPrefix(currentLine)
forceIndent(caretOffset, getSmartLength(prefix), marginCharOpt)
document.insertString(caretOffset, getSpaces(wsAfterMargin))
if (inBraces) {
val nextLineOffset = document.getLineStartOffset(prevLineNumber + 2)
forceIndent(nextLineOffset, 0, None)
document.insertString(nextLineOffset, marginChar + getSpaces(wsAfterMargin))
forceIndent(nextLineOffset, getSmartLength(prefix), None)
}
}
}
}
document.insertString(caretOffset, whiteSpaceAfterCaret)
caretModel.moveToOffset(caretOffset)
caretMarker.dispose()
}
Result.Stop
}
}
|
SergeevPavel/intellij-scala
|
src/org/jetbrains/plugins/scala/editor/enterHandler/MultilineStringEnterHandler.scala
|
Scala
|
apache-2.0
| 11,342 |
package cassandra.fixtures
case class Pair(inner:(String, Int, Boolean))
|
fabianmurariu/cassandra-scala-nuveau-driver
|
cql/app/src/test/scala/cassandra/fixtures/Pair.scala
|
Scala
|
apache-2.0
| 74 |
package cromwell.backend.impl.jes.io
import cats.data.Validated._
import cats.syntax.apply._
import cats.syntax.validated._
import com.google.api.services.genomics.model.Disk
import cromwell.core.path.{DefaultPathBuilder, Path}
import common.exception.MessageAggregation
import common.validation.ErrorOr._
import wom.values._
import scala.util.Try
import scala.util.matching.Regex
object JesAttachedDisk {
val Identifier = "[a-zA-Z0-9-_]+"
  val Directory = """/[^\s]+"""
val Integer = "[1-9][0-9]*"
  val WorkingDiskPattern: Regex = s"""${JesWorkingDisk.Name}\\s+($Integer)\\s+($Identifier)""".r
  val MountedDiskPattern: Regex = s"""($Directory)\\s+($Integer)\\s+($Identifier)""".r
def parse(s: String): Try[JesAttachedDisk] = {
def sizeGbValidation(sizeGbString: String): ErrorOr[Int] = validateLong(sizeGbString).map(_.toInt)
def diskTypeValidation(diskTypeString: String): ErrorOr[DiskType] = validateDiskType(diskTypeString)
val validation: ErrorOr[JesAttachedDisk] = s match {
case WorkingDiskPattern(sizeGb, diskType) => (validateDiskType(diskType), sizeGbValidation(sizeGb)) mapN { JesWorkingDisk.apply }
case MountedDiskPattern(mountPoint, sizeGb, diskType) => (sizeGbValidation(sizeGb), diskTypeValidation(diskType)) mapN { (s, dt) => JesEmptyMountedDisk(dt, s, DefaultPathBuilder.get(mountPoint)) }
case _ => s"Disk strings should be of the format 'local-disk SIZE TYPE' or '/mount/point SIZE TYPE' but got: '$s'".invalidNel
}
Try(validation match {
case Valid(localDisk) => localDisk
case Invalid(nels) =>
throw new UnsupportedOperationException with MessageAggregation {
val exceptionContext = ""
val errorMessages: List[String] = nels.toList
}
})
}
private def validateDiskType(diskTypeName: String): ErrorOr[DiskType] = {
DiskType.values().find(_.diskTypeName == diskTypeName) match {
case Some(diskType) => diskType.validNel
case None =>
val diskTypeNames = DiskType.values.map(_.diskTypeName).mkString(", ")
s"Disk TYPE $diskTypeName should be one of $diskTypeNames".invalidNel
}
}
private def validateLong(value: String): ErrorOr[Long] = {
try {
value.toLong.validNel
} catch {
case _: IllegalArgumentException => s"$value not convertible to a Long".invalidNel
}
}
}
trait JesAttachedDisk {
def name: String
def diskType: DiskType
def sizeGb: Int
def mountPoint: Path
def toGoogleDisk: Disk = {
new Disk().setName(name)
.setType(diskType.googleTypeName)
.setAutoDelete(true)
.setSizeGb(sizeGb)
.setMountPoint(mountPoint.toAbsolutePath.pathAsString)
}
}
case class JesEmptyMountedDisk(diskType: DiskType, sizeGb: Int, mountPoint: Path) extends JesAttachedDisk {
val name = s"d-${mountPoint.pathAsString.md5Sum}"
override def toString: String = s"$mountPoint $sizeGb ${diskType.diskTypeName}"
}
object JesWorkingDisk {
val MountPoint: Path = DefaultPathBuilder.get("/cromwell_root")
val Name = "local-disk"
val Default = JesWorkingDisk(DiskType.SSD, 10)
}
case class JesWorkingDisk(diskType: DiskType, sizeGb: Int) extends JesAttachedDisk {
val mountPoint = JesWorkingDisk.MountPoint
val name = JesWorkingDisk.Name
override def toString: String = s"$name $sizeGb ${diskType.diskTypeName}"
}
|
ohsu-comp-bio/cromwell
|
supportedBackends/jes/src/main/scala/cromwell/backend/impl/jes/io/JesAttachedDisk.scala
|
Scala
|
bsd-3-clause
| 3,344 |
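An illustrative round trip through the parse method above, covering the two accepted disk-string formats and a malformed one. The disk type names "SSD" and "HDD" are assumptions about the DiskType enum, which is not shown in this file, and the object name is hypothetical.
package cromwell.backend.impl.jes.io

// Hypothetical example, not part of the original file.
object JesAttachedDiskExample {
  def main(args: Array[String]): Unit = {
    // "local-disk SIZE TYPE" becomes the working disk mounted at /cromwell_root
    println(JesAttachedDisk.parse("local-disk 20 SSD"))
    // "/mount/point SIZE TYPE" becomes an extra mounted disk
    println(JesAttachedDisk.parse("/mnt/scratch 100 HDD"))
    // anything else surfaces as a Failure carrying the aggregated error message
    println(JesAttachedDisk.parse("not a disk"))
  }
}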
package de.ummels.prioritymap
import org.scalacheck.{Arbitrary, Gen}
import org.scalatest.{PropSpecLike, prop, Matchers}
trait PropertySpec extends PropSpecLike with prop.PropertyChecks with Matchers with prop.Configuration {
override implicit val generatorDrivenConfig: PropertyCheckConfiguration =
PropertyCheckConfiguration(minSuccessful = 100)
private val ord1 = Ordering.Tuple2(Ordering.Int, Ordering.Int)
private val ord2 = Ordering.by[(Int, Int), Int](x => x._1)
type Key = Int
type Value = (Int, Int)
def genOrd: Gen[Ordering[Value]] = Gen.oneOf(ord1, ord2)
def genKey: Gen[Key] = Gen.choose(-10, 10)
def genValue: Gen[Value] = Gen.zip(genKey, genKey)
def genKeyValue: Gen[(Key, Value)] = Gen.zip(genKey, genValue)
def genPriorityMap: Gen[PriorityMap[Key, Value]] = for {
ord <- genOrd
kvs <- Gen.listOf(genKeyValue)
m = PriorityMap.empty(ord) ++ kvs
default <- Arbitrary.arbitrary[Option[Value]]
pred <- Arbitrary.arbitrary[Option[Key => Boolean]]
} yield (default, pred) match {
case (None, None) => m
case (Some(d), None) => m withDefaultValue d
case (None, Some(p)) => m filterKeys p
case (Some(d), Some(p)) => (m withDefaultValue d) filterKeys p
}
}
|
ummels/scala-prioritymap
|
tests/shared/src/test/scala/de/ummels/prioritymap/PropertySpec.scala
|
Scala
|
isc
| 1,246 |
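A concrete spec built on the generators above might look like the sketch below; the class name is hypothetical and the property is deliberately trivial, serving only to show how forAll is wired to genPriorityMap.
package de.ummels.prioritymap

// Hypothetical example, not part of the original file.
class ExamplePropertySpec extends PropertySpec {
  property("size and isEmpty agree for generated priority maps") {
    forAll(genPriorityMap) { m =>
      m.isEmpty shouldBe (m.size == 0)
    }
  }
}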
package com.krux.hyperion.contrib.activity.file
import java.io.File
import java.nio.file.{ AtomicMoveNotSupportedException, Files, Paths, StandardCopyOption }
case class FileRepartitioner(options: Options) {
def repartition(): Boolean = moveFiles(nameFiles(split(merge())))
private def merge(): File = options.inputs match {
case Seq(one) => one
case files =>
val destination: File = File.createTempFile("merge-", if (options.compressed) ".gz" else ".tmp", options.temporaryDirectory.get)
destination.deleteOnExit()
// If we are simply merging files then the merge step needs to add the header.
val headers = options.numberOfFiles match {
case Some(1) => options.header
case _ => None
}
FileMerger(destination, options.skipFirstLine, headers).merge(options.inputs: _*)
}
private def split(file: File): Seq[File] = options.numberOfFiles match {
case Some(1) => Seq(file)
case None =>
new FileSplitter(
header = options.header,
numberOfLinesPerFile = options.numberOfLinesPerFile.getOrElse(Long.MaxValue),
numberOfBytesPerFile = options.numberOfBytesPerFile.getOrElse(Long.MaxValue),
bufferSize = options.bufferSize,
compressed = options.compressed,
temporaryDirectory = options.temporaryDirectory.get
).split(file)
case Some(n) =>
new FileSplitter(
header = options.header,
numberOfLinesPerFile = Long.MaxValue,
numberOfBytesPerFile = file.length() / n,
bufferSize = options.bufferSize,
compressed = options.compressed,
temporaryDirectory = options.temporaryDirectory.get
).split(file)
}
private def nameFiles(files: Seq[File]): Map[File, String] = files match {
case Seq(f) =>
Map(f -> options.output)
case mergedFiles =>
val fmt = s"%0${options.suffixLength}d"
mergedFiles.zipWithIndex.flatMap { case (f, i) =>
options.output.split('.').toList match {
case h :: Nil => Option(f -> s"$h-${fmt.format(i)}")
case h :: t => Option(f -> s"$h-${fmt.format(i)}.${t.mkString(".")}")
case Nil => None
}
}.toMap
}
private def moveFiles(files: Map[File, String]): Boolean = options.outputDirectory.forall { dir =>
files.foreach { case (f, output) =>
val source = Paths.get(f.getAbsolutePath)
val dest = Paths.get(dir.getAbsolutePath, output)
if (options.outputDirectory.size == 1) {
try {
// First try to atomically move
Files.move(source, dest, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE)
} catch {
case e: AtomicMoveNotSupportedException =>
// Try to non-atomically move
Files.move(source, dest, StandardCopyOption.REPLACE_EXISTING)
}
} else if (options.link) {
Files.createSymbolicLink(dest, source)
} else {
Files.copy(source, dest, StandardCopyOption.REPLACE_EXISTING)
}
}
// Mark a successful job
if (options.markSuccessfulJobs) {
Paths.get(dir.getPath, "_SUCCESS").toFile.createNewFile()
}
true
} match {
case true if !options.link && options.outputDirectory.size > 1 => files.keys.forall(f => f.delete())
case x => x
}
}
|
hoangelos/hyperion
|
contrib/activity/file/src/main/scala/com/krux/hyperion/contrib/activity/file/FileRepartitioner.scala
|
Scala
|
apache-2.0
| 3,319 |
/*
* The MIT License (MIT)
* <p>
* Copyright (c) 2018
* <p>
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* <p>
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
* <p>
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package io.techcode.streamy.protobuf.component
import akka.NotUsed
import akka.stream.scaladsl.{Flow, Framing}
import akka.util.ByteString
import io.techcode.streamy.component.TestTransformer
import io.techcode.streamy.event.StreamEvent
import io.techcode.streamy.protobuf.Data
import io.techcode.streamy.protobuf.Data.{Pkt, Pkts}
import io.techcode.streamy.util.json._
import scala.language.postfixOps
/**
* Protobuf transformer spec.
*/
class ProtobufTransformerSpec extends TestTransformer {
"Protobuf transformer" should {
"parser data correctly" in {
except(
ProtobufTransformerSpec.Parser.Transformer.Simple,
ProtobufTransformerSpec.Parser.Input.Simple,
ProtobufTransformerSpec.Parser.Output.Simple
)
}
"print data correctly" in {
except(
ProtobufTransformerSpec.Printer.Transformer.Simple,
ProtobufTransformerSpec.Printer.Input.Simple,
ProtobufTransformerSpec.Printer.Output.Simple
)
}
}
}
object ProtobufTransformerSpec {
object Parser {
object Input {
val Simple: ByteString = ByteString(Pkts.newBuilder().addPkt(Pkt.newBuilder().putAttrs("test", "test")).build().toByteArray)
}
object Transformer {
val Simple: Flow[ByteString, StreamEvent, NotUsed] =
Framing.simpleFramingProtocolEncoder(Int.MaxValue - 4)
.via(ProtobufTransformer.parser(ProtobufTransformer.Parser.Config(
proto = Data.Pkts.getDefaultInstance,
decoder = (pkts: Pkts) => StreamEvent(Json.obj("test" -> pkts.getPkt(0).getAttrsMap.get("test")))
)))
}
object Output {
val Simple: StreamEvent = StreamEvent(Json.obj("test" -> "test"))
}
}
object Printer {
object Input {
val Simple: StreamEvent = StreamEvent(Json.obj("test" -> "test"))
}
object Transformer {
val Simple: Flow[StreamEvent, ByteString, NotUsed] =
ProtobufTransformer.printer(ProtobufTransformer.Printer.Config(
proto = Data.Pkts.getDefaultInstance,
encoder = (evt: StreamEvent) => Pkts.newBuilder().addPkt(Pkt.newBuilder().putAttrs("test", evt.payload.evaluate(Root / "test").get[String])).build()
)).via(Framing.simpleFramingProtocolDecoder(Int.MaxValue - 4))
}
object Output {
val Simple: ByteString = ByteString(Pkts.newBuilder().addPkt(Pkt.newBuilder().putAttrs("test", "test")).build().toByteArray)
}
}
}
|
amannocci/streamy
|
plugin-protobuf/src/test/scala/io/techcode/streamy/protobuf/component/ProtobufTransformerSpec.scala
|
Scala
|
mit
| 3,602 |
package im.actor.acl
import java.nio.ByteBuffer
import java.security.MessageDigest
import akka.actor.ActorSystem
import im.actor.concurrent.{ ThreadLocalMD5, ThreadLocalSHA256 }
import im.actor.util.ThreadLocalSecureRandom
trait ACLBase {
private def getMDInstance = ThreadLocalMD5.current()
private def getSHA256Instance = ThreadLocalSHA256.current()
def secretKey()(implicit s: ActorSystem): String =
s.settings.config.getString("secret")
// deprecated: use hash with sha256 digest instead
def hashObsolete(s: String): Long = hash(s, getMDInstance)
def hash(s: String, md: MessageDigest = getSHA256Instance): Long =
ByteBuffer.wrap(md.digest(s.getBytes)).getLong
def randomLong(): Long = randomLong(ThreadLocalSecureRandom.current())
def randomLong(rng: ThreadLocalSecureRandom): Long = rng.nextLong()
def randomString(): String = randomString(ThreadLocalSecureRandom.current())
def randomString(rng: ThreadLocalSecureRandom): String = rng.nextLong().toString
def nextAccessSalt(rng: ThreadLocalSecureRandom): String = randomString(rng)
def nextAccessSalt(): String = {
nextAccessSalt(ThreadLocalSecureRandom.current())
}
}
|
EaglesoftZJ/actor-platform
|
actor-server/actor-runtime/src/main/scala/im/actor/acl/ACLBase.scala
|
Scala
|
agpl-3.0
| 1,179 |
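A minimal sketch of the helpers above in isolation; the object name and the hashed string are illustrative, and secretKey is skipped because it needs an ActorSystem.
package im.actor.acl

import im.actor.util.ThreadLocalSecureRandom

// Hypothetical example, not part of the original file.
object ACLBaseExample extends ACLBase {
  def main(args: Array[String]): Unit = {
    val rng = ThreadLocalSecureRandom.current()
    val salt = nextAccessSalt(rng)       // per-entity random access salt
    val digest = hash(s"user:42:$salt")  // 64-bit value taken from a SHA-256 digest
    println(s"salt=$salt digest=$digest")
  }
}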
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2014-2015 Alexey Aksenov [email protected]
*
* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company Β«MEZHGALAKTICHESKIJ TORGOVYJ ALIANSΒ»,
* Limited Liability Company Β«MEZHGALAKTICHESKIJ TORGOVYJ ALIANSΒ» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU Affero General Public License for more details.
 * You should have received a copy of the GNU Affero General Public License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
 * Section 5 of the GNU Affero General Public License.
*
 * In accordance with Section 7(b) of the GNU Affero General Public License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: [email protected]
*/
package org.digimead.tabuddy.desktop.core.keyring
import java.io.{ ByteArrayInputStream, ByteArrayOutputStream }
import java.util.{ ArrayList, Date }
import org.bouncycastle.bcpg.{ HashAlgorithmTags, PublicKeyAlgorithmTags, SymmetricKeyAlgorithmTags }
import org.bouncycastle.bcpg.sig.{ Features, KeyFlags }
import org.bouncycastle.openpgp.{ PGPKeyRingGenerator, PGPPrivateKey, PGPPublicKeyRingCollection, PGPSecretKey, PGPSecretKeyRingCollection, PGPSignature, PGPSignatureSubpacketGenerator }
import org.bouncycastle.openpgp.operator.bc.{ BcKeyFingerprintCalculator, BcPBESecretKeyDecryptorBuilder, BcPBESecretKeyEncryptorBuilder, BcPGPContentSignerBuilder, BcPGPDigestCalculatorProvider, BcPGPKeyPair }
import org.digimead.digi.lib.log.api.XLoggable
import org.digimead.tabuddy.desktop.core.keyring.generator.Generator
import org.digimead.tabuddy.desktop.core.keyring.generator.api.XGenerator
import org.eclipse.core.runtime.NullProgressMonitor
/**
* General part for keyring implementation.
*/
trait KeyRingGeneral {
  this: KeyRing.Implementation with XLoggable ⇒
/** PublicKeyRingCollection with the latest content. */
protected var actualPublicKeyRingCollection = new PGPPublicKeyRingCollection(new ArrayList())
/** SecretKeyRingCollection with the latest content. */
protected var actualSecretKeyRingCollection = new PGPSecretKeyRingCollection(new ArrayList())
/** PublicKeyRingCollection access lock. */
protected val publicKeyRingCollectionLock = new Object
/** SecretKeyRingCollection access lock. */
protected val secretKeyRingCollectionLock = new Object
/** Create new PGP keyring generator. */
def createPGPKeyRingGenerator(userID: String): PGPKeyRingGenerator =
createPGPKeyRingGenerator(Generator.default, userID, KeyRing.defaultPassPhrase, 0xc0, new Date)
/** Create new PGP keyring generator. */
def createPGPKeyRingGenerator(userID: String, generator: XGenerator.AsymmetricCipherKeyPairGenerator): PGPKeyRingGenerator =
createPGPKeyRingGenerator(generator, userID, KeyRing.defaultPassPhrase, 0xc0, new Date)
/** Create new PGP keyring generator. */
  def createPGPKeyRingGenerator(userID: String, passPhrase: String): PGPKeyRingGenerator =
    createPGPKeyRingGenerator(Generator.default, userID, passPhrase, 0xc0, new Date)
/** Create new PGP keyring generator. */
  def createPGPKeyRingGenerator(userID: String, passPhrase: String, generator: XGenerator.AsymmetricCipherKeyPairGenerator): PGPKeyRingGenerator =
    createPGPKeyRingGenerator(generator, userID, passPhrase, 0xc0, new Date)
/** Create new PGP keyring generator. */
  def createPGPKeyRingGenerator(userID: String, passPhrase: String, now: Date): PGPKeyRingGenerator =
    createPGPKeyRingGenerator(Generator.default, userID, passPhrase, 0xc0, now)
/** Create new PGP keyring generator. */
  def createPGPKeyRingGenerator(userID: String, passPhrase: String, now: Date, generator: XGenerator.AsymmetricCipherKeyPairGenerator): PGPKeyRingGenerator =
    createPGPKeyRingGenerator(generator, userID, passPhrase, 0xc0, now)
/** Create new PGP keyring generator. */
// Note: s2kcount is a number between 0 and 0xff that controls the
// number of times to iterate the password hash before use. More
// iterations are useful against offline attacks, as it takes more
// time to check each password. The actual number of iterations is
// rather complex, and also depends on the hash function in use.
// Refer to Section 3.7.1.3 in rfc4880.txt. Bigger numbers give
// you more iterations. As a rough rule of thumb, when using
// SHA256 as the hashing function, 0x10 gives you about 64
// iterations, 0x20 about 128, 0x30 about 256 and so on till 0xf0,
// or about 1 million iterations. The maximum you can go to is
// 0xff, or about 2 million iterations. We may use 0xc0 as a
// default -- about 130,000 iterations.
def createPGPKeyRingGenerator(generator: XGenerator.AsymmetricCipherKeyPairGenerator, userID: String,
    passPhrase: String = KeyRing.defaultPassPhrase, s2kcount: Int = 0xc0, now: Date = new Date): PGPKeyRingGenerator = {
// First create the master (signing) key with the generator.
val masterKeyForSign = new BcPGPKeyPair(generator.signAlgorithm, generator.ackpg.generateKeyPair(), now)
// Add a self-signature on the id.
val signSSGen = new PGPSignatureSubpacketGenerator
// Add signed metadata on the signature.
// 1. Declare its purpose.
signSSGen.setKeyFlags(true, KeyFlags.CERTIFY_OTHER | KeyFlags.SIGN_DATA)
// 2. Set preferences for secondary crypto algorithms to use
// when sending messages to this key.
signSSGen.setPreferredSymmetricAlgorithms(false, Array[Int](
SymmetricKeyAlgorithmTags.AES_256,
SymmetricKeyAlgorithmTags.AES_192,
SymmetricKeyAlgorithmTags.AES_128))
signSSGen.setPreferredHashAlgorithms(false, Array[Int](
HashAlgorithmTags.SHA256,
HashAlgorithmTags.SHA1,
HashAlgorithmTags.SHA384,
HashAlgorithmTags.SHA512,
HashAlgorithmTags.SHA224))
// 3. Request senders add additional checksums to the
// message (useful when verifying unsigned messages.)
signSSGen.setFeature(false, Features.FEATURE_MODIFICATION_DETECTION)
// Objects used to encrypt the secret key.
val sha1Calc = new BcPGPDigestCalculatorProvider().get(HashAlgorithmTags.SHA1)
val sha256Calc = new BcPGPDigestCalculatorProvider().get(HashAlgorithmTags.SHA256)
val keyEncryptor = new BcPBESecretKeyEncryptorBuilder(SymmetricKeyAlgorithmTags.AES_256, sha256Calc, s2kcount).
      setSecureRandom(KeyRing.random).build(passPhrase.toCharArray())
val keySignerBuilder = new BcPGPContentSignerBuilder(masterKeyForSign.getPublicKey().getAlgorithm(), HashAlgorithmTags.SHA1)
// Finally, create the keyring itself. The constructor
// takes parameters that allow it to generate the self
// signature.
new PGPKeyRingGenerator(PGPSignature.POSITIVE_CERTIFICATION, masterKeyForSign, userID,
sha1Calc, signSSGen.generate, null, keySignerBuilder, keyEncryptor)
}
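  // Editor's illustration (not part of the original API): the one-octet s2kcount above decodes,
  // per RFC 4880 section 3.7.1.3, to (16 + (c & 15)) << ((c >> 4) + 6) octets of salt+password
  // to hash. Dividing by the SHA-256 digest size (32 bytes) is one way to read the rough figures
  // quoted in the comment, e.g. 0xc0 -> 4,194,304 octets, about 131,072 iterations.
  private def approxS2KIterationsSha256(s2kcount: Int): Long =
    ((16L + (s2kcount & 15)) << ((s2kcount >> 4) + 6)) / 32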
/** Create new PGP encryption subkey. */
def createPGPEncriptionSubKey(generator: XGenerator.AsymmetricCipherKeyPairGenerator = Generator.default,
now: Date = new Date): (BcPGPKeyPair, PGPSignatureSubpacketGenerator) = {
// Then an encryption subkey.
val subKeyForEnc = new BcPGPKeyPair(generator.encAlgorithm, generator.ackpg.generateKeyPair(), now)
// Create a signature on the encryption subkey.
val encSSGen = new PGPSignatureSubpacketGenerator()
// Add metadata to declare its purpose.
encSSGen.setKeyFlags(false, KeyFlags.ENCRYPT_COMMS | KeyFlags.ENCRYPT_STORAGE)
(subKeyForEnc, encSSGen)
}
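  /**
   * Editor's usage sketch (illustrative only, not part of the original API): how the pieces
   * above could be combined into a complete secret keyring. The user id is a made-up example.
   */
  private def exampleBuildSecretKeyRing() = {
    val keyRingGen = createPGPKeyRingGenerator("Example User <user@example.com>")
    val (encKeyPair, encSSGen) = createPGPEncriptionSubKey()
    keyRingGen.addSubKey(encKeyPair, encSSGen.generate, null)
    keyRingGen.generateSecretKeyRing()
  }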
/** Get private key from secret key. */
def getPGPPrivateKey(secretKey: PGPSecretKey, passPhrase: String = KeyRing.defaultPassPhrase): PGPPrivateKey =
secretKey.extractPrivateKey(new BcPBESecretKeyDecryptorBuilder(new BcPGPDigestCalculatorProvider()).
build(passPhrase.toCharArray()))
/** Get public key algorithm name. */
def getPublicKeyAlgorithmName(key: Int) = key match {
case PublicKeyAlgorithmTags.DIFFIE_HELLMAN β "Diffie Hellman"
case PublicKeyAlgorithmTags.DSA β "DSA"
case PublicKeyAlgorithmTags.EC β "EC"
case PublicKeyAlgorithmTags.ECDSA β "ECDSA"
case PublicKeyAlgorithmTags.ELGAMAL_ENCRYPT β "Elgamal (Encrypt)"
case PublicKeyAlgorithmTags.ELGAMAL_GENERAL β "Elgamal (Encrypt & Sign)"
case PublicKeyAlgorithmTags.RSA_ENCRYPT β "RSA (Encrypt)"
case PublicKeyAlgorithmTags.RSA_GENERAL β "RSA (Encrypt & Sign)"
case PublicKeyAlgorithmTags.RSA_SIGN β "RSA (Sign)"
case _ β "Unknown"
}
/** Load collection of public keyrings. */
def loadPublicKeyRingCollection(): PGPPublicKeyRingCollection =
publicKeyRingCollectionLock.synchronized {
log.info(s"Open public keyring collection.")
if (!KeyRing.container.isOpen())
throw new IllegalStateException("Workspace is not available.")
val publicKeyRingResource = KeyRing.container.getFile(KeyRing.publicKeyRingName) // throws IllegalStateException: Workspace is closed.
val result = if (publicKeyRingResource.exists()) {
new PGPPublicKeyRingCollection(publicKeyRingResource.getContents(), new BcKeyFingerprintCalculator())
} else {
new PGPPublicKeyRingCollection(new ArrayList())
}
actualPublicKeyRingCollection = result
result
}
/** Load collection of secret keyrings. */
def loadSecretKeyRingCollection(): PGPSecretKeyRingCollection =
secretKeyRingCollectionLock.synchronized {
log.info(s"Open public keyring collection.")
if (!KeyRing.container.isOpen())
throw new IllegalStateException("Workspace is not available.")
val privateKeyRingResource = KeyRing.container.getFile(KeyRing.secretKeyRingName) // throws IllegalStateException: Workspace is closed.
val result = if (privateKeyRingResource.exists()) {
new PGPSecretKeyRingCollection(privateKeyRingResource.getContents(), new BcKeyFingerprintCalculator())
} else {
new PGPSecretKeyRingCollection(new ArrayList())
}
actualSecretKeyRingCollection = result
result
}
/** Get collection of public keyrings. */
def publicKeyRingCollection: PGPPublicKeyRingCollection =
publicKeyRingCollectionLock.synchronized { actualPublicKeyRingCollection }
/** Save collection of public keyrings. */
def savePublicKeyRingCollection(collection: PGPPublicKeyRingCollection) =
publicKeyRingCollectionLock.synchronized {
log.info(s"Save public keyring collection.")
if (!KeyRing.container.isOpen())
throw new IllegalStateException("Workspace is not available.")
val publicKeyRingResource = KeyRing.container.getFile(KeyRing.publicKeyRingName) // throws IllegalStateException: Workspace is closed.
val bOut = new ByteArrayOutputStream()
collection.encode(bOut)
bOut.close()
val bIn = new ByteArrayInputStream(bOut.toByteArray())
if (publicKeyRingResource.exists())
publicKeyRingResource.setContents(bIn, true, false, new NullProgressMonitor)
else
publicKeyRingResource.create(bIn, true, new NullProgressMonitor)
actualPublicKeyRingCollection = collection
}
/** Save collection of secret keyrings. */
def saveSecretKeyRingCollection(collection: PGPSecretKeyRingCollection) =
secretKeyRingCollectionLock.synchronized {
log.info(s"Save secret keyring collection.")
if (!KeyRing.container.isOpen())
throw new IllegalStateException("Workspace is not available.")
val secretKeyRingResource = KeyRing.container.getFile(KeyRing.secretKeyRingName) // throws IllegalStateException: Workspace is closed.
val bOut = new ByteArrayOutputStream()
collection.encode(bOut)
bOut.close()
val bIn = new ByteArrayInputStream(bOut.toByteArray())
if (secretKeyRingResource.exists())
secretKeyRingResource.setContents(bIn, true, false, new NullProgressMonitor)
else
secretKeyRingResource.create(bIn, true, new NullProgressMonitor)
actualSecretKeyRingCollection = collection
}
/** Get collection of secret keyrings. */
def secretKeyRingCollection: PGPSecretKeyRingCollection =
secretKeyRingCollectionLock.synchronized { actualSecretKeyRingCollection }
}
|
digimead/digi-TABuddy-desktop
|
part-core-keyring/src/main/scala/org/digimead/tabuddy/desktop/core/keyring/KeyRingGeneral.scala
|
Scala
|
agpl-3.0
| 13,602 |
package mesosphere.marathon
package core.storage.store.impl.zk
import java.util.concurrent.TimeUnit
import akka.Done
import akka.actor.{ ActorSystem, Scheduler }
import akka.stream.{ ActorMaterializer, Materializer }
import scala.concurrent.ExecutionContext.Implicits.global
import mesosphere.marathon.core.base.LifecycleState
import mesosphere.marathon.storage.{ CuratorZk, StorageConf }
import mesosphere.marathon.storage.repository.StoredGroup
import mesosphere.marathon.storage.store.ZkStoreSerialization
import mesosphere.marathon.upgrade.DependencyGraphBenchmark
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra.Blackhole
import scala.async.Async._
import scala.concurrent.{ Await, Future, Promise }
import scala.concurrent.duration._
object ZkPresistenceStoreBenchmark {
implicit lazy val system: ActorSystem = ActorSystem()
implicit lazy val scheduler: Scheduler = system.scheduler
implicit lazy val mat: Materializer = ActorMaterializer()
object Conf extends StorageConf with NetworkConf {
override def availableFeatures: Set[String] = Set.empty
}
Conf.verify()
val lifecycleState = LifecycleState.WatchingJVM
val curator = CuratorZk(Conf, lifecycleState)
val zkStore = curator.leafStore
val rootGroup = DependencyGraphBenchmark.rootGroup
val storedGroup = StoredGroup.apply(rootGroup)
}
@State(Scope.Benchmark)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@BenchmarkMode(Array(Mode.Throughput, Mode.AverageTime))
@Fork(1)
class ZkPresistenceStoreBenchmark {
import ZkPresistenceStoreBenchmark._
import ZkStoreSerialization._
@Benchmark
def storeAndRemoveGroup(hole: Blackhole): Unit = {
val done = Promise[Done]
val pipeline: Future[Done] = async {
await(zkStore.store(storedGroup.id, storedGroup))
val delete = Future.sequence(rootGroup.groupsById.keys.map { id => zkStore.deleteAll(id)(appDefResolver) })
await(delete)
Done
}
done.completeWith(pipeline)
// Poll until we are done
while (!done.isCompleted) { Thread.sleep(100) }
}
@TearDown(Level.Trial)
def shutdown(): Unit = {
println("Shutting down...")
curator.client.close()
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
}
}
|
guenter/marathon
|
benchmark/src/main/scala/mesosphere/marathon/core/storage/store/impl/zk/ZkPresistenceStoreBenchmark.scala
|
Scala
|
apache-2.0
| 2,239 |
package com.sksamuel.elastic4s.searches.queries
import com.sksamuel.exts.OptionImplicits._
case class RangeQueryDefinition(field: String,
boost: Option[Double] = None,
timeZone: Option[String] = None,
includeUpper: Option[Boolean] = None,
includeLower: Option[Boolean] = None,
lte: Option[Any] = None,
gte: Option[Any] = None,
gt: Option[Any] = None,
lt: Option[Any] = None,
format: Option[String] = None,
queryName: Option[String] = None)
extends MultiTermQueryDefinition {
def boost(boost: Double): RangeQueryDefinition = copy(boost = boost.some)
def gt(f: Long): RangeQueryDefinition = copy(gt = f.some)
def lt(to: Long): RangeQueryDefinition = copy(lt = to.some)
def gt(f: Double): RangeQueryDefinition = copy(gt = f.some)
def lt(to: Double): RangeQueryDefinition = copy(lt = to.some)
def gt(f: String): RangeQueryDefinition = copy(gt = f.some)
def lt(to: String): RangeQueryDefinition = copy(lt = to.some)
def gte(gte: String): RangeQueryDefinition = copy(gte = gte.some)
def lte(lte: String): RangeQueryDefinition = copy(lte = lte.some)
def gte(gte: Double): RangeQueryDefinition = copy(gte = gte.some)
def lte(lte: Double): RangeQueryDefinition = copy(lte = lte.some)
def gte(gte: Long): RangeQueryDefinition = copy(gte = gte.some)
def lte(lte: Long): RangeQueryDefinition = copy(lte = lte.some)
def format(fmt: String): RangeQueryDefinition = copy(format = fmt.some)
@deprecated("use lte or lt", "5.3.1")
def includeUpper(includeUpper: Boolean): RangeQueryDefinition = copy(includeUpper = includeUpper.some)
@deprecated("use gte or gt", "5.3.1")
def includeLower(includeLower: Boolean): RangeQueryDefinition = copy(includeLower = includeLower.some)
@deprecated("use gte or gt", "5.3.1")
def from(v: Any): RangeQueryDefinition = gt(v.toString)
@deprecated("use lte or lt", "5.3.1")
def to(v: Any): RangeQueryDefinition = lt(v.toString)
def timeZone(timeZone: String): RangeQueryDefinition = copy(timeZone = timeZone.some)
def queryName(queryName: String): RangeQueryDefinition = copy(queryName = queryName.some)
}
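// Editor's usage sketch (illustrative only; the field name, bounds and boost are invented):
// a half-open date range [2017-01-01, 2018-01-01) built with the fluent setters above.
object RangeQueryDefinitionExample {
  val lastYear: RangeQueryDefinition =
    RangeQueryDefinition("timestamp")
      .gte("2017-01-01")
      .lt("2018-01-01")
      .format("yyyy-MM-dd")
      .boost(2.0)
}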
case class RawQueryDefinition(json: String) extends QueryDefinition
|
aroundus-inc/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/queries/RangeQueryDefinition.scala
|
Scala
|
apache-2.0
| 2,475 |
package controllers
import play.api.Play.current
import play.api.libs.iteratee.Concurrent.Channel
import play.api.libs.json.Json
import play.api.libs.ws.WS
trait ETCDWatcher {
def start(uri : String, index : Int)
def addListener(channel : Channel[String]) : Boolean
def removeListener(channel : Channel[String]) : Boolean
}
object ETCDWatcher extends ETCDWatcher {
implicit val context = play.api.libs.concurrent.Execution.Implicits.defaultContext
var feedConnections : List[Channel[String]] = Nil
/**
* Add a listener to this server
* @param channel websocket channel
* @return whether adding was successful
*/
def addListener(channel : Channel[String]) : Boolean = {
    feedConnections ::= channel
true
}
/**
* Removes a listener to this server
* @param channel websocket channel
* @return whether removal was successful
*/
def removeListener(channel : Channel[String]) : Boolean = {
feedConnections = feedConnections.filterNot( _ == channel)
true
}
/**
* Starts infinite polling loop
* @param uri to listen at
*/
def start(uri : String, index : Int): Unit = {
// TODO: add index to uri
    val newindex = index + 1
WS.url(uri).get().map(response => {
// TODO: check if index change of index is necessary in response headers
val obj = response.json
(obj \\ "action").get.toString() match {
case "\\"set\\"" =>
val node = obj \\ "node"
val prevnode = obj \\ "prevNode"
val curval = (prevnode \\ "value").getOrElse(Json.parse("-1")).toString().stripPrefix("\\"").stripSuffix("\\"")
val nextval = (node \\ "value").get.toString().stripPrefix("\\"").stripSuffix("\\"")
if (!curval.equals(nextval)) {
// Send update
val polestring = (node \\ "key").get.toString().stripPrefix("\\"/poles/").stripSuffix("\\"")
val id = polestring.substring(polestring.indexOf("/") + 1)
feedConnections.foreach(_.push("{\\"id\\":\\"" + id + "\\",\\"status\\":" + nextval + "}"))
}
case "\\"expire\\"" =>
// Send expiration to each connected client
val node = obj \\ "node"
val polestring = (node \\ "key").get.toString().stripPrefix("\\"/poles/").stripSuffix("\\"")
val id = polestring.substring(polestring.indexOf("/") + 1)
feedConnections.foreach(_.push("{\\"id\\":\\"" + id + "\\",\\"status\\":0}"))
case _ =>
println("Unknown action")
}
// Start listening again
start(uri, newindex)
})
}
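  /**
   * Editor's usage sketch (illustrative only; the etcd URL below is an assumption):
   * create a broadcast channel, register it as a listener and begin polling.
   */
  private def exampleWireUp(): Unit = {
    val (_, channel) = play.api.libs.iteratee.Concurrent.broadcast[String]
    addListener(channel)
    start("http://127.0.0.1:2379/v2/keys/poles?wait=true&recursive=true", 0)
  }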
}
|
KlaasYK/wacc2015
|
front-end/app/controllers/ETCDWatcher.scala
|
Scala
|
mit
| 2,566 |
package quizleague.web.model
import quizleague.web.util.rx.RefObservable
import scalajs.js
class CompetitionStatistics(
val id: String,
val competitionName: String,
val results: js.Array[ResultEntry] = js.Array()
) extends Model
class ResultEntry(
var seasonText: String,
val season: RefObservable[Season],
var competition:RefObservable[Competition],
var teamText: String,
val team: RefObservable[Team],
val position: Int = 1
) extends js.Object
|
gumdrop/quizleague-maintain
|
js/src/main/scala/quizleague/web/model/CompetitionStatistics.scala
|
Scala
|
mit
| 725 |
package it.agilelab.bigdata.spark.search.serialization
import com.esotericsoftware.kryo.Kryo
import it.agilelab.bigdata.spark.search.impl._
import org.apache.spark.serializer.KryoRegistrator
class SparkSearchKryoRegistrator extends KryoRegistrator {
override def registerClasses(kryo: Kryo): Unit = {
kryo.register(classOf[PartitionsIndexLuceneRDD[_]])
kryo.register(classOf[LuceneIndexedPartition[_]])
kryo.register(classOf[LuceneIndex])
kryo.register(classOf[GlobalIDFLuceneIndex])
kryo.register(classOf[LuceneConfig])
}
}
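// Editor's usage sketch (illustrative only): a KryoRegistrator is activated through the Spark
// configuration rather than called directly; these are the standard configuration keys.
object SparkSearchKryoRegistratorExample {
  import org.apache.spark.SparkConf
  def exampleConf(): SparkConf =
    new SparkConf()
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.kryo.registrator", classOf[SparkSearchKryoRegistrator].getName)
}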
|
agile-lab-dev/sparksearchengine
|
src/main/scala/it/agilelab/bigdata/spark/search/serialization/SparkSearchKryoRegistrator.scala
|
Scala
|
apache-2.0
| 540 |
package io.getquill.codegen.model
import io.getquill.codegen.gen.HasBasicMeta
import io.getquill.codegen.model.Stereotyper.Namespacer
object Stereotyper {
type Namespacer[TableMeta] = TableMeta => String
type Expresser[TableMeta, ColumnMeta] = (RawSchema[TableMeta, ColumnMeta]) => TableStereotype[TableMeta, ColumnMeta]
type Fuser[TableMeta, ColumnMeta] = (Seq[TableStereotype[TableMeta, ColumnMeta]]) => TableStereotype[TableMeta, ColumnMeta]
}
trait Stereotyper extends HasBasicMeta {
def namespacer: Namespacer[JdbcTableMeta]
def nameParser: NameParser
def stereotype(schemas: Seq[RawSchema[TableMeta, ColumnMeta]]): Seq[TableStereotype[TableMeta, ColumnMeta]]
}
|
getquill/quill
|
quill-codegen/src/main/scala/io/getquill/codegen/model/Stereotyper.scala
|
Scala
|
apache-2.0
| 682 |
package uima.cc.qalab
import java.util.Locale
import org.apache.uima.cas.CAS
import org.apache.uima.collection.CasConsumer_ImplBase
import org.apache.uima.resource.ResourceProcessException
import uima.cc.qalab.en.{EnglishQALabExtractionSubtaskDocumentCasConsumer, EnglishQALabExtractionSubtaskSentenceCasConsumer}
import uima.cc.qalab.ja.{JapaneseQALabExtractionSubtaskDocumentCasConsumer, JapaneseQALabExtractionSubtaskSentenceCasConsumer}
import us.feliscat.util.uima.JCasID
/**
* <pre>
* Created on 2017/02/14.
* </pre>
*
* @author K.Sakamoto
*/
class QALabExtractionSubtaskCasConsumer extends CasConsumer_ImplBase {
override def initialize(): Unit = {
println(">> QA Lab Extraction Subtask Cas Consumer Initializing")
super.initialize()
}
@throws[ResourceProcessException]
override def processCas(aCAS: CAS): Unit = {
println(">> QA Lab Extraction Subtask Cas Consumer Processing")
JapaneseQALabExtractionSubtaskSentenceCasConsumer.process(aCAS)(JCasID(Locale.JAPANESE.getLanguage))
EnglishQALabExtractionSubtaskSentenceCasConsumer.process(aCAS)(JCasID(Locale.ENGLISH.getLanguage))
JapaneseQALabExtractionSubtaskDocumentCasConsumer.process(aCAS)(JCasID(Locale.JAPANESE.getLanguage))
EnglishQALabExtractionSubtaskDocumentCasConsumer.process(aCAS)(JCasID(Locale.ENGLISH.getLanguage))
}
}
|
ktr-skmt/FelisCatusZero-multilingual
|
src/main/scala/uima/cc/qalab/QALabExtractionSubtaskCasConsumer.scala
|
Scala
|
apache-2.0
| 1,346 |
package sbt
import java.io.File
import sbt.internal.util.AttributeKey
import sbt.internal.inc.classpath.ClassLoaderCache
import sbt.librarymanagement.ModuleID
object BasicKeys {
val historyPath = AttributeKey[Option[File]](
"history",
"The location where command line history is persisted.",
40)
val shellPrompt = AttributeKey[State => String](
"shell-prompt",
"The function that constructs the command prompt from the current build state.",
10000)
val watch = AttributeKey[Watched]("watch", "Continuous execution configuration.", 1000)
val serverPort =
AttributeKey[Int]("server-port", "The port number used by server command.", 10000)
private[sbt] val interactive = AttributeKey[Boolean](
"interactive",
"True if commands are currently being entered from an interactive environment.",
10)
private[sbt] val classLoaderCache = AttributeKey[ClassLoaderCache](
"class-loader-cache",
"Caches class loaders based on the classpath entries and last modified times.",
10)
private[sbt] val OnFailureStack = AttributeKey[List[Option[Exec]]](
"on-failure-stack",
"Stack that remembers on-failure handlers.",
10)
private[sbt] val explicitGlobalLogLevels = AttributeKey[Boolean](
"explicit-global-log-levels",
"True if the global logging levels were explicitly set by the user.",
10)
private[sbt] val templateResolverInfos = AttributeKey[Seq[TemplateResolverInfo]](
"templateResolverInfos",
"List of template resolver infos.",
1000)
}
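// Editor's usage sketch (illustrative only): attribute keys are read from the command State;
// here the shell prompt function is applied, falling back to a plain "> " when unset.
private[sbt] object BasicKeysUsageExample {
  def promptFor(state: State): String =
    state.get(BasicKeys.shellPrompt).map(render => render(state)).getOrElse("> ")
}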
case class TemplateResolverInfo(module: ModuleID, implementationClass: String)
|
Duhemm/sbt
|
main-command/src/main/scala/sbt/BasicKeys.scala
|
Scala
|
bsd-3-clause
| 1,611 |
import scala.tools.partest.BytecodeTest
import scala.tools.testkit.ASMConverters
import ASMConverters._
class D {
// This is compiled with `A_1.class` on the classpath. When inlining `flop` (which invokes
// `A_1.bar`), the inliner can check that the call to `A_1.bar` can be safely inlined into a
// different classfile (D). See also comment in B_1.scala.
def m(b: B) = b.flop
}
object Test extends BytecodeTest {
def show: Unit = {
val gIns = instructionsFromMethod(getMethod(loadClassNode("B"), "g"))
val hIns = instructionsFromMethod(getMethod(loadClassNode("C"), "h"))
for (i <- List(gIns, hIns)) {
assert(i exists {
// `flop` is not inlined
case Invoke(_, _, "flop", "()I", _) => true
case _ => false
}, i mkString "\\n")
}
val mIns = instructionsFromMethod(getMethod(loadClassNode("D"), "m"))
assert(mIns exists {
// `flop` is inlined, we get a call to `bar`
case Invoke(_, _, "bar", "()I", _) => true
case _ => false
}, mIns mkString "\\n")
}
}
|
martijnhoekstra/scala
|
test/files/run/bcodeInlinerMixed/Test_2.scala
|
Scala
|
apache-2.0
| 1,048 |
package ru.fediq.scrapingkit.tools
import akka.http.scaladsl.model.Uri
import ru.fediq.scrapingkit.backend.LinksHistory
import scala.concurrent.{ExecutionContext, Future}
class LinksHistoryFiller(
linksHistory: LinksHistory
) {
def add(uri: Uri)(implicit ec: ExecutionContext): Future[Any] = {
linksHistory.isKnown(uri).flatMap {
      case true => Future.successful(())
case false => linksHistory.addKnown(uri)
}
}
def fill(uris: Traversable[Uri])(implicit ec: ExecutionContext): Future[Any] = {
val futures = uris.map(add)
Future.sequence(futures)
}
}
|
fediq/scraping-kit
|
scraping-kit-platform/src/main/scala/ru/fediq/scrapingkit/tools/LinksHistoryFiller.scala
|
Scala
|
mit
| 588 |
package com.getjenny.starchat.utils
import akka.http.scaladsl.testkit.ScalatestRouteTest
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
class SystemConfigurationTest extends AnyWordSpec with Matchers with ScalatestRouteTest{
"System configuration" should {
"create hashmap containing configuration in a path" in {
val conf = SystemConfiguration.createMapFromPath("starchat.atom-values")
conf.foreach{case (k, v) => println(s"$k -> $v")}
conf.contains("http-atom.default-timeout") shouldBe true
}
}
}
|
GetJenny/starchat
|
src/test/scala/com/getjenny/starchat/utils/SystemConfigurationTest.scala
|
Scala
|
gpl-2.0
| 574 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.python
import java.net.InetAddress
import java.util.Locale
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils
/**
* A wrapper for both GatewayServer, and ClientServer to pin Python thread to JVM thread.
*/
private[spark] class Py4JServer(sparkConf: SparkConf) extends Logging {
private[spark] val secret: String = Utils.createSecret(sparkConf)
// Launch a Py4J gateway or client server for the process to connect to; this will let it see our
// Java system properties and such
private val localhost = InetAddress.getLoopbackAddress()
private[spark] val server = if (sys.env.getOrElse(
"PYSPARK_PIN_THREAD", "true").toLowerCase(Locale.ROOT) == "true") {
new py4j.ClientServer.ClientServerBuilder()
.authToken(secret)
.javaPort(0)
.javaAddress(localhost)
.build()
} else {
new py4j.GatewayServer.GatewayServerBuilder()
.authToken(secret)
.javaPort(0)
.javaAddress(localhost)
.callbackClient(py4j.GatewayServer.DEFAULT_PYTHON_PORT, localhost, secret)
.build()
}
def start(): Unit = server match {
case clientServer: py4j.ClientServer => clientServer.startServer()
case gatewayServer: py4j.GatewayServer => gatewayServer.start()
case other => throw new RuntimeException(s"Unexpected Py4J server ${other.getClass}")
}
def getListeningPort: Int = server match {
case clientServer: py4j.ClientServer => clientServer.getJavaServer.getListeningPort
case gatewayServer: py4j.GatewayServer => gatewayServer.getListeningPort
case other => throw new RuntimeException(s"Unexpected Py4J server ${other.getClass}")
}
def shutdown(): Unit = server match {
case clientServer: py4j.ClientServer => clientServer.shutdown()
case gatewayServer: py4j.GatewayServer => gatewayServer.shutdown()
case other => throw new RuntimeException(s"Unexpected Py4J server ${other.getClass}")
}
}
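/**
 * Editor's usage sketch (illustrative only, not part of Spark): the expected lifecycle is
 * start, read the ephemeral port that the Python side connects to, and shut down when finished.
 */
private[spark] object Py4JServerUsageExample {
  def exampleLifecycle(sparkConf: SparkConf): Int = {
    val server = new Py4JServer(sparkConf)
    server.start()
    val port = server.getListeningPort
    server.shutdown()
    port
  }
}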
|
wangmiao1981/spark
|
core/src/main/scala/org/apache/spark/api/python/Py4JServer.scala
|
Scala
|
apache-2.0
| 2,789 |
package hyperdrive.cj.http
import hyperdrive.cj.model._
import java.net.URI
import spray.json._
object CollectionJsonProtocol extends DefaultJsonProtocol {
implicit val uriFormat = new JsonFormat[URI] {
def write(o:URI) = JsString(o.toString())
    def read(value: JsValue) = new URI(value.convertTo[String]) // use the string content, not JsValue.toString, which keeps the surrounding quotes
}
implicit val dataValueFormat = new JsonFormat[DataValue] {
override def write(obj: DataValue): JsValue = obj match {
case BigDecimalDataValue(v) => JsNumber(v)
case StringDataValue(v) => JsString(v)
case BooleanDataValue(v) => JsBoolean(v)
}
override def read(json: JsValue): DataValue = json match {
case JsString(v) => StringDataValue(v)
case JsNumber(v) => BigDecimalDataValue(v)
case JsBoolean(v) => BooleanDataValue(v)
}
}
implicit val queryDataFormat = jsonFormat2(QueryData)
implicit val queryFormat = jsonFormat5(Query)
implicit val linkFormat = jsonFormat5(Link)
implicit val dataFormat = jsonFormat3(Data)
implicit val itemFormat = jsonFormat3(Item)
implicit val errorFormat = jsonFormat3(Error)
implicit val templateFormat = jsonFormat1(Template.apply)
implicit val collectionFormat = jsonFormat7(Collection)
implicit val collectionJsonFormat = jsonFormat1(CollectionJson.apply)
implicit val addItemRequestFormat = jsonFormat1(AddItemRequest)
implicit val updateItemRequestFormat = jsonFormat1(UpdateItemRequest)
}
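// Editor's usage sketch (illustrative only): round-trip a DataValue through the formats above.
object CollectionJsonProtocolExample {
  import CollectionJsonProtocol._
  val asJson: JsValue = (StringDataValue("hello"): DataValue).toJson
  val back: DataValue = asJson.convertTo[DataValue]
}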
|
ScalaConsultants/hyperdrive
|
src/main/scala/hyperdrive/cj/http/CollectionJsonProtocol.scala
|
Scala
|
unlicense
| 1,434 |
/*
* Copyright (c) 2012-13 Crown Copyright
* Animal Health and Veterinary Laboratories Agency
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sampler.cluster.abc.algorithm
import akka.actor.ActorRef
import sampler.cluster.abc.actor.Tagged
import sampler.cluster.abc.Model
import sampler.cluster.abc.config.ABCConfig
import sampler.cluster.abc.actor.ScoredParticles
import sampler.cluster.abc.actor.WeighedParticles
import sampler.cluster.abc.Scored
import sampler.cluster.abc.Weighted
import scala.collection.immutable.Queue
case class Generation[P](
model: Model[P],
dueWeighing: ScoredParticles[P],
weighted: WeighedParticles[P],
idsObserved: Queue[Long],
currentTolerance: Double,
currentIteration: Int,
prevWeightsTable: Map[P, Double]
){
def emptyWeighingBuffer = copy(dueWeighing = dueWeighing.empty)
}
object Generation {
def init[P](model: Model[P], abcParameters: ABCConfig): Generation[P] = {
val numParticles = abcParameters.job.numParticles
val uniformProb = 1.0 / numParticles
val weightsTable = (1 to numParticles)
.par
.map(i => model.prior.sample() -> uniformProb)
.seq
.toMap
Generation(
model,
ScoredParticles(Seq.empty[Tagged[Scored[P]]]),
WeighedParticles(Seq.empty[Tagged[Weighted[P]]]),
Queue.empty[Long],
Double.MaxValue,
0,
weightsTable
)
}
}
|
tsaratoon/Sampler
|
sampler-cluster/src/main/scala/sampler/cluster/abc/algorithm/Generation.scala
|
Scala
|
apache-2.0
| 1,877 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.apache.hadoop.fs.Path
import org.apache.spark.annotation.Since
import org.apache.spark.ml.{Estimator, Model, Pipeline, PipelineModel, PipelineStage, Transformer}
import org.apache.spark.ml.attribute.AttributeGroup
import org.apache.spark.ml.linalg.{Vector, VectorUDT}
import org.apache.spark.ml.param.{BooleanParam, Param, ParamMap, ParamValidators}
import org.apache.spark.ml.param.shared.{HasFeaturesCol, HasHandleInvalid, HasLabelCol}
import org.apache.spark.ml.util._
import org.apache.spark.sql.{DataFrame, Dataset}
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types._
/**
* Base trait for [[RFormula]] and [[RFormulaModel]].
*/
private[feature] trait RFormulaBase extends HasFeaturesCol with HasLabelCol with HasHandleInvalid {
/**
* R formula parameter. The formula is provided in string form.
* @group param
*/
@Since("1.5.0")
val formula: Param[String] = new Param(this, "formula", "R model formula")
/** @group getParam */
@Since("1.5.0")
def getFormula: String = $(formula)
/**
* Force to index label whether it is numeric or string type.
* Usually we index label only when it is string type.
* If the formula was used by classification algorithms,
* we can force to index label even it is numeric type by setting this param with true.
* Default: false.
* @group param
*/
@Since("2.1.0")
val forceIndexLabel: BooleanParam = new BooleanParam(this, "forceIndexLabel",
"Force to index label whether it is numeric or string")
/** @group getParam */
@Since("2.1.0")
def getForceIndexLabel: Boolean = $(forceIndexLabel)
/**
* Param for how to handle invalid data (unseen or NULL values) in features and label column
* of string type. Options are 'skip' (filter out rows with invalid data),
* 'error' (throw an error), or 'keep' (put invalid data in a special additional
* bucket, at index numLabels).
* Default: "error"
* @group param
*/
@Since("2.3.0")
override val handleInvalid: Param[String] = new Param[String](this, "handleInvalid",
"How to handle invalid data (unseen or NULL values) in features and label column of string " +
"type. Options are 'skip' (filter out rows with invalid data), error (throw an error), " +
"or 'keep' (put invalid data in a special additional bucket, at index numLabels).",
ParamValidators.inArray(StringIndexer.supportedHandleInvalids))
/**
* Param for how to order categories of a string FEATURE column used by `StringIndexer`.
* The last category after ordering is dropped when encoding strings.
* Supported options: 'frequencyDesc', 'frequencyAsc', 'alphabetDesc', 'alphabetAsc'.
* The default value is 'frequencyDesc'. When the ordering is set to 'alphabetDesc', `RFormula`
* drops the same category as R when encoding strings.
*
* The options are explained using an example `'b', 'a', 'b', 'a', 'c', 'b'`:
* {{{
* +-----------------+---------------------------------------+----------------------------------+
* | Option | Category mapped to 0 by StringIndexer | Category dropped by RFormula |
* +-----------------+---------------------------------------+----------------------------------+
* | 'frequencyDesc' | most frequent category ('b') | least frequent category ('c') |
* | 'frequencyAsc' | least frequent category ('c') | most frequent category ('b') |
* | 'alphabetDesc' | last alphabetical category ('c') | first alphabetical category ('a')|
* | 'alphabetAsc' | first alphabetical category ('a') | last alphabetical category ('c') |
* +-----------------+---------------------------------------+----------------------------------+
* }}}
* Note that this ordering option is NOT used for the label column. When the label column is
* indexed, it uses the default descending frequency ordering in `StringIndexer`.
*
* @group param
*/
@Since("2.3.0")
final val stringIndexerOrderType: Param[String] = new Param(this, "stringIndexerOrderType",
"How to order categories of a string FEATURE column used by StringIndexer. " +
"The last category after ordering is dropped when encoding strings. " +
s"Supported options: ${StringIndexer.supportedStringOrderType.mkString(", ")}. " +
"The default value is 'frequencyDesc'. When the ordering is set to 'alphabetDesc', " +
"RFormula drops the same category as R when encoding strings.",
ParamValidators.inArray(StringIndexer.supportedStringOrderType))
/** @group getParam */
@Since("2.3.0")
def getStringIndexerOrderType: String = $(stringIndexerOrderType)
setDefault(forceIndexLabel -> false, handleInvalid -> StringIndexer.ERROR_INVALID,
stringIndexerOrderType -> StringIndexer.frequencyDesc)
protected def hasLabelCol(schema: StructType): Boolean = {
schema.map(_.name).contains($(labelCol))
}
}
/**
* Implements the transforms required for fitting a dataset against an R model formula. Currently
* we support a limited subset of the R operators, including '~', '.', ':', '+', '-', '*' and '^'.
* Also see the R formula docs here:
* http://stat.ethz.ch/R-manual/R-patched/library/stats/html/formula.html
*
* The basic operators are:
* - `~` separate target and terms
* - `+` concat terms, "+ 0" means removing intercept
* - `-` remove a term, "- 1" means removing intercept
* - `:` interaction (multiplication for numeric values, or binarized categorical values)
* - `.` all columns except target
* - `*` factor crossing, includes the terms and interactions between them
* - `^` factor crossing to a specified degree
*
* Suppose `a` and `b` are double columns, we use the following simple examples
* to illustrate the effect of `RFormula`:
* - `y ~ a + b` means model `y ~ w0 + w1 * a + w2 * b` where `w0` is the intercept and `w1, w2`
* are coefficients.
* - `y ~ a + b + a:b - 1` means model `y ~ w1 * a + w2 * b + w3 * a * b` where `w1, w2, w3`
* are coefficients.
* - `y ~ a * b` means model `y ~ w0 + w1 * a + w2 * b + w3 * a * b` where `w0` is the
* intercept and `w1, w2, w3` are coefficients
* - `y ~ (a + b)^2` means model `y ~ w0 + w1 * a + w2 * b + w3 * a * b` where `w0` is the
* intercept and `w1, w2, w3` are coefficients
*
* RFormula produces a vector column of features and a double or string column of label.
* Like when formulas are used in R for linear regression, string input columns will be one-hot
* encoded, and numeric columns will be cast to doubles.
* If the label column is of type string, it will be first transformed to double with
* `StringIndexer`. If the label column does not exist in the DataFrame, the output label column
* will be created from the specified response variable in the formula.
*/
@Since("1.5.0")
class RFormula @Since("1.5.0") (@Since("1.5.0") override val uid: String)
extends Estimator[RFormulaModel] with RFormulaBase with DefaultParamsWritable {
@Since("1.5.0")
def this() = this(Identifiable.randomUID("rFormula"))
/**
* Sets the formula to use for this transformer. Must be called before use.
* @group setParam
* @param value an R formula in string form (e.g. "y ~ x + z")
*/
@Since("1.5.0")
def setFormula(value: String): this.type = set(formula, value)
/** @group setParam */
@Since("2.3.0")
def setHandleInvalid(value: String): this.type = set(handleInvalid, value)
/** @group setParam */
@Since("1.5.0")
def setFeaturesCol(value: String): this.type = set(featuresCol, value)
/** @group setParam */
@Since("1.5.0")
def setLabelCol(value: String): this.type = set(labelCol, value)
/** @group setParam */
@Since("2.1.0")
def setForceIndexLabel(value: Boolean): this.type = set(forceIndexLabel, value)
/** @group setParam */
@Since("2.3.0")
def setStringIndexerOrderType(value: String): this.type = set(stringIndexerOrderType, value)
/** Whether the formula specifies fitting an intercept. */
private[ml] def hasIntercept: Boolean = {
require(isDefined(formula), "Formula must be defined first.")
RFormulaParser.parse($(formula)).hasIntercept
}
@Since("2.0.0")
override def fit(dataset: Dataset[_]): RFormulaModel = {
transformSchema(dataset.schema, logging = true)
require(isDefined(formula), "Formula must be defined first.")
val parsedFormula = RFormulaParser.parse($(formula))
val resolvedFormula = parsedFormula.resolve(dataset.schema)
val encoderStages = ArrayBuffer[PipelineStage]()
val oneHotEncodeColumns = ArrayBuffer[(String, String)]()
val prefixesToRewrite = mutable.Map[String, String]()
val tempColumns = ArrayBuffer[String]()
def tmpColumn(category: String): String = {
val col = Identifiable.randomUID(category)
tempColumns += col
col
}
val terms = resolvedFormula.terms.flatten.distinct.sorted
lazy val firstRow = dataset.select(terms.map(col): _*).first()
// First we index each string column referenced by the input terms.
val indexed = terms.zipWithIndex.map { case (term, i) =>
dataset.schema(term).dataType match {
case _: StringType =>
val indexCol = tmpColumn("stridx")
encoderStages += new StringIndexer()
.setInputCol(term)
.setOutputCol(indexCol)
.setStringOrderType($(stringIndexerOrderType))
.setHandleInvalid($(handleInvalid))
prefixesToRewrite(indexCol + "_") = term + "_"
(term, indexCol)
case _: VectorUDT =>
val group = AttributeGroup.fromStructField(dataset.schema(term))
val size = if (group.size < 0) {
firstRow.getAs[Vector](i).size
} else {
group.size
}
encoderStages += new VectorSizeHint(uid)
.setHandleInvalid("optimistic")
.setInputCol(term)
.setSize(size)
(term, term)
case _ =>
(term, term)
}
}.toMap
// Then we handle one-hot encoding and interactions between terms.
var keepReferenceCategory = false
val encodedTerms = resolvedFormula.terms.map {
case Seq(term) if dataset.schema(term).dataType == StringType =>
val encodedCol = tmpColumn("onehot")
// Formula w/o intercept, one of the categories in the first category feature is
// being used as reference category, we will not drop any category for that feature.
if (!hasIntercept && !keepReferenceCategory) {
encoderStages += new OneHotEncoder(uid)
.setInputCols(Array(indexed(term)))
.setOutputCols(Array(encodedCol))
.setDropLast(false)
keepReferenceCategory = true
} else {
oneHotEncodeColumns += indexed(term) -> encodedCol
}
prefixesToRewrite(encodedCol + "_") = term + "_"
encodedCol
case Seq(term) =>
term
case terms =>
val interactionCol = tmpColumn("interaction")
encoderStages += new Interaction()
.setInputCols(terms.map(indexed).toArray)
.setOutputCol(interactionCol)
prefixesToRewrite(interactionCol + "_") = ""
interactionCol
}
if (oneHotEncodeColumns.nonEmpty) {
val (inputCols, outputCols) = oneHotEncodeColumns.toArray.unzip
encoderStages += new OneHotEncoder(uid)
.setInputCols(inputCols)
.setOutputCols(outputCols)
.setDropLast(true)
}
encoderStages += new VectorAssembler(uid)
.setInputCols(encodedTerms.toArray)
.setOutputCol($(featuresCol))
.setHandleInvalid($(handleInvalid))
encoderStages += new VectorAttributeRewriter($(featuresCol), prefixesToRewrite.toMap)
encoderStages += new ColumnPruner(tempColumns.toSet)
if ((dataset.schema.fieldNames.contains(resolvedFormula.label) &&
dataset.schema(resolvedFormula.label).dataType == StringType) || $(forceIndexLabel)) {
encoderStages += new StringIndexer()
.setInputCol(resolvedFormula.label)
.setOutputCol($(labelCol))
.setHandleInvalid($(handleInvalid))
}
val pipelineModel = new Pipeline(uid).setStages(encoderStages.toArray).fit(dataset)
copyValues(new RFormulaModel(uid, resolvedFormula, pipelineModel).setParent(this))
}
@Since("1.5.0")
// optimistic schema; does not contain any ML attributes
override def transformSchema(schema: StructType): StructType = {
require(!hasLabelCol(schema) || !$(forceIndexLabel),
"If label column already exists, forceIndexLabel can not be set with true.")
if (hasLabelCol(schema)) {
StructType(schema.fields :+ StructField($(featuresCol), new VectorUDT, true))
} else {
StructType(schema.fields :+ StructField($(featuresCol), new VectorUDT, true) :+
StructField($(labelCol), DoubleType, true))
}
}
@Since("1.5.0")
override def copy(extra: ParamMap): RFormula = defaultCopy(extra)
@Since("2.0.0")
override def toString: String = {
s"RFormula: uid=$uid" +
get(formula).map(f => s", formula = $f").getOrElse("")
}
}
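/**
 * Editor's usage sketch (illustrative only; column names and rows are invented): fitting
 * "clicked ~ country + hour" string-indexes and one-hot encodes `country`, assembles the
 * numeric columns into a features vector, and copies `clicked` into the label column.
 */
private[feature] object RFormulaUsageExample {
  def run(spark: org.apache.spark.sql.SparkSession): DataFrame = {
    val dataset = spark.createDataFrame(Seq(
      (7, "US", 18, 1.0),
      (8, "CA", 12, 0.0),
      (9, "NZ", 15, 0.0)
    )).toDF("id", "country", "hour", "clicked")
    val formula = new RFormula()
      .setFormula("clicked ~ country + hour")
      .setFeaturesCol("features")
      .setLabelCol("label")
    formula.fit(dataset).transform(dataset).select("features", "label")
  }
}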
@Since("2.0.0")
object RFormula extends DefaultParamsReadable[RFormula] {
@Since("2.0.0")
override def load(path: String): RFormula = super.load(path)
}
/**
* Model fitted by [[RFormula]]. Fitting is required to determine the factor levels of
* formula terms.
*
* @param resolvedFormula the fitted R formula.
* @param pipelineModel the fitted feature model, including factor to index mappings.
*/
@Since("1.5.0")
class RFormulaModel private[feature](
@Since("1.5.0") override val uid: String,
private[ml] val resolvedFormula: ResolvedRFormula,
private[ml] val pipelineModel: PipelineModel)
extends Model[RFormulaModel] with RFormulaBase with MLWritable {
@Since("2.0.0")
override def transform(dataset: Dataset[_]): DataFrame = {
checkCanTransform(dataset.schema)
transformLabel(pipelineModel.transform(dataset))
}
@Since("1.5.0")
override def transformSchema(schema: StructType): StructType = {
checkCanTransform(schema)
val withFeatures = pipelineModel.transformSchema(schema)
if (resolvedFormula.label.isEmpty || hasLabelCol(withFeatures)) {
withFeatures
} else if (schema.exists(_.name == resolvedFormula.label)) {
val nullable = schema(resolvedFormula.label).dataType match {
case _: NumericType | BooleanType => false
case _ => true
}
StructType(withFeatures.fields :+ StructField($(labelCol), DoubleType, nullable))
} else {
// Ignore the label field. This is a hack so that this transformer can also work on test
// datasets in a Pipeline.
withFeatures
}
}
@Since("1.5.0")
override def copy(extra: ParamMap): RFormulaModel = {
val copied = new RFormulaModel(uid, resolvedFormula, pipelineModel).setParent(parent)
copyValues(copied, extra)
}
@Since("2.0.0")
override def toString: String = {
s"RFormulaModel: uid=$uid, resolvedFormula=$resolvedFormula"
}
private def transformLabel(dataset: Dataset[_]): DataFrame = {
val labelName = resolvedFormula.label
if (labelName.isEmpty || hasLabelCol(dataset.schema)) {
dataset.toDF
} else if (dataset.schema.exists(_.name == labelName)) {
dataset.schema(labelName).dataType match {
case _: NumericType | BooleanType =>
dataset.withColumn($(labelCol), dataset(labelName).cast(DoubleType))
case other =>
throw new IllegalArgumentException("Unsupported type for label: " + other)
}
} else {
// Ignore the label field. This is a hack so that this transformer can also work on test
// datasets in a Pipeline.
dataset.toDF
}
}
private def checkCanTransform(schema: StructType): Unit = {
val columnNames = schema.map(_.name)
require(!columnNames.contains($(featuresCol)), "Features column already exists.")
require(
!columnNames.contains($(labelCol)) || schema($(labelCol)).dataType.isInstanceOf[NumericType],
s"Label column already exists and is not of type ${NumericType.simpleString}.")
}
@Since("2.0.0")
override def write: MLWriter = new RFormulaModel.RFormulaModelWriter(this)
}
@Since("2.0.0")
object RFormulaModel extends MLReadable[RFormulaModel] {
@Since("2.0.0")
override def read: MLReader[RFormulaModel] = new RFormulaModelReader
@Since("2.0.0")
override def load(path: String): RFormulaModel = super.load(path)
/** [[MLWriter]] instance for [[RFormulaModel]] */
private[RFormulaModel] class RFormulaModelWriter(instance: RFormulaModel) extends MLWriter {
override protected def saveImpl(path: String): Unit = {
// Save metadata and Params
DefaultParamsWriter.saveMetadata(instance, path, sc)
// Save model data: resolvedFormula
val dataPath = new Path(path, "data").toString
sparkSession.createDataFrame(Seq(instance.resolvedFormula))
.repartition(1).write.parquet(dataPath)
// Save pipeline model
val pmPath = new Path(path, "pipelineModel").toString
instance.pipelineModel.save(pmPath)
}
}
private class RFormulaModelReader extends MLReader[RFormulaModel] {
/** Checked against metadata when loading model */
private val className = classOf[RFormulaModel].getName
override def load(path: String): RFormulaModel = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
val dataPath = new Path(path, "data").toString
val data = sparkSession.read.parquet(dataPath).select("label", "terms", "hasIntercept").head()
val label = data.getString(0)
val terms = data.getSeq[scala.collection.Seq[String]](1).map(_.toSeq)
val hasIntercept = data.getBoolean(2)
val resolvedRFormula = ResolvedRFormula(label, terms, hasIntercept)
val pmPath = new Path(path, "pipelineModel").toString
val pipelineModel = PipelineModel.load(pmPath)
val model = new RFormulaModel(metadata.uid, resolvedRFormula, pipelineModel)
metadata.getAndSetParams(model)
model
}
}
}
/**
* Utility transformer for removing temporary columns from a DataFrame.
* TODO(ekl) make this a public transformer
*/
private class ColumnPruner(override val uid: String, val columnsToPrune: Set[String])
extends Transformer with MLWritable {
def this(columnsToPrune: Set[String]) =
this(Identifiable.randomUID("columnPruner"), columnsToPrune)
override def transform(dataset: Dataset[_]): DataFrame = {
val columnsToKeep = dataset.columns.filter(!columnsToPrune.contains(_))
dataset.select(columnsToKeep.map(dataset.col): _*)
}
override def transformSchema(schema: StructType): StructType = {
StructType(schema.fields.filter(col => !columnsToPrune.contains(col.name)))
}
override def copy(extra: ParamMap): ColumnPruner = defaultCopy(extra)
override def write: MLWriter = new ColumnPruner.ColumnPrunerWriter(this)
}
private object ColumnPruner extends MLReadable[ColumnPruner] {
override def read: MLReader[ColumnPruner] = new ColumnPrunerReader
override def load(path: String): ColumnPruner = super.load(path)
/** [[MLWriter]] instance for [[ColumnPruner]] */
private[ColumnPruner] class ColumnPrunerWriter(instance: ColumnPruner) extends MLWriter {
private case class Data(columnsToPrune: Seq[String])
override protected def saveImpl(path: String): Unit = {
// Save metadata and Params
DefaultParamsWriter.saveMetadata(instance, path, sc)
// Save model data: columnsToPrune
val data = Data(instance.columnsToPrune.toSeq)
val dataPath = new Path(path, "data").toString
sparkSession.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath)
}
}
private class ColumnPrunerReader extends MLReader[ColumnPruner] {
/** Checked against metadata when loading model */
private val className = classOf[ColumnPruner].getName
override def load(path: String): ColumnPruner = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
val dataPath = new Path(path, "data").toString
val data = sparkSession.read.parquet(dataPath).select("columnsToPrune").head()
val columnsToPrune = data.getAs[Seq[String]](0).toSet
val pruner = new ColumnPruner(metadata.uid, columnsToPrune)
metadata.getAndSetParams(pruner)
pruner
}
}
}
/**
* Utility transformer that rewrites Vector attribute names via prefix replacement. For example,
* it can rewrite attribute names starting with 'foo_' to start with 'bar_' instead.
*
* @param vectorCol name of the vector column to rewrite.
* @param prefixesToRewrite the map of string prefixes to their replacement values. Each attribute
* name defined in vectorCol will be checked against the keys of this
* map. When a key prefixes a name, the matching prefix will be replaced
* by the value in the map.
*/
private class VectorAttributeRewriter(
override val uid: String,
val vectorCol: String,
val prefixesToRewrite: Map[String, String])
extends Transformer with MLWritable {
def this(vectorCol: String, prefixesToRewrite: Map[String, String]) =
this(Identifiable.randomUID("vectorAttrRewriter"), vectorCol, prefixesToRewrite)
override def transform(dataset: Dataset[_]): DataFrame = {
val metadata = {
val group = AttributeGroup.fromStructField(dataset.schema(vectorCol))
val attrs = group.attributes.get.map { attr =>
if (attr.name.isDefined) {
val name = prefixesToRewrite.foldLeft(attr.name.get) { case (curName, (from, to)) =>
curName.replace(from, to)
}
attr.withName(name)
} else {
attr
}
}
new AttributeGroup(vectorCol, attrs).toMetadata()
}
val otherCols = dataset.columns.filter(_ != vectorCol).map(dataset.col)
val rewrittenCol = dataset.col(vectorCol).as(vectorCol, metadata)
dataset.select(otherCols :+ rewrittenCol : _*)
}
override def transformSchema(schema: StructType): StructType = {
StructType(
schema.fields.filter(_.name != vectorCol) ++
schema.fields.filter(_.name == vectorCol))
}
override def copy(extra: ParamMap): VectorAttributeRewriter = defaultCopy(extra)
override def write: MLWriter = new VectorAttributeRewriter.VectorAttributeRewriterWriter(this)
}
private object VectorAttributeRewriter extends MLReadable[VectorAttributeRewriter] {
override def read: MLReader[VectorAttributeRewriter] = new VectorAttributeRewriterReader
override def load(path: String): VectorAttributeRewriter = super.load(path)
/** [[MLWriter]] instance for [[VectorAttributeRewriter]] */
private[VectorAttributeRewriter]
class VectorAttributeRewriterWriter(instance: VectorAttributeRewriter) extends MLWriter {
private case class Data(vectorCol: String, prefixesToRewrite: Map[String, String])
override protected def saveImpl(path: String): Unit = {
// Save metadata and Params
DefaultParamsWriter.saveMetadata(instance, path, sc)
// Save model data: vectorCol, prefixesToRewrite
val data = Data(instance.vectorCol, instance.prefixesToRewrite)
val dataPath = new Path(path, "data").toString
sparkSession.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath)
}
}
private class VectorAttributeRewriterReader extends MLReader[VectorAttributeRewriter] {
/** Checked against metadata when loading model */
private val className = classOf[VectorAttributeRewriter].getName
override def load(path: String): VectorAttributeRewriter = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
val dataPath = new Path(path, "data").toString
val data = sparkSession.read.parquet(dataPath).select("vectorCol", "prefixesToRewrite").head()
val vectorCol = data.getString(0)
val prefixesToRewrite = data.getAs[Map[String, String]](1)
val rewriter = new VectorAttributeRewriter(metadata.uid, vectorCol, prefixesToRewrite)
metadata.getAndSetParams(rewriter)
rewriter
}
}
}
|
ueshin/apache-spark
|
mllib/src/main/scala/org/apache/spark/ml/feature/RFormula.scala
|
Scala
|
apache-2.0
| 25,072 |
package chapter15
/*
 * 15.5 Sealed classes
 *
 * How can we be confident that a pattern match has covered every possible case?
 *
 */
sealed abstract class Expr2
case class Var2(name: String) extends Expr2
case class Number2(num: Double) extends Expr2
case class UnOp2(operator: String, arg: Expr2) extends Expr2
case class BinOp2(operator: String, left: Expr2, right: Expr2) extends Expr2
object c15_i05 extends App {
//
//
//
  /*
   * match may not be exhaustive
   * This warning points at a potential source of runtime errors (a MatchError).
   * Suppose, however, that the surrounding code flow guarantees only these two cases can occur.
   * To silence the compiler, put the @unchecked annotation on the match selector;
   * a default case would be logically unreachable anyway.
   */
def describe(e: Expr2): String = (e: @unchecked) match {
case Number2(_) => "a number"
case Var2(_) => "a variable"
}
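  /*
   * Added for illustration: handling every case of the sealed hierarchy removes the
   * warning without @unchecked, at the cost of spelling out cases that cannot occur here.
   */
  def describeAll(e: Expr2): String = e match {
    case Number2(_) => "a number"
    case Var2(_) => "a variable"
    case UnOp2(_, _) => "a unary operation"
    case BinOp2(_, _, _) => "a binary operation"
  }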
}
|
seraekim/srkim-lang-scala
|
src/main/java/chapter15/c15_i05.scala
|
Scala
|
bsd-3-clause
| 992 |
package org.jetbrains.plugins.scala.testingSupport.specs2
import org.jetbrains.plugins.scala.lang.structureView.element.Test._
/**
* @author Roman.Shein
* @since 20.04.2015.
*/
abstract class Specs2FileStructureViewTest extends Specs2TestCase {
private def prepareAndRunTestInner(status: Int, tests: String*): Unit = {
runFileStructureViewTest("SpecsFileStrctureViewTest", status, tests:_*)
}
addSourceFile("SpecsFileStrctureViewTest.scala",
"""|import org.specs2.mutable.Specification
|
|class SpecsFileStrctureViewTest extends Specification {
|
| "parent" should {
| "child1" ! {
| success
| }
| "child2" >> {
| success
| }
|
| "child3" in {
| success
| }
|
| "pending" in {
| success
| }.pendingUntilFixed
|
| "pending2" in {
| success
| }.pendingUntilFixed("message")
| }
|
| "parent2" can {
| "child" in {
| success
| }
| }
|}
""".stripMargin
)
def testShouldView(): Unit = prepareAndRunTestInner(NormalStatusId, "\\"parent\\"")
def testExclamationView(): Unit = prepareAndRunTestInner(NormalStatusId, "\\"child1\\"")
def testGreaterView(): Unit = prepareAndRunTestInner(NormalStatusId, "\\"child2\\"")
def testInView(): Unit = prepareAndRunTestInner(NormalStatusId, "\\"child3\\"")
def testCanView(): Unit = prepareAndRunTestInner(NormalStatusId, "\\"parent2\\"")
def testPending(): Unit = prepareAndRunTestInner(PendingStatusId, "\\"pending\\"", "\\"pending2\\"")
def testHierarchy(): Unit = {
runFileStructureViewTest("SpecsFileStrctureViewTest", "\\"child1\\"", Some("\\"parent\\""))
}
}
|
jastice/intellij-scala
|
scala/scala-impl/test/org/jetbrains/plugins/scala/testingSupport/specs2/Specs2FileStructureViewTest.scala
|
Scala
|
apache-2.0
| 1,783 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.codegen
import org.apache.spark.SparkFunSuite
class CodeFormatterSuite extends SparkFunSuite {
def testCase(name: String)(input: String)(expected: String): Unit = {
test(name) {
val sourceCode = new CodeAndComment(input, Map.empty)
assert(CodeFormatter.format(sourceCode).trim === expected.trim)
}
}
testCase("basic example") {
"""class A {
|blahblah;
|}""".stripMargin
}{
"""
|/* 001 */ class A {
|/* 002 */ blahblah;
|/* 003 */ }
""".stripMargin
}
testCase("nested example") {
"""class A {
| if (c) {
|duh;
|}
|}""".stripMargin
} {
"""
|/* 001 */ class A {
|/* 002 */ if (c) {
|/* 003 */ duh;
|/* 004 */ }
|/* 005 */ }
""".stripMargin
}
testCase("single line") {
"""class A {
| if (c) {duh;}
|}""".stripMargin
}{
"""
|/* 001 */ class A {
|/* 002 */ if (c) {duh;}
|/* 003 */ }
""".stripMargin
}
testCase("if else on the same line") {
"""class A {
| if (c) {duh;} else {boo;}
|}""".stripMargin
}{
"""
|/* 001 */ class A {
|/* 002 */ if (c) {duh;} else {boo;}
|/* 003 */ }
""".stripMargin
}
testCase("function calls") {
"""foo(
|a,
|b,
|c)""".stripMargin
}{
"""
|/* 001 */ foo(
|/* 002 */ a,
|/* 003 */ b,
|/* 004 */ c)
""".stripMargin
}
}
|
haowu80s/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeFormatterSuite.scala
|
Scala
|
apache-2.0
| 2,320 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.database
sealed abstract class ArtifactStoreException(message: String) extends Exception(message)
case class NoDocumentException(message: String) extends ArtifactStoreException(message)
case class DocumentConflictException(message: String) extends ArtifactStoreException(message)
case class DocumentTypeMismatchException(message: String) extends ArtifactStoreException(message)
case class DocumentUnreadable(message: String) extends ArtifactStoreException(message)
case class PutException(message: String) extends ArtifactStoreException(message)
|
duynguyen/incubator-openwhisk
|
common/scala/src/main/scala/whisk/core/database/ArtifactStoreExceptions.scala
|
Scala
|
apache-2.0
| 1,374 |
package CQLConnect
import java.sql
import java.util.Date
import scala.reflect.ClassTag
import com.datastax.spark.connector._
import com.datastax.spark.connector.cql._
import org.apache.spark.sql.cassandra._
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Dataset, Row, SparkSession}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import CQLConnect.DateUtils.DateFormatter
object CassandraTable {
// Map Cassandra elements to Spark
def convertToSpark(element:Any): Any = element match {
case time: org.joda.time.LocalDate => new sql.Date(time.toDateTimeAtStartOfDay().getMillis) // Convert to java.sql.Date
case date: java.util.Date => new sql.Timestamp(date.getTime)
case uuid: java.util.UUID => uuid.toString()
case other => other
}
// Get a Cassandra table schema
def get_schema(sc: SparkContext, keyspace: String, table: String) = {
val spark = SparkSession.builder().getOrCreate()
spark.read.cassandraFormat(table, keyspace).load.schema
}
def get_table(sc: SparkContext, keyspace: String, table: String, select_cols: Array[String]) = {
val spark = SparkSession.builder().getOrCreate()
import spark.implicits._
// Cassandra schema and table
var schema = get_schema(sc, keyspace, table)
var cass_table = sc.cassandraTable(keyspace, table)
    // Select columns if necessary
if (select_cols.length > 0) {
schema = StructType(schema.filter(x => select_cols.contains(x.name)))
cass_table = cass_table.select(select_cols.map(ColumnName(_)):_*)
}
// Convert Cassandra values to Spark and return
val spk_cass = cass_table.map{ case cassandraRow => Row(cassandraRow.columnValues.map(convertToSpark):_*) }
spark.createDataFrame(spk_cass, schema)
}
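  // A minimal usage sketch (hypothetical keyspace/table/column names), assuming an
  // active SparkContext and that the keyspace and table exist in the cluster:
  //   val df = CassandraTable.get_table(sc, "my_keyspace", "my_table", Array("id", "value"))
  //   df.show()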
  // Hard-coded function to pull obc_model:
  // - Read in the partitions model from C*
  // - Map it into a table with an added date
  // - Repartition by the obc_model partitioner to distribute load correctly
  // - Join with the C* table to return the subset RDD
  // - Filter if necessary
  // - Map C* values to a Spark Dataset to hand to Sparklyr
def get_obc_model ( sc: SparkContext, keyspace: String, int_line: Int, str_date: String, select_cols: Array[String]) = {
val spark = SparkSession.builder().getOrCreate()
import spark.implicits._
var date: sql.Date = new sql.Date(DateFormatter.parse(str_date).getTime())
var schema = get_schema(sc, keyspace, "obc_model")
// Repartitioning to the spark cluster as
// partitions_obc_model is only distributed across 2 nodes
var cass_join =
sc.cassandraTable(keyspace, "partitions_obc_model")
.where("line = ?", int_line)
// TODO: Case class this
.map{ case cassandraRow => (
cassandraRow.getInt("line"),
cassandraRow.getInt("vehicle_id_command"),
date,
cassandraRow.getInt("vcc"),
cassandraRow.getInt("channel")
)}
.repartitionByCassandraReplica(keyspace, "obc_model")
.joinWithCassandraTable(keyspace, "obc_model")
if (select_cols.length > 0) {
schema = StructType(schema.filter(x => select_cols.contains(x.name)))
cass_join = cass_join.select(select_cols.map(ColumnName(_)):_*)
}
val spk_cass_join = cass_join.map{ case(_, cassandraRow) => Row(cassandraRow.columnValues.map(convertToSpark):_*)}
spark.createDataFrame(spk_cass_join, schema)
}
// Can the partitions table be supplied via Sparklyr?
def joinWithRTable[T <: Serializable: ClassTag] (
sc: SparkContext,
dataset: Dataset[T],
keyspace: String,
table: String,
select_cols: Array[String])(
implicit
rwf: writer.RowWriterFactory[T],
rrf: rdd.reader.RowReaderFactory[T]
) = {
val spark = SparkSession.builder().getOrCreate()
import spark.implicits._
val my_rdd = dataset.rdd
var schema = get_schema(sc, keyspace, table)
var cass_join =
my_rdd
.repartitionByCassandraReplica(keyspace, table)
.joinWithCassandraTable(keyspace, table)
if (select_cols.length > 0) {
schema = StructType(schema.filter(x => select_cols.contains(x.name)))
cass_join = cass_join.select(select_cols.map(ColumnName(_)):_*)
}
cass_join
}
}
|
AkhilNairAmey/CQLConnect
|
java/CassandraTable.scala
|
Scala
|
mit
| 4,279 |
/**
* Copyright 2009 Latterfrosken Software Development Limited
*
* This file is part of Lafros GUI-Alerts.
*
* Lafros GUI-Alerts is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* Lafros GUI-Alerts is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with Lafros GUI-Alerts. If not, see <http://www.gnu.org/licenses/>. */
package com.lafros.gui.alerts
import java.awt.{Color, Component, Container, Dimension}
import javax.swing.{Icon, JLabel}
import javax.swing.border.TitledBorder
import scala.swing.{Alignment, Label, Swing}
/**
* titled label. Both the title and text will be centred, and the preferred
* width set to that of the wider of the two.
*
* @param _border the <tt>javax.swing.border.TitledBorder</tt> to be used
* @author Rob Dickens */
class JTitledConstrainableLabel(
private val _border: TitledBorder) extends JConstrainableLabel {
setHorizontalAlignment(Alignment.Center.id)
_border.setTitleJustification(TitledBorder.CENTER)
setBorder(_border)
/**
* also initialises the <tt>templateText</tt> property to <tt>templateText0</tt>. */
def this(tb: TitledBorder, templateText0: String) {
this(tb)
templateText = templateText0
}
/**
* a black <tt>javax.swing.border.LineBorder</tt> will be added, and the
* <tt>title</tt> set to <tt>title0</tt>. */
def this(title0: String) =
this(Swing.TitledBorder(Swing.LineBorder(Color.black), title0))
/**
* calls <tt>this(title0)</tt>, and sets the <tt>templateText</tt> property to
* <tt>templateText0</tt>. */
def this(title0: String, templateText0: String) {
this(title0)
templateText = templateText0
}
/**
* takes into account the <tt>title</tt> as well as the <tt>templateText</tt>. */
override def getPreferredSize = {
val d1 = super.getPreferredSize
val d2 = _border.getMinimumSize(this)
val w = Math.max(d1.width, d2.width)
d1.setSize(w, d1.height)
//println("preferred size: "+ d1.width +", "+ d1.height)
d1
}
/**
* forwards to getPreferredSize (which appears to be a requirement if the
* container is a <tt>BoxPanel</tt>). */
override def getMaximumSize = getPreferredSize
/**
* as displayed above the label. */
def title = _border.getTitle
/**
* sets this property. */
def title_=(arg: String) = if (arg != _border.getTitle) {
// preferred size now depends on title as well as label itself;
// if template in use, should apply that before recalculating preferred size;
_border.setTitle(arg)
templateText = templateText
}
}
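// A usage sketch (not part of the original file): both auxiliary constructors used
// below are defined above, and `title` can be updated after construction.
//   val label = new JTitledConstrainableLabel("Status", "TEMPLATE TEXT")
//   label.title = "Alerts"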
|
robcd/lafros-gui
|
lafros-gui-alerts/src/main/scala/com/lafros/gui/alerts/JTitledConstrainableLabel.scala
|
Scala
|
gpl-3.0
| 2,985 |
package webserviceclients.dispose
import play.api.libs.json.Json
import uk.gov.dvla.vehicles.presentation.common.webserviceclients.common.MicroserviceResponse
final case class DisposeResponse(transactionId: String,
registrationNumber: String,
auditId: String)
final case class DisposeResponseDto(response: Option[MicroserviceResponse], disposeResponse: DisposeResponse)
object DisposeResponse {
implicit val JsonFormat = Json.format[DisposeResponse]
}
object DisposeResponseDto {
implicit val JsonFormat = Json.format[DisposeResponseDto]
}
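// With these implicit formats in scope, play-json can round-trip the DTOs,
// e.g. Json.toJson(dto) and someJsValue.as[DisposeResponseDto] (usage sketch only).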
|
dvla/vehicles-online
|
app/webserviceclients/dispose/DisposeResponseDto.scala
|
Scala
|
mit
| 614 |
package gsd.linux.tools
import java.io.PrintStream
import util.parsing.combinator.{JavaTokenParsers, RegexParsers, PackratParsers}
/**
* TODO
*/
class REPLParser extends RegexParsers with JavaTokenParsers with PackratParsers {
val num: Parser[Int] = "[0-9]+".r ^^ { _.toInt }
val id: Parser[String] = "\\\\w+".r
val filename: Parser[String] = stringLiteral
def descCmd(out: PrintStream) = //TODO
"descendants" ~> opt("\\\\w".r) ~ opt(num) ^^ {
case Some(node) ~ None =>
case None ~ Some(depth) =>
case Some(node) ~ Some(depth) =>
case None ~ None =>
failure("descendants requires at least one argument")
}
def saveCmd(out: PrintStream): PackratParser[Unit] =
"save" ~> ((filename ^^ { new PrintStream(_) }) into cmd)
def cmd(out: PrintStream): PackratParser[Unit] =
saveCmd(out)
def process(line: String): Unit =
parseAll(cmd(System.out), line) match {
case Success(res,in) if in.atEnd =>
case Success(res,in) =>
println("[Warn] Extraneous input: " + in)
case fail =>
println(fail)
}
}
|
scas-mdd/linux-variability-analysis-tools.fm-translation
|
src/main/scala/gsd/linux/tools/REPLParser.scala
|
Scala
|
gpl-3.0
| 1,096 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.adaptive
import java.io.File
import java.net.URI
import org.apache.log4j.Level
import org.scalatest.PrivateMethodTester
import org.apache.spark.scheduler.{SparkListener, SparkListenerEvent, SparkListenerJobStart}
import org.apache.spark.sql.{Dataset, QueryTest, Row, SparkSession, Strategy}
import org.apache.spark.sql.catalyst.optimizer.{BuildLeft, BuildRight}
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan}
import org.apache.spark.sql.execution.{CollectLimitExec, CommandResultExec, LocalTableScanExec, PartialReducerPartitionSpec, QueryExecution, ReusedSubqueryExec, ShuffledRowRDD, SortExec, SparkPlan, UnaryExecNode}
import org.apache.spark.sql.execution.command.DataWritingCommandExec
import org.apache.spark.sql.execution.datasources.noop.NoopDataSource
import org.apache.spark.sql.execution.datasources.v2.V2TableWriteExec
import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ENSURE_REQUIREMENTS, Exchange, REPARTITION_BY_COL, REPARTITION_BY_NUM, ReusedExchangeExec, ShuffleExchangeExec, ShuffleExchangeLike, ShuffleOrigin}
import org.apache.spark.sql.execution.joins.{BaseJoinExec, BroadcastHashJoinExec, ShuffledHashJoinExec, ShuffledJoin, SortMergeJoinExec}
import org.apache.spark.sql.execution.metric.SQLShuffleReadMetricsReporter
import org.apache.spark.sql.execution.ui.SparkListenerSQLAdaptiveExecutionUpdate
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.PartitionOverwriteMode
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.test.SQLTestData.TestData
import org.apache.spark.sql.types.{IntegerType, StructType}
import org.apache.spark.sql.util.QueryExecutionListener
import org.apache.spark.util.Utils
class AdaptiveQueryExecSuite
extends QueryTest
with SharedSparkSession
with AdaptiveSparkPlanHelper
with PrivateMethodTester {
import testImplicits._
setupTestData()
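  // Runs `query` with AQE enabled, checks the result against a non-AQE run of the
  // same query, and returns (the initial physical plan, the final adaptive plan).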
private def runAdaptiveAndVerifyResult(query: String): (SparkPlan, SparkPlan) = {
var finalPlanCnt = 0
val listener = new SparkListener {
override def onOtherEvent(event: SparkListenerEvent): Unit = {
event match {
case SparkListenerSQLAdaptiveExecutionUpdate(_, _, sparkPlanInfo) =>
if (sparkPlanInfo.simpleString.startsWith(
"AdaptiveSparkPlan isFinalPlan=true")) {
finalPlanCnt += 1
}
case _ => // ignore other events
}
}
}
spark.sparkContext.addSparkListener(listener)
val dfAdaptive = sql(query)
val planBefore = dfAdaptive.queryExecution.executedPlan
assert(planBefore.toString.startsWith("AdaptiveSparkPlan isFinalPlan=false"))
val result = dfAdaptive.collect()
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") {
val df = sql(query)
checkAnswer(df, result)
}
val planAfter = dfAdaptive.queryExecution.executedPlan
assert(planAfter.toString.startsWith("AdaptiveSparkPlan isFinalPlan=true"))
val adaptivePlan = planAfter.asInstanceOf[AdaptiveSparkPlanExec].executedPlan
spark.sparkContext.listenerBus.waitUntilEmpty()
// AQE will post `SparkListenerSQLAdaptiveExecutionUpdate` twice in case of subqueries that
// exist out of query stages.
val expectedFinalPlanCnt = adaptivePlan.find(_.subqueries.nonEmpty).map(_ => 2).getOrElse(1)
assert(finalPlanCnt == expectedFinalPlanCnt)
spark.sparkContext.removeSparkListener(listener)
val exchanges = adaptivePlan.collect {
case e: Exchange => e
}
assert(exchanges.isEmpty, "The final plan should not contain any Exchange node.")
(dfAdaptive.queryExecution.sparkPlan, adaptivePlan)
}
private def findTopLevelBroadcastHashJoin(plan: SparkPlan): Seq[BroadcastHashJoinExec] = {
collect(plan) {
case j: BroadcastHashJoinExec => j
}
}
private def findTopLevelSortMergeJoin(plan: SparkPlan): Seq[SortMergeJoinExec] = {
collect(plan) {
case j: SortMergeJoinExec => j
}
}
private def findTopLevelShuffledHashJoin(plan: SparkPlan): Seq[ShuffledHashJoinExec] = {
collect(plan) {
case j: ShuffledHashJoinExec => j
}
}
private def findTopLevelBaseJoin(plan: SparkPlan): Seq[BaseJoinExec] = {
collect(plan) {
case j: BaseJoinExec => j
}
}
private def findTopLevelSort(plan: SparkPlan): Seq[SortExec] = {
collect(plan) {
case s: SortExec => s
}
}
private def findTopLevelLimit(plan: SparkPlan): Seq[CollectLimitExec] = {
collect(plan) {
case l: CollectLimitExec => l
}
}
private def findReusedExchange(plan: SparkPlan): Seq[ReusedExchangeExec] = {
collectWithSubqueries(plan) {
case ShuffleQueryStageExec(_, e: ReusedExchangeExec, _) => e
case BroadcastQueryStageExec(_, e: ReusedExchangeExec, _) => e
}
}
private def findReusedSubquery(plan: SparkPlan): Seq[ReusedSubqueryExec] = {
collectWithSubqueries(plan) {
case e: ReusedSubqueryExec => e
}
}
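  // Asserts that all shuffles in `plan`, except `numShufflesWithoutLocalRead` of them,
  // have been converted to local shuffle reads whose partitions report preferred locations.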
private def checkNumLocalShuffleReads(
plan: SparkPlan, numShufflesWithoutLocalRead: Int = 0): Unit = {
val numShuffles = collect(plan) {
case s: ShuffleQueryStageExec => s
}.length
val numLocalReads = collect(plan) {
case read: AQEShuffleReadExec if read.isLocalRead => read
}
numLocalReads.foreach { r =>
val rdd = r.execute()
val parts = rdd.partitions
assert(parts.forall(rdd.preferredLocations(_).nonEmpty))
}
assert(numShuffles === (numLocalReads.length + numShufflesWithoutLocalRead))
}
private def checkInitialPartitionNum(df: Dataset[_], numPartition: Int): Unit = {
// repartition obeys initialPartitionNum when adaptiveExecutionEnabled
val plan = df.queryExecution.executedPlan
assert(plan.isInstanceOf[AdaptiveSparkPlanExec])
val shuffle = plan.asInstanceOf[AdaptiveSparkPlanExec].executedPlan.collect {
case s: ShuffleExchangeExec => s
}
assert(shuffle.size == 1)
assert(shuffle(0).outputPartitioning.numPartitions == numPartition)
}
test("Change merge join to broadcast join") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
}
}
test("Reuse the parallelism of coalesced shuffle in local shuffle read") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "10") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
val localReads = collect(adaptivePlan) {
case read: AQEShuffleReadExec if read.isLocalRead => read
}
assert(localReads.length == 2)
val localShuffleRDD0 = localReads(0).execute().asInstanceOf[ShuffledRowRDD]
val localShuffleRDD1 = localReads(1).execute().asInstanceOf[ShuffledRowRDD]
// The pre-shuffle partition size is [0, 0, 0, 72, 0]
// We exclude the 0-size partitions, so only one partition, advisoryParallelism = 1
// the final parallelism is
// advisoryParallelism = 1 since advisoryParallelism < numMappers
// and the partitions length is 1
assert(localShuffleRDD0.getPartitions.length == 1)
// The pre-shuffle partition size is [0, 72, 0, 72, 126]
      // We exclude the 0-size partitions, so only 3 partitions, advisoryParallelism = 3
// the final parallelism is
// advisoryParallelism / numMappers: 3/2 = 1 since advisoryParallelism >= numMappers
// and the partitions length is 1 * numMappers = 2
assert(localShuffleRDD1.getPartitions.length == 2)
}
}
test("Reuse the default parallelism in local shuffle read") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "false") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
val localReads = collect(adaptivePlan) {
case read: AQEShuffleReadExec if read.isLocalRead => read
}
assert(localReads.length == 2)
val localShuffleRDD0 = localReads(0).execute().asInstanceOf[ShuffledRowRDD]
val localShuffleRDD1 = localReads(1).execute().asInstanceOf[ShuffledRowRDD]
// the final parallelism is math.max(1, numReduces / numMappers): math.max(1, 5/2) = 2
// and the partitions length is 2 * numMappers = 4
assert(localShuffleRDD0.getPartitions.length == 4)
// the final parallelism is math.max(1, numReduces / numMappers): math.max(1, 5/2) = 2
// and the partitions length is 2 * numMappers = 4
assert(localShuffleRDD1.getPartitions.length == 4)
}
}
test("Empty stage coalesced to 1-partition RDD") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.ADAPTIVE_OPTIMIZER_EXCLUDED_RULES.key -> AQEPropagateEmptyRelation.ruleName) {
val df1 = spark.range(10).withColumn("a", 'id)
val df2 = spark.range(10).withColumn("b", 'id)
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val testDf = df1.where('a > 10).join(df2.where('b > 10), Seq("id"), "left_outer")
.groupBy('a).count()
checkAnswer(testDf, Seq())
val plan = testDf.queryExecution.executedPlan
assert(find(plan)(_.isInstanceOf[SortMergeJoinExec]).isDefined)
val coalescedReads = collect(plan) {
case r: AQEShuffleReadExec => r
}
assert(coalescedReads.length == 3)
coalescedReads.foreach(r => assert(r.partitionSpecs.length == 1))
}
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "1") {
val testDf = df1.where('a > 10).join(df2.where('b > 10), Seq("id"), "left_outer")
.groupBy('a).count()
checkAnswer(testDf, Seq())
val plan = testDf.queryExecution.executedPlan
assert(find(plan)(_.isInstanceOf[BroadcastHashJoinExec]).isDefined)
val coalescedReads = collect(plan) {
case r: AQEShuffleReadExec => r
}
assert(coalescedReads.length == 3, s"$plan")
coalescedReads.foreach(r => assert(r.isLocalRead || r.partitionSpecs.length == 1))
}
}
}
test("Scalar subquery") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a " +
"where value = (SELECT max(a) from testData3)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
}
}
test("Scalar subquery in later stages") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a " +
"where (value + a) = (SELECT max(a) from testData3)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
}
}
test("multiple joins") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"""
|WITH t4 AS (
| SELECT * FROM lowercaseData t2 JOIN testData3 t3 ON t2.n = t3.a where t2.n = '1'
|)
|SELECT * FROM testData
|JOIN testData2 t2 ON key = t2.a
|JOIN t4 ON t2.b = t4.a
|WHERE value = 1
""".stripMargin)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 3)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 3)
// A possible resulting query plan:
// BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastExchange
// +-LocalShuffleReader*
// +- ShuffleExchange
      // After applying the 'OptimizeShuffleWithLocalRead' rule, all four shuffle reads
      // in the bottom two 'BroadcastHashJoin's can be converted to local shuffle reads.
      // For the top-level 'BroadcastHashJoin', the probe side is not a shuffle query stage,
      // and the build side's shuffle query stage is also converted to a local shuffle read.
checkNumLocalShuffleReads(adaptivePlan)
}
}
test("multiple joins with aggregate") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"""
|WITH t4 AS (
| SELECT * FROM lowercaseData t2 JOIN (
| select a, sum(b) from testData3 group by a
| ) t3 ON t2.n = t3.a where t2.n = '1'
|)
|SELECT * FROM testData
|JOIN testData2 t2 ON key = t2.a
|JOIN t4 ON t2.b = t4.a
|WHERE value = 1
""".stripMargin)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 3)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 3)
// A possible resulting query plan:
// BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastExchange
// +-HashAggregate
// +- CoalescedShuffleReader
// +- ShuffleExchange
      // The shuffle introduced by the Aggregate can't be converted to a local shuffle read.
checkNumLocalShuffleReads(adaptivePlan, 1)
}
}
test("multiple joins with aggregate 2") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "500") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"""
|WITH t4 AS (
| SELECT * FROM lowercaseData t2 JOIN (
| select a, max(b) b from testData2 group by a
| ) t3 ON t2.n = t3.b
|)
|SELECT * FROM testData
|JOIN testData2 t2 ON key = t2.a
|JOIN t4 ON value = t4.a
|WHERE value = 1
""".stripMargin)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 3)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 3)
// A possible resulting query plan:
// BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- Filter
// +- HashAggregate
// +- CoalescedShuffleReader
// +- ShuffleExchange
// +- BroadcastExchange
// +-LocalShuffleReader*
// +- ShuffleExchange
      // The shuffle introduced by the Aggregate can't be converted to a local shuffle read.
checkNumLocalShuffleReads(adaptivePlan, 1)
}
}
test("Exchange reuse") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT value FROM testData join testData2 ON key = a " +
"join (SELECT value v from testData join testData3 ON key = a) on value = v")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 3)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 2)
      // There is still an SMJ, and its two shuffles can't be converted to local shuffle reads.
checkNumLocalShuffleReads(adaptivePlan, 2)
// Even with local shuffle read, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.size == 1)
}
}
test("Exchange reuse with subqueries") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT a FROM testData join testData2 ON key = a " +
"where value = (SELECT max(a) from testData join testData2 ON key = a)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
// Even with local shuffle read, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.size == 1)
}
}
test("Exchange reuse across subqueries") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
SQLConf.SUBQUERY_REUSE_ENABLED.key -> "false") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT a FROM testData join testData2 ON key = a " +
"where value >= (SELECT max(a) from testData join testData2 ON key = a) " +
"and a <= (SELECT max(a) from testData join testData2 ON key = a)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
// Even with local shuffle read, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.nonEmpty)
val sub = findReusedSubquery(adaptivePlan)
assert(sub.isEmpty)
}
}
test("Subquery reuse") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT a FROM testData join testData2 ON key = a " +
"where value >= (SELECT max(a) from testData join testData2 ON key = a) " +
"and a <= (SELECT max(a) from testData join testData2 ON key = a)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
// Even with local shuffle read, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.isEmpty)
val sub = findReusedSubquery(adaptivePlan)
assert(sub.nonEmpty)
}
}
test("Broadcast exchange reuse across subqueries") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "20000000",
SQLConf.SUBQUERY_REUSE_ENABLED.key -> "false") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT a FROM testData join testData2 ON key = a " +
"where value >= (" +
"SELECT /*+ broadcast(testData2) */ max(key) from testData join testData2 ON key = a) " +
"and a <= (" +
"SELECT /*+ broadcast(testData2) */ max(value) from testData join testData2 ON key = a)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
// Even with local shuffle read, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.nonEmpty)
assert(ex.head.child.isInstanceOf[BroadcastExchangeExec])
val sub = findReusedSubquery(adaptivePlan)
assert(sub.isEmpty)
}
}
test("Union/Except/Intersect queries") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
runAdaptiveAndVerifyResult(
"""
|SELECT * FROM testData
|EXCEPT
|SELECT * FROM testData2
|UNION ALL
|SELECT * FROM testData
|INTERSECT ALL
|SELECT * FROM testData2
""".stripMargin)
}
}
test("Subquery de-correlation in Union queries") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
withTempView("a", "b") {
Seq("a" -> 2, "b" -> 1).toDF("id", "num").createTempView("a")
Seq("a" -> 2, "b" -> 1).toDF("id", "num").createTempView("b")
runAdaptiveAndVerifyResult(
"""
|SELECT id,num,source FROM (
| SELECT id, num, 'a' as source FROM a
| UNION ALL
| SELECT id, num, 'b' as source FROM b
|) AS c WHERE c.id IN (SELECT id FROM b WHERE num = 2)
""".stripMargin)
}
}
}
test("Avoid plan change if cost is greater") {
val origPlan = sql("SELECT * FROM testData " +
"join testData2 t2 ON key = t2.a " +
"join testData2 t3 on t2.a = t3.a where t2.b = 1").queryExecution.executedPlan
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
SQLConf.BROADCAST_HASH_JOIN_OUTPUT_PARTITIONING_EXPAND_LIMIT.key -> "0") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData " +
"join testData2 t2 ON key = t2.a " +
"join testData2 t3 on t2.a = t3.a where t2.b = 1")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 2)
val smj2 = findTopLevelSortMergeJoin(adaptivePlan)
assert(smj2.size == 2, origPlan.toString)
}
}
test("Change merge join to broadcast join without local shuffle read") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.LOCAL_SHUFFLE_READER_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "40") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"""
|SELECT * FROM testData t1 join testData2 t2
|ON t1.key = t2.a join testData3 t3 on t2.a = t3.a
|where t1.value = 1
""".stripMargin
)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 2)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
      // There is still an SMJ, and its two shuffles can't be converted to local shuffle reads.
checkNumLocalShuffleReads(adaptivePlan, 2)
}
}
test("Avoid changing merge join to broadcast join if too many empty partitions on build plan") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.NON_EMPTY_PARTITION_RATIO_FOR_BROADCAST_JOIN.key -> "0.5") {
      // `testData` is small enough to be broadcast, but its empty partition ratio exceeds the config.
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.isEmpty)
}
// It is still possible to broadcast `testData2`.
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "2000") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
assert(bhj.head.buildSide == BuildRight)
}
}
}
test("SPARK-29906: AQE should not introduce extra shuffle for outermost limit") {
var numStages = 0
val listener = new SparkListener {
override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
numStages = jobStart.stageInfos.length
}
}
try {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
spark.sparkContext.addSparkListener(listener)
spark.range(0, 100, 1, numPartitions = 10).take(1)
spark.sparkContext.listenerBus.waitUntilEmpty()
// Should be only one stage since there is no shuffle.
assert(numStages == 1)
}
} finally {
spark.sparkContext.removeSparkListener(listener)
}
}
test("SPARK-30524: Do not optimize skew join if introduce additional shuffle") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "100",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "100") {
withTempView("skewData1", "skewData2") {
spark
.range(0, 1000, 1, 10)
.selectExpr("id % 3 as key1", "id as value1")
.createOrReplaceTempView("skewData1")
spark
.range(0, 1000, 1, 10)
.selectExpr("id % 1 as key2", "id as value2")
.createOrReplaceTempView("skewData2")
def checkSkewJoin(query: String, optimizeSkewJoin: Boolean): Unit = {
val (_, innerAdaptivePlan) = runAdaptiveAndVerifyResult(query)
val innerSmj = findTopLevelSortMergeJoin(innerAdaptivePlan)
assert(innerSmj.size == 1 && innerSmj.head.isSkewJoin == optimizeSkewJoin)
}
checkSkewJoin(
"SELECT key1 FROM skewData1 JOIN skewData2 ON key1 = key2", true)
// Additional shuffle introduced, so disable the "OptimizeSkewedJoin" optimization
checkSkewJoin(
"SELECT key1 FROM skewData1 JOIN skewData2 ON key1 = key2 GROUP BY key1", false)
}
}
}
test("SPARK-29544: adaptive skew join with different join types") {
Seq("SHUFFLE_MERGE", "SHUFFLE_HASH").foreach { joinHint =>
def getJoinNode(plan: SparkPlan): Seq[ShuffledJoin] = if (joinHint == "SHUFFLE_MERGE") {
findTopLevelSortMergeJoin(plan)
} else {
findTopLevelShuffledHashJoin(plan)
}
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
SQLConf.SHUFFLE_PARTITIONS.key -> "100",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "800",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "800") {
withTempView("skewData1", "skewData2") {
spark
.range(0, 1000, 1, 10)
.select(
when('id < 250, 249)
.when('id >= 750, 1000)
.otherwise('id).as("key1"),
'id as "value1")
.createOrReplaceTempView("skewData1")
spark
.range(0, 1000, 1, 10)
.select(
when('id < 250, 249)
.otherwise('id).as("key2"),
'id as "value2")
.createOrReplaceTempView("skewData2")
def checkSkewJoin(
joins: Seq[ShuffledJoin],
leftSkewNum: Int,
rightSkewNum: Int): Unit = {
assert(joins.size == 1 && joins.head.isSkewJoin)
assert(joins.head.left.collect {
case r: AQEShuffleReadExec => r
}.head.partitionSpecs.collect {
case p: PartialReducerPartitionSpec => p.reducerIndex
}.distinct.length == leftSkewNum)
assert(joins.head.right.collect {
case r: AQEShuffleReadExec => r
}.head.partitionSpecs.collect {
case p: PartialReducerPartitionSpec => p.reducerIndex
}.distinct.length == rightSkewNum)
}
// skewed inner join optimization
val (_, innerAdaptivePlan) = runAdaptiveAndVerifyResult(
s"SELECT /*+ $joinHint(skewData1) */ * FROM skewData1 " +
"JOIN skewData2 ON key1 = key2")
val inner = getJoinNode(innerAdaptivePlan)
checkSkewJoin(inner, 2, 1)
// skewed left outer join optimization
val (_, leftAdaptivePlan) = runAdaptiveAndVerifyResult(
s"SELECT /*+ $joinHint(skewData2) */ * FROM skewData1 " +
"LEFT OUTER JOIN skewData2 ON key1 = key2")
val leftJoin = getJoinNode(leftAdaptivePlan)
checkSkewJoin(leftJoin, 2, 0)
// skewed right outer join optimization
val (_, rightAdaptivePlan) = runAdaptiveAndVerifyResult(
s"SELECT /*+ $joinHint(skewData1) */ * FROM skewData1 " +
"RIGHT OUTER JOIN skewData2 ON key1 = key2")
val rightJoin = getJoinNode(rightAdaptivePlan)
checkSkewJoin(rightJoin, 0, 1)
}
}
}
}
test("SPARK-30291: AQE should catch the exceptions when doing materialize") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
withTable("bucketed_table") {
val df1 =
(0 until 50).map(i => (i % 5, i % 13, i.toString)).toDF("i", "j", "k").as("df1")
df1.write.format("parquet").bucketBy(8, "i").saveAsTable("bucketed_table")
val warehouseFilePath = new URI(spark.sessionState.conf.warehousePath).getPath
val tableDir = new File(warehouseFilePath, "bucketed_table")
Utils.deleteRecursively(tableDir)
df1.write.parquet(tableDir.getAbsolutePath)
val aggregated = spark.table("bucketed_table").groupBy("i").count()
val error = intercept[Exception] {
aggregated.count()
}
assert(error.toString contains "Invalid bucket file")
assert(error.getSuppressed.size === 0)
}
}
}
test("SPARK-30403: AQE should handle InSubquery") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
runAdaptiveAndVerifyResult("SELECT * FROM testData LEFT OUTER join testData2" +
" ON key = a AND key NOT IN (select a from testData3) where value = '1'"
)
}
}
test("force apply AQE") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
val plan = sql("SELECT * FROM testData").queryExecution.executedPlan
assert(plan.isInstanceOf[AdaptiveSparkPlanExec])
}
}
test("SPARK-30719: do not log warning if intentionally skip AQE") {
val testAppender = new LogAppender("aqe logging warning test when skip")
withLogAppender(testAppender) {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val plan = sql("SELECT * FROM testData").queryExecution.executedPlan
assert(!plan.isInstanceOf[AdaptiveSparkPlanExec])
}
}
assert(!testAppender.loggingEvents
.exists(msg => msg.getRenderedMessage.contains(
s"${SQLConf.ADAPTIVE_EXECUTION_ENABLED.key} is" +
s" enabled but is not supported for")))
}
test("test log level") {
def verifyLog(expectedLevel: Level): Unit = {
val logAppender = new LogAppender("adaptive execution")
withLogAppender(
logAppender,
loggerNames = Seq(AdaptiveSparkPlanExec.getClass.getName.dropRight(1)),
level = Some(Level.TRACE)) {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
sql("SELECT * FROM testData join testData2 ON key = a where value = '1'").collect()
}
}
Seq("Plan changed", "Final plan").foreach { msg =>
assert(
logAppender.loggingEvents.exists { event =>
event.getRenderedMessage.contains(msg) && event.getLevel == expectedLevel
})
}
}
// Verify default log level
verifyLog(Level.DEBUG)
// Verify custom log level
val levels = Seq(
"TRACE" -> Level.TRACE,
"trace" -> Level.TRACE,
"DEBUG" -> Level.DEBUG,
"debug" -> Level.DEBUG,
"INFO" -> Level.INFO,
"info" -> Level.INFO,
"WARN" -> Level.WARN,
"warn" -> Level.WARN,
"ERROR" -> Level.ERROR,
"error" -> Level.ERROR,
"deBUG" -> Level.DEBUG)
levels.foreach { level =>
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_LOG_LEVEL.key -> level._1) {
verifyLog(level._2)
}
}
}
test("tree string output") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val df = sql("SELECT * FROM testData join testData2 ON key = a where value = '1'")
val planBefore = df.queryExecution.executedPlan
assert(!planBefore.toString.contains("== Current Plan =="))
assert(!planBefore.toString.contains("== Initial Plan =="))
df.collect()
val planAfter = df.queryExecution.executedPlan
assert(planAfter.toString.contains("== Final Plan =="))
assert(planAfter.toString.contains("== Initial Plan =="))
}
}
test("SPARK-31384: avoid NPE in OptimizeSkewedJoin when there's 0 partition plan") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
withTempView("t2") {
// create DataFrame with 0 partition
spark.createDataFrame(sparkContext.emptyRDD[Row], new StructType().add("b", IntegerType))
.createOrReplaceTempView("t2")
// should run successfully without NPE
runAdaptiveAndVerifyResult("SELECT * FROM testData2 t1 left semi join t2 ON t1.a=t2.b")
}
}
}
test("SPARK-34682: AQEShuffleReadExec operating on canonicalized plan") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT key FROM testData GROUP BY key")
val reads = collect(adaptivePlan) {
case r: AQEShuffleReadExec => r
}
assert(reads.length == 1)
val read = reads.head
val c = read.canonicalized.asInstanceOf[AQEShuffleReadExec]
// we can't just call execute() because that has separate checks for canonicalized plans
val ex = intercept[IllegalStateException] {
val doExecute = PrivateMethod[Unit](Symbol("doExecute"))
c.invokePrivate(doExecute())
}
assert(ex.getMessage === "operating on canonicalized plan")
}
}
test("metrics of the shuffle read") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT key FROM testData GROUP BY key")
val reads = collect(adaptivePlan) {
case r: AQEShuffleReadExec => r
}
assert(reads.length == 1)
val read = reads.head
assert(!read.isLocalRead)
assert(!read.hasSkewedPartition)
assert(read.hasCoalescedPartition)
assert(read.metrics.keys.toSeq.sorted == Seq(
"numCoalescedPartitions", "numPartitions", "partitionDataSize"))
assert(read.metrics("numCoalescedPartitions").value == 1)
assert(read.metrics("numPartitions").value == read.partitionSpecs.length)
assert(read.metrics("partitionDataSize").value > 0)
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val join = collect(adaptivePlan) {
case j: BroadcastHashJoinExec => j
}.head
assert(join.buildSide == BuildLeft)
val reads = collect(join.right) {
case r: AQEShuffleReadExec => r
}
assert(reads.length == 1)
val read = reads.head
assert(read.isLocalRead)
assert(read.metrics.keys.toSeq == Seq("numPartitions"))
assert(read.metrics("numPartitions").value == read.partitionSpecs.length)
}
withSQLConf(
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.SHUFFLE_PARTITIONS.key -> "100",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "800",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "1000") {
withTempView("skewData1", "skewData2") {
spark
.range(0, 1000, 1, 10)
.select(
when('id < 250, 249)
.when('id >= 750, 1000)
.otherwise('id).as("key1"),
'id as "value1")
.createOrReplaceTempView("skewData1")
spark
.range(0, 1000, 1, 10)
.select(
when('id < 250, 249)
.otherwise('id).as("key2"),
'id as "value2")
.createOrReplaceTempView("skewData2")
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM skewData1 join skewData2 ON key1 = key2")
val reads = collect(adaptivePlan) {
case r: AQEShuffleReadExec => r
}
reads.foreach { read =>
assert(!read.isLocalRead)
assert(read.hasCoalescedPartition)
assert(read.hasSkewedPartition)
assert(read.metrics.contains("numSkewedPartitions"))
}
assert(reads(0).metrics("numSkewedPartitions").value == 2)
assert(reads(0).metrics("numSkewedSplits").value == 11)
assert(reads(1).metrics("numSkewedPartitions").value == 1)
assert(reads(1).metrics("numSkewedSplits").value == 9)
}
}
}
}
test("control a plan explain mode in listeners via SQLConf") {
def checkPlanDescription(mode: String, expected: Seq[String]): Unit = {
var checkDone = false
val listener = new SparkListener {
override def onOtherEvent(event: SparkListenerEvent): Unit = {
event match {
case SparkListenerSQLAdaptiveExecutionUpdate(_, planDescription, _) =>
assert(expected.forall(planDescription.contains))
checkDone = true
case _ => // ignore other events
}
}
}
spark.sparkContext.addSparkListener(listener)
withSQLConf(SQLConf.UI_EXPLAIN_MODE.key -> mode,
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val dfAdaptive = sql("SELECT * FROM testData JOIN testData2 ON key = a WHERE value = '1'")
try {
checkAnswer(dfAdaptive, Row(1, "1", 1, 1) :: Row(1, "1", 1, 2) :: Nil)
spark.sparkContext.listenerBus.waitUntilEmpty()
assert(checkDone)
} finally {
spark.sparkContext.removeSparkListener(listener)
}
}
}
Seq(("simple", Seq("== Physical Plan ==")),
("extended", Seq("== Parsed Logical Plan ==", "== Analyzed Logical Plan ==",
"== Optimized Logical Plan ==", "== Physical Plan ==")),
("codegen", Seq("WholeStageCodegen subtrees")),
("cost", Seq("== Optimized Logical Plan ==", "Statistics(sizeInBytes")),
("formatted", Seq("== Physical Plan ==", "Output", "Arguments"))).foreach {
case (mode, expected) =>
checkPlanDescription(mode, expected)
}
}
test("SPARK-30953: InsertAdaptiveSparkPlan should apply AQE on child plan of write commands") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
withTable("t1") {
val plan = sql("CREATE TABLE t1 USING parquet AS SELECT 1 col").queryExecution.executedPlan
assert(plan.isInstanceOf[CommandResultExec])
val commandResultExec = plan.asInstanceOf[CommandResultExec]
assert(commandResultExec.commandPhysicalPlan.isInstanceOf[DataWritingCommandExec])
assert(commandResultExec.commandPhysicalPlan.asInstanceOf[DataWritingCommandExec]
.child.isInstanceOf[AdaptiveSparkPlanExec])
}
}
}
test("AQE should set active session during execution") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val df = spark.range(10).select(sum('id))
assert(df.queryExecution.executedPlan.isInstanceOf[AdaptiveSparkPlanExec])
SparkSession.setActiveSession(null)
checkAnswer(df, Seq(Row(45)))
SparkSession.setActiveSession(spark) // recover the active session.
}
}
test("No deadlock in UI update") {
object TestStrategy extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case _: Aggregate =>
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
spark.range(5).rdd
}
Nil
case _ => Nil
}
}
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
try {
spark.experimental.extraStrategies = TestStrategy :: Nil
val df = spark.range(10).groupBy('id).count()
df.collect()
} finally {
spark.experimental.extraStrategies = Nil
}
}
}
test("SPARK-31658: SQL UI should show write commands") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
withTable("t1") {
var checkDone = false
val listener = new SparkListener {
override def onOtherEvent(event: SparkListenerEvent): Unit = {
event match {
case SparkListenerSQLAdaptiveExecutionUpdate(_, _, planInfo) =>
assert(planInfo.nodeName == "Execute CreateDataSourceTableAsSelectCommand")
checkDone = true
case _ => // ignore other events
}
}
}
spark.sparkContext.addSparkListener(listener)
try {
sql("CREATE TABLE t1 USING parquet AS SELECT 1 col").collect()
spark.sparkContext.listenerBus.waitUntilEmpty()
assert(checkDone)
} finally {
spark.sparkContext.removeSparkListener(listener)
}
}
}
}
test("SPARK-31220, SPARK-32056: repartition by expression with AQE") {
Seq(true, false).foreach { enableAQE =>
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> enableAQE.toString,
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_INITIAL_PARTITION_NUM.key -> "10",
SQLConf.SHUFFLE_PARTITIONS.key -> "10") {
val df1 = spark.range(10).repartition($"id")
val df2 = spark.range(10).repartition($"id" + 1)
val partitionsNum1 = df1.rdd.collectPartitions().length
val partitionsNum2 = df2.rdd.collectPartitions().length
if (enableAQE) {
assert(partitionsNum1 < 10)
assert(partitionsNum2 < 10)
checkInitialPartitionNum(df1, 10)
checkInitialPartitionNum(df2, 10)
} else {
assert(partitionsNum1 === 10)
assert(partitionsNum2 === 10)
}
// Don't coalesce partitions if the number of partitions is specified.
val df3 = spark.range(10).repartition(10, $"id")
val df4 = spark.range(10).repartition(10)
assert(df3.rdd.collectPartitions().length == 10)
assert(df4.rdd.collectPartitions().length == 10)
}
}
}
test("SPARK-31220, SPARK-32056: repartition by range with AQE") {
Seq(true, false).foreach { enableAQE =>
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> enableAQE.toString,
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_INITIAL_PARTITION_NUM.key -> "10",
SQLConf.SHUFFLE_PARTITIONS.key -> "10") {
val df1 = spark.range(10).toDF.repartitionByRange($"id".asc)
val df2 = spark.range(10).toDF.repartitionByRange(($"id" + 1).asc)
val partitionsNum1 = df1.rdd.collectPartitions().length
val partitionsNum2 = df2.rdd.collectPartitions().length
if (enableAQE) {
assert(partitionsNum1 < 10)
assert(partitionsNum2 < 10)
checkInitialPartitionNum(df1, 10)
checkInitialPartitionNum(df2, 10)
} else {
assert(partitionsNum1 === 10)
assert(partitionsNum2 === 10)
}
// Don't coalesce partitions if the number of partitions is specified.
val df3 = spark.range(10).repartitionByRange(10, $"id".asc)
assert(df3.rdd.collectPartitions().length == 10)
}
}
}
test("SPARK-31220, SPARK-32056: repartition using sql and hint with AQE") {
Seq(true, false).foreach { enableAQE =>
withTempView("test") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> enableAQE.toString,
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_INITIAL_PARTITION_NUM.key -> "10",
SQLConf.SHUFFLE_PARTITIONS.key -> "10") {
spark.range(10).toDF.createTempView("test")
val df1 = spark.sql("SELECT /*+ REPARTITION(id) */ * from test")
val df2 = spark.sql("SELECT /*+ REPARTITION_BY_RANGE(id) */ * from test")
val df3 = spark.sql("SELECT * from test DISTRIBUTE BY id")
val df4 = spark.sql("SELECT * from test CLUSTER BY id")
val partitionsNum1 = df1.rdd.collectPartitions().length
val partitionsNum2 = df2.rdd.collectPartitions().length
val partitionsNum3 = df3.rdd.collectPartitions().length
val partitionsNum4 = df4.rdd.collectPartitions().length
if (enableAQE) {
assert(partitionsNum1 < 10)
assert(partitionsNum2 < 10)
assert(partitionsNum3 < 10)
assert(partitionsNum4 < 10)
checkInitialPartitionNum(df1, 10)
checkInitialPartitionNum(df2, 10)
checkInitialPartitionNum(df3, 10)
checkInitialPartitionNum(df4, 10)
} else {
assert(partitionsNum1 === 10)
assert(partitionsNum2 === 10)
assert(partitionsNum3 === 10)
assert(partitionsNum4 === 10)
}
// Don't coalesce partitions if the number of partitions is specified.
val df5 = spark.sql("SELECT /*+ REPARTITION(10, id) */ * from test")
val df6 = spark.sql("SELECT /*+ REPARTITION_BY_RANGE(10, id) */ * from test")
assert(df5.rdd.collectPartitions().length == 10)
assert(df6.rdd.collectPartitions().length == 10)
}
}
}
}
test("SPARK-32573: Eliminate NAAJ when BuildSide is HashedRelationWithAllNullKeys") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> Long.MaxValue.toString) {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData2 t1 WHERE t1.b NOT IN (SELECT b FROM testData3)")
val bhj = findTopLevelBroadcastHashJoin(plan)
assert(bhj.size == 1)
val join = findTopLevelBaseJoin(adaptivePlan)
assert(join.isEmpty)
checkNumLocalShuffleReads(adaptivePlan)
}
}
test("SPARK-32717: AQEOptimizer should respect excludedRules configuration") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> Long.MaxValue.toString,
      // This test is a copy of test(SPARK-32573), in order to verify that the configuration
      // `spark.sql.adaptive.optimizer.excludedRules` works as expected.
SQLConf.ADAPTIVE_OPTIMIZER_EXCLUDED_RULES.key -> AQEPropagateEmptyRelation.ruleName) {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData2 t1 WHERE t1.b NOT IN (SELECT b FROM testData3)")
val bhj = findTopLevelBroadcastHashJoin(plan)
assert(bhj.size == 1)
val join = findTopLevelBaseJoin(adaptivePlan)
      // this differs from test(SPARK-32573) because the rule
// `EliminateUnnecessaryJoin` has been excluded.
assert(join.nonEmpty)
checkNumLocalShuffleReads(adaptivePlan)
}
}
test("SPARK-32649: Eliminate inner and semi join to empty relation") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
Seq(
// inner join (small table at right side)
"SELECT * FROM testData t1 join testData3 t2 ON t1.key = t2.a WHERE t2.b = 1",
// inner join (small table at left side)
"SELECT * FROM testData3 t1 join testData t2 ON t1.a = t2.key WHERE t1.b = 1",
// left semi join
"SELECT * FROM testData t1 left semi join testData3 t2 ON t1.key = t2.a AND t2.b = 1"
).foreach(query => {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val join = findTopLevelBaseJoin(adaptivePlan)
assert(join.isEmpty)
checkNumLocalShuffleReads(adaptivePlan)
})
}
}
test("SPARK-34533: Eliminate left anti join to empty relation") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
Seq(
// broadcast non-empty right side
("SELECT /*+ broadcast(testData3) */ * FROM testData LEFT ANTI JOIN testData3", true),
// broadcast empty right side
("SELECT /*+ broadcast(emptyTestData) */ * FROM testData LEFT ANTI JOIN emptyTestData",
true),
// broadcast left side
("SELECT /*+ broadcast(testData) */ * FROM testData LEFT ANTI JOIN testData3", false)
).foreach { case (query, isEliminated) =>
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
assert(findTopLevelBaseJoin(plan).size == 1)
assert(findTopLevelBaseJoin(adaptivePlan).isEmpty == isEliminated)
}
}
}
test("SPARK-34781: Eliminate left semi/anti join to its left side") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
Seq(
// left semi join and non-empty right side
("SELECT * FROM testData LEFT SEMI JOIN testData3", true),
// left semi join, non-empty right side and non-empty join condition
("SELECT * FROM testData t1 LEFT SEMI JOIN testData3 t2 ON t1.key = t2.a", false),
// left anti join and empty right side
("SELECT * FROM testData LEFT ANTI JOIN emptyTestData", true),
// left anti join, empty right side and non-empty join condition
("SELECT * FROM testData t1 LEFT ANTI JOIN emptyTestData t2 ON t1.key = t2.key", true)
).foreach { case (query, isEliminated) =>
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
assert(findTopLevelBaseJoin(plan).size == 1)
assert(findTopLevelBaseJoin(adaptivePlan).isEmpty == isEliminated)
}
}
}
test("SPARK-35455: Unify empty relation optimization between normal and AQE optimizer " +
"- single join") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
Seq(
// left semi join and empty left side
("SELECT * FROM (SELECT * FROM testData WHERE value = '0')t1 LEFT SEMI JOIN " +
"testData2 t2 ON t1.key = t2.a", true),
// left anti join and empty left side
("SELECT * FROM (SELECT * FROM testData WHERE value = '0')t1 LEFT ANTI JOIN " +
"testData2 t2 ON t1.key = t2.a", true),
// left outer join and empty left side
("SELECT * FROM (SELECT * FROM testData WHERE key = 0)t1 LEFT JOIN testData2 t2 ON " +
"t1.key = t2.a", true),
// left outer join and non-empty left side
("SELECT * FROM testData t1 LEFT JOIN testData2 t2 ON " +
"t1.key = t2.a", false),
// right outer join and empty right side
("SELECT * FROM testData t1 RIGHT JOIN (SELECT * FROM testData2 WHERE b = 0)t2 ON " +
"t1.key = t2.a", true),
// right outer join and non-empty right side
("SELECT * FROM testData t1 RIGHT JOIN testData2 t2 ON " +
"t1.key = t2.a", false),
// full outer join and both side empty
("SELECT * FROM (SELECT * FROM testData WHERE key = 0)t1 FULL JOIN " +
"(SELECT * FROM testData2 WHERE b = 0)t2 ON t1.key = t2.a", true),
// full outer join and left side empty right side non-empty
("SELECT * FROM (SELECT * FROM testData WHERE key = 0)t1 FULL JOIN " +
"testData2 t2 ON t1.key = t2.a", true)
).foreach { case (query, isEliminated) =>
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
assert(findTopLevelBaseJoin(plan).size == 1)
assert(findTopLevelBaseJoin(adaptivePlan).isEmpty == isEliminated, adaptivePlan)
}
}
}
test("SPARK-35455: Unify empty relation optimization between normal and AQE optimizer " +
"- multi join") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
Seq(
"""
|SELECT * FROM testData t1
| JOIN (SELECT * FROM testData2 WHERE b = 0) t2 ON t1.key = t2.a
| LEFT JOIN testData2 t3 ON t1.key = t3.a
|""".stripMargin,
"""
|SELECT * FROM (SELECT * FROM testData WHERE key = 0) t1
| LEFT ANTI JOIN testData2 t2
| FULL JOIN (SELECT * FROM testData2 WHERE b = 0) t3 ON t1.key = t3.a
|""".stripMargin,
"""
|SELECT * FROM testData t1
| LEFT SEMI JOIN (SELECT * FROM testData2 WHERE b = 0)
| RIGHT JOIN testData2 t3 on t1.key = t3.a
|""".stripMargin
).foreach { query =>
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
assert(findTopLevelBaseJoin(plan).size == 2)
assert(findTopLevelBaseJoin(adaptivePlan).isEmpty)
}
}
}
test("SPARK-35585: Support propagate empty relation through project/filter") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val (plan1, adaptivePlan1) = runAdaptiveAndVerifyResult(
"SELECT key FROM testData WHERE key = 0 ORDER BY key, value")
assert(findTopLevelSort(plan1).size == 1)
assert(stripAQEPlan(adaptivePlan1).isInstanceOf[LocalTableScanExec])
val (plan2, adaptivePlan2) = runAdaptiveAndVerifyResult(
"SELECT key FROM (SELECT * FROM testData WHERE value = 'no_match' ORDER BY key)" +
" WHERE key > rand()")
assert(findTopLevelSort(plan2).size == 1)
assert(stripAQEPlan(adaptivePlan2).isInstanceOf[LocalTableScanExec])
}
}
test("SPARK-32753: Only copy tags to node with no tags") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
withTempView("v1") {
spark.range(10).union(spark.range(10)).createOrReplaceTempView("v1")
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT id FROM v1 GROUP BY id DISTRIBUTE BY id")
assert(collect(adaptivePlan) {
case s: ShuffleExchangeExec => s
}.length == 1)
}
}
}
test("Logging plan changes for AQE") {
val testAppender = new LogAppender("plan changes")
withLogAppender(testAppender) {
withSQLConf(
SQLConf.PLAN_CHANGE_LOG_LEVEL.key -> "INFO",
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
sql("SELECT * FROM testData JOIN testData2 ON key = a " +
"WHERE value = (SELECT max(a) FROM testData3)").collect()
}
Seq("=== Result of Batch AQE Preparations ===",
"=== Result of Batch AQE Post Stage Creation ===",
"=== Result of Batch AQE Replanning ===",
"=== Result of Batch AQE Query Stage Optimization ===").foreach { expectedMsg =>
assert(testAppender.loggingEvents.exists(_.getRenderedMessage.contains(expectedMsg)))
}
}
}
test("SPARK-32932: Do not use local shuffle read at final stage on write command") {
withSQLConf(SQLConf.PARTITION_OVERWRITE_MODE.key -> PartitionOverwriteMode.DYNAMIC.toString,
SQLConf.SHUFFLE_PARTITIONS.key -> "5",
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val data = for (
i <- 1L to 10L;
j <- 1L to 3L
) yield (i, j)
val df = data.toDF("i", "j").repartition($"j")
var noLocalread: Boolean = false
val listener = new QueryExecutionListener {
override def onSuccess(funcName: String, qe: QueryExecution, durationNs: Long): Unit = {
qe.executedPlan match {
case plan@(_: DataWritingCommandExec | _: V2TableWriteExec) =>
assert(plan.asInstanceOf[UnaryExecNode].child.isInstanceOf[AdaptiveSparkPlanExec])
noLocalread = collect(plan) {
case exec: AQEShuffleReadExec if exec.isLocalRead => exec
}.isEmpty
case _ => // ignore other events
}
}
override def onFailure(funcName: String, qe: QueryExecution,
exception: Exception): Unit = {}
}
spark.listenerManager.register(listener)
withTable("t") {
df.write.partitionBy("j").saveAsTable("t")
sparkContext.listenerBus.waitUntilEmpty()
assert(noLocalread)
noLocalread = false
}
// Test DataSource v2
val format = classOf[NoopDataSource].getName
df.write.format(format).mode("overwrite").save()
sparkContext.listenerBus.waitUntilEmpty()
assert(noLocalread)
noLocalread = false
spark.listenerManager.unregister(listener)
}
}
test("SPARK-33494: Do not use local shuffle read for repartition") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val df = spark.table("testData").repartition('key)
df.collect()
      // Local shuffle read breaks partitioning and shouldn't be used for a repartition
      // operation that was explicitly specified by the user.
checkNumLocalShuffleReads(df.queryExecution.executedPlan, numShufflesWithoutLocalRead = 1)
}
}
test("SPARK-33551: Do not use AQE shuffle read for repartition") {
def hasRepartitionShuffle(plan: SparkPlan): Boolean = {
find(plan) {
case s: ShuffleExchangeLike =>
s.shuffleOrigin == REPARTITION_BY_COL || s.shuffleOrigin == REPARTITION_BY_NUM
case _ => false
}.isDefined
}
def checkBHJ(
df: Dataset[Row],
optimizeOutRepartition: Boolean,
probeSideLocalRead: Boolean,
probeSideCoalescedRead: Boolean): Unit = {
df.collect()
val plan = df.queryExecution.executedPlan
      // There should be only one shuffle that can't do local read, which is either the top
      // shuffle from repartition or the BHJ probe-side shuffle.
checkNumLocalShuffleReads(plan, 1)
assert(hasRepartitionShuffle(plan) == !optimizeOutRepartition)
val bhj = findTopLevelBroadcastHashJoin(plan)
assert(bhj.length == 1)
// Build side should do local read.
val buildSide = find(bhj.head.left)(_.isInstanceOf[AQEShuffleReadExec])
assert(buildSide.isDefined)
assert(buildSide.get.asInstanceOf[AQEShuffleReadExec].isLocalRead)
val probeSide = find(bhj.head.right)(_.isInstanceOf[AQEShuffleReadExec])
if (probeSideLocalRead || probeSideCoalescedRead) {
assert(probeSide.isDefined)
if (probeSideLocalRead) {
assert(probeSide.get.asInstanceOf[AQEShuffleReadExec].isLocalRead)
} else {
assert(probeSide.get.asInstanceOf[AQEShuffleReadExec].hasCoalescedPartition)
}
} else {
assert(probeSide.isEmpty)
}
}
def checkSMJ(
df: Dataset[Row],
optimizeOutRepartition: Boolean,
optimizeSkewJoin: Boolean,
coalescedRead: Boolean): Unit = {
df.collect()
val plan = df.queryExecution.executedPlan
assert(hasRepartitionShuffle(plan) == !optimizeOutRepartition)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.length == 1)
assert(smj.head.isSkewJoin == optimizeSkewJoin)
val aqeReads = collect(smj.head) {
case c: AQEShuffleReadExec => c
}
if (coalescedRead || optimizeSkewJoin) {
assert(aqeReads.length == 2)
if (coalescedRead) assert(aqeReads.forall(_.hasCoalescedPartition))
} else {
assert(aqeReads.isEmpty)
}
}
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.SHUFFLE_PARTITIONS.key -> "5") {
val df = sql(
"""
|SELECT * FROM (
| SELECT * FROM testData WHERE key = 1
|)
|RIGHT OUTER JOIN testData2
|ON value = b
""".stripMargin)
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
// Repartition with no partition num specified.
checkBHJ(df.repartition('b),
// The top shuffle from repartition is optimized out.
optimizeOutRepartition = true, probeSideLocalRead = false, probeSideCoalescedRead = true)
// Repartition with default partition num (5 in test env) specified.
checkBHJ(df.repartition(5, 'b),
// The top shuffle from repartition is optimized out
// The final plan must have 5 partitions, no optimization can be made to the probe side.
optimizeOutRepartition = true, probeSideLocalRead = false, probeSideCoalescedRead = false)
// Repartition with non-default partition num specified.
checkBHJ(df.repartition(4, 'b),
// The top shuffle from repartition is not optimized out
optimizeOutRepartition = false, probeSideLocalRead = true, probeSideCoalescedRead = true)
// Repartition by col and project away the partition cols
checkBHJ(df.repartition('b).select('key),
// The top shuffle from repartition is not optimized out
optimizeOutRepartition = false, probeSideLocalRead = true, probeSideCoalescedRead = true)
}
// Force skew join
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.SKEW_JOIN_ENABLED.key -> "true",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "1",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_FACTOR.key -> "0",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "10") {
// Repartition with no partition num specified.
checkSMJ(df.repartition('b),
// The top shuffle from repartition is optimized out.
optimizeOutRepartition = true, optimizeSkewJoin = false, coalescedRead = true)
// Repartition with default partition num (5 in test env) specified.
checkSMJ(df.repartition(5, 'b),
// The top shuffle from repartition is optimized out.
// The final plan must have 5 partitions, can't do coalesced read.
optimizeOutRepartition = true, optimizeSkewJoin = false, coalescedRead = false)
// Repartition with non-default partition num specified.
checkSMJ(df.repartition(4, 'b),
// The top shuffle from repartition is not optimized out.
optimizeOutRepartition = false, optimizeSkewJoin = true, coalescedRead = false)
// Repartition by col and project away the partition cols
checkSMJ(df.repartition('b).select('key),
// The top shuffle from repartition is not optimized out.
optimizeOutRepartition = false, optimizeSkewJoin = true, coalescedRead = false)
}
}
}
test("SPARK-34091: Batch shuffle fetch in AQE partition coalescing") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.SHUFFLE_PARTITIONS.key -> "10",
SQLConf.FETCH_SHUFFLE_BLOCKS_IN_BATCH.key -> "true") {
withTable("t1") {
spark.range(100).selectExpr("id + 1 as a").write.format("parquet").saveAsTable("t1")
val query = "SELECT SUM(a) FROM t1 GROUP BY a"
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(query)
val metricName = SQLShuffleReadMetricsReporter.LOCAL_BLOCKS_FETCHED
val blocksFetchedMetric = collectFirst(adaptivePlan) {
case p if p.metrics.contains(metricName) => p.metrics(metricName)
}
assert(blocksFetchedMetric.isDefined)
val blocksFetched = blocksFetchedMetric.get.value
withSQLConf(SQLConf.FETCH_SHUFFLE_BLOCKS_IN_BATCH.key -> "false") {
val (_, adaptivePlan2) = runAdaptiveAndVerifyResult(query)
val blocksFetchedMetric2 = collectFirst(adaptivePlan2) {
case p if p.metrics.contains(metricName) => p.metrics(metricName)
}
assert(blocksFetchedMetric2.isDefined)
val blocksFetched2 = blocksFetchedMetric2.get.value
assert(blocksFetched < blocksFetched2)
}
}
}
}
test("SPARK-33933: Materialize BroadcastQueryStage first in AQE") {
val testAppender = new LogAppender("aqe query stage materialization order test")
val df = spark.range(1000).select($"id" % 26, $"id" % 10)
.toDF("index", "pv")
val dim = Range(0, 26).map(x => (x, ('a' + x).toChar.toString))
.toDF("index", "name")
val testDf = df.groupBy("index")
.agg(sum($"pv").alias("pv"))
.join(dim, Seq("index"))
val loggerNames =
Seq(classOf[BroadcastQueryStageExec].getName, classOf[ShuffleQueryStageExec].getName)
withLogAppender(testAppender, loggerNames, level = Some(Level.DEBUG)) {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val result = testDf.collect()
assert(result.length == 26)
}
}
val materializeLogs = testAppender.loggingEvents
.map(_.getRenderedMessage)
.filter(_.startsWith("Materialize query stage"))
.toArray
assert(materializeLogs(0).startsWith("Materialize query stage BroadcastQueryStageExec"))
assert(materializeLogs(1).startsWith("Materialize query stage ShuffleQueryStageExec"))
}
test("SPARK-34899: Use origin plan if we can not coalesce shuffle partition") {
def checkNoCoalescePartitions(ds: Dataset[Row], origin: ShuffleOrigin): Unit = {
assert(collect(ds.queryExecution.executedPlan) {
case s: ShuffleExchangeExec if s.shuffleOrigin == origin && s.numPartitions == 2 => s
}.size == 1)
ds.collect()
val plan = ds.queryExecution.executedPlan
assert(collect(plan) {
case c: AQEShuffleReadExec => c
}.isEmpty)
assert(collect(plan) {
case s: ShuffleExchangeExec if s.shuffleOrigin == origin && s.numPartitions == 2 => s
}.size == 1)
checkAnswer(ds, testData)
}
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
// Pick a small value so that no coalesce can happen.
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "100",
SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
SQLConf.SHUFFLE_PARTITIONS.key -> "2") {
val df = spark.sparkContext.parallelize(
(1 to 100).map(i => TestData(i, i.toString)), 10).toDF()
// partition size [1420, 1420]
checkNoCoalescePartitions(df.repartition($"key"), REPARTITION_BY_COL)
// partition size [1140, 1119]
checkNoCoalescePartitions(df.sort($"key"), ENSURE_REQUIREMENTS)
}
}
test("SPARK-35239: Coalesce shuffle partition should handle empty input RDD") {
withTable("t") {
withSQLConf(SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
SQLConf.SHUFFLE_PARTITIONS.key -> "2") {
spark.sql("CREATE TABLE t (c1 int) USING PARQUET")
val (_, adaptive) = runAdaptiveAndVerifyResult("SELECT c1, count(*) FROM t GROUP BY c1")
assert(
collect(adaptive) {
case c @ AQEShuffleReadExec(_, partitionSpecs) if partitionSpecs.length == 1 =>
assert(c.hasCoalescedPartition)
c
}.length == 1
)
}
}
}
test("SPARK-35264: Support AQE side broadcastJoin threshold") {
withTempView("t1", "t2") {
def checkJoinStrategy(shouldBroadcast: Boolean): Unit = {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val (origin, adaptive) = runAdaptiveAndVerifyResult(
"SELECT t1.c1, t2.c1 FROM t1 JOIN t2 ON t1.c1 = t2.c1")
assert(findTopLevelSortMergeJoin(origin).size == 1)
if (shouldBroadcast) {
assert(findTopLevelBroadcastHashJoin(adaptive).size == 1)
} else {
assert(findTopLevelSortMergeJoin(adaptive).size == 1)
}
}
}
// t1: 1600 bytes
// t2: 160 bytes
spark.sparkContext.parallelize(
(1 to 100).map(i => TestData(i, i.toString)), 10)
.toDF("c1", "c2").createOrReplaceTempView("t1")
spark.sparkContext.parallelize(
(1 to 10).map(i => TestData(i, i.toString)), 5)
.toDF("c1", "c2").createOrReplaceTempView("t2")
checkJoinStrategy(false)
withSQLConf(SQLConf.ADAPTIVE_AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
checkJoinStrategy(false)
}
withSQLConf(SQLConf.ADAPTIVE_AUTO_BROADCASTJOIN_THRESHOLD.key -> "160") {
checkJoinStrategy(true)
}
}
}
test("SPARK-35264: Support AQE side shuffled hash join formula") {
withTempView("t1", "t2") {
def checkJoinStrategy(shouldShuffleHashJoin: Boolean): Unit = {
Seq("100", "100000").foreach { size =>
withSQLConf(SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> size) {
val (origin1, adaptive1) = runAdaptiveAndVerifyResult(
"SELECT t1.c1, t2.c1 FROM t1 JOIN t2 ON t1.c1 = t2.c1")
assert(findTopLevelSortMergeJoin(origin1).size === 1)
if (shouldShuffleHashJoin && size.toInt < 100000) {
val shj = findTopLevelShuffledHashJoin(adaptive1)
assert(shj.size === 1)
assert(shj.head.buildSide == BuildRight)
} else {
assert(findTopLevelSortMergeJoin(adaptive1).size === 1)
}
}
}
// respect user specified join hint
val (origin2, adaptive2) = runAdaptiveAndVerifyResult(
"SELECT /*+ MERGE(t1) */ t1.c1, t2.c1 FROM t1 JOIN t2 ON t1.c1 = t2.c1")
assert(findTopLevelSortMergeJoin(origin2).size === 1)
assert(findTopLevelSortMergeJoin(adaptive2).size === 1)
}
spark.sparkContext.parallelize(
(1 to 100).map(i => TestData(i, i.toString)), 10)
.toDF("c1", "c2").createOrReplaceTempView("t1")
spark.sparkContext.parallelize(
(1 to 10).map(i => TestData(i, i.toString)), 5)
.toDF("c1", "c2").createOrReplaceTempView("t2")
// t1 partition size: [926, 729, 731]
// t2 partition size: [318, 120, 0]
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "3",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.PREFER_SORTMERGEJOIN.key -> "true") {
// check default value
checkJoinStrategy(false)
withSQLConf(SQLConf.ADAPTIVE_MAX_SHUFFLE_HASH_JOIN_LOCAL_MAP_THRESHOLD.key -> "400") {
checkJoinStrategy(true)
}
withSQLConf(SQLConf.ADAPTIVE_MAX_SHUFFLE_HASH_JOIN_LOCAL_MAP_THRESHOLD.key -> "300") {
checkJoinStrategy(false)
}
withSQLConf(SQLConf.ADAPTIVE_MAX_SHUFFLE_HASH_JOIN_LOCAL_MAP_THRESHOLD.key -> "1000") {
checkJoinStrategy(true)
}
}
}
}
test("SPARK-35650: Coalesce number of partitions by AEQ") {
withSQLConf(SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1") {
Seq("REPARTITION", "REBALANCE(key)")
.foreach {repartition =>
val query = s"SELECT /*+ $repartition */ * FROM testData"
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(query)
collect(adaptivePlan) {
case r: AQEShuffleReadExec => r
} match {
case Seq(aqeShuffleRead) =>
assert(aqeShuffleRead.partitionSpecs.size === 1)
assert(!aqeShuffleRead.isLocalRead)
case _ =>
fail("There should be a AQEShuffleReadExec")
}
}
}
}
test("SPARK-35650: Use local shuffle read if can not coalesce number of partitions") {
withSQLConf(SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "false") {
val query = "SELECT /*+ REPARTITION */ * FROM testData"
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(query)
collect(adaptivePlan) {
case r: AQEShuffleReadExec => r
} match {
case Seq(aqeShuffleRead) =>
assert(aqeShuffleRead.partitionSpecs.size === 4)
assert(aqeShuffleRead.isLocalRead)
case _ =>
fail("There should be a AQEShuffleReadExec")
}
}
}
test("SPARK-35725: Support optimize skewed partitions in RebalancePartitions") {
withTempView("v") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.ADAPTIVE_OPTIMIZE_SKEWS_IN_REBALANCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.SHUFFLE_PARTITIONS.key -> "5",
SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1") {
spark.sparkContext.parallelize(
(1 to 10).map(i => TestData(if (i > 4) 5 else i, i.toString)), 3)
.toDF("c1", "c2").createOrReplaceTempView("v")
def checkPartitionNumber(
query: String, skewedPartitionNumber: Int, totalNumber: Int): Unit = {
val (_, adaptive) = runAdaptiveAndVerifyResult(query)
val read = collect(adaptive) {
case read: AQEShuffleReadExec => read
}
assert(read.size == 1)
assert(read.head.partitionSpecs.count(_.isInstanceOf[PartialReducerPartitionSpec]) ==
skewedPartitionNumber)
assert(read.head.partitionSpecs.size == totalNumber)
}
withSQLConf(SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "150") {
// partition size [0,258,72,72,72]
checkPartitionNumber("SELECT /*+ REBALANCE(c1) */ * FROM v", 2, 4)
// partition size [72,216,216,144,72]
checkPartitionNumber("SELECT /*+ REBALANCE */ * FROM v", 4, 7)
}
// no skewed partition should be optimized
withSQLConf(SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "10000") {
checkPartitionNumber("SELECT /*+ REBALANCE(c1) */ * FROM v", 0, 1)
}
}
}
}
test("SPARK-35888: join with a 0-partition table") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.ADAPTIVE_OPTIMIZER_EXCLUDED_RULES.key -> AQEPropagateEmptyRelation.ruleName) {
withTempView("t2") {
// create a temp view with 0 partition
spark.createDataFrame(sparkContext.emptyRDD[Row], new StructType().add("b", IntegerType))
.createOrReplaceTempView("t2")
val (_, adaptive) =
runAdaptiveAndVerifyResult("SELECT * FROM testData2 t1 left semi join t2 ON t1.a=t2.b")
val aqeReads = collect(adaptive) {
case c: AQEShuffleReadExec => c
}
assert(aqeReads.length == 2)
aqeReads.foreach { c =>
val stats = c.child.asInstanceOf[QueryStageExec].getRuntimeStatistics
assert(stats.sizeInBytes >= 0)
assert(stats.rowCount.get >= 0)
}
}
}
}
test("SPARK-35968: AQE coalescing should not produce too small partitions by default") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val (_, adaptive) =
runAdaptiveAndVerifyResult("SELECT sum(id) FROM RANGE(10) GROUP BY id % 3")
val coalesceRead = collect(adaptive) {
case r: AQEShuffleReadExec if r.hasCoalescedPartition => r
}
assert(coalesceRead.length == 1)
// RANGE(10) is a very small dataset and AQE coalescing should produce one partition.
assert(coalesceRead.head.partitionSpecs.length == 1)
}
}
test("SPARK-35794: Allow custom plugin for cost evaluator") {
CostEvaluator.instantiate(
classOf[SimpleShuffleSortCostEvaluator].getCanonicalName, spark.sparkContext.getConf)
intercept[IllegalArgumentException] {
CostEvaluator.instantiate(
classOf[InvalidCostEvaluator].getCanonicalName, spark.sparkContext.getConf)
}
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val query = "SELECT * FROM testData join testData2 ON key = a where value = '1'"
withSQLConf(SQLConf.ADAPTIVE_CUSTOM_COST_EVALUATOR_CLASS.key ->
"org.apache.spark.sql.execution.adaptive.SimpleShuffleSortCostEvaluator") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
}
withSQLConf(SQLConf.ADAPTIVE_CUSTOM_COST_EVALUATOR_CLASS.key ->
"org.apache.spark.sql.execution.adaptive.InvalidCostEvaluator") {
intercept[IllegalArgumentException] {
runAdaptiveAndVerifyResult(query)
}
}
}
}
test("SPARK-36020: Check logical link in remove redundant projects") {
withTempView("t") {
spark.range(10).selectExpr("id % 10 as key", "cast(id * 2 as int) as a",
"cast(id * 3 as int) as b", "array(id, id + 1, id + 3) as c").createOrReplaceTempView("t")
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.ADAPTIVE_AUTO_BROADCASTJOIN_THRESHOLD.key -> "800") {
val query =
"""
|WITH tt AS (
| SELECT key, a, b, explode(c) AS c FROM t
|)
|SELECT t1.key, t1.c, t2.key, t2.c
|FROM (SELECT a, b, c, key FROM tt WHERE a > 1) t1
|JOIN (SELECT a, b, c, key FROM tt) t2
| ON t1.key = t2.key
|""".stripMargin
val (origin, adaptive) = runAdaptiveAndVerifyResult(query)
assert(findTopLevelSortMergeJoin(origin).size == 1)
assert(findTopLevelBroadcastHashJoin(adaptive).size == 1)
}
}
}
test("SPARK-35874: AQE Shuffle should wait for its subqueries to finish before materializing") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val query = "SELECT b FROM testData2 DISTRIBUTE BY (b, (SELECT max(key) FROM testData))"
runAdaptiveAndVerifyResult(query)
}
}
test("SPARK-36032: Use inputPlan instead of currentPhysicalPlan to initialize logical link") {
withTempView("v") {
spark.sparkContext.parallelize(
(1 to 10).map(i => TestData(i, i.toString)), 2)
.toDF("c1", "c2").createOrReplaceTempView("v")
Seq("-1", "10000").foreach { aqeBhj =>
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.ADAPTIVE_AUTO_BROADCASTJOIN_THRESHOLD.key -> aqeBhj,
SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
val (origin, adaptive) = runAdaptiveAndVerifyResult(
"""
|SELECT * FROM v t1 JOIN (
| SELECT c1 + 1 as c3 FROM v
|)t2 ON t1.c1 = t2.c3
|SORT BY c1
""".stripMargin)
if (aqeBhj.toInt < 0) {
// 1 sort since spark plan has no shuffle for SMJ
assert(findTopLevelSort(origin).size == 1)
// 2 sorts in SMJ
assert(findTopLevelSort(adaptive).size == 2)
} else {
assert(findTopLevelSort(origin).size == 1)
// 1 sort at top node and BHJ has no sort
assert(findTopLevelSort(adaptive).size == 1)
}
}
}
}
}
test("SPARK-36424: Support eliminate limits in AQE Optimizer") {
withTempView("v") {
spark.sparkContext.parallelize(
(1 to 10).map(i => TestData(i, if (i > 2) "2" else i.toString)), 2)
.toDF("c1", "c2").createOrReplaceTempView("v")
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.SHUFFLE_PARTITIONS.key -> "3") {
val (origin1, adaptive1) = runAdaptiveAndVerifyResult(
"""
|SELECT c2, sum(c1) FROM v GROUP BY c2 LIMIT 5
""".stripMargin)
assert(findTopLevelLimit(origin1).size == 1)
assert(findTopLevelLimit(adaptive1).isEmpty)
// eliminate limit through filter
val (origin2, adaptive2) = runAdaptiveAndVerifyResult(
"""
|SELECT c2, sum(c1) FROM v GROUP BY c2 HAVING sum(c1) > 1 LIMIT 5
""".stripMargin)
assert(findTopLevelLimit(origin2).size == 1)
assert(findTopLevelLimit(adaptive2).isEmpty)
}
}
}
}
/**
* Invalid implementation class for [[CostEvaluator]].
*/
private class InvalidCostEvaluator() {}
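// It intentionally does not extend CostEvaluator, so CostEvaluator.instantiate is expected to
// reject it with an IllegalArgumentException (exercised in the SPARK-35794 test above).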
/**
* A simple [[CostEvaluator]] to count number of [[ShuffleExchangeLike]] and [[SortExec]].
*/
private case class SimpleShuffleSortCostEvaluator() extends CostEvaluator {
override def evaluateCost(plan: SparkPlan): Cost = {
val cost = plan.collect {
case s: ShuffleExchangeLike => s
case s: SortExec => s
}.size
SimpleCost(cost)
}
}
|
hvanhovell/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala
|
Scala
|
apache-2.0
| 84,288 |
package org.jetbrains.plugins.scala.failed.typeInference
import org.jetbrains.plugins.scala.PerfCycleTests
import org.jetbrains.plugins.scala.lang.typeInference.TypeInferenceTestBase
import org.junit.experimental.categories.Category
/**
* @author Nikolay.Tropin
*/
@Category(Array(classOf[PerfCycleTests]))
class ContravarianceTest extends TypeInferenceTestBase {
def testScl4123() = {
val text =
s"""object Test {
| class A
| class C
| class B extends C
|
| class Z[-T] //in case of covariant or invariant, all is ok
|
| def goo[A, BB >: A](x: A): Z[BB] = new Z[BB]
| val zzzzzz = goo(new B) //here type is Z[Any], according to the compiler it's Z[B]
| ${START}zzzzzz$END
|}
|
|//Test.Z[B]""".stripMargin
doTest(text)
}
def testSCL10110(): Unit ={
doTest(
s"""
|object Error {
|
| class Foo[T](x: T)
|
| class LolArray[T](val arr: Array[Foo[T]])
|
| class LolImmutableHashMap[T](val arr: immutable.HashMap[Int, Foo[T]])
|
| //Full example with various collections in corresponded ticket
| def main(args: Array[String]) {
| val lolArray = new LolArray(${START}Array(new Foo(1))$END) // false error ( Array invariant )
| val lolImmutableHashMap = new LolImmutableHashMap(immutable.HashMap(1 -> new Foo(1))) // works ( mutable.HashMap covariant )
|
| // val lolArrayExplicit1 = new LolArray[Int](Array(new Foo(1))) // works
| // val lolArrayExplicit2 = new LolArray(Array[Foo[Int]](new Foo(1))) // works
| }
|}
|
|//Array[Error.Foo[NotInferedT]]
""".stripMargin)
}
def testSCL10238a(): Unit ={
doTest(
s"""
         |class Foo[A](superfoos: Seq[Foo[_ >: A]])
         |
|class Bar[A](superbars: Seq[Bar[_ >: A]]) extends Foo[A](${START}superbars$END)
|//Seq[Foo[_ >: A]]
""".stripMargin)
}
def testSCL10238b(): Unit ={
doTest(
s"""
         |class Foo[A](foos: Seq[Foo[A]])
         |
|class Bar[A](bars: Seq[Bar[A]]) extends Foo[A](${START}bars$END)
|//Seq[Foo[A]]
""".stripMargin)
}
def testSCL10238c(): Unit ={
doTest(
s"""
         |class Foo[A](underfoos: Seq[Foo[_ <: A]])
         |
|class Bar[A](underbars: Seq[Bar[_ <: A]]) extends Foo[A](${START}underbars$END)
|//Seq[Foo[_ <: A]]
""".stripMargin)
}
}
|
ilinum/intellij-scala
|
test/org/jetbrains/plugins/scala/failed/typeInference/ContravarianceTest.scala
|
Scala
|
apache-2.0
| 2,571 |
/**
 * @(#) AutoCar.scala  March 4, 2015
 * TURBO CRAWLER: a high-performance web crawler
*/
package turbo.crawler.sample
import java.util.Date
import javax.persistence.Column
import javax.persistence.Entity
import javax.persistence.GeneratedValue
import javax.persistence.Id
import javax.persistence.ManyToOne
import javax.persistence.Table
import javax.persistence.GenerationType
import turbo.crawler.Fetchable
/**
* @author Administrator
*
*/
@Entity
@Table(name = "AUTO_CAR")
class AutoCar extends Fetchable {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
private var id = 0
def setId(id: Int) = this.id = id
def getId = id
@Column(name = "NAME", nullable = false)
private var name = ""
def setName(name: String) = this.name = name
def getName = name
@Column(name = "FETCHED_AT", nullable = false)
private var fetchedAt: Date = null
def setFetchedAt(date: Date) = this.fetchedAt = date
def getFetchedAt = fetchedAt
@Column(name = "GOV_PRICE")
private var govPrice: Double = 0
def setGovPrice(price: Double) = this.govPrice = price
def getGovPrice = this.govPrice
@Column(name = "URL")
private var url = ""
def setUrl(url: String) = this.url = url
def getUrl = this.url
override def getDirectUrl = url
}
|
fengshao0907/Mycat-spider
|
src/test/scala/turbo/crawler/sample/AutoCar.scala
|
Scala
|
apache-2.0
| 1,311 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.parquet
import java.nio.file.Files
import org.apache.commons.io.FileUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileContext, Path}
import org.geotools.data.Query
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.fs.storage.api.StorageMetadata.StorageFile
import org.locationtech.geomesa.fs.storage.api.{FileSystemContext, Metadata, NamedOptions}
import org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadataFactory
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.io.WithClose
import org.opengis.feature.simple.SimpleFeature
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import org.specs2.specification.AllExpectations
@RunWith(classOf[JUnitRunner])
class CompactionTest extends Specification with AllExpectations {
sequential
val sft = SimpleFeatureTypes.createType("test", "name:String,age:Int,dtg:Date,*geom:Point:srid=4326")
val tempDir = Files.createTempDirectory("geomesa")
val fc = FileContext.getFileContext(tempDir.toUri)
"ParquetFileSystemStorage" should {
"compact partitions" >> {
val conf = new Configuration()
conf.set("parquet.compression", "gzip")
val context = FileSystemContext(fc, conf, new Path(tempDir.toUri))
val metadata =
new FileBasedMetadataFactory()
.create(context, Map.empty, Metadata(sft, "parquet", NamedOptions("daily"), leafStorage = true))
val fsStorage = new ParquetFileSystemStorageFactory().apply(context, metadata)
val dtg = "2017-01-01"
val sf1 = ScalaSimpleFeature.create(sft, "1", "first", 100, dtg, "POINT (10 10)")
val partition = fsStorage.metadata.scheme.getPartitionName(sf1)
partition mustEqual "2017/01/01"
def write(sf: SimpleFeature): Unit = {
val writer = fsStorage.getWriter(partition)
writer.write(sf)
writer.close()
}
// First simple feature goes in its own file
write(sf1)
fsStorage.metadata.getPartition(partition).map(_.files) must beSome(haveSize[Seq[StorageFile]](1))
SelfClosingIterator(fsStorage.getReader(Query.ALL, Some(partition))).toList must haveSize(1)
// Second simple feature should be in a separate file
val sf2 = ScalaSimpleFeature.create(sft, "2", "second", 200, dtg, "POINT (10 10)")
write(sf2)
fsStorage.metadata.getPartition(partition).map(_.files) must beSome(haveSize[Seq[StorageFile]](2))
SelfClosingIterator(fsStorage.getReader(Query.ALL, Some(partition))).toList must haveSize(2)
// Third feature in a third file
val sf3 = ScalaSimpleFeature.create(sft, "3", "third", 300, dtg, "POINT (10 10)")
write(sf3)
fsStorage.metadata.getPartition(partition).map(_.files) must beSome(haveSize[Seq[StorageFile]](3))
SelfClosingIterator(fsStorage.getReader(Query.ALL, Some(partition))).toList must haveSize(3)
// Compact to create a single file
fsStorage.compact(Some(partition))
fsStorage.metadata.getPartition(partition).map(_.files) must beSome(haveSize[Seq[StorageFile]](1))
SelfClosingIterator(fsStorage.getReader(Query.ALL, Some(partition))).toList must haveSize(3)
// delete a feature and compact again
WithClose(fsStorage.getWriter(ECQL.toFilter("IN ('2')"))) { writer =>
writer.hasNext must beTrue
writer.next
writer.remove()
}
fsStorage.metadata.getPartition(partition).map(_.files) must beSome(haveSize[Seq[StorageFile]](2))
fsStorage.compact(Some(partition))
fsStorage.metadata.getPartition(partition).map(_.files) must beSome(haveSize[Seq[StorageFile]](1))
SelfClosingIterator(fsStorage.getReader(Query.ALL, Some(partition))).toList must haveSize(2)
}
}
step {
FileUtils.deleteDirectory(tempDir.toFile)
}
}
|
elahrvivaz/geomesa
|
geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-parquet/src/test/scala/org/locationtech/geomesa/parquet/CompactionTest.scala
|
Scala
|
apache-2.0
| 4,537 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.admin
import java.io.PrintStream
import java.util.Properties
import kafka.utils.{CommandDefaultOptions, CommandLineUtils, Json}
import org.apache.kafka.clients.admin.{AdminClientConfig, DescribeLogDirsResult, AdminClient => JAdminClient}
import org.apache.kafka.common.requests.DescribeLogDirsResponse.LogDirInfo
import org.apache.kafka.common.utils.Utils
import scala.collection.JavaConverters._
import scala.collection.Map
/**
* A command for querying log directory usage on the specified brokers
*/
object LogDirsCommand {
def main(args: Array[String]): Unit = {
describe(args, System.out)
}
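  // Typical invocation (illustrative; the wrapper script name below is the one commonly shipped
  // with Kafka distributions and may differ in your setup):
  //   bin/kafka-log-dirs.sh --describe --bootstrap-server localhost:9092 --topic-list topic1,topic2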
def describe(args: Array[String], out: PrintStream): Unit = {
val opts = new LogDirsCommandOptions(args)
val adminClient = createAdminClient(opts)
val topicList = opts.options.valueOf(opts.topicListOpt).split(",").filter(!_.isEmpty)
val brokerList = Option(opts.options.valueOf(opts.brokerListOpt)) match {
case Some(brokerListStr) => brokerListStr.split(',').filter(!_.isEmpty).map(_.toInt)
case None => adminClient.describeCluster().nodes().get().asScala.map(_.id()).toArray
}
out.println("Querying brokers for log directories information")
val describeLogDirsResult: DescribeLogDirsResult = adminClient.describeLogDirs(brokerList.map(Integer.valueOf).toSeq.asJava)
val logDirInfosByBroker = describeLogDirsResult.all.get().asScala.mapValues(_.asScala)
out.println(s"Received log directory information from brokers ${brokerList.mkString(",")}")
out.println(formatAsJson(logDirInfosByBroker, topicList.toSet))
adminClient.close()
}
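  // Rough sketch of the JSON produced below (key order and values are illustrative only):
  //   {"version":1,"brokers":[{"broker":0,"logDirs":[{"logDir":"/tmp/kafka-logs","error":null,
  //    "partitions":[{"partition":"my-topic-0","size":1024,"offsetLag":0,"isFuture":false}]}]}]}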
private def formatAsJson(logDirInfosByBroker: Map[Integer, Map[String, LogDirInfo]], topicSet: Set[String]): String = {
Json.encodeAsString(Map(
"version" -> 1,
"brokers" -> logDirInfosByBroker.map { case (broker, logDirInfos) =>
Map(
"broker" -> broker,
"logDirs" -> logDirInfos.map { case (logDir, logDirInfo) =>
Map(
"logDir" -> logDir,
"error" -> logDirInfo.error.exceptionName(),
"partitions" -> logDirInfo.replicaInfos.asScala.filter { case (topicPartition, _) =>
topicSet.isEmpty || topicSet.contains(topicPartition.topic)
}.map { case (topicPartition, replicaInfo) =>
Map(
"partition" -> topicPartition.toString,
"size" -> replicaInfo.size,
"offsetLag" -> replicaInfo.offsetLag,
"isFuture" -> replicaInfo.isFuture
).asJava
}.asJava
).asJava
}.asJava
).asJava
}.asJava
).asJava)
}
private def createAdminClient(opts: LogDirsCommandOptions): JAdminClient = {
val props = if (opts.options.has(opts.commandConfigOpt))
Utils.loadProps(opts.options.valueOf(opts.commandConfigOpt))
else
new Properties()
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, opts.options.valueOf(opts.bootstrapServerOpt))
props.putIfAbsent(AdminClientConfig.CLIENT_ID_CONFIG, "log-dirs-tool")
JAdminClient.create(props)
}
class LogDirsCommandOptions(args: Array[String]) extends CommandDefaultOptions(args){
val bootstrapServerOpt = parser.accepts("bootstrap-server", "REQUIRED: the server(s) to use for bootstrapping")
.withRequiredArg
.describedAs("The server(s) to use for bootstrapping")
.ofType(classOf[String])
val commandConfigOpt = parser.accepts("command-config", "Property file containing configs to be passed to Admin Client.")
.withRequiredArg
.describedAs("Admin client property file")
.ofType(classOf[String])
val describeOpt = parser.accepts("describe", "Describe the specified log directories on the specified brokers.")
    val topicListOpt = parser.accepts("topic-list", "The list of topics to be queried in the form \"topic1,topic2,topic3\". " +
"All topics will be queried if no topic list is specified")
.withRequiredArg
.describedAs("Topic list")
.defaultsTo("")
.ofType(classOf[String])
    val brokerListOpt = parser.accepts("broker-list", "The list of brokers to be queried in the form \"0,1,2\". " +
"All brokers in the cluster will be queried if no broker list is specified")
.withRequiredArg
.describedAs("Broker list")
.ofType(classOf[String])
options = parser.parse(args : _*)
CommandLineUtils.printHelpAndExitIfNeeded(this, "This tool helps to query log directory usage on the specified brokers.")
CommandLineUtils.checkRequiredArgs(parser, options, bootstrapServerOpt, describeOpt)
}
}
|
gf53520/kafka
|
core/src/main/scala/kafka/admin/LogDirsCommand.scala
|
Scala
|
apache-2.0
| 5,982 |
/*
* Copyright (C) 2016 University of Basel, Graphics and Vision Research Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package scalismo.ui.control.interactor
import java.awt.Point
import java.awt.event.{InputEvent, KeyEvent, MouseEvent, MouseWheelEvent}
import scalismo.ui.control.interactor.Interactor.Verdict.{Block, Pass}
import scalismo.ui.control.interactor.Interactor.{PimpedEvent, Verdict}
import scalismo.ui.model.capabilities.{Grouped, InverseTransformation}
import scalismo.ui.model.properties.Uncertainty
import scalismo.ui.model.{ImageNode, LandmarkNode, SceneNode, StatusMessage}
import scalismo.ui.rendering.RendererState.PointAndNode
import scalismo.ui.view.ViewportPanel2D
import scala.language.implicitConversions
/**
* This object provides recipes for Interactor implementations.
*
* Think of it as a "utils" class that provides commonly needed
* functionality.
*
* An alternative implementation would have been to use traits
* which override specific methods, but that would quickly get
* out of hand when combinations of functionality are needed.
*
* The objects defined here are all named after their purpose,
 * and the methods they provide generally mimic the methods
* found in the Interactor trait.
*/
object Recipe {
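  // Illustrative (hypothetical) use from an Interactor implementation, which typically just
  // forwards its callbacks to the recipes it wants; these overrides are a sketch, not code
  // taken from this project:
  //   override def mouseEntered(e: MouseEvent): Verdict = Recipe.RequestFocusOnEnter.mouseEntered(e)
  //   override def mouseWheelMoved(e: MouseWheelEvent): Verdict = Recipe.Scroll2D.mouseWheelMoved(e)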
implicit def pimpEvent[E <: InputEvent](event: E): PimpedEvent[E] = new PimpedEvent(event)
/**
* Request the window focus when the mouse enters the canvas.
*
* This ensures that subsequent key events are properly passed to the canvas.
*/
object RequestFocusOnEnter {
def mouseEntered(e: MouseEvent): Verdict = {
if (!e.canvas.hasFocus) {
e.canvas.requestFocusInWindow()
}
Pass
}
}
/**
* In 2D viewports, highlight the outline of the object that would be picked
* when a pick request comes in.
*/
object HighlightOutlineOfPickableObject {
var highlighted: Option[SceneNode] = None
def mouseMoved(e: MouseEvent, approve: SceneNode => Boolean): Verdict = {
e.viewport match {
case _2d: ViewportPanel2D =>
val state = _2d.rendererState
val newHighlighted = state.pointAndNodeAtPosition(e.getPoint).nodeOption match {
case Some(node) if state.isHighlightable(node) && approve(node) => Some(node)
case _ => None
}
if (newHighlighted != highlighted) {
highlighted.foreach { h =>
state.setHighlighted(h, onOff = false)
}
newHighlighted.foreach { h =>
state.setHighlighted(h, onOff = true)
}
highlighted = newHighlighted
}
case _ => // nothing
}
Pass
}
}
object AddLandmarkOnClick {
def mouseClicked(e: MouseEvent, uncertainty: Uncertainty = Uncertainty.DefaultUncertainty): Verdict = {
val pointAndNode = e.viewport.rendererState.pointAndNodeAtPosition(e.getPoint)
pointAndNode.nodeOption.foreach {
case _: LandmarkNode => None
case ok: Grouped with InverseTransformation =>
val name = ok.group.landmarks.nameGenerator.nextName()
val point = ok.inverseTransform(pointAndNode.pointOption.get)
ok.group.landmarks.add(point, name, uncertainty)
case ok: ImageNode =>
val name = ok.group.landmarks.nameGenerator.nextName()
// images don't support transformations
val point = pointAndNode.pointOption.get
ok.group.landmarks.add(point, name, uncertainty)
case _ =>
}
Pass
}
}
/**
* Enables movement in 2D viewports by using the mouse wheel.
*
* Essentially, this just maps scroll events to the +/- buttons
* which are present in a 2D viewport.
*/
object Scroll2D {
def mouseWheelMoved(e: MouseWheelEvent): Verdict = {
e.viewport match {
case _2d: ViewportPanel2D =>
val button = if (e.getWheelRotation > 0) _2d.positionMinusButton else _2d.positionPlusButton
button.action.apply()
case _ =>
}
Pass
}
}
/**
* Blocks rotation in a 2D viewport.
*
* This ensures that a 2D viewport camera remains
* at the correct angle and focuses the correct point.
* Zooming and translation are not affected (i.e., allowed).
*/
object Block2DRotation {
def mousePressed(e: MouseEvent): Verdict = {
e.viewport match {
case _: ViewportPanel2D if e.getButton == MouseEvent.BUTTON1 => Block
case _ => Pass
}
}
def mouseReleased(e: MouseEvent): Verdict = mousePressed(e)
}
object ShiftKeySetsSlicePosition {
private var active: Boolean = false
private var point = new Point
def keyPressedOrReleased(e: KeyEvent): Verdict = {
// 1 is the shift key
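      // (bit value 1 is InputEvent.SHIFT_MASK, so `active` tracks whether Shift is currently held)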
active = (e.getModifiers & 1) == 1
updateSlicePosition(e)
}
def mouseMoved(e: MouseEvent): Verdict = {
point = e.getPoint
updateSlicePosition(e)
}
def mouseExited(e: MouseEvent): Verdict = {
active = false
Pass
}
private def updateSlicePosition(e: InputEvent): Verdict = {
if (active) {
e.viewport.rendererState.pointAndNodeAtPosition(point).pointOption match {
case Some(position) => e.viewport.frame.sceneControl.slicingPosition.point = position
case _ =>
}
}
Pass
}
}
object ControlKeyShowsImageInformation {
private var active: Boolean = false
private var point = new Point
def keyPressedOrReleased(e: KeyEvent): Verdict = {
// 2 is the control key
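      // (bit value 2 is InputEvent.CTRL_MASK, so `active` tracks whether Ctrl is currently held)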
active = (e.getModifiers & 2) == 2
showInformation(e)
}
def mouseMoved(e: MouseEvent): Verdict = {
point = e.getPoint
showInformation(e)
}
def mouseExited(e: MouseEvent): Verdict = {
active = false
Pass
}
private def showInformation(e: InputEvent): Verdict = {
if (active) {
val pointAndNode = e.viewport.rendererState.pointAndNodeAtPosition(point)
pointAndNode match {
case PointAndNode(Some(p3d), Some(img: ImageNode)) =>
val ptId = img.source.domain.pointSet.findClosestPoint(p3d)
val pt = ptId.point
val intensity = img.source(ptId.id)
val message =
StatusMessage(f"${img.name}(${pt.x}%2.2f,${pt.y}%2.2f,${pt.z}%2.2f) = $intensity%2.2f", log = false)
e.viewport.frame.status.set(message)
case _ =>
}
}
Pass
}
}
}
|
unibas-gravis/scalismo-ui
|
src/main/scala/scalismo/ui/control/interactor/Recipe.scala
|
Scala
|
gpl-3.0
| 7,231 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/lgpl-3.0.en.html
package org.ensime.sexp
class SexpPackageSpec extends SexpSpec {
val foostring = SexpString("foo")
val barstring = SexpString("bar")
val foosym = SexpSymbol("foo")
val barsym = SexpSymbol("bar")
val fookey = SexpSymbol(":foo")
val barkey = SexpSymbol(":bar")
"SexpList" should "create from varargs" in {
SexpList(foosym, barsym) should ===(SexpList(List(foosym, barsym)))
}
it should "unroll as basic" in {
SexpList(Nil) should ===(SexpNil)
SexpList(foosym) should ===(SexpCons(foosym, SexpNil))
    SexpList(foosym, barsym) should ===(SexpCons(foosym, SexpCons(barsym, SexpNil)))
}
it should "match lists" in {
SexpCons(foosym, SexpNil) match {
case SexpList(els) if els == List(foosym) =>
case _ => fail()
}
SexpCons(foosym, SexpCons(barsym, SexpNil)) match {
case SexpList(els) if els == List(foosym, barsym) =>
case _ => fail()
}
SexpNil match {
case SexpList(_) => fail()
case _ =>
}
}
it should "support a list with 1000000 elements" in {
val data = List.fill(1000000)(SexpChar('a'))
SexpList(data) shouldBe a[Sexp]
}
"SexpData" should "create from varargs" in {
SexpData(
fookey -> barsym,
barkey -> foosym
) should ===(
SexpList(
fookey,
barsym,
barkey,
foosym
)
)
}
it should "unroll as basic" in {
SexpData(
fookey -> barsym,
barkey -> foosym
) should ===(
SexpCons(
fookey,
SexpCons(
barsym,
SexpCons(
barkey,
SexpCons(
foosym,
SexpNil
)
)
)
)
)
}
it should "match SexpData" in {
SexpCons(
fookey,
SexpCons(
barsym,
SexpCons(
barkey,
SexpCons(
foosym,
SexpNil
)
)
)
) match {
case SexpData(kvs) if kvs.size == 2 =>
case _ => fail()
}
SexpNil match {
case SexpData(_) => fail()
case _ =>
}
}
"SexpCons" should "unroll as fully basic" in {
val a = SexpList(foosym)
val b = SexpList(barsym)
SexpCons(a, b) should ===(
SexpCons(
SexpCons(foosym, SexpNil),
SexpCons(barsym, SexpNil)
)
)
}
}
|
jozi-k/ensime-server
|
s-express/src/test/scala/org/ensime/sexp/SexpPackageSpec.scala
|
Scala
|
gpl-3.0
| 2,603 |
package co.theasi.plotly
import org.scalatest.{Matchers, FlatSpec}
class BarOptionsSpec extends FlatSpec with Matchers {
"BarOptions" should "support setting marker options via updater" in {
val testColor = Color.rgba(1, 2, 3, 0.2)
val expectedOptions = MarkerOptions().color(testColor)
val barOptions = BarOptions().updatedMarker(_.color(testColor))
barOptions.marker shouldEqual expectedOptions
}
}
|
ASIDataScience/scala-plotly-client
|
src/test/scala/co/theasi/plotly/BarOptionsSpec.scala
|
Scala
|
mit
| 423 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.physical.stream
import org.apache.flink.table.api.TableException
import org.apache.flink.table.planner.plan.`trait`.FlinkRelDistribution
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
import org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalSink
import org.apache.flink.table.planner.plan.nodes.physical.stream.StreamExecSink
import org.apache.flink.table.planner.sinks.DataStreamTableSink
import org.apache.flink.table.sinks.PartitionableTableSink
import org.apache.calcite.plan.RelOptRule
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
import scala.collection.JavaConversions._
class StreamExecSinkRule extends ConverterRule(
classOf[FlinkLogicalSink],
FlinkConventions.LOGICAL,
FlinkConventions.STREAM_PHYSICAL,
"StreamExecSinkRule") {
def convert(rel: RelNode): RelNode = {
val sinkNode = rel.asInstanceOf[FlinkLogicalSink]
val newTrait = rel.getTraitSet.replace(FlinkConventions.STREAM_PHYSICAL)
var requiredTraitSet = sinkNode.getInput.getTraitSet.replace(FlinkConventions.STREAM_PHYSICAL)
sinkNode.sink match {
case partitionSink: PartitionableTableSink
if partitionSink.getPartitionFieldNames != null &&
partitionSink.getPartitionFieldNames.nonEmpty =>
val partitionFields = partitionSink.getPartitionFieldNames
val partitionIndices = partitionFields
.map(partitionSink.getTableSchema.getFieldNames.indexOf(_))
// validate
        partitionIndices.zipWithIndex.foreach { case (fieldIndex, i) =>
          if (fieldIndex < 0) {
            // report the name of the missing field rather than indexing with a negative value
            throw new TableException(s"Partitionable sink ${sinkNode.sinkName} field " +
              s"${partitionFields.get(i)} must be in the schema.")
          }
        }
if (partitionSink.configurePartitionGrouping(false)) {
throw new TableException("Partition grouping in stream mode is not supported yet!")
}
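        // Require a (non-strict) hash distribution on the partition columns so that rows for the
        // same partition land in the same sink subtask; DataStreamTableSink is exempt from this.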
if (!partitionSink.isInstanceOf[DataStreamTableSink[_]]) {
requiredTraitSet = requiredTraitSet.plus(
FlinkRelDistribution.hash(partitionIndices
.map(Integer.valueOf), requireStrict = false))
}
case _ =>
}
val newInput = RelOptRule.convert(sinkNode.getInput, requiredTraitSet)
new StreamExecSink(
rel.getCluster,
newTrait,
newInput,
sinkNode.sink,
sinkNode.sinkName)
}
}
object StreamExecSinkRule {
val INSTANCE: RelOptRule = new StreamExecSinkRule
}
|
fhueske/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/stream/StreamExecSinkRule.scala
|
Scala
|
apache-2.0
| 3,333 |
/*
* #%L
* MITH General Utilities
* %%
* Copyright (C) 2011 - 2012 Maryland Institute for Technology in the Humanities
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package edu.umd.mith.util.aws.s3
import scala.collection.JavaConversions._
import scala.io.Source
import org.jets3t.service._
import org.jets3t.service.impl.rest.httpclient.RestS3Service
// This is a very messy one-off script at this point, but we may need it
// again.
object HathiCleanup extends App {
def escape(id: String): (String, String) = {
val first = id.indexOf('.')
val collection = id.substring(0, first)
val remainder = id.substring(first + 1)
val dirName = remainder.replace('.', ',')
.replace(':', '+')
.replace('/', '=')
(collection, dirName)
}
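  // For example (illustrative ids, following the escaping rules above):
  //   escape("mdp.39015012345678") == ("mdp", "39015012345678")
  //   escape("uc1.b:285/3")        == ("uc1", "b+285=3")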
def unescape(dirName: String) =
dirName.replace(',', '.')
.replace('+', ':')
.replace('=', '/')
def path(id: (String, String)) =
"non_google_pd_pdus/" + id._1 + "/pairtree_root/" +
id._2.grouped(2).mkString("/") + "/" + id._2
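  // e.g. (illustrative) path(("uc1", "b+285=3")) == "non_google_pd_pdus/uc1/pairtree_root/b+/28/5=/3/b+285=3"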
def ids = Source.fromFile(args(2)).getLines
def printFileScript {
ids.foreach(l => println("rm -r " + path(escape(l))))
}
def deleteS3Files {
val cred = new security.AWSCredentials(args(0), args(1))
val service = new RestS3Service(cred)
val bucket = service.getBucket("hathi")
this.ids.map(
id => service.listObjects("hathi", path(escape(id)), null)
).foreach(_.foreach {
o => println("deleting " + o.getKey); service.deleteObject(bucket, o.getKey)
})
//this.ids.map(
// id => service.listObjects("hathi", path(escape(id)), null)
//).foreach(_.foreach(println(_)))
}
}
|
umd-mith/mith-jvm-lib
|
util/src/main/scala/edu/umd/mith/util/aws/s3/HathiCleanup.scala
|
Scala
|
apache-2.0
| 2,242 |
package reftree.svg
import reftree.svg.animation._
import reftree.svg.api.{SvgWrapper, OptimizedSvgApi, BaseSvgApi}
case class GraphAnimation[Svg](api: BaseSvgApi[Svg]) {
val alignment = GraphAlignment(api)
val cleanup = GraphCleanup(api)
val accentuation = GraphAccentuation(api)
val interpolation = GraphInterpolation(api)
def animate(keyFrames: Int, interpolationFrames: Int)(svgs: Seq[Svg]): Stream[Frame[Svg]] = {
if (svgs.length < 2) {
cleanup.cleanup(svgs.toStream).map(Frame(_))
} else {
scribe.trace("Aligning...")
val aligned = alignment.alignPairwise(svgs)
val resized = alignment.unifyDimensions(aligned)
scribe.trace("Cleaning up...")
val clean = cleanup.cleanup(resized)
scribe.trace("Accentuating differences between adjacent frames...")
val accentuated = accentuation.accentuatePairwise(clean)
scribe.trace("Interpolating...")
interpolation.interpolatePairwise(accentuated, keyFrames, interpolationFrames)
}
}
}
case class OptimizedGraphAnimation[Svg](api: BaseSvgApi[Svg]) {
val optimizedApi = OptimizedSvgApi(api)
val animation = GraphAnimation(optimizedApi)
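  // Wrap the input SVGs so the whole animation pipeline runs against the OptimizedSvgApi,
  // then unwrap the resulting frames back into plain SVG values at the end.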
def animate(keyFrames: Int, interpolationFrames: Int)(svgs: Seq[Svg]): Stream[Frame[Svg]] = {
scribe.trace("Optimizing...")
val wrapped = svgs.map(SvgWrapper.wrap(api))
val wrappedFrames = animation.animate(keyFrames, interpolationFrames)(wrapped)
scribe.trace("Optimizing...")
wrappedFrames.map(_.map(_.unwrap))
}
}
|
stanch/reftree
|
core/shared/src/main/scala/reftree/svg/GraphAnimation.scala
|
Scala
|
gpl-3.0
| 1,516 |
package org.apache.mesos.chronos.scheduler.jobs
import org.apache.mesos.chronos.scheduler.api.{DependentJobResource, Iso8601JobResource}
import org.apache.mesos.chronos.scheduler.graph.JobGraph
import org.apache.mesos.chronos.scheduler.state.PersistenceStore
import org.joda.time.format.ISODateTimeFormat
import org.joda.time.{DateTime, DateTimeZone, Hours, Minutes}
import org.specs2.mock.Mockito
import org.specs2.mutable._
import MockJobUtils._
class JobSchedulerIntegrationTest extends SpecificationWithJUnit with Mockito {
"JobScheduler" should {
"A job creates a failed task and then a successful task from a synchronous job" in {
val epsilon = Hours.hours(2).toPeriod
val job1 = new ScheduleBasedJob("R5/2012-01-01T00:00:00.000Z/P1D", "job1", "CMD", epsilon)
val jobGraph = new JobGraph
val persistenceStore = mock[PersistenceStore]
val mockTaskManager = mock[TaskManager]
val scheduler = mockScheduler(epsilon, mockTaskManager, jobGraph, persistenceStore)
val startTime = DateTime.parse("2012-01-01T01:00:00.000Z")
scheduler.leader.set(true)
scheduler.registerJob(job1, persist = true, startTime)
val newStreams = scheduler.iteration(startTime, scheduler.streams)
newStreams.head.schedule must_== "R4/2012-01-02T00:00:00.000Z/P1D"
scheduler.handleFailedTask(TaskUtils.getTaskStatus(job1, startTime, 0))
scheduler.handleFailedTask(TaskUtils.getTaskStatus(job1, startTime, 0))
there was one(persistenceStore)
.persistJob(new ScheduleBasedJob("R5/2012-01-01T00:00:00.000Z/P1D", "job1", "CMD", epsilon))
there was one(persistenceStore)
.persistJob(new ScheduleBasedJob("R4/2012-01-02T00:00:00.000Z/P1D", "job1", "CMD", epsilon))
}
"Executing a job updates the job counts and errors" in {
val epsilon = Minutes.minutes(20).toPeriod
val jobName = "FOO"
val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:00:00.000Z/PT1M",
name = jobName, command = "fooo", epsilon = epsilon, retries = 0)
val horizon = Minutes.minutes(5).toPeriod
val mockTaskManager = mock[TaskManager]
val graph = new JobGraph()
val mockPersistenceStore = mock[PersistenceStore]
val mockJobsObserver = mockFullObserver
val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore, mockJobsObserver)
scheduler.leader.set(true)
scheduler.registerJob(job1, persist = true, DateTime.parse("2011-01-01T00:05:01.000Z"))
scheduler.run(() => {
DateTime.parse("2012-01-01T00:05:01.000Z")
})
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0))
val job2 = graph.lookupVertex(jobName).get
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0))
val job3 = graph.lookupVertex(jobName).get
scheduler.handleFailedTask(TaskUtils.getTaskStatus(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0))
graph.lookupVertex(jobName).get.successCount must_== 2
graph.lookupVertex(jobName).get.errorCount must_== 1
there was one(mockJobsObserver).apply(JobFinished(job1, TaskUtils.getTaskStatus(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0), 0))
there was one(mockJobsObserver).apply(JobFinished(job2, TaskUtils.getTaskStatus(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0), 0))
there was one(mockJobsObserver).apply(JobFailed(Right(job3), TaskUtils.getTaskStatus(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0), 0))
}
"Marking a job successful updates the success and error counts and triggers children" in {
val epsilon = Minutes.minutes(20).toPeriod
val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:00:00.000Z/PT1D",
name = "job1", command = "fooo", epsilon = epsilon, retries = 0)
val dependentJob = new DependencyBasedJob(Set("job1", "job3"), name = "dependentJob", command = "CMD", disabled = false)
val job3 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:00:00.000Z/PT1D",
name = "job3", command = "fooo", epsilon = epsilon, retries = 0)
val horizon = Minutes.minutes(5).toPeriod
val mockTaskManager = mock[TaskManager]
val jobGraph = new JobGraph()
val mockPersistenceStore = mock[PersistenceStore]
val mockJobsObserver = mockFullObserver
val scheduler = mockScheduler(horizon, mockTaskManager, jobGraph, mockPersistenceStore, mockJobsObserver)
val date = DateTime.parse("2011-01-01T00:05:01.000Z")
val edgeInvocationCount = jobGraph.edgeInvocationCount
scheduler.leader.set(true)
scheduler.registerJob(job1, persist = true, date)
scheduler.registerJob(job3, persist = true, date)
scheduler.registerJob(dependentJob, persist = true, date)
scheduler.run(() => {
date
})
val failedDate = date.plusMinutes(1)
val passingDate = date.plusMinutes(1)
scheduler.handleFailedTask(TaskUtils.getTaskStatus(job1, failedDate, 0))
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job3, passingDate, 0))
val failedJob = jobGraph.lookupVertex("job1").get
failedJob.errorCount must_== 1
failedJob.successCount must_== 0
failedJob.errorsSinceLastSuccess must_== 1
scheduler.markJobSuccessAndFireOffDependencies("job1")
val jobMarkedSuccess = jobGraph.lookupVertex("job1").get
jobMarkedSuccess.errorCount must_== 1
jobMarkedSuccess.successCount must_== 1
jobMarkedSuccess.errorsSinceLastSuccess must_== 0
val lastSuccess = DateTime.parse(jobMarkedSuccess.lastSuccess)
there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(dependentJob, lastSuccess, 0, None),
highPriority = false)
scheduler.handleStartedTask(TaskUtils.getTaskStatus(dependentJob, lastSuccess, 0))
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(dependentJob, lastSuccess, 0))
edgeInvocationCount.get(jobGraph.dag.getEdge("job1", "dependentJob")) must_== Some(0L)
jobGraph.lookupVertex("dependentJob").get.successCount must_== 1
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, passingDate, 0))
scheduler.markJobSuccessAndFireOffDependencies("dependentJob")
jobGraph.lookupVertex("dependentJob").get.successCount must_== 2
edgeInvocationCount.get(jobGraph.dag.getEdge("job1", "dependentJob")) must_== Some(0L)
}
"Tests that a disabled job does not run and does not execute dependant children." in {
val epsilon = Minutes.minutes(20).toPeriod
val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:00:00.000Z/PT1M",
name = "job1", command = "fooo", epsilon = epsilon, disabled = true)
val job2 = new DependencyBasedJob(Set("job1"), name = "job2", command = "CMD", disabled = true)
val horizon = Minutes.minutes(5).toPeriod
val mockTaskManager = mock[TaskManager]
val graph = new JobGraph()
val mockPersistenceStore = mock[PersistenceStore]
val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore)
scheduler.leader.set(true)
scheduler.registerJob(job1, persist = true, DateTime.parse("2011-01-01T00:05:01.000Z"))
scheduler.registerJob(job2, persist = true, DateTime.parse("2011-01-01T00:05:01.000Z"))
scheduler.run(() => {
DateTime.parse("2012-01-01T00:05:01.000Z")
})
/*
scheduler.handleFinishedTask(TaskUtils.getTaskId(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0))
scheduler.handleFinishedTask(TaskUtils.getTaskId(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0))
scheduler.handleFailedTask(TaskUtils.getTaskId(job1, DateTime.parse("2012-01-03T00:00:01.000Z"), 0))
*/
graph.lookupVertex("job1").get.successCount must_== 0
graph.lookupVertex("job1").get.errorCount must_== 0
graph.lookupVertex("job2").get.successCount must_== 0
graph.lookupVertex("job2").get.errorCount must_== 0
}
"Tests that dependent jobs runs when they should" in {
val epsilon = Minutes.minutes(20).toPeriod
val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:00:00.000Z/PT1M",
name = "job1", command = "fooo", epsilon = epsilon, disabled = false)
val job2 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:00:00.000Z/PT1M",
name = "job2", command = "fooo", epsilon = epsilon, disabled = false)
val job3 = new DependencyBasedJob(Set("job1"), name = "job3", command = "CMD", disabled = false)
val job4 = new DependencyBasedJob(Set("job1", "job2"), name = "job4", command = "CMD", disabled = false)
val job5 = new DependencyBasedJob(Set("job1", "job2", "job3"), name = "job5", command = "CMD", disabled = false)
val horizon = Minutes.minutes(5).toPeriod
val mockTaskManager = mock[TaskManager]
val graph = new JobGraph()
val mockPersistenceStore = mock[PersistenceStore]
val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore)
scheduler.leader.set(true)
val date = DateTime.parse("2011-01-01T00:05:01.000Z")
scheduler.registerJob(job1, persist = true, date)
scheduler.registerJob(job2, persist = true, date)
scheduler.registerJob(job3, persist = true, date)
scheduler.registerJob(job4, persist = true, date)
scheduler.registerJob(job5, persist = true, date)
scheduler.run(() => {
date
})
val finishedDate = date.plus(1)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job2, date, 0), Some(finishedDate))
graph.lookupVertex("job1").get.successCount must_== 1
graph.lookupVertex("job1").get.errorCount must_== 0
graph.lookupVertex("job2").get.successCount must_== 1
graph.lookupVertex("job2").get.errorCount must_== 0
there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job3, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job3").get.successCount must_== 1
graph.lookupVertex("job3").get.errorCount must_== 0
there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job4, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job4, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job4").get.successCount must_== 1
graph.lookupVertex("job4").get.errorCount must_== 0
there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job5, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job5, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job5").get.successCount must_== 1
graph.lookupVertex("job5").get.errorCount must_== 0
}
"Tests that dependent jobs run even if their parents fail but have softError enabled" in {
val epsilon = Minutes.minutes(20).toPeriod
val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:01:00.000Z/PT1M",
name = "job1", command = "fooo", epsilon = epsilon, disabled = false)
val job2 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:01:00.000Z/PT1M",
name = "job2", command = "fooo", epsilon = epsilon, disabled = false, retries = 0, softError = true)
val job3 = new DependencyBasedJob(Set("job1", "job2"), name = "job3", command = "CMD", disabled = false)
val horizon = Minutes.minutes(5).toPeriod
val mockTaskManager = mock[TaskManager]
val graph = new JobGraph()
val mockPersistenceStore = mock[PersistenceStore]
val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore)
scheduler.leader.set(true)
val date = DateTime.now(DateTimeZone.UTC)
scheduler.registerJob(job1, persist = true, date)
scheduler.registerJob(job2, persist = true, date)
scheduler.registerJob(job3, persist = true, date)
scheduler.run(() => {
date
})
val finishedDate = date.plusMinutes(1)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
scheduler.handleFailedTask(TaskUtils.getTaskStatus(job2, date, 0))
graph.lookupVertex("job1").get.successCount must_== 1
graph.lookupVertex("job1").get.errorCount must_== 0
val vJob2 = graph.lookupVertex("job2").get
vJob2.successCount must_== 0
vJob2.errorCount must_== 1
there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, DateTime.parse(vJob2.lastError), 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job3, date, 0), Some(DateTime.parse(vJob2.lastError)))
graph.lookupVertex("job3").get.successCount must_== 1
graph.lookupVertex("job3").get.errorCount must_== 0
}
"Tests that dependent jobs don't run if their parents fail without softError enabled" in {
val epsilon = Minutes.minutes(20).toPeriod
val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:01:00.000Z/PT1M",
name = "job1", command = "fooo", epsilon = epsilon, disabled = false)
val job2 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:01:00.000Z/PT1M",
name = "job2", command = "fooo", epsilon = epsilon, disabled = false, retries = 0, softError = false)
val job3 = new DependencyBasedJob(Set("job1", "job2"), name = "job3", command = "CMD", disabled = false)
val horizon = Minutes.minutes(5).toPeriod
val mockTaskManager = mock[TaskManager]
val graph = new JobGraph()
val mockPersistenceStore = mock[PersistenceStore]
val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore)
scheduler.leader.set(true)
val date = DateTime.now(DateTimeZone.UTC)
scheduler.registerJob(job1, persist = true, date)
scheduler.registerJob(job2, persist = true, date)
scheduler.registerJob(job3, persist = true, date)
scheduler.run(() => {
date
})
val finishedDate = date.plusMinutes(1)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
scheduler.handleFailedTask(TaskUtils.getTaskStatus(job2, date, 0))
graph.lookupVertex("job1").get.successCount must_== 1
graph.lookupVertex("job1").get.errorCount must_== 0
val vJob2 = graph.lookupVertex("job2").get
vJob2.successCount must_== 0
vJob2.errorCount must_== 1
there was no(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, DateTime.parse(vJob2.lastError), 0), highPriority = false)
}
"Tests that scheduled jobs changed to dependent jobs remove their schedules" in {
val epsilon = Minutes.minutes(1).toPeriod
val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:05:00.000Z/PT10M",
name = "job1", command = "fooo", epsilon = epsilon, disabled = false)
val job2 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:05:00.000Z/PT10M",
name = "job2", command = "fooo", epsilon = epsilon, disabled = false)
val horizon = Minutes.minutes(5).toPeriod
val mockTaskManager = mock[TaskManager]
val graph = new JobGraph()
val mockPersistenceStore = mock[PersistenceStore]
val mockedScheduler = mock[JobScheduler]
val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore)
scheduler.leader.set(true)
val date = DateTime.parse("2012-01-01T00:00:00.000Z")
scheduler.registerJob(job1, persist = true, date)
scheduler.registerJob(job2, persist = true, date)
val dependentJob2 = new DependencyBasedJob(Set("job1"), name = "job2", command = "CMD", disabled = false)
val jobResource = new DependentJobResource(jobScheduler = mockedScheduler, jobGraph = graph)
jobResource.handleRequest(dependentJob2)
there was one(mockedScheduler).removeSchedule(job2)
there was one(mockedScheduler).updateJob(job2, dependentJob2)
}
"Tests that dependent jobs runs when they should after changing the jobgraph" in {
val epsilon = Minutes.minutes(20).toPeriod
val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:01:00.000Z/PT1M",
name = "job1", command = "fooo", epsilon = epsilon, disabled = false)
val job2 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:01:00.000Z/PT1M",
name = "job2", command = "fooo", epsilon = epsilon, disabled = false)
val job3 = new DependencyBasedJob(Set("job1"), name = "job3", command = "CMD", disabled = false)
val job4 = new DependencyBasedJob(Set("job1", "job2"), name = "job4", command = "CMD", disabled = false)
val job5_1 = new DependencyBasedJob(Set("job1", "job2"), name = "job5", command = "CMD", disabled = false)
val job5_2 = new DependencyBasedJob(Set("job1", "job2", "job3"), name = "job5", command = "CMD", disabled = false)
val horizon = Minutes.minutes(5).toPeriod
val mockTaskManager = mock[TaskManager]
val graph = new JobGraph()
val mockPersistenceStore = mock[PersistenceStore]
val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore)
scheduler.leader.set(true)
val date = DateTime.parse("2012-01-01T00:00:00.000Z")
scheduler.registerJob(job1, persist = true, date)
scheduler.registerJob(job2, persist = true, date)
scheduler.registerJob(job3, persist = true, date)
scheduler.registerJob(job4, persist = true, date)
scheduler.registerJob(job5_1, persist = true, date)
scheduler.run(() => {
date
})
val finishedDate = date.plusMinutes(1)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job2, date, 0), Some(finishedDate))
graph.lookupVertex("job1").get.successCount must_== 1
graph.lookupVertex("job1").get.errorCount must_== 0
graph.lookupVertex("job2").get.successCount must_== 1
graph.lookupVertex("job2").get.errorCount must_== 0
there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job3, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job3").get.successCount must_== 1
graph.lookupVertex("job3").get.errorCount must_== 0
there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job4, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job4, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job4").get.successCount must_== 1
graph.lookupVertex("job4").get.errorCount must_== 0
there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job5_1, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job5_1, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job5").get.successCount must_== 1
graph.lookupVertex("job5").get.errorCount must_== 0
val jobResource = new DependentJobResource(jobScheduler = scheduler, jobGraph = graph)
jobResource.handleRequest(job5_2)
scheduler.run(() => {
date
})
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job2, date, 0), Some(finishedDate))
graph.lookupVertex("job1").get.successCount must_== 2
graph.lookupVertex("job1").get.errorCount must_== 0
graph.lookupVertex("job2").get.successCount must_== 2
graph.lookupVertex("job2").get.errorCount must_== 0
there was two(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job3, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job3").get.successCount must_== 2
graph.lookupVertex("job3").get.errorCount must_== 0
there was two(mockTaskManager).enqueue(TaskUtils.getTaskId(job4, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job4, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job4").get.successCount must_== 2
graph.lookupVertex("job4").get.errorCount must_== 0
there was two(mockTaskManager).enqueue(TaskUtils.getTaskId(job5_2, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job5_2, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job5").get.successCount must_== 1
graph.lookupVertex("job5").get.errorCount must_== 0
}
"Tests that complex dependent jobs run when they should" in {
val epsilon = Minutes.minutes(20).toPeriod
val job1 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:00:00.000Z/PT1M",
name = "job1", command = "fooo", epsilon = epsilon, disabled = false)
val job2 = new ScheduleBasedJob(schedule = "R/2012-01-01T00:00:00.000Z/PT1M",
name = "job2", command = "fooo", epsilon = epsilon, disabled = false)
val job3 = new DependencyBasedJob(Set("job1"), name = "job3", command = "CMD", disabled = false)
val job4 = new DependencyBasedJob(Set("job1", "job2"), name = "job4", command = "CMD", disabled = false)
val job5 = new DependencyBasedJob(Set("job1", "job2", "job3"), name = "job5", command = "CMD", disabled = false)
val job6 = new DependencyBasedJob(Set("job4", "job5", "job1"), name = "job6", command = "CMD", disabled = false)
val horizon = Minutes.minutes(5).toPeriod
val mockTaskManager = mock[TaskManager]
val graph = new JobGraph()
val mockPersistenceStore = mock[PersistenceStore]
val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore)
scheduler.leader.set(true)
val date = DateTime.parse("2011-01-01T00:05:01.000Z")
scheduler.registerJob(job1, persist = true, date)
scheduler.registerJob(job2, persist = true, date)
scheduler.registerJob(job3, persist = true, date)
scheduler.registerJob(job4, persist = true, date)
scheduler.registerJob(job5, persist = true, date)
scheduler.registerJob(job6, persist = true, date)
scheduler.run(() => {
date
})
val finishedDate = date.plus(1)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job2, date, 0), Some(finishedDate))
graph.lookupVertex("job1").get.successCount must_== 1
graph.lookupVertex("job1").get.errorCount must_== 0
graph.lookupVertex("job2").get.successCount must_== 1
graph.lookupVertex("job2").get.errorCount must_== 0
there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job3, finishedDate, 0), Some(finishedDate))
scheduler.run(() => {
date
})
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job2, date, 0), Some(finishedDate))
graph.lookupVertex("job1").get.successCount must_== 2
graph.lookupVertex("job1").get.errorCount must_== 0
graph.lookupVertex("job2").get.successCount must_== 2
graph.lookupVertex("job2").get.errorCount must_== 0
graph.lookupVertex("job3").get.successCount must_== 1
graph.lookupVertex("job3").get.errorCount must_== 0
there was two(mockTaskManager).enqueue(TaskUtils.getTaskId(job4, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job4, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job4").get.successCount must_== 1
graph.lookupVertex("job4").get.errorCount must_== 0
there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job5, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job5, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job5").get.successCount must_== 1
graph.lookupVertex("job5").get.errorCount must_== 0
there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job6, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job6, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job6").get.successCount must_== 1
graph.lookupVertex("job6").get.errorCount must_== 0
}
"Tests that dependent jobs run when parents are updated" in {
val epsilon = Minutes.minutes(20).toPeriod
val date = DateTime.now(DateTimeZone.UTC)
val fmt = ISODateTimeFormat.dateTime()
val job1 = new ScheduleBasedJob(schedule = s"R/${fmt.print(date)}/PT1M",
name = "job1", command = "fooo", epsilon = epsilon, disabled = false)
val job2 = new ScheduleBasedJob(schedule = s"R/${fmt.print(date)}/PT1M",
name = "job2", command = "fooo", epsilon = epsilon, disabled = false)
val job3 = new DependencyBasedJob(Set("job1"), name = "job3", command = "CMD", disabled = false)
val job4 = new DependencyBasedJob(Set("job1", "job2"), name = "job4", command = "CMD", disabled = false)
val job5_1 = new DependencyBasedJob(Set("job1", "job2"), name = "job5", command = "CMD", disabled = false)
val job5_2 = new DependencyBasedJob(Set("job1", "job2", "job3"), name = "job5", command = "CMD", disabled = false)
val horizon = Minutes.minutes(5).toPeriod
val mockTaskManager = mock[TaskManager]
val graph = new JobGraph()
val mockPersistenceStore = mock[PersistenceStore]
val scheduler = mockScheduler(horizon, mockTaskManager, graph, mockPersistenceStore)
scheduler.leader.set(true)
scheduler.registerJob(job1, persist = true, date)
scheduler.registerJob(job2, persist = true, date)
scheduler.registerJob(job3, persist = true, date)
scheduler.registerJob(job4, persist = true, date)
scheduler.registerJob(job5_1, persist = true, date)
scheduler.run(() => {
date
})
val finishedDate = date.plus(1)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job2, date, 0), Some(finishedDate))
graph.lookupVertex("job1").get.successCount must_== 1
graph.lookupVertex("job1").get.errorCount must_== 0
graph.lookupVertex("job2").get.successCount must_== 1
graph.lookupVertex("job2").get.errorCount must_== 0
there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job3, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job3").get.successCount must_== 1
graph.lookupVertex("job3").get.errorCount must_== 0
there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job4, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job4, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job4").get.successCount must_== 1
graph.lookupVertex("job4").get.errorCount must_== 0
there was one(mockTaskManager).enqueue(TaskUtils.getTaskId(job5_1, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job5_1, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job5").get.successCount must_== 1
graph.lookupVertex("job5").get.errorCount must_== 0
val jobResource = new Iso8601JobResource(jobScheduler = scheduler, jobGraph = graph)
jobResource.handleRequest(job1)
jobResource.handleRequest(job2)
scheduler.run(() => {
date
})
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job1, date, 0), Some(finishedDate))
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job2, date, 0), Some(finishedDate))
graph.lookupVertex("job1").get.successCount must_== 1
graph.lookupVertex("job1").get.errorCount must_== 0
graph.lookupVertex("job2").get.successCount must_== 1
graph.lookupVertex("job2").get.errorCount must_== 0
there was two(mockTaskManager).enqueue(TaskUtils.getTaskId(job3, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job3, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job3").get.successCount must_== 2
graph.lookupVertex("job3").get.errorCount must_== 0
there was two(mockTaskManager).enqueue(TaskUtils.getTaskId(job4, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job4, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job4").get.successCount must_== 2
graph.lookupVertex("job4").get.errorCount must_== 0
there was two(mockTaskManager).enqueue(TaskUtils.getTaskId(job5_2, finishedDate, 0), highPriority = false)
scheduler.handleFinishedTask(TaskUtils.getTaskStatus(job5_2, finishedDate, 0), Some(finishedDate))
graph.lookupVertex("job5").get.successCount must_== 2
graph.lookupVertex("job5").get.errorCount must_== 0
}
}
}
|
mikkokupsu/chronos
|
src/test/scala/org/apache/mesos/chronos/scheduler/jobs/JobSchedulerIntegrationTest.scala
|
Scala
|
apache-2.0
| 29,602 |
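The tests above lean on ISO 8601 repeating-interval schedules such as "R5/2012-01-01T00:00:00.000Z/P1D". A small, self-contained sketch of how such a string decomposes using plain joda-time; the splitting logic below is illustrative only, not Chronos's own parser.

import org.joda.time.DateTime
import org.joda.time.format.ISOPeriodFormat

object ScheduleSketch extends App {
  val schedule = "R5/2012-01-01T00:00:00.000Z/P1D"
  val Array(reps, start, period) = schedule.split("/")
  // "R" with no count means repeat forever; model that as -1 here
  val repetitions = if (reps == "R") -1L else reps.drop(1).toLong
  val startTime   = DateTime.parse(start)
  val interval    = ISOPeriodFormat.standard().parsePeriod(period)
  println(s"repeat $repetitions times from $startTime every $interval")
  // After one iteration the scheduler persists the remainder, e.g.
  // "R4/2012-01-02T00:00:00.000Z/P1D", as the first test above asserts.
}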
package org.jboss.shrinkwrap.resolver.api
import org.jboss.shrinkwrap.resolver.api.maven.ConfigurableMavenResolverSystem
object MavenResolver {
def apply() = ResolverSystemFactory
.createFromUserView(
classOf[ConfigurableMavenResolverSystem],
//Class which the resolver breaks on
Class.forName("org.jboss.shrinkwrap.resolver.spi.loader.SpiServiceLoader").getClassLoader
)
}
|
ToIthaca/sbt-robovm
|
src/main/scala/org/jboss/shrinkwrap/resolver/api/MavenResolver.scala
|
Scala
|
bsd-2-clause
| 404 |
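A hedged usage sketch for the factory above, assuming the usual resolve / withTransitivity / asFile chain of the ShrinkWrap resolver API; the artifact coordinates are only an example.

import java.io.File
import org.jboss.shrinkwrap.resolver.api.MavenResolver

object ResolveExample {
  // Resolve an artifact plus its transitive dependencies to local files
  def scalaLibrary(): Array[File] =
    MavenResolver()
      .resolve("org.scala-lang:scala-library:2.11.8")
      .withTransitivity()
      .asFile()
}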
/**
* Copyright (C) 2015-2016 DANS - Data Archiving and Networked Services ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.stage.fileitem
import java.io.File
import nl.knaw.dans.easy.stage.lib.Util.loadXML
import org.apache.commons.io.FileUtils.readFileToString
import org.json4s.native._
import scala.reflect.io.Path
/**
 * Gets filenames of an SDO set or essential contents of SDO files
* with as little knowledge as possible yet insensitive to
* varying whitespace, order of elements and namespace prefixes.
*/
object SdoFiles {
def getRelativeFiles(dir: Path): Set[String] =
dir.walk.map(_.toString.replaceAll(dir.toString() + "/", "")).toSet
/** Gets (label,text) of elements in the root of the document */
def readFlatXml(file: String): Set[(String, String)] =
(loadXML(new File(file)) \\ "_")
.map(n => n.label -> n.text)
.toSet
  /** Gets (label,text) of dc elements and (name,value)-attributes of object properties;
    * labels are prefixed with "dc_" and names are prefixed with "prop_". */
def readDatastreamFoxml(file: String): Set[(String, String)] = {
val xml = loadXML(new File(file))
(xml \\\\ "dc" \\ "_")
.map(node => "dc_" + node.label -> node.text)
.toSet ++
(xml \\\\ "property")
.map(node =>
"prop_" + (node \\ "@NAME").toString().replaceAll(".*#", "")
-> (node \\ "@VALUE").toString()
).toSet
}
type S2S = Map[String, String]
type S2A = Map[String, Any]
def readCfgJson(file: String): (Option[String], Option[Set[S2S]], Option[Set[S2S]]) = {
val content = readFileToString(new File(file),"UTF-8")
val map = parseJson(content).values.asInstanceOf[S2A]
val namespace = map.get("namespace").map(_.asInstanceOf[String])
val datastreams = map.get("datastreams").map(_.asInstanceOf[List[S2S]].toSet[S2S])
val relations = map.get("relations").map(_.asInstanceOf[List[S2S]].toSet[S2S])
(namespace, datastreams, relations)
}
}
|
PaulBoon/easy-stage-dataset
|
src/test/scala/nl/knaw/dans/easy/stage/fileitem/SdoFiles.scala
|
Scala
|
apache-2.0
| 2,534 |
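A minimal sketch of how the helpers above could be called from a test; the SDO directory path is hypothetical.

import java.io.File
import scala.reflect.io.Path
import nl.knaw.dans.easy.stage.fileitem.SdoFiles

object SdoFilesExample extends App {
  val sdoDir: Path = Path(new File("target/sdoSet/file1"))  // hypothetical SDO directory
  println(SdoFiles.getRelativeFiles(sdoDir))                // e.g. Set("cfg.json", "fo.xml", ...)
  val (namespace, datastreams, relations) =
    SdoFiles.readCfgJson(sdoDir.toString() + "/cfg.json")
  println(namespace)
}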
package im.mange.backdoor.server.kryo
import com.twitter.chill.ScalaKryoInstantiator
import im.mange.backdoor.BackdoorMessage
import im.mange.backdoor.server.kryo.serialiser.{DateTimeSerializer, LocalDateSerializer}
import org.joda.time.{DateTime, LocalDate}
object Cryopreservation {
import java.util.Base64
import net.liftweb.json.Serialization._
import net.liftweb.json._
private val kryo = RegisteringKryoPool.withByteArrayOutputStream(10,
new ScalaKryoInstantiator(),
Seq(
ClassAndSerializer(classOf[LocalDate], new LocalDateSerializer()),
ClassAndSerializer(classOf[DateTime], new DateTimeSerializer())
)
)
def freeze(thing: Any): String = {
val bytes = kryo.toBytesWithClass(thing)
val base64Encoded = Base64.getEncoder.encodeToString(bytes)
val message = BackdoorMessage(base64Encoded)
implicit val formats = Serialization.formats(NoTypeHints)
pretty(render(JsonParser.parse(write(message))))
}
def thaw(json: String): Any = {
implicit val formats = Serialization.formats(NoTypeHints)
val reloaded = parse(json).extract[BackdoorMessage]
kryo.fromBytes(Base64.getDecoder.decode(reloaded.data))
}
}
|
alltonp/backdoor-liftweb
|
src/main/scala/im/mange/backdoor/server/kryo/Cryopreservation.scala
|
Scala
|
apache-2.0
| 1,186 |
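A round-trip sketch for the freeze/thaw pair above; the payload case class is made up for illustration and is not part of Backdoor.

import org.joda.time.LocalDate
import im.mange.backdoor.server.kryo.Cryopreservation

case class ExamplePayload(name: String, when: LocalDate)

object CryoExample extends App {
  val original = ExamplePayload("demo", new LocalDate(2016, 1, 1))
  val json     = Cryopreservation.freeze(original)  // kryo bytes, base64-wrapped in a BackdoorMessage
  val thawed   = Cryopreservation.thaw(json)
  assert(thawed == original)
}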
package com.github.diegopacheco.scala3.playground.features
@main def ImplicitConversionsApp():Unit = {
given Conversion[String, Int] with
def apply(s:String):Int = Integer.parseInt(s)
// a method that expects an Int
def plus1(i:Int) = i + 1
// pass it a String that converts to an Int
println(plus1("1"))
}
|
diegopacheco/scala-playground
|
scala-3-playground/scala-3-playground/src/main/scala/com/github/diegopacheco/scala3/playground/features/ImplicitConversionsApp.scala
|
Scala
|
unlicense
| 330 |
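The same conversion can also be fetched and applied explicitly via summon, which makes the call site self-documenting; a small companion sketch in the same style as the file above.

@main def ExplicitConversionApp(): Unit = {
  given Conversion[String, Int] with
    def apply(s: String): Int = Integer.parseInt(s)

  // fetch the given instance and apply it by hand
  val conv = summon[Conversion[String, Int]]
  println(conv("41") + 1)   // 42
}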
package com.twitter.inject.requestscope
import com.twitter.finagle.{Service, SimpleFilter}
import com.twitter.util.Future
import javax.inject.Inject
class FinagleRequestScopeFilter[Req, Rep] @Inject()(
finagleScope: FinagleRequestScope)
extends SimpleFilter[Req, Rep] {
def apply(request: Req, service: Service[Req, Rep]): Future[Rep] = {
finagleScope.enter()
service.apply(request) ensure {
finagleScope.exit()
}
}
}
|
deanh/finatra
|
inject/inject-request-scope/src/main/scala/com/twitter/inject/requestscope/FinagleRequestScopeFilter.scala
|
Scala
|
apache-2.0
| 447 |
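A composition sketch showing where the filter above sits in a Finagle stack; the echo service and the String request/response types are stand-ins, not part of Finatra.

import com.twitter.finagle.Service
import com.twitter.util.Future
import com.twitter.inject.requestscope.{FinagleRequestScope, FinagleRequestScopeFilter}

object FilterWiring {
  // enter() runs before each request, exit() after the response future resolves
  def wire(scope: FinagleRequestScope): Service[String, String] = {
    val filter = new FinagleRequestScopeFilter[String, String](scope)
    val echo   = Service.mk[String, String](s => Future.value(s))
    filter.andThen(echo)
  }
}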
package fr.univ.nantes.roomanager.dao.salle
import fr.univ.nantes.roomanager.bean.SalleBean
/**
* @author Pierre Gaultier
* @author Alexis Giraudet
*/
class SalleDaoImpl extends SalleDao {
private var increment: Int = 0
private var salles: Set[SalleBean] = Set()
override def get(id: Int): SalleBean = {
val salle: SalleBean = salles.find((salle: SalleBean) => salle.getId() == id).get
new Salle(salle.getId(), salle)
}
override def update(salle: SalleBean): Unit = {
if (salles.contains(salle)) {
var newSalle: Salle = new Salle(salle.getId(), salle)
if (!salles.exists((other: SalleBean) => newSalle.uniqueConstraint(other)))
salles += newSalle
else
throw new Exception()
}
else
throw new Exception()
}
override def delete(salle: SalleBean): Unit = salles -= salle
override def find(predicate: (SalleBean) => Boolean): Traversable[SalleBean] = {
var retSalles: Set[SalleBean] = Set()
salles.filter(predicate).foreach((salle: SalleBean) => retSalles += new Salle(salle.getId(), salle))
retSalles
}
override def create(salle: SalleBean): SalleBean = {
var newSalle: Salle = new Salle(increment, salle)
if (salles.exists((other: SalleBean) => newSalle.uniqueConstraint(other)))
throw new Exception()
salles += newSalle
increment += 1
new Salle(newSalle.getId(), newSalle)
}
override def getAll(): Traversable[SalleBean] = {
var retSalles: Set[SalleBean] = Set()
salles.foreach((salle: SalleBean) => retSalles += new Salle(salle.getId(), salle))
retSalles
}
}
|
Giraudux/roomanager
|
src/main/scala/fr/univ/nantes/roomanager/dao/salle/SalleDaoImpl.scala
|
Scala
|
mit
| 1,605 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.index.encoders
import java.util.Date
import com.vividsolutions.jts.geom.{Geometry, Point}
import org.locationtech.geomesa.filter.function.{BasicValues, Convert2ViewerFunction}
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
/**
* Encodes features in 'bin' format
*/
class BinEncoder(sft: SimpleFeatureType, trackIdField: String) {
private val trackIdIndex = sft.indexOf(trackIdField)
private val geomIndex = sft.getGeomIndex
private val dtgIndex = sft.getDtgIndex.getOrElse(-1)
private val getLatLon = if (sft.isPoints) getLatLonPoints _ else getLatLonNonPoints _
private val getDtg: (SimpleFeature) => Long =
if (dtgIndex == -1) (sf: SimpleFeature) => 0L else getDtgWithIndex
/**
* Encode a feature to bytes
*/
def encode(sf: SimpleFeature): Array[Byte] = {
val (lat, lon) = getLatLon(sf)
val dtg = getDtg(sf)
val trackIdVal = sf.getAttribute(trackIdIndex)
val trackId = if (trackIdVal == null) { 0 } else { trackIdVal.hashCode }
Convert2ViewerFunction.encodeToByteArray(BasicValues(lat, lon, dtg, trackId))
}
private def getLatLonPoints(sf: SimpleFeature): (Float, Float) = {
val geom = sf.getAttribute(geomIndex).asInstanceOf[Point]
(geom.getY.toFloat, geom.getX.toFloat)
}
private def getLatLonNonPoints(sf: SimpleFeature): (Float, Float) = {
import org.locationtech.geomesa.utils.geotools.Conversions.RichGeometry
val geom = sf.getAttribute(geomIndex).asInstanceOf[Geometry].safeCentroid()
(geom.getY.toFloat, geom.getX.toFloat)
}
private def getDtgWithIndex(sf: SimpleFeature): Long = {
val dtg = sf.getAttribute(dtgIndex).asInstanceOf[Date]
if (dtg == null) 0L else dtg.getTime
}
}
object BinEncoder {
def apply(sft: SimpleFeatureType): Option[BinEncoder] = sft.getBinTrackId.map(new BinEncoder(sft, _))
}
|
tkunicki/geomesa
|
geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/index/encoders/BinEncoder.scala
|
Scala
|
apache-2.0
| 2,429 |
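A hedged sketch of driving the encoder above: build a point feature type whose track field is named explicitly (so the sketch constructs BinEncoder directly instead of relying on getBinTrackId), populate one feature with GeoTools' SimpleFeatureBuilder, and encode it. The SimpleFeatureTypes and WKTUtils helpers are the usual GeoMesa utilities; attribute names and the WKT point are examples only.

import org.geotools.feature.simple.SimpleFeatureBuilder
import org.locationtech.geomesa.accumulo.index.encoders.BinEncoder
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.text.WKTUtils

object BinEncoderExample {
  val sft = SimpleFeatureTypes.createType("tracks", "track:String,dtg:Date,*geom:Point:srid=4326")
  val encoder = new BinEncoder(sft, "track")

  def encodeOne(): Array[Byte] = {
    val builder = new SimpleFeatureBuilder(sft)
    builder.add("alpha")                        // track id
    builder.add(new java.util.Date)             // dtg
    builder.add(WKTUtils.read("POINT(10 50)"))  // geometry
    encoder.encode(builder.buildFeature("1"))   // compact lat/lon/dtg/trackId record
  }
}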
package com.truecar.mleap.demo.server
import java.io.File
import ml.bundle.fs.DirectoryBundle
import com.truecar.mleap.runtime.transformer.Transformer
import com.truecar.mleap.serialization.ml.v1.MlJsonSerializer
/**
* Created by hwilkins on 1/20/16.
*/
object Boot extends App {
val transformerPath = args(0)
val bundleReader = DirectoryBundle(new File(transformerPath))
val serializer = MlJsonSerializer
val transformer = serializer.deserializeWithClass(bundleReader).model.asInstanceOf[Transformer]
val port = if(args.length == 2) {
args(1).toInt
} else {
8080
}
MleapServer(transformer, port).start()
}
|
TrueCar/mleap-demo
|
server/src/main/scala/com/truecar/mleap/demo/server/Boot.scala
|
Scala
|
mit
| 640 |
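A launch sketch for the Boot entry point above; the bundle path and port are placeholders, showing only how the two command-line arguments map.

object LaunchExample {
  // Equivalent to running Boot with args: bundle directory first, optional port second
  def main(args: Array[String]): Unit =
    com.truecar.mleap.demo.server.Boot.main(Array("/models/my-bundle", "8080"))
}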
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.consumer
import java.net._
import java.nio.channels._
import kafka.api._
import kafka.message._
import kafka.network._
import kafka.utils._
/**
* A consumer of kafka messages
*/
@threadsafe
class SimpleConsumer(val host: String,
val port: Int,
val soTimeout: Int,
val bufferSize: Int) extends Logging {
private var channel : SocketChannel = null
private val lock = new Object()
private def connect(): SocketChannel = {
close
val address = new InetSocketAddress(host, port)
val channel = SocketChannel.open
debug("Connected to " + address + " for fetching.")
channel.configureBlocking(true)
channel.socket.setReceiveBufferSize(bufferSize)
channel.socket.setSoTimeout(soTimeout)
channel.socket.setKeepAlive(true)
channel.connect(address)
trace("requested receive buffer size=" + bufferSize + " actual receive buffer size= " + channel.socket.getReceiveBufferSize)
trace("soTimeout=" + soTimeout + " actual soTimeout= " + channel.socket.getSoTimeout)
channel
}
private def close(channel: SocketChannel) = {
debug("Disconnecting from " + channel.socket.getRemoteSocketAddress())
Utils.swallow(logger.warn, channel.close())
Utils.swallow(logger.warn, channel.socket.close())
}
def close() {
lock synchronized {
if (channel != null)
close(channel)
channel = null
}
}
/**
* Fetch a set of messages from a topic.
*
* @param request specifies the topic name, topic partition, starting byte offset, maximum bytes to be fetched.
* @return a set of fetched messages
*/
def fetch(request: FetchRequest): ByteBufferMessageSet = {
lock synchronized {
val startTime = SystemTime.nanoseconds
getOrMakeConnection()
var response: Tuple2[Receive,Int] = null
try {
sendRequest(request)
response = getResponse
} catch {
case e : java.io.IOException =>
info("Reconnect in fetch request due to socket error: ", e)
// retry once
try {
channel = connect
sendRequest(request)
response = getResponse
          } catch {
case ioe: java.io.IOException => channel = null; throw ioe;
}
case e => throw e
}
val endTime = SystemTime.nanoseconds
SimpleConsumerStats.recordFetchRequest(endTime - startTime)
SimpleConsumerStats.recordConsumptionThroughput(response._1.buffer.limit)
new ByteBufferMessageSet(response._1.buffer, request.offset, response._2)
}
}
/**
* Combine multiple fetch requests in one call.
*
* @param fetches a sequence of fetch requests.
* @return a sequence of fetch responses
*/
def multifetch(fetches: FetchRequest*): MultiFetchResponse = {
lock synchronized {
val startTime = SystemTime.nanoseconds
getOrMakeConnection()
var response: Tuple2[Receive,Int] = null
try {
sendRequest(new MultiFetchRequest(fetches.toArray))
response = getResponse
} catch {
case e : java.io.IOException =>
info("Reconnect in multifetch due to socket error: ", e)
// retry once
try {
channel = connect
sendRequest(new MultiFetchRequest(fetches.toArray))
response = getResponse
          } catch {
case ioe: java.io.IOException => channel = null; throw ioe;
}
case e => throw e
}
val endTime = SystemTime.nanoseconds
SimpleConsumerStats.recordFetchRequest(endTime - startTime)
SimpleConsumerStats.recordConsumptionThroughput(response._1.buffer.limit)
// error code will be set on individual messageset inside MultiFetchResponse
new MultiFetchResponse(response._1.buffer, fetches.length, fetches.toArray.map(f => f.offset))
}
}
/**
* Get a list of valid offsets (up to maxSize) before the given time.
* The result is a list of offsets, in descending order.
*
   * @param time time in milliseconds (-1: from the latest offset available, -2: from the smallest offset available)
* @return an array of offsets
*/
def getOffsetsBefore(topic: String, partition: Int, time: Long, maxNumOffsets: Int): Array[Long] = {
lock synchronized {
getOrMakeConnection()
var response: Tuple2[Receive,Int] = null
try {
sendRequest(new OffsetRequest(topic, partition, time, maxNumOffsets))
response = getResponse
} catch {
case e : java.io.IOException =>
info("Reconnect in get offetset request due to socket error: ", e)
// retry once
try {
channel = connect
sendRequest(new OffsetRequest(topic, partition, time, maxNumOffsets))
response = getResponse
          } catch {
case ioe: java.io.IOException => channel = null; throw ioe;
}
}
OffsetRequest.deserializeOffsetArray(response._1.buffer)
}
}
private def sendRequest(request: Request) = {
val send = new BoundedByteBufferSend(request)
send.writeCompletely(channel)
}
private def getResponse(): Tuple2[Receive,Int] = {
val response = new BoundedByteBufferReceive()
response.readCompletely(channel)
// this has the side effect of setting the initial position of buffer correctly
val errorCode: Int = response.buffer.getShort
(response, errorCode)
}
private def getOrMakeConnection() {
if(channel == null) {
channel = connect()
}
}
}
trait SimpleConsumerStatsMBean {
def getFetchRequestsPerSecond: Double
def getAvgFetchRequestMs: Double
def getMaxFetchRequestMs: Double
def getNumFetchRequests: Long
def getConsumerThroughput: Double
}
@threadsafe
class SimpleConsumerStats extends SimpleConsumerStatsMBean {
private val fetchRequestStats = new SnapshotStats
def recordFetchRequest(requestNs: Long) = fetchRequestStats.recordRequestMetric(requestNs)
def recordConsumptionThroughput(data: Long) = fetchRequestStats.recordThroughputMetric(data)
def getFetchRequestsPerSecond: Double = fetchRequestStats.getRequestsPerSecond
def getAvgFetchRequestMs: Double = fetchRequestStats.getAvgMetric / (1000.0 * 1000.0)
def getMaxFetchRequestMs: Double = fetchRequestStats.getMaxMetric / (1000.0 * 1000.0)
def getNumFetchRequests: Long = fetchRequestStats.getNumRequests
def getConsumerThroughput: Double = fetchRequestStats.getThroughput
}
object SimpleConsumerStats extends Logging {
private val simpleConsumerstatsMBeanName = "kafka:type=kafka.SimpleConsumerStats"
private val stats = new SimpleConsumerStats
Utils.registerMBean(stats, simpleConsumerstatsMBeanName)
def recordFetchRequest(requestMs: Long) = stats.recordFetchRequest(requestMs)
def recordConsumptionThroughput(data: Long) = stats.recordConsumptionThroughput(data)
}
|
tnachen/kafka
|
core/src/main/scala/kafka/consumer/SimpleConsumer.scala
|
Scala
|
apache-2.0
| 7,744 |
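A consumption sketch against this 0.7-era API; the broker address, topic, offsets and buffer sizes are placeholders.

import kafka.api.FetchRequest
import kafka.consumer.SimpleConsumer

object FetchExample extends App {
  val consumer = new SimpleConsumer("localhost", 9092, 10000, 64 * 1024)
  try {
    // fetch up to 64 KB of messages from partition 0 starting at offset 0
    val messages = consumer.fetch(new FetchRequest("my-topic", 0, 0L, 64 * 1024))
    for (messageAndOffset <- messages)
      println("next offset: " + messageAndOffset.offset)
  } finally {
    consumer.close()
  }
}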
/** Test matrix multiplication with specialization.
*/
@deprecated("Suppress warnings", since="2.11")
object Test {
import scala.reflect.ClassManifest
class Matrix[@specialized A: ClassManifest](val rows: Int, val cols: Int) {
private val arr: Array[Array[A]] = Array.ofDim[A](rows, cols)
def apply(i: Int, j: Int): A = {
if (i < 0 || i >= rows || j < 0 || j >= cols)
throw new NoSuchElementException("Indexes out of bounds: " + (i, j))
arr(i)(j)
}
def update(i: Int, j: Int, e: A): Unit = {
arr(i)(j) = e
}
def rowsIterator: Iterator[Array[A]] = new Iterator[Array[A]] {
var idx = 0;
def hasNext = idx < rows
def next() = {
idx += 1
arr(idx - 1)
}
}
}
def main(args: Array[String]): Unit = {
val m = randomMatrix(200, 100)
val n = randomMatrix(100, 200)
val p = mult(m, n)
println(p(0, 0))
println("Boxed doubles: " + runtime.BoxesRunTime.doubleBoxCount)
// println("Boxed integers: " + runtime.BoxesRunTime.integerBoxCount)
}
def randomMatrix(n: Int, m: Int) = {
val r = new util.Random(10)
val x = new Matrix[Double](n, m)
for (i <- 0 until n; j <- 0 until m)
x(i, j) = (r.nextInt() % 1000).toDouble
x
}
  def printMatrix(m: Matrix[Double]): Unit = {
for (i <- 0 until m.rows) {
for (j <- 0 until m.cols)
print("%5.3f ".format(m(i, j)))
println()
}
}
def multManifest[@specialized(Int) T](m: Matrix[T], n: Matrix[T])(implicit cm: ClassManifest[T], num: Numeric[T]): Unit = {
val p = new Matrix[T](m.rows, n.cols)
import num._
for (i <- 0 until m.rows)
for (j <- 0 until n.cols) {
var sum = num.zero
for (k <- 0 until n.rows)
sum += m(i, k) * n(k, j)
p(i, j) = sum
}
}
def mult(m: Matrix[Double], n: Matrix[Double]) = {
val p = new Matrix[Double](m.rows, n.cols)
for (i <- 0 until m.rows)
for (j <- 0 until n.cols) {
var sum = 0.0
for (k <- 0 until n.rows)
sum += m(i, k) * n(k, j)
p(i, j) = sum
}
p
}
}
|
lrytz/scala
|
test/files/specialized/spec-matrix-old.scala
|
Scala
|
apache-2.0
| 2,096 |